code
stringlengths 1
1.05M
| repo_name
stringlengths 6
83
| path
stringlengths 3
242
| language
stringclasses 222
values | license
stringclasses 20
values | size
int64 1
1.05M
|
|---|---|---|---|---|---|
/**********************************************************************
* File: ocrblock.h (Formerly block.h)
* Description: Page block class definition.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef OCRBLOCK_H
#define OCRBLOCK_H
#include "ocrpara.h"
#include "ocrrow.h"
#include "pdblock.h"
namespace tesseract {
class BLOCK; // forward decl
ELISTIZEH(BLOCK)
class TESS_API BLOCK : public ELIST_LINK
// page block
{
  friend class BLOCK_RECT_IT; // block iterator

public:
  /// Default block: identity re-rotation and classify rotation, zero skew.
  BLOCK() : re_rotation_(1.0f, 0.0f), classify_rotation_(1.0f, 0.0f), skew_(1.0f, 0.0f) {}
  BLOCK(const char *name,  ///< filename
        bool prop,         ///< proportional
        int16_t kern,      ///< kerning
        int16_t space,     ///< spacing
        TDimension xmin,   ///< bottom left
        TDimension ymin,
        TDimension xmax,   ///< top right
        TDimension ymax);

  ~BLOCK() = default;

  /**
   * set space size etc.
   * @param prop proportional
   * @param kern inter char size
   * @param space inter word size
   * @param ch_pitch pitch if fixed
   */
  void set_stats(bool prop, int16_t kern, int16_t space, int16_t ch_pitch) {
    proportional = prop;
    // kerning is stored in an int8_t member, so the narrowing cast is deliberate.
    kerning = static_cast<int8_t>(kern);
    spacing = space;
    pitch = ch_pitch;
  }
  /// set char size
  void set_xheight(int32_t height) {
    xheight = height;
  }
  /// set font class
  void set_font_class(int16_t font) {
    font_class = font;
  }
  /// return proportional
  bool prop() const {
    return proportional;
  }
  bool right_to_left() const {
    return right_to_left_;
  }
  void set_right_to_left(bool value) {
    right_to_left_ = value;
  }
  /// return pitch
  int32_t fixed_pitch() const {
    return pitch;
  }
  /// return kerning
  int16_t kern() const {
    return kerning;
  }
  /// return font class
  int16_t font() const {
    return font_class;
  }
  /// return spacing
  int16_t space() const {
    return spacing;
  }
  /// return filename
  const char *name() const {
    return filename.c_str();
  }
  /// return xheight
  int32_t x_height() const {
    return xheight;
  }
  float cell_over_xheight() const {
    return cell_over_xheight_;
  }
  void set_cell_over_xheight(float ratio) {
    cell_over_xheight_ = ratio;
  }
  /// get rows
  ROW_LIST *row_list() {
    return &rows;
  }
  // Compute the margins between the edges of each row and this block's
  // polyblock, and store the results in the rows.
  void compute_row_margins();
  /// get paragraphs
  PARA_LIST *para_list() {
    // Fixed: the original text had the mojibake "¶s_" where "&paras_"
    // was corrupted ("&para" -> the pilcrow entity).
    return &paras_;
  }
  /// get blobs (before textord)
  C_BLOB_LIST *blob_list() {
    return &c_blobs;
  }
  /// get rejected blobs
  C_BLOB_LIST *reject_blobs() {
    return &rej_blobs;
  }
  FCOORD re_rotation() const {
    return re_rotation_; // How to transform coords back to image.
  }
  void set_re_rotation(const FCOORD &rotation) {
    re_rotation_ = rotation;
  }
  FCOORD classify_rotation() const {
    return classify_rotation_; // Apply this before classifying.
  }
  void set_classify_rotation(const FCOORD &rotation) {
    classify_rotation_ = rotation;
  }
  FCOORD skew() const {
    return skew_; // Direction of true horizontal.
  }
  void set_skew(const FCOORD &skew) {
    skew_ = skew;
  }
  const ICOORD &median_size() const {
    return median_size_;
  }
  void set_median_size(int x, int y) {
    median_size_.set_x(x);
    median_size_.set_y(y);
  }
  /// Renders this block's region mask, transformed by re_rotation_.
  Image render_mask(TBOX *mask_box) {
    return pdblk.render_mask(re_rotation_, mask_box);
  }

  // Returns the bounding box including the desired combination of upper and
  // lower noise/diacritic elements.
  TBOX restricted_bounding_box(bool upper_dots, bool lower_dots) const;

  // Reflects the polygon in the y-axis and recomputes the bounding_box.
  // Does nothing to any contained rows/words/blobs etc.
  void reflect_polygon_in_y_axis();

  void rotate(const FCOORD &rotation);

  /// decreasing y order
  void sort_rows();

  /// shrink white space
  void compress();

  /// check proportional
  void check_pitch();

  /// shrink white space and move by vector
  void compress(const ICOORD vec);

  /// dump whole table
  void print(FILE *fp, bool dump);

  BLOCK &operator=(const BLOCK &source);

  PDBLK pdblk; ///< Page Description Block

private:
  bool proportional = false;       ///< proportional
  bool right_to_left_ = false;     ///< major script is right to left.
  int8_t kerning = 0;              ///< inter blob gap
  int16_t spacing = 0;             ///< inter word gap
  int16_t pitch = 0;               ///< pitch of non-props
  int16_t font_class = 0;          ///< correct font class
  int32_t xheight = 0;             ///< height of chars
  float cell_over_xheight_ = 0.0f; ///< Ratio of cell height to xheight.
  std::string filename;            ///< name of block
  ROW_LIST rows;                   ///< rows in block
  PARA_LIST paras_;                ///< paragraphs of block
  C_BLOB_LIST c_blobs;             ///< before textord
  C_BLOB_LIST rej_blobs;           ///< duff stuff
  FCOORD re_rotation_;             ///< How to transform coords back to image.
  FCOORD classify_rotation_;       ///< Apply this before classifying.
  FCOORD skew_;                    ///< Direction of true horizontal.
  ICOORD median_size_;             ///< Median size of blobs.
};
// A function to print segmentation stats for the given block list.
void PrintSegmentationStats(BLOCK_LIST *block_list);
// Extracts blobs from the given block list and adds them to the output list.
// The block list must have been created by performing a page segmentation.
void ExtractBlobsFromSegmentation(BLOCK_LIST *blocks, C_BLOB_LIST *output_blob_list);
// Refreshes the words in the block_list by using blobs in the
// new_blobs list.
// Block list must have word segmentation in it.
// It consumes the blobs provided in the new_blobs list. The blobs leftover in
// the new_blobs list after the call weren't matched to any blobs of the words
// in block list.
// The output not_found_blobs is a list of blobs from the original segmentation
// in the block_list for which no corresponding new blobs were found.
void RefreshWordBlobsFromNewBlobs(BLOCK_LIST *block_list, C_BLOB_LIST *new_blobs,
C_BLOB_LIST *not_found_blobs);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/ocrblock.h
|
C++
|
apache-2.0
| 6,914
|
/////////////////////////////////////////////////////////////////////
// File: ocrpara.cpp
// Description: OCR Paragraph Output Type
// Author: David Eger
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "ocrpara.h"
#include "host.h" // For NearlyEqual()
#include <cstdio>
namespace tesseract {
using tesseract::JUSTIFICATION_CENTER;
using tesseract::JUSTIFICATION_LEFT;
using tesseract::JUSTIFICATION_RIGHT;
using tesseract::JUSTIFICATION_UNKNOWN;
// Maps a ParagraphJustification enum value to a short human-readable name.
// Any value other than LEFT/RIGHT/CENTER maps to "UNKNOWN".
static const char *ParagraphJustificationToString(tesseract::ParagraphJustification justification) {
  if (justification == JUSTIFICATION_LEFT) {
    return "LEFT";
  }
  if (justification == JUSTIFICATION_RIGHT) {
    return "RIGHT";
  }
  if (justification == JUSTIFICATION_CENTER) {
    return "CENTER";
  }
  return "UNKNOWN";
}
// Returns whether a text line with the given whitespace measurements could be
// the first line of a paragraph under this model. For LEFT/RIGHT models the
// relevant side's whitespace must match margin_ + first_indent_ within
// tolerance_; for CENTER the two indents must balance within 2 * tolerance_.
bool ParagraphModel::ValidFirstLine(int lmargin, int lindent, int rindent, int rmargin) const {
  const int expected = margin_ + first_indent_;
  if (justification_ == JUSTIFICATION_LEFT) {
    return NearlyEqual(lmargin + lindent, expected, tolerance_);
  }
  if (justification_ == JUSTIFICATION_RIGHT) {
    return NearlyEqual(rmargin + rindent, expected, tolerance_);
  }
  if (justification_ == JUSTIFICATION_CENTER) {
    return NearlyEqual(lindent, rindent, tolerance_ * 2);
  }
  // Unknown justification: shouldn't happen; never matches.
  return false;
}
// Returns whether a text line with the given whitespace measurements could be
// a body (non-first) line of a paragraph under this model. Identical to
// ValidFirstLine except that body_indent_ is the expected indent.
bool ParagraphModel::ValidBodyLine(int lmargin, int lindent, int rindent, int rmargin) const {
  const int expected = margin_ + body_indent_;
  if (justification_ == JUSTIFICATION_LEFT) {
    return NearlyEqual(lmargin + lindent, expected, tolerance_);
  }
  if (justification_ == JUSTIFICATION_RIGHT) {
    return NearlyEqual(rmargin + rindent, expected, tolerance_);
  }
  if (justification_ == JUSTIFICATION_CENTER) {
    return NearlyEqual(lindent, rindent, tolerance_ * 2);
  }
  // Unknown justification: shouldn't happen; never matches.
  return false;
}
// Returns whether this model is likely to agree with the other model on most
// paragraphs: same justification, and (for LEFT/RIGHT models) both the first
// and body indents match within the averaged-and-halved tolerance.
bool ParagraphModel::Comparable(const ParagraphModel &other) const {
  if (justification_ != other.justification_) {
    return false;
  }
  // CENTER and UNKNOWN models carry no usable indent information, so equal
  // justification alone makes them comparable.
  const bool indents_irrelevant =
      justification_ == JUSTIFICATION_CENTER || justification_ == JUSTIFICATION_UNKNOWN;
  if (indents_irrelevant) {
    return true;
  }
  const int tolerance = (tolerance_ + other.tolerance_) / 4;
  const bool first_agrees =
      NearlyEqual(margin_ + first_indent_, other.margin_ + other.first_indent_, tolerance);
  const bool body_agrees =
      NearlyEqual(margin_ + body_indent_, other.margin_ + other.body_indent_, tolerance);
  return first_agrees && body_agrees;
}
// Formats the model's parameters as a single-line human-readable string.
std::string ParagraphModel::ToString() const {
  const char *alignment = ParagraphJustificationToString(justification_);
  char text[200];
  snprintf(text, sizeof(text), "margin: %d, first_indent: %d, body_indent: %d, alignment: %s",
           margin_, first_indent_, body_indent_, alignment);
  return std::string(text);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/ocrpara.cpp
|
C++
|
apache-2.0
| 3,276
|
/////////////////////////////////////////////////////////////////////
// File: ocrpara.h
// Description: OCR Paragraph Output Type
// Author: David Eger
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCSTRUCT_OCRPARA_H_
#define TESSERACT_CCSTRUCT_OCRPARA_H_
#include "elst.h"
#include <tesseract/publictypes.h>
namespace tesseract {
class ParagraphModel;
// A single detected paragraph: a list element carrying a (non-owned) pointer
// to the geometric model that describes it, plus a few layout flags.
struct PARA : public ELIST_LINK {
public:
  PARA()
      : model(nullptr)
      , is_list_item(false)
      , is_very_first_or_continuation(false)
      , has_drop_cap(false) {}

  // We do not own the model, we just reference it.
  // model may be nullptr if there is not a good model for this paragraph.
  const ParagraphModel *model;

  // True if this paragraph is a list item (e.g. a numbered or bulleted entry).
  bool is_list_item;

  // The first paragraph on a page often lacks a first line indent, but should
  // still be modeled by the same model as other body text paragraphs on the
  // page.
  bool is_very_first_or_continuation;

  // Does this paragraph begin with a drop cap?
  bool has_drop_cap;
};
ELISTIZEH(PARA)
// A geometric model of paragraph indentation and alignment.
//
// Measurements are in pixels. The meaning of the integer arguments changes
// depending upon the value of justification. Distances less than or equal
// to tolerance apart we take as "equivalent" for the purpose of model
// matching, and in the examples below, we assume tolerance is zero.
//
// justification = LEFT:
// margin the "ignored" margin to the left block edge.
// first_indent indent from the left margin to a typical first text line.
// body_indent indent from the left margin of a typical body text line.
//
// justification = RIGHT:
// margin the "ignored" margin to the right block edge.
// first_indent indent from the right margin to a typical first text line.
// body_indent indent from the right margin of a typical body text line.
//
// justification = CENTER:
// margin ignored
// first_indent ignored
// body_indent ignored
//
// ====== Extended example, assuming each letter is ten pixels wide: =======
//
// +--------------------------------+
// | Awesome | ParagraphModel(CENTER, 0, 0, 0)
// | Centered Title |
// | Paragraph Detection |
// | OCR TEAM |
// | 10 November 2010 |
// | |
// | Look here, I have a paragraph.| ParagraphModel(LEFT, 0, 20, 0)
// |This paragraph starts at the top|
// |of the page and takes 3 lines. |
// | Here I have a second paragraph| ParagraphModel(LEFT, 0, 20, 0)
// |which indicates that the first |
// |paragraph is not a continuation |
// |from a previous page, as it is |
// |indented just like this second |
// |paragraph. |
// | Here is a block quote. It | ParagraphModel(LEFT, 30, 0, 0)
// | looks like the prior text |
// | but it is indented more |
// | and is fully justified. |
// | So how does one deal with | ParagraphModel(LEFT, 0, 20, 0)
// |centered text, block quotes, |
// |normal paragraphs, and lists |
// |like what follows? |
// |1. Make a plan. | ParagraphModel(LEFT, 0, 0, 30)
// |2. Use a heuristic, for example,| ParagraphModel(LEFT, 0, 0, 30)
// | looking for lines where the |
// | first word of the next line |
// | would fit on the previous |
// | line. |
// |8. Try to implement the plan in | ParagraphModel(LEFT, 0, 0, 30)
// | Python and try it out. |
// |4. Determine how to fix the | ParagraphModel(LEFT, 0, 0, 30)
// | mistakes. |
// |5. Repeat. | ParagraphModel(LEFT, 0, 0, 30)
// | For extra painful penalty work| ParagraphModel(LEFT, 0, 20, 0)
// |you can try to identify source |
// |code. Ouch! |
// +--------------------------------+
class TESS_API ParagraphModel {
public:
  ParagraphModel(tesseract::ParagraphJustification justification, int margin, int first_indent,
                 int body_indent, int tolerance)
      : justification_(justification)
      , margin_(margin)
      , first_indent_(first_indent)
      , body_indent_(body_indent)
      , tolerance_(tolerance) {
    // Normalize so that at least one of {first_indent_, body_indent_} is 0:
    // fold the smaller indent into margin_ and subtract it from both indents.
    int added_margin = first_indent;
    if (body_indent < added_margin) {
      added_margin = body_indent;
    }
    margin_ += added_margin;
    first_indent_ -= added_margin;
    body_indent_ -= added_margin;
  }

  // Default: UNKNOWN justification with all measurements zero.
  ParagraphModel()
      : justification_(tesseract::JUSTIFICATION_UNKNOWN)
      , margin_(0)
      , first_indent_(0)
      , body_indent_(0)
      , tolerance_(0) {}

  // ValidFirstLine() and ValidBodyLine() take arguments describing a text line
  // in a block of text which we are trying to model:
  //   lmargin, lindent: these add up to the distance from the leftmost ink
  //                     in the text line to the surrounding text block's left
  //                     edge.
  //   rmargin, rindent: these add up to the distance from the rightmost ink
  //                     in the text line to the surrounding text block's right
  //                     edge.
  // The caller determines the division between "margin" and "indent", which
  // only actually affect whether we think the line may be centered.
  //
  // If the amount of whitespace matches the amount of whitespace expected on
  // the relevant side of the line (within tolerance_) we say it matches.

  // Return whether a given text line could be a first paragraph line according
  // to this paragraph model.
  bool ValidFirstLine(int lmargin, int lindent, int rindent, int rmargin) const;

  // Return whether a given text line could be a body (non-first) paragraph
  // line according to this paragraph model.
  bool ValidBodyLine(int lmargin, int lindent, int rindent, int rmargin) const;

  tesseract::ParagraphJustification justification() const {
    return justification_;
  }
  int margin() const {
    return margin_;
  }
  int first_indent() const {
    return first_indent_;
  }
  int body_indent() const {
    return body_indent_;
  }
  int tolerance() const {
    return tolerance_;
  }
  // True for a LEFT or RIGHT model whose first and body indents agree within
  // tolerance_, i.e. paragraphs with no visible first-line indent.
  bool is_flush() const {
    return (justification_ == tesseract::JUSTIFICATION_LEFT ||
            justification_ == tesseract::JUSTIFICATION_RIGHT) &&
           abs(first_indent_ - body_indent_) <= tolerance_;
  }

  // Return whether this model is likely to agree with the other model on most
  // paragraphs they are marked.
  bool Comparable(const ParagraphModel &other) const;

  std::string ToString() const;

private:
  tesseract::ParagraphJustification justification_;
  int margin_;       // Whitespace folded out of the indents (see constructor).
  int first_indent_; // Expected first-line indent beyond margin_.
  int body_indent_;  // Expected body-line indent beyond margin_.
  int tolerance_;    // Pixel slack allowed when matching lines to this model.
};
} // namespace tesseract
#endif // TESSERACT_CCSTRUCT_OCRPARA_H_
|
2301_81045437/tesseract
|
src/ccstruct/ocrpara.h
|
C++
|
apache-2.0
| 7,440
|
/**********************************************************************
* File: ocrrow.cpp (Formerly row.c)
* Description: Code for the ROW class.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "blobbox.h"
#include "ocrrow.h"
namespace tesseract {
/**********************************************************************
* ROW::ROW
*
* Constructor to build a ROW. Only the stats stuff are given here.
* The words are added directly.
**********************************************************************/
/**********************************************************************
 * ROW::ROW
 *
 * Constructor to build a ROW from baseline spline data. Only the stats
 * are set here; the words are added directly by the caller.
 **********************************************************************/
ROW::ROW(int32_t spline_size, // no of segments
         int32_t *xstarts,    // segment boundaries
         double *coeffs,      // coefficients
         float x_height,      // line height
         float ascenders,     // ascender size
         float descenders,    // descender drop
         int16_t kern,        // char gap
         int16_t space)       // word gap
    // Members are listed in declaration order.
    : kerning(kern)
    , spacing(space)
    , xheight(x_height)
    , ascrise(ascenders)
    , descdrop(descenders)
    , bodysize(0.0f)
    , baseline(spline_size, xstarts, coeffs)
    , has_drop_cap_(false)
    , lmargin_(0)
    , rmargin_(0)
    , para_(nullptr) {}
/**********************************************************************
* ROW::ROW
*
* Constructor to build a ROW. Only the stats stuff are given here.
* The words are added directly.
**********************************************************************/
/**********************************************************************
 * ROW::ROW
 *
 * Constructor to build a ROW from a textord TO_ROW. Only the stats are
 * copied here; the words are added directly by the caller.
 **********************************************************************/
ROW::ROW(TO_ROW *to_row, // source row
         int16_t kern,   // char gap
         int16_t space)  // word gap
    : kerning(kern)
    , spacing(space)
    , xheight(to_row->xheight)
    , ascrise(to_row->ascrise)
    , descdrop(to_row->descdrop)
    , bodysize(to_row->body_size)
    , has_drop_cap_(false)
    , lmargin_(0)
    , rmargin_(0)
    , para_(nullptr) {
  // QSPLINE is assigned (not initialized) just as in the original code.
  baseline = to_row->baseline;
}
// Returns the bounding box including the desired combination of upper and
// lower noise/diacritic elements.
// Returns the bounding box including the desired combination of upper and
// lower noise/diacritic elements, accumulated over all words of the row.
TBOX ROW::restricted_bounding_box(bool upper_dots, bool lower_dots) const {
  // Read-only traversal; const_cast is needed only because the list
  // iterator API is not const-aware.
  WERD_IT word_it(const_cast<WERD_LIST *>(&words));
  TBOX total;
  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
    total += word_it.data()->restricted_bounding_box(upper_dots, lower_dots);
  }
  return total;
}
/**********************************************************************
* ROW::recalc_bounding_box
*
* Set the bounding box correctly
**********************************************************************/
// Recalculates the bounding box of the row, first ensuring the words are in
// left-to-right order, then re-marking the begin/end-of-line word flags and
// folding every word's bounding box into bound_box.
// NOTE(review): bound_box is only extended (+=), never reset first — so an
// already-populated box can only grow; confirm callers rely on this.
void ROW::recalc_bounding_box() { // recalculate BB
  WERD *word;                     // current word
  WERD_IT it = &words;            // words of ROW
  int16_t left;                   // of word
  int16_t prev_left;              // old left

  if (!it.empty()) {
    word = it.data();
    prev_left = word->bounding_box().left();
    it.forward();
    // Scan for any out-of-order word; if one is found, sort the whole list
    // and stop scanning (a single sort fixes everything).
    while (!it.at_first()) {
      word = it.data();
      left = word->bounding_box().left();
      if (left < prev_left) {
        it.move_to_first();
        // words in BB order
        it.sort(word_comparator);
        break;
      }
      prev_left = left;
      it.forward();
    }
  }
  // With the words in order, set W_BOL on the first word and W_EOL on the
  // last (clearing the flags on all other words), and accumulate the box.
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    word = it.data();
    if (it.at_first()) {
      word->set_flag(W_BOL, true);
    } else {
      // not start of line
      word->set_flag(W_BOL, false);
    }
    if (it.at_last()) {
      word->set_flag(W_EOL, true);
    } else {
      // not end of line
      word->set_flag(W_EOL, false);
    }
    // extend BB as reqd
    bound_box += word->bounding_box();
  }
}
/**********************************************************************
* ROW::move
*
* Reposition row by vector
**********************************************************************/
void ROW::move( // reposition row
const ICOORD vec // by vector
) {
WERD_IT it(&words); // word iterator
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
it.data()->move(vec);
}
bound_box.move(vec);
baseline.move(vec);
}
/**********************************************************************
* ROW::print
*
* Display members
**********************************************************************/
// Prints the row's statistics (kerning, spacing, bounding box, x-height,
// ascender/descender sizes, drop-cap flag and margins) for debugging.
// NOTE(review): the fp parameter is unused — all output goes through
// tprintf and bound_box.print(); confirm whether fp should be honored.
void ROW::print( // print
    FILE *fp     // file to print on
) const {
  tprintf("Kerning= %d\n", kerning);
  tprintf("Spacing= %d\n", spacing);
  bound_box.print();
  tprintf("Xheight= %f\n", xheight);
  tprintf("Ascrise= %f\n", ascrise);
  tprintf("Descdrop= %f\n", descdrop);
  tprintf("has_drop_cap= %d\n", has_drop_cap_);
  tprintf("lmargin= %d, rmargin= %d\n", lmargin_, rmargin_);
}
/**********************************************************************
* ROW::plot
*
* Draw the ROW in the given colour.
**********************************************************************/
#ifndef GRAPHICS_DISABLED
/**********************************************************************
 * ROW::plot
 *
 * Draw every word of the ROW in the single given colour.
 **********************************************************************/
void ROW::plot(ScrollView *window, ScrollView::Color colour) {
  WERD_IT word_it(&words); // words of ROW
  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
    word_it.data()->plot(window, colour); // all in one colour
  }
}
/**********************************************************************
* ROW::plot
*
* Draw the ROW in rainbow colours.
**********************************************************************/
/**********************************************************************
 * ROW::plot
 *
 * Draw every word of the ROW, letting each word pick its own rainbow
 * colour.
 **********************************************************************/
void ROW::plot(ScrollView *window) {
  WERD_IT word_it(&words); // words of ROW
  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
    word_it.data()->plot(window); // in rainbow colours
  }
}
#endif // !GRAPHICS_DISABLED
/**********************************************************************
* ROW::operator=
*
* Assign rows by duplicating the row structure but NOT the WERDLIST
**********************************************************************/
/**********************************************************************
 * ROW::operator=
 *
 * Assign rows by duplicating the row structure but NOT the WERDLIST:
 * the destination's word list is emptied, never filled from source.
 * The para_ pointer is copied, so both rows then reference the same
 * PARA (neither owns it).
 **********************************************************************/
ROW &ROW::operator=(const ROW &source) {
  this->ELIST_LINK::operator=(source);
  kerning = source.kerning;
  spacing = source.spacing;
  xheight = source.xheight;
  bodysize = source.bodysize;
  ascrise = source.ascrise;
  descdrop = source.descdrop;
  // Deliberately drop any existing words instead of copying source's.
  if (!words.empty()) {
    words.clear();
  }
  baseline = source.baseline; // QSPLINES must do =
  bound_box = source.bound_box;
  has_drop_cap_ = source.has_drop_cap_;
  lmargin_ = source.lmargin_;
  rmargin_ = source.rmargin_;
  para_ = source.para_;
  return *this;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/ocrrow.cpp
|
C++
|
apache-2.0
| 7,332
|
/**********************************************************************
* File: ocrrow.h (Formerly row.h)
* Description: Code for the ROW class.
* Author: Ray Smith
* Created: Tue Oct 08 15:58:04 BST 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef OCRROW_H
#define OCRROW_H
#include "elst.h" // for ELIST_ITERATOR, ELISTIZEH, ELIST_LINK
#include "quspline.h" // for QSPLINE
#include "rect.h" // for TBOX
#include "scrollview.h" // for ScrollView, ScrollView::Color
#include "werd.h" // for WERD_LIST
#include <cstdint> // for int16_t, int32_t
#include <cstdio> // for FILE
namespace tesseract {
class ICOORD;
class TO_ROW;
struct PARA;
// A single text line: its words, baseline spline, size statistics and
// paragraph membership.
class ROW : public ELIST_LINK {
  friend void tweak_row_baseline(ROW *, double, double);

public:
  ROW() = default;
  ROW(                      // constructor
      int32_t spline_size,  // no of segments
      int32_t *xstarts,     // segment boundaries
      double *coeffs,       // coefficients //ascender size
      float x_height, float ascenders,
      float descenders,     // descender size
      int16_t kern,         // char gap
      int16_t space);       // word gap
  ROW(              // constructor
      TO_ROW *row,  // textord row
      int16_t kern, // char gap
      int16_t space); // word gap

  /// get the list of words in this row
  WERD_LIST *word_list() {
    return &words;
  }

  /// Compute the baseline y-value at the given x position from the spline.
  float base_line(float xpos) const {
    // get spline value
    return static_cast<float>(baseline.y(xpos));
  }
  float x_height() const { // return x height
    return xheight;
  }
  void set_x_height(float new_xheight) { // set x height
    xheight = new_xheight;
  }
  int32_t kern() const { // return kerning
    return kerning;
  }
  float body_size() const { // return body size
    return bodysize;
  }
  void set_body_size(float new_size) { // set body size
    bodysize = new_size;
  }
  int32_t space() const { // return spacing
    return spacing;
  }
  float ascenders() const { // return size
    return ascrise;
  }
  float descenders() const { // return size (negative of descender drop)
    return descdrop;
  }
  TBOX bounding_box() const { // return bounding box
    return bound_box;
  }

  // Returns the bounding box including the desired combination of upper and
  // lower noise/diacritic elements.
  TBOX restricted_bounding_box(bool upper_dots, bool lower_dots) const;

  void set_lmargin(int16_t lmargin) {
    lmargin_ = lmargin;
  }
  void set_rmargin(int16_t rmargin) {
    rmargin_ = rmargin;
  }
  int16_t lmargin() const {
    return lmargin_;
  }
  int16_t rmargin() const {
    return rmargin_;
  }

  void set_has_drop_cap(bool has) {
    has_drop_cap_ = has;
  }
  bool has_drop_cap() const {
    return has_drop_cap_;
  }

  void set_para(PARA *p) {
    para_ = p;
  }
  PARA *para() const {
    return para_;
  }

  void recalc_bounding_box(); // recalculate BB

  void move(              // reposition row
      const ICOORD vec);  // by vector

  void print(         // print
      FILE *fp) const; // file to print on

#ifndef GRAPHICS_DISABLED
  void plot(                     // draw one
      ScrollView *window,        // window to draw in
      ScrollView::Color colour); // uniform colour
  void plot(                // draw one
      ScrollView *window);  // in rainbow colours

  /// Draw the baseline spline in the given colour.
  void plot_baseline(ScrollView *window, ScrollView::Color colour) {
    // draw it
    baseline.plot(window, colour);
  }
#endif // !GRAPHICS_DISABLED

  ROW &operator=(const ROW &source);

private:
  // Copy constructor (currently unused, therefore private).
  ROW(const ROW &source) = delete;

  int32_t kerning;  // inter char gap
  int32_t spacing;  // inter word gap
  TBOX bound_box;   // bounding box
  float xheight;    // height of line
  float ascrise;    // size of ascenders
  float descdrop;   //-size of descenders
  float bodysize;   // CJK character size. (equals to
                    // xheight+ascrise by default)
  WERD_LIST words;  // words
  QSPLINE baseline; // baseline spline
  // These get set after blocks have been determined.
  bool has_drop_cap_;
  int16_t lmargin_; // Distance to left polyblock margin.
  int16_t rmargin_; // Distance to right polyblock margin.
  // This gets set during paragraph analysis.
  PARA *para_; // Paragraph of which this row is part.
};
ELISTIZEH(ROW)
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/ocrrow.h
|
C++
|
apache-2.0
| 5,107
|
/**********************************************************************
* File: otsuthr.cpp
* Description: Simple Otsu thresholding for binarizing images.
* Author: Ray Smith
*
* (C) Copyright 2008, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "otsuthr.h"
#include <allheaders.h>
#include <cstring>
#include "helpers.h"
namespace tesseract {
// Computes the Otsu threshold(s) for the given image rectangle, making one
// for each channel. Each channel is always one byte per pixel.
// Returns an array of threshold values and an array of hi_values, such
// that a pixel value >threshold[channel] is considered foreground if
// hi_values[channel] is 0 or background if 1. A hi_value of -1 indicates
// that there is no apparent foreground. At least one hi_value will not be -1.
// The return value is the number of channels in the input image, being
// the size of the output thresholds and hi_values arrays.
// Computes the Otsu threshold(s) for the given image rectangle, making one
// for each channel. Each channel is always one byte per pixel.
// Returns an array of threshold values and an array of hi_values, such
// that a pixel value >threshold[channel] is considered foreground if
// hi_values[channel] is 0 or background if 1. A hi_value of -1 indicates
// that there is no apparent foreground. At least one hi_value will not be -1.
// The return value is the number of channels in the input image, being
// the size of the output thresholds and hi_values arrays.
int OtsuThreshold(Image src_pix, int left, int top, int width, int height, std::vector<int> &thresholds,
                  std::vector<int> &hi_values) {
  // One channel per byte of pixel depth.
  int num_channels = pixGetDepth(src_pix) / 8;
  // Of all channels with no good hi_value, keep the best so we can always
  // produce at least one answer.
  int best_hi_value = 1;
  int best_hi_index = 0;
  bool any_good_hivalue = false;
  double best_hi_dist = 0.0;
  thresholds.resize(num_channels);
  hi_values.resize(num_channels);
  for (int ch = 0; ch < num_channels; ++ch) {
    // Defaults: no threshold, no apparent foreground for this channel.
    thresholds[ch] = -1;
    hi_values[ch] = -1;
    // Compute the histogram of the image rectangle.
    int histogram[kHistogramSize];
    HistogramRect(src_pix, ch, left, top, width, height, histogram);
    int H;            // total pixel count in the histogram
    int best_omega_0; // count below the chosen threshold
    int best_t = OtsuStats(histogram, &H, &best_omega_0);
    if (best_omega_0 == 0 || best_omega_0 == H) {
      // This channel is empty.
      continue;
    }
    // To be a convincing foreground we must have a small fraction of H
    // or to be a convincing background we must have a large fraction of H.
    // In between we assume this channel contains no thresholding information.
    // hi_value is 1 when the below-threshold class is the minority.
    int hi_value = best_omega_0 < H * 0.5;
    thresholds[ch] = best_t;
    if (best_omega_0 > H * 0.75) {
      any_good_hivalue = true;
      hi_values[ch] = 0;
    } else if (best_omega_0 < H * 0.25) {
      any_good_hivalue = true;
      hi_values[ch] = 1;
    } else {
      // In case all channels are like this, keep the best of the bad lot.
      // "Distance" is how far the majority class is from an even split.
      double hi_dist = hi_value ? (H - best_omega_0) : best_omega_0;
      if (hi_dist > best_hi_dist) {
        best_hi_dist = hi_dist;
        best_hi_value = hi_value;
        best_hi_index = ch;
      }
    }
  }
  if (!any_good_hivalue) {
    // Use the best of the ones that were not good enough.
    hi_values[best_hi_index] = best_hi_value;
  }
  return num_channels;
}
// Computes the histogram for the given image rectangle, and the given
// single channel. Each channel is always one byte per pixel.
// Histogram is always a kHistogramSize(256) element array to count
// occurrences of each pixel value.
// Computes the histogram for the given image rectangle, and the given
// single channel. Each channel is always one byte per pixel.
// Histogram is always a kHistogramSize(256) element array to count
// occurrences of each pixel value.
// NOTE(review): left/top/width/height are not clipped to the image here
// (only channel is clipped) — callers presumably pass a valid rectangle.
void HistogramRect(Image src_pix, int channel, int left, int top, int width, int height,
                   int *histogram) {
  int num_channels = pixGetDepth(src_pix) / 8;
  // Clamp the channel index to the channels actually present.
  channel = ClipToRange(channel, 0, num_channels - 1);
  int bottom = top + height;
  memset(histogram, 0, sizeof(*histogram) * kHistogramSize);
  int src_wpl = pixGetWpl(src_pix);          // words per raster line
  l_uint32 *srcdata = pixGetData(src_pix);
  for (int y = top; y < bottom; ++y) {
    const l_uint32 *linedata = srcdata + y * src_wpl;
    for (int x = 0; x < width; ++x) {
      // Byte index interleaves channels: pixel x occupies num_channels bytes.
      int pixel = GET_DATA_BYTE(linedata, (x + left) * num_channels + channel);
      ++histogram[pixel];
    }
  }
}
// Computes the Otsu threshold(s) for the given histogram.
// Also returns H = total count in histogram, and
// omega0 = count of histogram below threshold.
// Computes the Otsu threshold for the given histogram by maximizing the
// between-class variance over all candidate thresholds t.
// Also returns H = total count in histogram, and
// omega0 = count of histogram at or below the chosen threshold.
// Returns -1 (with omega0 = 0) if no valid split exists.
int OtsuStats(const int *histogram, int *H_out, int *omega0_out) {
  // Totals over the whole histogram: pixel count and intensity moment.
  int total_count = 0;
  double total_moment = 0.0;
  for (int level = 0; level < kHistogramSize; ++level) {
    total_count += histogram[level];
    total_moment += static_cast<double>(level) * histogram[level];
  }

  // Sweep t, maintaining running stats of the class at or below t, and keep
  // the t with the largest between-class variance sig_sq_B.
  // http://www.ctie.monash.edu.au/hargreave/Cornall_Terry_328.pdf
  int best_t = -1;
  int best_lower_count = 0;
  double best_variance = 0.0;
  int lower_count = 0;
  double lower_moment = 0.0;
  for (int t = 0; t < kHistogramSize - 1; ++t) {
    lower_count += histogram[t];
    lower_moment += t * static_cast<double>(histogram[t]);
    if (lower_count == 0) {
      // Lower class still empty: no split possible yet.
      continue;
    }
    const int upper_count = total_count - lower_count;
    if (upper_count == 0) {
      // Upper class exhausted: no further splits possible.
      break;
    }
    const double lower_mean = lower_moment / lower_count;
    const double upper_mean = (total_moment - lower_moment) / upper_count;
    double variance = upper_mean - lower_mean;
    variance *= variance * lower_count * upper_count;
    if (best_t < 0 || variance > best_variance) {
      best_variance = variance;
      best_t = t;
      best_lower_count = lower_count;
    }
  }

  if (H_out != nullptr) {
    *H_out = total_count;
  }
  if (omega0_out != nullptr) {
    *omega0_out = best_lower_count;
  }
  return best_t;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/ccstruct/otsuthr.cpp
|
C++
|
apache-2.0
| 5,639
|
///////////////////////////////////////////////////////////////////////
// File: otsuthr.h
// Description: Simple Otsu thresholding for binarizing images.
// Author: Ray Smith
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCMAIN_OTSUTHR_H_
#define TESSERACT_CCMAIN_OTSUTHR_H_
#include "image.h"
#include <vector> // for std::vector
struct Pix; // forward declaration of the underlying pix type used by Image
namespace tesseract {
// The size of a histogram of pixel values: one bin per possible 8-bit value.
const int kHistogramSize = 256;
// Computes the Otsu threshold(s) for the given image rectangle, making one
// for each channel. Each channel is always one byte per pixel.
// Returns an array of threshold values and an array of hi_values, such
// that a pixel value >threshold[channel] is considered foreground if
// hi_values[channel] is 0 or background if 1. A hi_value of -1 indicates
// that there is no apparent foreground. At least one hi_value will not be -1.
// The return value is the number of channels in the input image, being
// the size of the output thresholds and hi_values arrays.
int OtsuThreshold(Image src_pix, int left, int top, int width, int height,
                  std::vector<int> &thresholds,
                  std::vector<int> &hi_values);
// Computes the histogram for the given image rectangle, and the given
// single channel. Each channel is always one byte per pixel.
// Histogram is always a kHistogramSize(256) element array to count
// occurrences of each pixel value.
void HistogramRect(Image src_pix, int channel, int left, int top, int width, int height,
                   int *histogram);
// Computes the Otsu threshold(s) for the given histogram.
// Also returns H = total count in histogram, and
// omega0 = count of histogram below threshold.
int OtsuStats(const int *histogram, int *H_out, int *omega0_out);
} // namespace tesseract.
#endif // TESSERACT_CCMAIN_OTSUTHR_H_
|
2301_81045437/tesseract
|
src/ccstruct/otsuthr.h
|
C++
|
apache-2.0
| 2,481
|
/**********************************************************************
* File: pageres.cpp (Formerly page_res.c)
* Description: Hierarchy of results classes from PAGE_RES to WERD_RES
* and an iterator class to iterate over the words.
* Main purposes:
* Easy way to iterate over the words without a 3-nested loop.
* Holds data used during word recognition.
* Holds information about alternative spacing paths.
* Author: Phil Cheatle
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "pageres.h"
#include "blamer.h"   // for BlamerBundle
#include "blobs.h"    // for TWERD, TBLOB
#include "boxword.h"  // for BoxWord
#include "errcode.h"  // for ASSERT_HOST
#include "ocrblock.h" // for BLOCK_IT, BLOCK, BLOCK_LIST (ptr only)
#include "ocrrow.h"   // for ROW, ROW_IT
#include "pdblock.h"  // for PDBLK
#include "polyblk.h"  // for POLY_BLOCK
#include "seam.h"     // for SEAM, start_seam_list
#include "stepblob.h" // for C_BLOB_IT, C_BLOB, C_BLOB_LIST
#include "tprintf.h"  // for tprintf
#include <tesseract/publictypes.h> // for OcrEngineMode, OEM_LSTM_ONLY
#include <cassert> // for assert
#include <cstdint> // for INT32_MAX
#include <cstring> // for strlen
#include <utility> // for std::exchange, std::move
struct Pix;
namespace tesseract {
// Gain factor for computing thresholds that determine the ambiguity of a
// word.
static const double kStopperAmbiguityThresholdGain = 8.0;
// Constant offset for computing thresholds that determine the ambiguity of a
// word.
static const double kStopperAmbiguityThresholdOffset = 1.5;
// Max number of broken pieces to associate.
const int kWordrecMaxNumJoinChunks = 4;
// Max ratio of word box height to line size to allow it to be processed as
// a line with other words.
const double kMaxWordSizeRatio = 1.25;
// Max ratio of line box height to line size to allow a new word to be added.
const double kMaxLineSizeRatio = 1.25;
// Max ratio of word gap to line size to allow a new word to be added.
const double kMaxWordGapRatio = 2.0;
// Computes and returns a threshold of certainty difference used to determine
// which words to keep, based on the adjustment factors of the two words.
// f1: adjust factor of the word being kept (the better one).
// f2: adjust factor of the word being compared against it.
// The result is a (typically negative) certainty-difference bound.
// TODO(rays) This is horrible. Replace with an enhance params training model.
static double StopperAmbigThreshold(double f1, double f2) {
  return (f2 - f1) * kStopperAmbiguityThresholdGain -
         kStopperAmbiguityThresholdOffset;
}
/*************************************************************************
 * PAGE_RES::PAGE_RES
 *
 * Constructor for page results: wraps every BLOCK on the page in a
 * BLOCK_RES, preserving document order.
 *************************************************************************/
PAGE_RES::PAGE_RES(bool merge_similar_words, BLOCK_LIST *the_block_list,
                   WERD_CHOICE **prev_word_best_choice_ptr) {
  Init();
  BLOCK_IT src_it(the_block_list);
  BLOCK_RES_IT dest_it(&block_res_list);
  for (src_it.mark_cycle_pt(); !src_it.cycled_list(); src_it.forward()) {
    dest_it.add_to_end(new BLOCK_RES(merge_similar_words, src_it.data()));
  }
  prev_word_best_choice = prev_word_best_choice_ptr;
}
/*************************************************************************
 * BLOCK_RES::BLOCK_RES
 *
 * Constructor for BLOCK results: resets the accumulators and wraps every
 * ROW of the block in a ROW_RES, preserving order.
 *************************************************************************/
BLOCK_RES::BLOCK_RES(bool merge_similar_words, BLOCK *the_block) {
  // Reset counters; font and x-height start out unassigned.
  char_count = 0;
  rej_count = 0;
  font_class = -1; // not assigned
  x_height = -1.0;
  font_assigned = false;
  row_count = 0;
  block = the_block;
  ROW_IT src_it(the_block->row_list());
  ROW_RES_IT dest_it(&row_res_list);
  for (src_it.mark_cycle_pt(); !src_it.cycled_list(); src_it.forward()) {
    dest_it.add_to_end(new ROW_RES(merge_similar_words, src_it.data()));
  }
}
/*************************************************************************
 * ROW_RES::ROW_RES
 *
 * Constructor for ROW results.
 * Wraps each WERD of the row in a WERD_RES. With merge_similar_words,
 * adjacent words that plausibly belong together (not rep chars, small
 * enough, close enough) are additionally accumulated into a "combination"
 * WERD_RES that is inserted before its constituent parts.
 *************************************************************************/
ROW_RES::ROW_RES(bool merge_similar_words, ROW *the_row) {
  WERD_IT word_it(the_row->word_list());
  WERD_RES_IT word_res_it(&word_res_list);
  WERD_RES *combo = nullptr; // current combination of fuzzies
  WERD *copy_word;
  char_count = 0;
  rej_count = 0;
  whole_word_rej_count = 0;
  row = the_row;
  bool add_next_word = false;
  TBOX union_box;
  // Nominal full height of a text line in this row.
  float line_height =
      the_row->x_height() + the_row->ascenders() - the_row->descenders();
  for (word_it.mark_cycle_pt(); !word_it.cycled_list(); word_it.forward()) {
    auto *word_res = new WERD_RES(word_it.data());
    word_res->x_height = the_row->x_height();
    if (add_next_word) {
      ASSERT_HOST(combo != nullptr);
      // We are adding this word to the combination.
      word_res->part_of_combo = true;
      combo->copy_on(word_res);
    } else if (merge_similar_words) {
      // This word may start a new combination unless it is a rep char or
      // too tall relative to the line.
      union_box = word_res->word->bounding_box();
      add_next_word = !word_res->word->flag(W_REP_CHAR) &&
                      union_box.height() <= line_height * kMaxWordSizeRatio;
      word_res->odd_size = !add_next_word;
    }
    WERD *next_word = word_it.data_relative(1);
    if (merge_similar_words) {
      if (add_next_word && !next_word->flag(W_REP_CHAR)) {
        // Next word will be added on if all of the following are true:
        // Not a rep char.
        // Box height small enough.
        // Union box height small enough.
        // Horizontal gap small enough.
        TBOX next_box = next_word->bounding_box();
        int prev_right = union_box.right();
        union_box += next_box;
        if (next_box.height() > line_height * kMaxWordSizeRatio ||
            union_box.height() > line_height * kMaxLineSizeRatio ||
            next_box.left() > prev_right + line_height * kMaxWordGapRatio) {
          add_next_word = false;
        }
      }
      // Record the merge decision on the word itself for later passes.
      next_word->set_flag(W_FUZZY_NON, add_next_word);
    } else {
      // Not merging here: follow decisions already recorded in the flags.
      add_next_word = next_word->flag(W_FUZZY_NON);
    }
    if (add_next_word) {
      if (combo == nullptr) {
        // Start a new combination seeded with a deep copy of this word.
        copy_word = new WERD;
        *copy_word = *(word_it.data()); // deep copy
        combo = new WERD_RES(copy_word);
        combo->x_height = the_row->x_height();
        combo->combination = true;
        word_res_it.add_to_end(combo);
      }
      word_res->part_of_combo = true;
    } else {
      combo = nullptr;
    }
    word_res_it.add_to_end(word_res);
  }
}
// Assignment deep-copies the recognition results of source. The underlying
// WERD is shared (borrowed pointer) unless source is a combination, which
// owns a deep copy instead. The ratings MATRIX is deliberately NOT copied
// (see comment below).
WERD_RES &WERD_RES::operator=(const WERD_RES &source) {
  this->ELIST_LINK::operator=(source);
  Clear();
  if (source.combination) {
    word = new WERD;
    *word = *(source.word); // deep copy
  } else {
    word = source.word; // pt to same word
  }
  if (source.bln_boxes != nullptr) {
    bln_boxes = new tesseract::BoxWord(*source.bln_boxes);
  }
  if (source.chopped_word != nullptr) {
    chopped_word = new TWERD(*source.chopped_word);
  }
  if (source.rebuild_word != nullptr) {
    rebuild_word = new TWERD(*source.rebuild_word);
  }
  // TODO(rays) Do we ever need to copy the seam_array?
  blob_row = source.blob_row;
  denorm = source.denorm;
  if (source.box_word != nullptr) {
    box_word = new tesseract::BoxWord(*source.box_word);
  }
  best_state = source.best_state;
  correct_text = source.correct_text;
  blob_widths = source.blob_widths;
  blob_gaps = source.blob_gaps;
  // None of the uses of operator= require the ratings matrix to be copied,
  // so don't as it would be really slow.
  // Copy the cooked choices.
  WERD_CHOICE_IT wc_it(const_cast<WERD_CHOICE_LIST *>(&source.best_choices));
  WERD_CHOICE_IT wc_dest_it(&best_choices);
  for (wc_it.mark_cycle_pt(); !wc_it.cycled_list(); wc_it.forward()) {
    const WERD_CHOICE *choice = wc_it.data();
    wc_dest_it.add_after_then_move(new WERD_CHOICE(*choice));
  }
  // The head of the copied list doubles as the best choice.
  if (!wc_dest_it.empty()) {
    wc_dest_it.move_to_first();
    best_choice = wc_dest_it.data();
  } else {
    best_choice = nullptr;
  }
  if (source.raw_choice != nullptr) {
    raw_choice = new WERD_CHOICE(*source.raw_choice);
  } else {
    raw_choice = nullptr;
  }
  if (source.ep_choice != nullptr) {
    ep_choice = new WERD_CHOICE(*source.ep_choice);
  } else {
    ep_choice = nullptr;
  }
  reject_map = source.reject_map;
  combination = source.combination;
  part_of_combo = source.part_of_combo;
  CopySimpleFields(source);
  if (source.blamer_bundle != nullptr) {
    blamer_bundle = new BlamerBundle(*(source.blamer_bundle));
  }
  return *this;
}
// Copies basic fields that don't involve pointers that might be useful
// to copy when making one WERD_RES from another.
void WERD_RES::CopySimpleFields(const WERD_RES &source) {
  // Recognition status flags.
  tess_failed = source.tess_failed;
  tess_accepted = source.tess_accepted;
  tess_would_adapt = source.tess_would_adapt;
  done = source.done;
  unlv_crunch_mode = source.unlv_crunch_mode;
  // Typographic attributes.
  small_caps = source.small_caps;
  odd_size = source.odd_size;
  // Font classification results.
  fontinfo = source.fontinfo;
  fontinfo2 = source.fontinfo2;
  fontinfo_id_count = source.fontinfo_id_count;
  fontinfo_id2_count = source.fontinfo_id2_count;
  // Geometry estimates.
  x_height = source.x_height;
  caps_height = source.caps_height;
  baseline_shift = source.baseline_shift;
  guessed_x_ht = source.guessed_x_ht;
  guessed_caps_ht = source.guessed_caps_ht;
  reject_spaces = source.reject_spaces;
  // Shared (non-owning) pointers to the charset and the engine.
  uch_set = source.uch_set;
  tesseract = source.tesseract;
}
// Initializes a blank (default constructed) WERD_RES from one that has
// already been recognized.
// Use SetupFor*Recognition afterwards to complete the setup and make
// it ready for a retry recognition.
void WERD_RES::InitForRetryRecognition(const WERD_RES &source) {
  CopySimpleFields(source);
  word = source.word; // share the WERD; no deep copy for a retry
  if (source.blamer_bundle == nullptr) {
    return;
  }
  // Carry over only the ground truth; results will be recomputed.
  blamer_bundle = new BlamerBundle();
  blamer_bundle->CopyTruth(*source.blamer_bundle);
}
// Sets up the members used in recognition: bln_boxes, chopped_word,
// seam_array, denorm. Returns false if
// the word is empty and sets up fake results. If use_body_size is
// true and row->body_size is set, then body_size will be used for
// blob normalization instead of xheight + ascrise. This flag is for
// those languages that are using CJK pitch model and thus it has to
// be true if and only if tesseract->textord_use_cjk_fp_model is
// true.
// If allow_detailed_fx is true, the feature extractor will receive fine
// precision outline information, allowing smoother features and better
// features on low resolution images.
// The norm_mode_hint sets the default mode for normalization in absence
// of any of the above flags.
// norm_box is used to override the word bounding box to determine the
// normalization scale and offset.
// Returns false if the word is empty and sets up fake results.
bool WERD_RES::SetupForRecognition(const UNICHARSET &unicharset_in,
                                   tesseract::Tesseract *tess, Image pix,
                                   int norm_mode, const TBOX *norm_box,
                                   bool numeric_mode, bool use_body_size,
                                   bool allow_detailed_fx, ROW *row,
                                   const BLOCK *block) {
  auto norm_mode_hint = static_cast<tesseract::OcrEngineMode>(norm_mode);
  tesseract = tess;
  POLY_BLOCK *pb = block != nullptr ? block->pdblk.poly_block() : nullptr;
  // Bail out with fake results for empty words and non-text blocks.
  if ((norm_mode_hint != tesseract::OEM_LSTM_ONLY &&
       word->cblob_list()->empty()) ||
      (pb != nullptr && !pb->IsText())) {
    // Empty words occur when all the blobs have been moved to the rej_blobs
    // list, which seems to occur frequently in junk.
    SetupFake(unicharset_in);
    word->set_flag(W_REP_CHAR, false);
    return false;
  }
  ClearResults();
  SetupWordScript(unicharset_in);
  chopped_word = TWERD::PolygonalCopy(allow_detailed_fx, word);
  // Height used for baseline normalization (see use_body_size above).
  float word_xheight =
      use_body_size && row != nullptr && row->body_size() > 0.0f
          ? row->body_size()
          : x_height;
  chopped_word->BLNormalize(block, row, pix, word->flag(W_INVERSE),
                            word_xheight, baseline_shift, numeric_mode,
                            norm_mode_hint, norm_box, &denorm);
  blob_row = row;
  SetupBasicsFromChoppedWord(unicharset_in);
  SetupBlamerBundle();
  int num_blobs = chopped_word->NumBlobs();
  // One ratings column per blob; each blob may join with up to
  // kWordrecMaxNumJoinChunks neighboring chunks.
  ratings = new MATRIX(num_blobs, kWordrecMaxNumJoinChunks);
  tess_failed = false;
  return true;
}
// Set up the seam array, bln_boxes, best_choice, and raw_choice to empty
// accumulators from a made chopped word. We presume the fields are already
// empty.
// Note: unicharset_in is currently unused here; it is part of the
// interface shared with the other Setup* methods.
void WERD_RES::SetupBasicsFromChoppedWord(const UNICHARSET &unicharset_in) {
  // Normalized blob boxes mirror the chopped word's blobs.
  bln_boxes = tesseract::BoxWord::CopyFromNormalized(chopped_word);
  start_seam_list(chopped_word, &seam_array);
  SetupBlobWidthsAndGaps();
  ClearWordChoices();
}
// Sets up the members used in recognition for an empty recognition result:
// bln_boxes, chopped_word, seam_array, denorm, best_choice, raw_choice.
void WERD_RES::SetupFake(const UNICHARSET &unicharset_in) {
  ClearResults();
  SetupWordScript(unicharset_in);
  // All the result structures exist but hold no real recognition data.
  chopped_word = new TWERD;
  rebuild_word = new TWERD;
  bln_boxes = new tesseract::BoxWord;
  box_word = new tesseract::BoxWord;
  int blob_count = word->cblob_list()->length();
  if (blob_count > 0) {
    auto **fake_choices = new BLOB_CHOICE *[blob_count];
    // For non-text blocks, just pass any blobs through to the box_word
    // and call the word failed with a fake classification.
    C_BLOB_IT b_it(word->cblob_list());
    int blob_id = 0;
    for (b_it.mark_cycle_pt(); !b_it.cycled_list(); b_it.forward()) {
      TBOX box = b_it.data()->bounding_box();
      box_word->InsertBox(box_word->length(), box);
      fake_choices[blob_id++] = new BLOB_CHOICE;
    }
    FakeClassifyWord(blob_count, fake_choices);
    delete[] fake_choices;
  } else {
    // No blobs at all: log a single deliberately-bad word choice.
    auto *word = new WERD_CHOICE(&unicharset_in);
    word->make_bad();
    LogNewRawChoice(word);
    // Ownership of word is taken by *this WERD_RES in LogNewCookedChoice.
    LogNewCookedChoice(1, false, word);
  }
  tess_failed = true;
  done = true;
}
// Records the charset on the word result and stamps the word with the
// charset's default script id and script-derived flags.
void WERD_RES::SetupWordScript(const UNICHARSET &uch) {
  uch_set = &uch;
  const int sid = uch.default_sid();
  word->set_script_id(sid);
  word->set_flag(W_SCRIPT_IS_LATIN, sid == uch.latin_sid());
  word->set_flag(W_SCRIPT_HAS_XHEIGHT, uch.script_has_xheight());
}
// Sets up the blamer_bundle if it is not null, using the initialized denorm.
void WERD_RES::SetupBlamerBundle() {
  if (blamer_bundle == nullptr) {
    return; // nothing to set up
  }
  blamer_bundle->SetupNormTruthWord(denorm);
}
// Computes the blob_widths and blob_gaps from the chopped_word.
void WERD_RES::SetupBlobWidthsAndGaps() {
blob_widths.clear();
blob_gaps.clear();
int num_blobs = chopped_word->NumBlobs();
for (int b = 0; b < num_blobs; ++b) {
TBLOB *blob = chopped_word->blobs[b];
TBOX box = blob->bounding_box();
blob_widths.push_back(box.width());
if (b + 1 < num_blobs) {
blob_gaps.push_back(chopped_word->blobs[b + 1]->bounding_box().left() -
box.right());
}
}
}
// Updates internal data to account for a new SEAM (chop) at the given
// blob_number. Fixes the ratings matrix and states in the choices, as well
// as the blob widths and gaps.
void WERD_RES::InsertSeam(int blob_number, SEAM *seam) {
  // Insert the seam into the SEAMS array.
  seam->PrepareToInsertSeam(seam_array, chopped_word->blobs, blob_number, true);
  seam_array.insert(seam_array.begin() + blob_number, seam);
  if (ratings != nullptr) {
    // Expand the ratings matrix.
    ratings = ratings->ConsumeAndMakeBigger(blob_number);
    // Fix all the segmentation states: each choice must account for the
    // extra chunk created by the split.
    if (raw_choice != nullptr) {
      raw_choice->UpdateStateForSplit(blob_number);
    }
    WERD_CHOICE_IT wc_it(&best_choices);
    for (wc_it.mark_cycle_pt(); !wc_it.cycled_list(); wc_it.forward()) {
      WERD_CHOICE *choice = wc_it.data();
      choice->UpdateStateForSplit(blob_number);
    }
    // Widths/gaps are derived from chopped_word, so recompute them.
    SetupBlobWidthsAndGaps();
  }
}
// Returns true if all the word choices except the first have adjust_factors
// worse than the given threshold.
bool WERD_RES::AlternativeChoiceAdjustmentsWorseThan(float threshold) const {
  // The choices are not changed by this iteration.
  WERD_CHOICE_IT wc_it(const_cast<WERD_CHOICE_LIST *>(&best_choices));
  // The loop init forwards past the first (best) choice; iteration stops
  // when the iterator wraps back around to the first element.
  for (wc_it.forward(); !wc_it.at_first(); wc_it.forward()) {
    WERD_CHOICE *choice = wc_it.data();
    if (choice->adjust_factor() <= threshold) {
      // An alternative is at least as good as the threshold.
      return false;
    }
  }
  return true;
}
// Returns true if the current word is ambiguous (by number of answers or
// by dangerous ambigs.)
bool WERD_RES::IsAmbiguous() {
  if (!best_choices.singleton()) {
    return true; // several competing answers
  }
  return best_choice->dangerous_ambig_found();
}
// Returns true if the ratings matrix size matches the sum of each of the
// segmentation states.
bool WERD_RES::StatesAllValid() {
  unsigned ratings_dim = ratings->dimension();
  // The raw choice's chunks must cover the ratings matrix exactly.
  if (raw_choice->TotalOfStates() != ratings_dim) {
    tprintf("raw_choice has total of states = %u vs ratings dim of %u\n",
            raw_choice->TotalOfStates(), ratings_dim);
    return false;
  }
  // So must every cooked choice.
  WERD_CHOICE_IT it(&best_choices);
  unsigned index = 0;
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward(), ++index) {
    WERD_CHOICE *choice = it.data();
    if (choice->TotalOfStates() != ratings_dim) {
      tprintf("Cooked #%u has total of states = %u vs ratings dim of %u\n",
              index, choice->TotalOfStates(), ratings_dim);
      return false;
    }
  }
  return true;
}
// Prints a list of words found if debug is true or the word result matches
// the word_to_debug.
void WERD_RES::DebugWordChoices(bool debug, const char *word_to_debug) {
  // Print either unconditionally (debug) or when this word's best choice
  // text equals word_to_debug.
  if (debug || (word_to_debug != nullptr && *word_to_debug != '\0' &&
                best_choice != nullptr &&
                best_choice->unichar_string() == std::string(word_to_debug))) {
    if (raw_choice != nullptr) {
      raw_choice->print("\nBest Raw Choice");
    }
    WERD_CHOICE_IT it(&best_choices);
    int index = 0;
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward(), ++index) {
      WERD_CHOICE *choice = it.data();
      std::string label;
      label += "\nCooked Choice #" + std::to_string(index);
      choice->print(label.c_str());
    }
  }
}
// Prints the top choice along with the accepted/done flags.
void WERD_RES::DebugTopChoice(const char *msg) const {
  tprintf("Best choice: accepted=%d, adaptable=%d, done=%d : ", tess_accepted,
          tess_would_adapt, done);
  if (best_choice != nullptr) {
    best_choice->print(msg);
  } else {
    tprintf("<Null choice>\n");
  }
}
// Removes from best_choices all choices which are not within a reasonable
// range of the best choice.
// TODO(rays) incorporate the information used here into the params training
// re-ranker, in place of this heuristic that is based on the previous
// adjustment factor.
void WERD_RES::FilterWordChoices(int debug_level) {
  if (best_choice == nullptr || best_choices.singleton()) {
    return;
  }
  if (debug_level >= 2) {
    best_choice->print("\nFiltering against best choice");
  }
  WERD_CHOICE_IT it(&best_choices);
  int index = 0;
  // The loop init forwards past the best choice at the head of the list;
  // iteration ends when the iterator wraps back to it.
  for (it.forward(); !it.at_first(); it.forward(), ++index) {
    WERD_CHOICE *choice = it.data();
    float threshold = StopperAmbigThreshold(best_choice->adjust_factor(),
                                            choice->adjust_factor());
    // i, j index the blob choice in choice, best_choice.
    // chunk is an index into the chopped_word blobs (AKA chunks).
    // Since the two words may use different segmentations of the chunks, we
    // iterate over the chunks to find out whether a comparable blob
    // classification is much worse than the best result.
    unsigned i = 0, j = 0, chunk = 0;
    // Each iteration of the while deals with 1 chunk. On entry choice_chunk
    // and best_chunk are the indices of the first chunk in the NEXT blob,
    // i.e. we don't have to increment i, j while chunk < choice_chunk and
    // best_chunk respectively.
    auto choice_chunk = choice->state(0), best_chunk = best_choice->state(0);
    while (i < choice->length() && j < best_choice->length()) {
      // Discard the choice when a differing blob is far worse than the
      // best choice's blob covering the same chunk.
      if (choice->unichar_id(i) != best_choice->unichar_id(j) &&
          choice->certainty(i) - best_choice->certainty(j) < threshold) {
        if (debug_level >= 2) {
          choice->print("WorstCertaintyDiffWorseThan");
          tprintf(
              "i %u j %u Choice->Blob[i].Certainty %.4g"
              " WorstOtherChoiceCertainty %g Threshold %g\n",
              i, j, choice->certainty(i), best_choice->certainty(j), threshold);
          tprintf("Discarding bad choice #%d\n", index);
        }
        delete it.extract();
        break;
      }
      ++chunk;
      // If needed, advance choice_chunk to keep up with chunk.
      while (choice_chunk < chunk && ++i < choice->length()) {
        choice_chunk += choice->state(i);
      }
      // If needed, advance best_chunk to keep up with chunk.
      while (best_chunk < chunk && ++j < best_choice->length()) {
        best_chunk += best_choice->state(j);
      }
    }
  }
}
// Computes a per-blob adaption threshold for the best choice by walking its
// chunks against the raw choice's segmentation: chunks where the two choices
// disagree contribute the raw certainty to an average, which is scaled by
// -1/certainty_scale and (1 - rating_margin) into a rating threshold,
// clamped to [min_rating, max_rating]. thresholds must have room for
// best_choice->length() entries.
// NOTE(review): end_chunk is initialized from best_choice->state(0) and
// never advanced, so blobs after the first always take the max_rating
// branch — looks suspicious; confirm against upstream before changing.
void WERD_RES::ComputeAdaptionThresholds(float certainty_scale,
                                         float min_rating, float max_rating,
                                         float rating_margin,
                                         float *thresholds) {
  int chunk = 0;
  int end_chunk = best_choice->state(0);
  int end_raw_chunk = raw_choice->state(0);
  int raw_blob = 0;
  for (unsigned i = 0; i < best_choice->length(); i++, thresholds++) {
    float avg_rating = 0.0f;
    int num_error_chunks = 0;
    // For each chunk in best choice blob i, count non-matching raw results.
    while (chunk < end_chunk) {
      if (chunk >= end_raw_chunk) {
        // Walked past the current raw blob; advance to the next one.
        ++raw_blob;
        end_raw_chunk += raw_choice->state(raw_blob);
      }
      if (best_choice->unichar_id(i) != raw_choice->unichar_id(raw_blob)) {
        avg_rating += raw_choice->certainty(raw_blob);
        ++num_error_chunks;
      }
      ++chunk;
    }
    if (num_error_chunks > 0) {
      avg_rating /= num_error_chunks;
      // Negative certainties map to positive rating thresholds.
      *thresholds = (avg_rating / -certainty_scale) * (1.0 - rating_margin);
    } else {
      *thresholds = max_rating;
    }
    // Clamp to the allowed range.
    if (*thresholds > max_rating) {
      *thresholds = max_rating;
    }
    if (*thresholds < min_rating) {
      *thresholds = min_rating;
    }
  }
}
// Saves a copy of the word_choice if it has the best unadjusted rating.
// Returns true if the word_choice was the new best.
bool WERD_RES::LogNewRawChoice(WERD_CHOICE *word_choice) {
  const bool is_new_best =
      raw_choice == nullptr || word_choice->rating() < raw_choice->rating();
  if (is_new_best) {
    // Replace the previous raw choice with a copy of the better one.
    delete raw_choice;
    raw_choice = new WERD_CHOICE(*word_choice);
    raw_choice->set_permuter(TOP_CHOICE_PERM);
  }
  return is_new_best;
}
// Consumes word_choice by adding it to best_choices, (taking ownership) if
// the certainty for word_choice is some distance of the best choice in
// best_choices, or by deleting the word_choice and returning false.
// The best_choices list is kept in sorted order by rating. Duplicates are
// removed, and the list is kept no longer than max_num_choices in length.
// Returns true if the word_choice is still a valid pointer.
bool WERD_RES::LogNewCookedChoice(int max_num_choices, bool debug,
                                  WERD_CHOICE *word_choice) {
  if (best_choice != nullptr) {
    // Throw out obviously bad choices to save some work.
    // TODO(rays) Get rid of this! This piece of code produces different
    // results according to the order in which words are found, which is an
    // undesirable behavior. It would be better to keep all the choices and
    // prune them later when more information is available.
    float max_certainty_delta = StopperAmbigThreshold(
        best_choice->adjust_factor(), word_choice->adjust_factor());
    if (max_certainty_delta > -kStopperAmbiguityThresholdOffset) {
      max_certainty_delta = -kStopperAmbiguityThresholdOffset;
    }
    if (word_choice->certainty() - best_choice->certainty() <
        max_certainty_delta) {
      // Far below the best choice's certainty: reject outright.
      if (debug) {
        std::string bad_string;
        word_choice->string_and_lengths(&bad_string, nullptr);
        tprintf(
            "Discarding choice \"%s\" with an overly low certainty"
            " %.3f vs best choice certainty %.3f (Threshold: %.3f)\n",
            bad_string.c_str(), word_choice->certainty(),
            best_choice->certainty(),
            max_certainty_delta + best_choice->certainty());
      }
      delete word_choice;
      return false;
    }
  }
  // Insert in the list in order of increasing rating, but knock out worse
  // string duplicates.
  WERD_CHOICE_IT it(&best_choices);
  const std::string &new_str = word_choice->unichar_string();
  bool inserted = false;
  int num_choices = 0;
  if (!it.empty()) {
    do {
      WERD_CHOICE *choice = it.data();
      if (choice->rating() > word_choice->rating() && !inserted) {
        // Time to insert.
        it.add_before_stay_put(word_choice);
        inserted = true;
        if (num_choices == 0) {
          best_choice = word_choice; // This is the new best.
        }
        ++num_choices;
      }
      if (choice->unichar_string() == new_str) {
        // Same text already present: keep only the better-rated entry.
        if (inserted) {
          // New is better.
          delete it.extract();
        } else {
          // Old is better.
          if (debug) {
            tprintf("Discarding duplicate choice \"%s\", rating %g vs %g\n",
                    new_str.c_str(), word_choice->rating(), choice->rating());
          }
          delete word_choice;
          return false;
        }
      } else {
        ++num_choices;
        // Trim the tail once the list exceeds max_num_choices.
        if (num_choices > max_num_choices) {
          delete it.extract();
        }
      }
      it.forward();
    } while (!it.at_first());
  }
  // Not inserted during the scan: append if there is still room.
  if (!inserted && num_choices < max_num_choices) {
    it.add_to_end(word_choice);
    inserted = true;
    if (num_choices == 0) {
      best_choice = word_choice; // This is the new best.
    }
  }
  if (debug) {
    if (inserted) {
      tprintf("New %s", best_choice == word_choice ? "Best" : "Secondary");
    } else {
      tprintf("Poor");
    }
    word_choice->print(" Word Choice");
  }
  if (!inserted) {
    delete word_choice;
    return false;
  }
  return true;
}
// Simple helper moves the ownership of the pointer data from src to dest,
// first deleting anything in dest, and nulling out src afterwards.
template <class T>
static void MovePointerData(T **dest, T **src) {
  delete *dest;
  // Take ownership from src and leave it null in one step.
  *dest = std::exchange(*src, nullptr);
}
// Prints a brief list of all the best choices.
void WERD_RES::PrintBestChoices() const {
  std::string alternates_str;
  WERD_CHOICE_IT it(const_cast<WERD_CHOICE_LIST *>(&best_choices));
  bool first = true;
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    if (!first) {
      alternates_str += "\", \""; // separator between alternates
    }
    first = false;
    alternates_str += it.data()->unichar_string();
  }
  tprintf("Alternates for \"%s\": {\"%s\"}\n",
          best_choice->unichar_string().c_str(), alternates_str.c_str());
}
// Returns the sum of the widths of the blob between start_blob and last_blob
// inclusive, plus the gaps between them.
int WERD_RES::GetBlobsWidth(int start_blob, int last_blob) const {
  int total = 0;
  // Widths of all blobs in [start_blob, last_blob].
  for (int i = start_blob; i <= last_blob; ++i) {
    total += blob_widths[i];
  }
  // Gaps between consecutive blobs in the range.
  for (int i = start_blob; i < last_blob; ++i) {
    total += blob_gaps[i];
  }
  return total;
}
// Returns the width of a gap between the specified blob and the next one.
int WERD_RES::GetBlobsGap(unsigned blob_index) const {
  // Past-the-end indices have no following blob, hence no gap.
  return blob_index < blob_gaps.size() ? blob_gaps[blob_index] : 0;
}
// Returns the BLOB_CHOICE corresponding to the given index in the
// best choice word taken from the appropriate cell in the ratings MATRIX.
// Borrowed pointer, so do not delete. May return nullptr if there is no
// BLOB_CHOICE matching the unichar_id at the given index.
BLOB_CHOICE *WERD_RES::GetBlobChoice(unsigned index) const {
  if (index < best_choice->length()) {
    BLOB_CHOICE_LIST *choices = GetBlobChoices(index);
    return FindMatchingChoice(best_choice->unichar_id(index), choices);
  }
  return nullptr;
}
// Returns the BLOB_CHOICE_LIST corresponding to the given index in the
// best choice word taken from the appropriate cell in the ratings MATRIX.
// Borrowed pointer, so do not delete.
BLOB_CHOICE_LIST *WERD_RES::GetBlobChoices(int index) const {
  // Delegates to WERD_CHOICE, which knows its own segmentation state.
  return best_choice->blob_choices(index, ratings);
}
// Moves the results fields from word to this. This takes ownership of all
// the data, so src can be destructed.
void WERD_RES::ConsumeWordResults(WERD_RES *word) {
  denorm = word->denorm;
  blob_row = word->blob_row;
  MovePointerData(&chopped_word, &word->chopped_word);
  MovePointerData(&rebuild_word, &word->rebuild_word);
  MovePointerData(&box_word, &word->box_word);
  // Delete our own seams before taking over word's seam array.
  for (auto data : seam_array) {
    delete data;
  }
  // Steal the vector contents instead of copying them; the explicit clear()
  // after each move guarantees the donor ends up empty regardless of the
  // moved-from state.
  seam_array = std::move(word->seam_array);
  word->seam_array.clear();
  best_state = std::move(word->best_state);
  word->best_state.clear();
  correct_text = std::move(word->correct_text);
  word->correct_text.clear();
  blob_widths = std::move(word->blob_widths);
  word->blob_widths.clear();
  blob_gaps = std::move(word->blob_gaps);
  word->blob_gaps.clear();
  if (ratings != nullptr) {
    // Drop our old classifier results before taking word's matrix.
    ratings->delete_matrix_pointers();
  }
  MovePointerData(&ratings, &word->ratings);
  best_choice = word->best_choice; // borrowed: owned by best_choices below
  MovePointerData(&raw_choice, &word->raw_choice);
  best_choices.clear();
  WERD_CHOICE_IT wc_it(&best_choices);
  wc_it.add_list_after(&word->best_choices);
  reject_map = word->reject_map;
  if (word->blamer_bundle != nullptr) {
    assert(blamer_bundle != nullptr);
    blamer_bundle->CopyResults(*(word->blamer_bundle));
  }
  CopySimpleFields(*word);
}
// Replace the best choice and rebuild box word.
// choice must be from the current best_choices list.
void WERD_RES::ReplaceBestChoice(WERD_CHOICE *choice) {
  best_choice = choice;
  // Derive per-blob state and output boxes from the new best choice.
  RebuildBestState();
  SetupBoxWord();
  // Make up a fake reject map of the right length to keep the
  // rejection pass happy.
  reject_map.initialise(best_state.size());
  // Mark the word as fully decided.
  done = tess_accepted = tess_would_adapt = true;
  SetScriptPositions();
}
// Builds the rebuild_word and sets the best_state from the chopped_word and
// the best_choice->state.
void WERD_RES::RebuildBestState() {
  ASSERT_HOST(best_choice != nullptr);
  delete rebuild_word;
  rebuild_word = new TWERD;
  if (seam_array.empty()) {
    start_seam_list(chopped_word, &seam_array);
  }
  best_state.clear();
  int start = 0;
  for (unsigned i = 0; i < best_choice->length(); ++i) {
    // Number of chopped chunks that make up character i.
    int length = best_choice->state(i);
    best_state.push_back(length);
    if (length > 1) {
      // Temporarily glue this character's chunks back together...
      SEAM::JoinPieces(seam_array, chopped_word->blobs, start,
                       start + length - 1);
    }
    TBLOB *blob = chopped_word->blobs[start];
    rebuild_word->blobs.push_back(new TBLOB(*blob));
    if (length > 1) {
      // ...then restore the original chopped state after copying.
      SEAM::BreakPieces(seam_array, chopped_word->blobs, start,
                        start + length - 1);
    }
    start += length;
  }
}
// Copies the chopped_word to the rebuild_word, faking a best_state as well.
// Also sets up the output box_word.
void WERD_RES::CloneChoppedToRebuild() {
  delete rebuild_word;
  rebuild_word = new TWERD(*chopped_word);
  SetupBoxWord();
  const auto word_len = box_word->length();
  best_state.reserve(word_len);
  correct_text.reserve(word_len);
  for (unsigned b = 0; b < word_len; ++b) {
    best_state.push_back(1); // one chunk per blob
    correct_text.emplace_back();
  }
}
// Sets/replaces the box_word with one made from the rebuild_word.
void WERD_RES::SetupBoxWord() {
  delete box_word;
  // Bounding boxes must be current before denormalizing to image space.
  rebuild_word->ComputeBoundingBoxes();
  box_word = tesseract::BoxWord::CopyFromNormalized(rebuild_word);
  // Clip boxes so they never extend beyond the original input word.
  box_word->ClipToOriginalWord(denorm.block(), word);
}
// Sets up the script positions in the output best_choice using the best_choice
// to get the unichars, and the unicharset to get the target positions.
void WERD_RES::SetScriptPositions() {
  // Delegates to WERD_CHOICE, which needs the blobs of chopped_word and the
  // small_caps flag to pick sub/superscript/normal positions.
  best_choice->SetScriptPositions(small_caps, chopped_word);
}
// Sets all the blobs in all the words (raw choice and best choices) to be
// the given position. (When a sub/superscript is recognized as a separate
// word, it falls victim to the rule that a whole word cannot be sub or
// superscript, so this function overrides that problem.)
void WERD_RES::SetAllScriptPositions(tesseract::ScriptPos position) {
  raw_choice->SetAllScriptPositions(position);
  // Apply the same override to every candidate in the best_choices list.
  WERD_CHOICE_IT it(&best_choices);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    it.data()->SetAllScriptPositions(position);
  }
}
// Classifies the word with some already-calculated BLOB_CHOICEs.
// The choices are an array of blob_count pointers to BLOB_CHOICE,
// providing a single classifier result for each blob.
// The BLOB_CHOICEs are consumed and the word takes ownership.
// The number of blobs in the box_word must match blob_count.
void WERD_RES::FakeClassifyWord(unsigned blob_count, BLOB_CHOICE **choices) {
// Setup the WERD_RES.
ASSERT_HOST(box_word != nullptr);
ASSERT_HOST(blob_count == box_word->length());
ClearWordChoices();
ClearRatings();
ratings = new MATRIX(blob_count, 1);
for (unsigned c = 0; c < blob_count; ++c) {
auto *choice_list = new BLOB_CHOICE_LIST;
BLOB_CHOICE_IT choice_it(choice_list);
choice_it.add_after_then_move(choices[c]);
ratings->put(c, c, choice_list);
}
FakeWordFromRatings(TOP_CHOICE_PERM);
reject_map.initialise(blob_count);
best_state.clear();
best_state.resize(blob_count, 1);
done = true;
}
// Creates a WERD_CHOICE for the word using the top choices from the leading
// diagonal of the ratings matrix.
void WERD_RES::FakeWordFromRatings(PermuterType permuter) {
  const int num_blobs = ratings->dimension();
  auto *fake_choice = new WERD_CHOICE(uch_set, num_blobs);
  fake_choice->set_permuter(permuter);
  for (int i = 0; i < num_blobs; ++i) {
    BLOB_CHOICE_LIST *blob_choices = ratings->get(i, i);
    if (blob_choices != nullptr && !blob_choices->empty()) {
      // Take the first (top) choice for this blob.
      BLOB_CHOICE_IT it(blob_choices);
      BLOB_CHOICE *top = it.data();
      fake_choice->append_unichar_id_space_allocated(
          top->unichar_id(), 1, top->rating(), top->certainty());
    } else {
      // No choice available: use a space with the worst possible rating and
      // certainty, like in WERD_CHOICE::make_bad().
      fake_choice->append_unichar_id_space_allocated(
          UNICHAR_SPACE, 1, WERD_CHOICE::kBadRating, -FLT_MAX);
    }
  }
  LogNewRawChoice(fake_choice);
  // Ownership of fake_choice taken by word here.
  LogNewCookedChoice(1, false, fake_choice);
}
// Copies the best_choice strings to the correct_text for adaption/training.
void WERD_RES::BestChoiceToCorrectText() {
  ASSERT_HOST(best_choice != nullptr);
  correct_text.clear();
  for (unsigned i = 0; i < best_choice->length(); ++i) {
    // Store the UTF-8 text of each chosen unichar as the "truth".
    correct_text.emplace_back(uch_set->id_to_unichar(best_choice->unichar_id(i)));
  }
}
// Merges 2 adjacent blobs in the result if the permanent callback
// class_cb returns other than INVALID_UNICHAR_ID, AND the permanent
// callback box_cb is nullptr or returns true, setting the merged blob
// result to the class returned from class_cb.
// Returns true if anything was merged.
bool WERD_RES::ConditionalBlobMerge(
    const std::function<UNICHAR_ID(UNICHAR_ID, UNICHAR_ID)> &class_cb,
    const std::function<bool(const TBOX &, const TBOX &)> &box_cb) {
  ASSERT_HOST(best_choice->empty() || ratings != nullptr);
  bool modified = false;
  // Note: best_choice->length() shrinks inside the loop when a merge occurs,
  // so the i+1 bound is re-evaluated each iteration.
  for (unsigned i = 0; i + 1 < best_choice->length(); ++i) {
    UNICHAR_ID new_id =
        class_cb(best_choice->unichar_id(i), best_choice->unichar_id(i + 1));
    if (new_id != INVALID_UNICHAR_ID &&
        (box_cb == nullptr ||
         box_cb(box_word->BlobBox(i), box_word->BlobBox(i + 1)))) {
      // Raw choice should not be fixed.
      best_choice->set_unichar_id(new_id, i);
      modified = true;
      // Collapse entry i+1 into i across all the parallel data structures.
      MergeAdjacentBlobs(i);
      // The merged entry may now span more blobs than the ratings matrix
      // bandwidth allows; widen the band if needed.
      const MATRIX_COORD &coord = best_choice->MatrixCoord(i);
      if (!coord.Valid(*ratings)) {
        ratings->IncreaseBandSize(coord.row + 1 - coord.col);
      }
      BLOB_CHOICE_LIST *blob_choices = GetBlobChoices(i);
      if (FindMatchingChoice(new_id, blob_choices) == nullptr) {
        // Insert a fake result so that the merged class exists in the
        // choice list for later passes.
        auto *blob_choice = new BLOB_CHOICE;
        blob_choice->set_unichar_id(new_id);
        BLOB_CHOICE_IT bc_it(blob_choices);
        bc_it.add_before_then_move(blob_choice);
      }
    }
  }
  return modified;
}
// Merges 2 adjacent blobs in the result (index and index+1) and corrects
// all the data to account for the change.
void WERD_RES::MergeAdjacentBlobs(unsigned index) {
  // Only shrink the reject map if it is still in sync with the choice.
  if (reject_map.length() == best_choice->length()) {
    reject_map.remove_pos(index);
  }
  best_choice->remove_unichar_id(index + 1);
  rebuild_word->MergeBlobs(index, index + 2);
  box_word->MergeBoxes(index, index + 2);
  const unsigned next = index + 1;
  if (next < best_state.size()) {
    // Fold the blob count of the removed entry into the survivor.
    best_state[index] += best_state[next];
    best_state.erase(best_state.begin() + next);
  }
}
// TODO(tkielbus) Decide between keeping this behavior here or modifying the
// training data.
// Utility function for fix_quotes
// Return true if the next character in the string (given the UTF8 length in
// bytes) is a quote character.
static int is_simple_quote(const char *signed_str, int length) {
  const auto *str = reinterpret_cast<const unsigned char *>(signed_str);
  if (length == 1) {
    // Standard 1 byte quotes.
    return *str == '\'' || *str == '`';
  }
  if (length == 3 && str[0] == 0xe2 && str[1] == 0x80) {
    // UTF-8 3 bytes curved quotes: U+2018 (0x98) and U+2019 (0x99).
    return str[2] == 0x98 || str[2] == 0x99;
  }
  return false;
}
// Callback helper for fix_quotes returns a double quote if both
// arguments are quote, otherwise INVALID_UNICHAR_ID.
UNICHAR_ID WERD_RES::BothQuotes(UNICHAR_ID id1, UNICHAR_ID id2) {
  const char *first = uch_set->id_to_unichar(id1);
  const char *second = uch_set->id_to_unichar(id2);
  if (!is_simple_quote(first, strlen(first)) ||
      !is_simple_quote(second, strlen(second))) {
    return INVALID_UNICHAR_ID;
  }
  // Two adjacent single quotes become one double quote.
  return uch_set->unichar_to_id("\"");
}
// Change pairs of quotes to double quotes.
void WERD_RES::fix_quotes() {
if (!uch_set->contains_unichar("\"") ||
!uch_set->get_enabled(uch_set->unichar_to_id("\""))) {
return; // Don't create it if it is disallowed.
}
using namespace std::placeholders; // for _1, _2
ConditionalBlobMerge(std::bind(&WERD_RES::BothQuotes, this, _1, _2), nullptr);
}
// Callback helper for fix_hyphens returns UNICHAR_ID of - if both
// arguments are hyphen, otherwise INVALID_UNICHAR_ID.
UNICHAR_ID WERD_RES::BothHyphens(UNICHAR_ID id1, UNICHAR_ID id2) {
  const char *first = uch_set->id_to_unichar(id1);
  const char *second = uch_set->id_to_unichar(id2);
  // A "hyphen" here is a single-byte '-' or '~'.
  const bool first_is_dash =
      strlen(first) == 1 && (first[0] == '-' || first[0] == '~');
  const bool second_is_dash =
      strlen(second) == 1 && (second[0] == '-' || second[0] == '~');
  return (first_is_dash && second_is_dash) ? uch_set->unichar_to_id("-")
                                           : INVALID_UNICHAR_ID;
}
// Callback helper for fix_hyphens returns true if box1 and box2 overlap
// (assuming both on the same textline, are in order and a chopped em dash.)
bool WERD_RES::HyphenBoxesOverlap(const TBOX &box1, const TBOX &box2) {
  // Touching counts as overlap: box1's right edge at or past box2's left.
  return box1.right() >= box2.left();
}
// Change pairs of hyphens to a single hyphen if the bounding boxes touch
// Typically a long dash which has been segmented.
void WERD_RES::fix_hyphens() {
if (!uch_set->contains_unichar("-") ||
!uch_set->get_enabled(uch_set->unichar_to_id("-"))) {
return; // Don't create it if it is disallowed.
}
using namespace std::placeholders; // for _1, _2
ConditionalBlobMerge(std::bind(&WERD_RES::BothHyphens, this, _1, _2),
std::bind(&WERD_RES::HyphenBoxesOverlap, this, _1, _2));
}
// Callback helper for merge_tess_fails returns a space if both
// arguments are space, otherwise INVALID_UNICHAR_ID.
UNICHAR_ID WERD_RES::BothSpaces(UNICHAR_ID id1, UNICHAR_ID id2) {
  const UNICHAR_ID space_id = uch_set->unichar_to_id(" ");
  // id1 == id2 == space  <=>  both ids are the space unichar.
  return (id1 == space_id && id2 == space_id) ? id1 : INVALID_UNICHAR_ID;
}
// Change pairs of tess failures to a single one
void WERD_RES::merge_tess_fails() {
using namespace std::placeholders; // for _1, _2
if (ConditionalBlobMerge(std::bind(&WERD_RES::BothSpaces, this, _1, _2),
nullptr)) {
unsigned len = best_choice->length();
ASSERT_HOST(reject_map.length() == len);
ASSERT_HOST(box_word->length() == len);
}
}
// Returns true if the collection of count pieces, starting at start, are all
// natural connected components, ie there are no real chops involved.
bool WERD_RES::PiecesAllNatural(int start, int count) const {
  // A chop leaves a seam with splits; any such seam between the pieces means
  // they are not all natural.
  for (int i = start; i < start + count - 1; ++i) {
    if (i < 0 || static_cast<size_t>(i) >= seam_array.size()) {
      continue; // Out-of-range indices are treated as natural boundaries.
    }
    SEAM *seam = seam_array[i];
    if (seam != nullptr && seam->HasAnySplits()) {
      return false;
    }
  }
  return true;
}
// Destructor releases everything via Clear() (owned word if a combination,
// the blamer bundle, and all recognition results).
WERD_RES::~WERD_RES() {
  Clear();
}
// Frees all owned data and resets pointers to a safe empty state.
void WERD_RES::Clear() {
  // Only combinations own their WERD; otherwise the ROW owns it.
  if (combination) {
    delete word;
  }
  word = nullptr;
  delete blamer_bundle;
  blamer_bundle = nullptr;
  ClearResults();
}
// Deletes all recognition results (words, seams, ratings, choices) while
// keeping the input WERD and blamer bundle structure intact.
void WERD_RES::ClearResults() {
  done = false;
  // Font pointers are not owned; just drop them.
  fontinfo = nullptr;
  fontinfo2 = nullptr;
  fontinfo_id_count = 0;
  fontinfo_id2_count = 0;
  delete bln_boxes;
  bln_boxes = nullptr;
  blob_row = nullptr;
  delete chopped_word;
  chopped_word = nullptr;
  delete rebuild_word;
  rebuild_word = nullptr;
  delete box_word;
  box_word = nullptr;
  best_state.clear();
  correct_text.clear();
  // Seams are owned raw pointers; free each before clearing the vector.
  for (auto data : seam_array) {
    delete data;
  }
  seam_array.clear();
  blob_widths.clear();
  blob_gaps.clear();
  ClearRatings();
  ClearWordChoices();
  // The bundle itself survives; only its per-recognition results reset.
  if (blamer_bundle != nullptr) {
    blamer_bundle->ClearResults();
  }
}
// Deletes all word choices. best_choice is not deleted directly because it
// points into the best_choices list, which owns it.
void WERD_RES::ClearWordChoices() {
  best_choice = nullptr;
  delete raw_choice;
  raw_choice = nullptr;
  best_choices.clear();
  delete ep_choice;
  ep_choice = nullptr;
}
// Deletes the ratings matrix, including the BLOB_CHOICE_LISTs it points to.
void WERD_RES::ClearRatings() {
  if (ratings == nullptr) {
    return; // Nothing to free.
  }
  // The matrix does not own its cell contents; free them explicitly first.
  ratings->delete_matrix_pointers();
  delete ratings;
  ratings = nullptr;
}
// Three-way comparison of two iterators over the same PAGE_RES.
// Returns -1 if *this is before other in reading order, 0 if equal,
// 1 if *this is after other. nullptr block_res means end of page.
int PAGE_RES_IT::cmp(const PAGE_RES_IT &other) const {
  ASSERT_HOST(page_res == other.page_res);
  if (other.block_res == nullptr) {
    // other points to the end of the page.
    if (block_res == nullptr) {
      return 0;
    }
    return -1;
  }
  if (block_res == nullptr) {
    return 1; // we point to the end of the page.
  }
  if (block_res == other.block_res) {
    if (other.row_res == nullptr || row_res == nullptr) {
      // this should only happen if we hit an image block.
      return 0;
    }
    if (row_res == other.row_res) {
      // we point to the same block and row.
      ASSERT_HOST(other.word_res != nullptr && word_res != nullptr);
      if (word_res == other.word_res) {
        // we point to the same word!
        return 0;
      }
      // Walk the row's word list: whichever iterator's word appears first
      // is the earlier one.
      WERD_RES_IT word_res_it(&row_res->word_res_list);
      for (word_res_it.mark_cycle_pt(); !word_res_it.cycled_list();
           word_res_it.forward()) {
        if (word_res_it.data() == word_res) {
          return -1;
        } else if (word_res_it.data() == other.word_res) {
          return 1;
        }
      }
      ASSERT_HOST("Error: Incomparable PAGE_RES_ITs" == nullptr);
    }
    // we both point to the same block, but different rows.
    ROW_RES_IT row_res_it(&block_res->row_res_list);
    for (row_res_it.mark_cycle_pt(); !row_res_it.cycled_list();
         row_res_it.forward()) {
      if (row_res_it.data() == row_res) {
        return -1;
      } else if (row_res_it.data() == other.row_res) {
        return 1;
      }
    }
    ASSERT_HOST("Error: Incomparable PAGE_RES_ITs" == nullptr);
  }
  // We point to different blocks.
  BLOCK_RES_IT block_res_it(&page_res->block_res_list);
  for (block_res_it.mark_cycle_pt(); !block_res_it.cycled_list();
       block_res_it.forward()) {
    if (block_res_it.data() == block_res) {
      return -1;
    } else if (block_res_it.data() == other.block_res) {
      return 1;
    }
  }
  // Shouldn't happen...
  ASSERT_HOST("Error: Incomparable PAGE_RES_ITs" == nullptr);
  return 0;
}
// Inserts the new_word as a combination owned by a corresponding WERD_RES
// before the current position. The simple fields of the WERD_RES are copied
// from clone_res and the resulting WERD_RES is returned for further setup
// with best_choice etc.
WERD_RES *PAGE_RES_IT::InsertSimpleCloneWord(const WERD_RES &clone_res,
                                             WERD *new_word) {
  // Make a WERD_RES for the new_word.
  auto *new_res = new WERD_RES(new_word);
  new_res->CopySimpleFields(clone_res);
  // Combination => new_res owns new_word (see Clear()).
  new_res->combination = true;
  // Insert into the appropriate place in the ROW_RES.
  WERD_RES_IT wr_it(&row()->word_res_list);
  for (wr_it.mark_cycle_pt(); !wr_it.cycled_list(); wr_it.forward()) {
    WERD_RES *word = wr_it.data();
    if (word == word_res) {
      break;
    }
  }
  // The current word must exist in its own row's list.
  ASSERT_HOST(!wr_it.cycled_list());
  wr_it.add_before_then_move(new_res);
  if (wr_it.at_first()) {
    // This is the new first word, so reset the member iterator so it
    // detects the cycled_list state correctly.
    ResetWordIterator();
  }
  return new_res;
}
// Helper computes the boundaries between blobs in the word. The blob bounds
// are likely very poor, if they come from LSTM, where it only outputs the
// character at one pixel within it, so we find the midpoints between them.
// One entry is appended to blob_ends per element of word.best_state; the
// final end is forced to clip_box.right().
static void ComputeBlobEnds(const WERD_RES &word, const TBOX &clip_box,
                            C_BLOB_LIST *next_word_blobs,
                            std::vector<int> *blob_ends) {
  C_BLOB_IT blob_it(word.word->cblob_list());
  for (int length : word.best_state) {
    // Get the bounding box of the fake blobs that make up this character.
    TBOX blob_box = blob_it.data()->bounding_box();
    blob_it.forward();
    for (int b = 1; b < length; ++b) {
      blob_box += blob_it.data()->bounding_box();
      blob_it.forward();
    }
    // This blob_box is crap, so for now we are only looking for the
    // boundaries between them.
    int blob_end = INT32_MAX;
    if (!blob_it.at_first() || next_word_blobs != nullptr) {
      if (blob_it.at_first()) {
        // Last blob of this word: measure against the next word's first blob.
        blob_it.set_to_list(next_word_blobs);
      }
      blob_end = (blob_box.right() + blob_it.data()->bounding_box().left()) / 2;
    }
    blob_end = ClipToRange<int>(blob_end, clip_box.left(), clip_box.right());
    blob_ends->push_back(blob_end);
  }
  // Bug fix: guard the back() access. If word.best_state is empty, no ends
  // were appended and calling back() on the empty vector is undefined
  // behavior.
  if (!blob_ends->empty()) {
    blob_ends->back() = clip_box.right();
  }
}
// Helper computes the bounds of a word by restricting it to existing words
// that significantly overlap.
static TBOX ComputeWordBounds(const tesseract::PointerVector<WERD_RES> &words,
                              int w_index, TBOX prev_box, WERD_RES_IT w_it) {
  constexpr int kSignificantOverlapFraction = 4;
  TBOX clipped_box;
  TBOX current_box = words[w_index]->word->bounding_box();
  TBOX next_box;
  if (static_cast<size_t>(w_index + 1) < words.size() &&
      words[w_index + 1] != nullptr && words[w_index + 1]->word != nullptr) {
    next_box = words[w_index + 1]->word->bounding_box();
  }
  // Scan the part_of_combo words following the current position.
  // NOTE(review): the loop condition dereferences w_it.data() before the
  // nullptr check inside the body; the inner check looks unreachable for a
  // null data() — presumably ELIST data() is never null here. Confirm.
  for (w_it.forward(); !w_it.at_first() && w_it.data()->part_of_combo;
       w_it.forward()) {
    if (w_it.data() == nullptr || w_it.data()->word == nullptr) {
      continue;
    }
    TBOX w_box = w_it.data()->word->bounding_box();
    // An overlap is "significant" if it exceeds both a height-derived and a
    // width-derived threshold.
    int height_limit = std::min<int>(w_box.height(), w_box.width() / 2);
    int width_limit = w_box.width() / kSignificantOverlapFraction;
    int min_significant_overlap = std::max(height_limit, width_limit);
    int overlap = w_box.intersection(current_box).width();
    int prev_overlap = w_box.intersection(prev_box).width();
    int next_overlap = w_box.intersection(next_box).width();
    if (overlap > min_significant_overlap) {
      if (prev_overlap > min_significant_overlap) {
        // We have no choice but to use the LSTM word edge.
        clipped_box.set_left(current_box.left());
      } else if (next_overlap > min_significant_overlap) {
        // We have no choice but to use the LSTM word edge.
        clipped_box.set_right(current_box.right());
      } else {
        clipped_box += w_box;
      }
    }
  }
  // Fall back to the current word's own extents where nothing overlapped.
  if (clipped_box.height() <= 0) {
    clipped_box.set_top(current_box.top());
    clipped_box.set_bottom(current_box.bottom());
  }
  if (clipped_box.width() <= 0) {
    clipped_box = current_box;
  }
  return clipped_box;
}
// Helper moves the blob from src to dest. If it isn't contained by clip_box,
// the blob is replaced by a fake that is contained.
static TBOX MoveAndClipBlob(C_BLOB_IT *src_it, C_BLOB_IT *dest_it,
                            const TBOX &clip_box) {
  C_BLOB *blob = src_it->extract();
  TBOX box = blob->bounding_box();
  if (!clip_box.contains(box)) {
    // Clip each edge into clip_box, keeping at least one pixel of extent in
    // each dimension, then swap the real blob for a fake with that box.
    box = TBOX(
        ClipToRange<int>(box.left(), clip_box.left(), clip_box.right() - 1),
        ClipToRange<int>(box.bottom(), clip_box.bottom(), clip_box.top() - 1),
        ClipToRange<int>(box.right(), clip_box.left() + 1, clip_box.right()),
        ClipToRange<int>(box.top(), clip_box.bottom() + 1, clip_box.top()));
    delete blob;
    blob = C_BLOB::FakeBlob(box);
  }
  dest_it->add_after_then_move(blob);
  return box;
}
// Replaces the current WERD/WERD_RES with the given words. The given words
// contain fake blobs that indicate the position of the characters. These are
// replaced with real blobs from the current word as much as possible.
void PAGE_RES_IT::ReplaceCurrentWord(
    tesseract::PointerVector<WERD_RES> *words) {
  if (words->empty()) {
    DeleteCurrentWord();
    return;
  }
  WERD_RES *input_word = word();
  // Set the BOL/EOL flags on the words from the input word.
  if (input_word->word->flag(W_BOL)) {
    (*words)[0]->word->set_flag(W_BOL, true);
  } else {
    // Not begin-of-line: inherit the inter-word spacing instead.
    (*words)[0]->word->set_blanks(input_word->word->space());
  }
  words->back()->word->set_flag(W_EOL, input_word->word->flag(W_EOL));
  // Move the blobs from the input word to the new set of words.
  // If the input word_res is a combination, then the replacements will also be
  // combinations, and will own their own words. If the input word_res is not a
  // combination, then the final replacements will not be either, (although it
  // is allowed for the input words to be combinations) and their words
  // will get put on the row list. This maintains the ownership rules.
  WERD_IT w_it(row()->row->word_list());
  if (!input_word->combination) {
    for (w_it.mark_cycle_pt(); !w_it.cycled_list(); w_it.forward()) {
      WERD *word = w_it.data();
      if (word == input_word->word) {
        break;
      }
    }
    // w_it is now set to the input_word's word.
    ASSERT_HOST(!w_it.cycled_list());
  }
  // Insert into the appropriate place in the ROW_RES.
  WERD_RES_IT wr_it(&row()->word_res_list);
  for (wr_it.mark_cycle_pt(); !wr_it.cycled_list(); wr_it.forward()) {
    WERD_RES *word = wr_it.data();
    if (word == input_word) {
      break;
    }
  }
  ASSERT_HOST(!wr_it.cycled_list());
  // Since we only have an estimate of the bounds between blobs, use the blob
  // x-middle as the determiner of where to put the blobs
  C_BLOB_IT src_b_it(input_word->word->cblob_list());
  src_b_it.sort(&C_BLOB::SortByXMiddle);
  C_BLOB_IT rej_b_it(input_word->word->rej_cblob_list());
  rej_b_it.sort(&C_BLOB::SortByXMiddle);
  TBOX clip_box;
  for (size_t w = 0; w < words->size(); ++w) {
    WERD_RES *word_w = (*words)[w];
    clip_box = ComputeWordBounds(*words, w, clip_box, wr_it_of_current_word);
    // Compute blob boundaries.
    std::vector<int> blob_ends;
    C_BLOB_LIST *next_word_blobs =
        w + 1 < words->size() ? (*words)[w + 1]->word->cblob_list() : nullptr;
    ComputeBlobEnds(*word_w, clip_box, next_word_blobs, &blob_ends);
    // Remove the fake blobs on the current word, but keep safe for back-up if
    // no blob can be found.
    C_BLOB_LIST fake_blobs;
    C_BLOB_IT fake_b_it(&fake_blobs);
    fake_b_it.add_list_after(word_w->word->cblob_list());
    fake_b_it.move_to_first();
    word_w->word->cblob_list()->clear();
    C_BLOB_IT dest_it(word_w->word->cblob_list());
    // Build the box word as we move the blobs.
    auto *box_word = new tesseract::BoxWord;
    for (size_t i = 0; i < blob_ends.size(); ++i, fake_b_it.forward()) {
      int end_x = blob_ends[i];
      TBOX blob_box;
      // Add the blobs up to end_x.
      while (!src_b_it.empty() &&
             src_b_it.data()->bounding_box().x_middle() < end_x) {
        blob_box += MoveAndClipBlob(&src_b_it, &dest_it, clip_box);
        src_b_it.forward();
      }
      while (!rej_b_it.empty() &&
             rej_b_it.data()->bounding_box().x_middle() < end_x) {
        blob_box += MoveAndClipBlob(&rej_b_it, &dest_it, clip_box);
        rej_b_it.forward();
      }
      if (blob_box.null_box()) {
        // Use the original box as a back-up.
        blob_box = MoveAndClipBlob(&fake_b_it, &dest_it, clip_box);
      }
      box_word->InsertBox(i, blob_box);
    }
    delete word_w->box_word;
    word_w->box_word = box_word;
    if (!input_word->combination) {
      // Insert word_w->word into the ROW. It doesn't own its word, so the
      // ROW needs to own it.
      w_it.add_before_stay_put(word_w->word);
      word_w->combination = false;
    }
    (*words)[w] = nullptr; // We are taking ownership.
    wr_it.add_before_stay_put(word_w);
  }
  // We have taken ownership of the words.
  words->clear();
  // Delete the current word, which has been replaced. We could just call
  // DeleteCurrentWord, but that would iterate both lists again, and we know
  // we are already in the right place.
  if (!input_word->combination) {
    delete w_it.extract();
  }
  delete wr_it.extract();
  ResetWordIterator();
}
// Deletes the current WERD_RES and its underlying WERD.
void PAGE_RES_IT::DeleteCurrentWord() {
  // Check that this word is as we expect. part_of_combos are NEVER iterated
  // by the normal iterator, so we should never be trying to delete them.
  ASSERT_HOST(!word_res->part_of_combo);
  if (!word_res->combination) {
    // Combinations own their own word, so we won't find the word on the
    // row's word_list, but it is legitimate to try to delete them.
    // Delete word from the ROW when not a combination.
    WERD_IT w_it(row()->row->word_list());
    for (w_it.mark_cycle_pt(); !w_it.cycled_list(); w_it.forward()) {
      if (w_it.data() == word_res->word) {
        break;
      }
    }
    ASSERT_HOST(!w_it.cycled_list());
    delete w_it.extract();
  }
  // Remove the WERD_RES for the new_word.
  // Remove the WORD_RES from the ROW_RES.
  WERD_RES_IT wr_it(&row()->word_res_list);
  for (wr_it.mark_cycle_pt(); !wr_it.cycled_list(); wr_it.forward()) {
    if (wr_it.data() == word_res) {
      // Null the member pointer before deletion so the iterator reset
      // doesn't touch a dangling word_res.
      word_res = nullptr;
      break;
    }
  }
  ASSERT_HOST(!wr_it.cycled_list());
  delete wr_it.extract();
  ResetWordIterator();
}
// Makes the current word a fuzzy space if not already fuzzy. Updates
// corresponding part of combo if required.
void PAGE_RES_IT::MakeCurrentWordFuzzy() {
  WERD *real_word = word_res->word;
  if (!real_word->flag(W_FUZZY_SP) && !real_word->flag(W_FUZZY_NON)) {
    real_word->set_flag(W_FUZZY_SP, true);
    if (word_res->combination) {
      // The next word should be the corresponding part of combo, but we have
      // already stepped past it, so find it by search.
      WERD_RES_IT wr_it(&row()->word_res_list);
      for (wr_it.mark_cycle_pt();
           !wr_it.cycled_list() && wr_it.data() != word_res; wr_it.forward()) {
      }
      // Step once more: the part_of_combo immediately follows the combination.
      wr_it.forward();
      ASSERT_HOST(wr_it.data()->part_of_combo);
      real_word = wr_it.data()->word;
      // The part must not already be fuzzy, or the combination would be too.
      ASSERT_HOST(!real_word->flag(W_FUZZY_SP) &&
                  !real_word->flag(W_FUZZY_NON));
      real_word->set_flag(W_FUZZY_SP, true);
    }
  }
}
/*************************************************************************
 * PAGE_RES_IT::restart_page
 *
 * Set things up at the start of the page
 *************************************************************************/
WERD_RES *PAGE_RES_IT::start_page(bool empty_ok) {
  block_res_it.set_to_list(&page_res->block_res_list);
  block_res_it.mark_cycle_pt();
  // Null the whole prev/current/next word window before priming it.
  prev_block_res = nullptr;
  prev_row_res = nullptr;
  prev_word_res = nullptr;
  block_res = nullptr;
  row_res = nullptr;
  word_res = nullptr;
  next_block_res = nullptr;
  next_row_res = nullptr;
  next_word_res = nullptr;
  // Two steps are needed: the first fills the next_* slots, the second
  // shifts them into the current slots and returns the first word.
  internal_forward(true, empty_ok);
  return internal_forward(false, empty_ok);
}
// Recovers from operations on the current word, such as in InsertCloneWord
// and DeleteCurrentWord.
// Resets the word_res_it so that it is one past the next_word_res, as
// it should be after internal_forward. If next_row_res != row_res,
// then the next_word_res is in the next row, so there is no need to do
// anything to word_res_it, but it is still a good idea to reset the pointers
// word_res and prev_word_res, which are still in the current row.
void PAGE_RES_IT::ResetWordIterator() {
  if (row_res == next_row_res) {
    // Reset the member iterator so it can move forward and detect the
    // cycled_list state correctly.
    word_res_it.move_to_first();
    for (word_res_it.mark_cycle_pt();
         !word_res_it.cycled_list() && word_res_it.data() != next_word_res;
         word_res_it.forward()) {
      // Re-derive word_res/prev_word_res while walking, skipping combo parts.
      if (!word_res_it.data()->part_of_combo) {
        if (prev_row_res == row_res) {
          prev_word_res = word_res;
        }
        word_res = word_res_it.data();
      }
    }
    ASSERT_HOST(!word_res_it.cycled_list());
    // Remember where the next word sits, then step one past it as
    // internal_forward would have left it.
    wr_it_of_next_word = word_res_it;
    word_res_it.forward();
  } else {
    // word_res_it is OK, but reset word_res and prev_word_res if needed.
    WERD_RES_IT wr_it(&row_res->word_res_list);
    for (wr_it.mark_cycle_pt(); !wr_it.cycled_list(); wr_it.forward()) {
      if (!wr_it.data()->part_of_combo) {
        if (prev_row_res == row_res) {
          prev_word_res = word_res;
        }
        word_res = wr_it.data();
      }
    }
  }
}
/*************************************************************************
 * PAGE_RES_IT::internal_forward
 *
 * Find the next word on the page. If empty_ok is true, then non-text blocks
 * and text blocks with no text are visited as if they contain a single
 * imaginary word in a single imaginary row. (word() and row() both return
 *nullptr in such a block and the return value is nullptr.) If empty_ok is
 *false, the old behaviour is maintained. Each real word is visited and empty
 *and non-text blocks and rows are skipped. new_block is used to initialize the
 *iterators for a new block. The iterator maintains pointers to block, row and
 *word for the previous, current and next words. These are correct, regardless
 *of block/row boundaries. nullptr values denote start and end of the page.
 *************************************************************************/
WERD_RES *PAGE_RES_IT::internal_forward(bool new_block, bool empty_ok) {
  bool new_row = false;
  // Shift the lookahead window: next_* becomes current, current becomes prev.
  prev_block_res = block_res;
  prev_row_res = row_res;
  prev_word_res = word_res;
  block_res = next_block_res;
  row_res = next_row_res;
  word_res = next_word_res;
  wr_it_of_current_word = wr_it_of_next_word;
  next_block_res = nullptr;
  next_row_res = nullptr;
  next_word_res = nullptr;
  // Search for the new next_* word, crossing row/block boundaries as needed.
  while (!block_res_it.cycled_list()) {
    if (new_block) {
      new_block = false;
      row_res_it.set_to_list(&block_res_it.data()->row_res_list);
      row_res_it.mark_cycle_pt();
      if (row_res_it.empty() && empty_ok) {
        // Visit the empty block as one imaginary (row-less, word-less) stop.
        next_block_res = block_res_it.data();
        break;
      }
      new_row = true;
    }
    while (!row_res_it.cycled_list()) {
      if (new_row) {
        new_row = false;
        word_res_it.set_to_list(&row_res_it.data()->word_res_list);
        word_res_it.mark_cycle_pt();
      }
      // Skip any part_of_combo words.
      while (!word_res_it.cycled_list() && word_res_it.data()->part_of_combo) {
        word_res_it.forward();
      }
      if (!word_res_it.cycled_list()) {
        next_block_res = block_res_it.data();
        next_row_res = row_res_it.data();
        next_word_res = word_res_it.data();
        // Save the iterator position of the found word, then leave the
        // iterator one past it for the next call.
        wr_it_of_next_word = word_res_it;
        word_res_it.forward();
        goto foundword;
      }
      // end of row reached
      row_res_it.forward();
      new_row = true;
    }
    // end of block reached
    block_res_it.forward();
    new_block = true;
  }
foundword:
  // Update prev_word_best_choice pointer.
  if (page_res != nullptr && page_res->prev_word_best_choice != nullptr) {
    *page_res->prev_word_best_choice = (new_block || prev_word_res == nullptr)
                                           ? nullptr
                                           : prev_word_res->best_choice;
  }
  return word_res;
}
/*************************************************************************
 * PAGE_RES_IT::restart_row()
 *
 * Move to the beginning (leftmost word) of the current row.
 *************************************************************************/
WERD_RES *PAGE_RES_IT::restart_row() {
  ROW_RES *target = this->row();
  if (target == nullptr) {
    return nullptr;
  }
  // Rewind to the start of the page, then walk forward until we re-enter
  // the row we were on.
  restart_page();
  while (this->row() != target) {
    forward();
  }
  return word();
}
/*************************************************************************
 * PAGE_RES_IT::forward_paragraph
 *
 * Move to the beginning of the next paragraph, allowing empty blocks.
 *************************************************************************/
WERD_RES *PAGE_RES_IT::forward_paragraph() {
  // Step while the lookahead word is in the same block AND the same
  // paragraph. A missing next row (or row-less next stop) ends the loop.
  while (block_res == next_block_res &&
         (next_row_res != nullptr && next_row_res->row != nullptr &&
          row_res->row->para() == next_row_res->row->para())) {
    internal_forward(false, true);
  }
  // Take the final step into the new paragraph and return its first word.
  return internal_forward(false, true);
}
/*************************************************************************
 * PAGE_RES_IT::forward_block
 *
 * Move to the beginning of the next block, allowing empty blocks.
 *************************************************************************/
WERD_RES *PAGE_RES_IT::forward_block() {
  // Step while the lookahead word is still in the current block...
  while (block_res == next_block_res) {
    internal_forward(false, true);
  }
  // ...then take the final step into the new block and return its first word.
  return internal_forward(false, true);
}
// Accumulates the current word's character and rejection counts into the
// page, block and row statistics.
void PAGE_RES_IT::rej_stat_word() {
  const int16_t chars_in_word = word_res->reject_map.length();
  const int16_t rejects_in_word = word_res->reject_map.reject_count();
  page_res->char_count += chars_in_word;
  block_res->char_count += chars_in_word;
  row_res->char_count += chars_in_word;
  page_res->rej_count += rejects_in_word;
  block_res->rej_count += rejects_in_word;
  row_res->rej_count += rejects_in_word;
  // A word whose every character is rejected counts towards the row's
  // whole-word rejection total.
  if (chars_in_word == rejects_in_word) {
    row_res->whole_word_rej_count += rejects_in_word;
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/pageres.cpp
|
C++
|
apache-2.0
| 63,469
|
/**********************************************************************
* File: pageres.h (Formerly page_res.h)
* Description: Results classes used by control.c
* Author: Phil Cheatle
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef PAGERES_H
#define PAGERES_H
#include "blamer.h" // for BlamerBundle (ptr only), IRR_NUM_REASONS
#include "clst.h" // for CLIST_ITERATOR, CLISTIZEH
#include "elst.h" // for ELIST_ITERATOR, ELIST_LINK, ELISTIZEH
#include "genericvector.h" // for PointerVector
#include "matrix.h" // for MATRIX
#include "normalis.h" // for DENORM
#include "ratngs.h" // for WERD_CHOICE, BLOB_CHOICE (ptr only)
#include "rect.h" // for TBOX
#include "rejctmap.h" // for REJMAP
#include "unicharset.h" // for UNICHARSET, UNICHARSET::Direction, UNI...
#include "werd.h" // for WERD, W_BOL, W_EOL
#include <tesseract/unichar.h> // for UNICHAR_ID, INVALID_UNICHAR_ID
#include <cstdint> // for int32_t, int16_t
#include <functional> // for std::function
#include <set> // for std::pair
#include <vector> // for std::vector
#include <sys/types.h> // for int8_t
struct Pix;
namespace tesseract {
class BLOCK;
class BLOCK_LIST;
class BLOCK_RES;
class ROW;
class ROW_RES;
class SEAM;
class WERD_RES;
struct TWERD;
class BoxWord;
class Tesseract;
struct FontInfo;
/* Forward declarations */
class BLOCK_RES;
ELISTIZEH(BLOCK_RES)
CLISTIZEH(BLOCK_RES)
class ROW_RES;
ELISTIZEH(ROW_RES)
class WERD_RES;
ELISTIZEH(WERD_RES)
/*************************************************************************
 * PAGE_RES - Page results
 *************************************************************************/
class PAGE_RES { // page result
public:
  int32_t char_count; // Total characters counted on the page.
  int32_t rej_count;  // Total rejected characters on the page.
  BLOCK_RES_LIST block_res_list;
  bool rejected; // Whether the page as a whole was rejected.
  // Updated every time PAGE_RES_IT iterating on this PAGE_RES moves to
  // the next word. This pointer is not owned by PAGE_RES class.
  WERD_CHOICE **prev_word_best_choice;
  // Sums of blame reasons computed by the blamer.
  std::vector<int> blame_reasons;
  // Debug information about all the misadaptions on this page.
  // Each BlamerBundle contains an index into this vector, so that words that
  // caused misadaption could be marked. However, since words could be
  // deleted/split/merged, the log is stored on the PAGE_RES level.
  std::vector<std::string> misadaption_log;
  // Resets counters and the blame-reason accumulator to an empty state.
  // Note: misadaption_log is intentionally left as-is here.
  inline void Init() {
    char_count = 0;
    rej_count = 0;
    rejected = false;
    prev_word_best_choice = nullptr;
    blame_reasons.clear();
    blame_reasons.resize(IRR_NUM_REASONS);
  }
  PAGE_RES() {
    Init();
  } // empty constructor
  PAGE_RES(bool merge_similar_words,
           BLOCK_LIST *block_list, // real blocks
           WERD_CHOICE **prev_word_best_choice_ptr);
  ~PAGE_RES() = default;
};
/*************************************************************************
* BLOCK_RES - Block results
*************************************************************************/
class BLOCK_RES : public ELIST_LINK {
public:
  BLOCK *block;       // real block; not owned (default dtor does not delete it)
  int32_t char_count; // chars in block
  int32_t rej_count;  // rejected chars
  int16_t font_class; // font class id (NOTE(review): semantics assigned by the
                      // font classifier — confirm at the point of use)
  int16_t row_count;  // number of rows in the block
  float x_height;     // x-height estimate for the block
  bool font_assigned; // block already
  // processed
  ROW_RES_LIST row_res_list; // per-row results (ELIST owns its elements)
  BLOCK_RES() = default;
  BLOCK_RES(bool merge_similar_words, BLOCK *the_block); // real block
  ~BLOCK_RES() = default;
};
/*************************************************************************
* ROW_RES - Row results
*************************************************************************/
class ROW_RES : public ELIST_LINK {
public:
  ROW *row;                     // real row; not owned (default dtor does not delete it)
  int32_t char_count;           // chars in this row
  int32_t rej_count;            // rejected chars
  int32_t whole_word_rej_count; // rejs in total rej wds
  WERD_RES_LIST word_res_list;  // per-word results (ELIST owns its elements)
  ROW_RES() = default;
  ROW_RES(bool merge_similar_words, ROW *the_row); // real row
  ~ROW_RES() = default;
};
/*************************************************************************
* WERD_RES - Word results
*************************************************************************/
// Modes for suppressing ("crunching") a suspect word in the output; stored in
// WERD_RES::unlv_crunch_mode. Exact semantics are defined where the mode is
// consumed; the ordering CR_NONE..CR_DELETE appears to increase in severity.
enum CRUNCH_MODE { CR_NONE, CR_KEEP_SPACE, CR_LOOSE_SPACE, CR_DELETE };
// WERD_RES is a collection of publicly accessible members that gathers
// information about a word result.
class TESS_API WERD_RES : public ELIST_LINK {
public:
  // Which word is which?
  // There are 3 coordinate spaces in use here: a possibly rotated pixel space,
  // the original image coordinate space, and the BLN space in which the
  // baseline of a word is at kBlnBaselineOffset, the xheight is kBlnXHeight,
  // and the x-middle of the word is at 0.
  // In the rotated pixel space, coordinates correspond to the input image,
  // but may be rotated about the origin by a multiple of 90 degrees,
  // and may therefore be negative.
  // In any case a rotation by denorm.block()->re_rotation() will take them
  // back to the original image.
  // The other differences between words all represent different stages of
  // processing during recognition.
  // ---------------------------INPUT-------------------------------------
  // The word is the input C_BLOBs in the rotated pixel space.
  // word is NOT owned by the WERD_RES unless combination is true.
  // All the other word pointers ARE owned by the WERD_RES.
  WERD *word = nullptr; // Input C_BLOB word.
  // -------------SETUP BY SetupFor*Recognition---READONLY-INPUT------------
  // The bln_boxes contains the bounding boxes (only) of the input word, in the
  // BLN space. The lengths of word and bln_boxes
  // match as they are both before any chopping.
  // TODO(rays) determine if docqual does anything useful and delete bln_boxes
  // if it doesn't.
  tesseract::BoxWord *bln_boxes = nullptr; // BLN input bounding boxes.
  // The ROW that this word sits in. NOT owned by the WERD_RES.
  ROW *blob_row = nullptr;
  // The denorm provides the transformation to get back to the rotated image
  // coords from the chopped_word/rebuild_word BLN coords, but each blob also
  // has its own denorm.
  DENORM denorm; // For use on chopped_word.
  // Unicharset used by the classifier output in best_choice and raw_choice.
  const UNICHARSET *uch_set = nullptr; // For converting back to utf8.
  // ----Initialized by SetupFor*Recognition---BUT OUTPUT FROM RECOGNITION----
  // ----Setup to a (different!) state expected by the various classifiers----
  // TODO(rays) Tidy and make more consistent.
  // The chopped_word is also in BLN space, and represents the fully chopped
  // character fragments that make up the word.
  // The length of chopped_word matches length of seam_array + 1 (if set).
  TWERD *chopped_word = nullptr; // BLN chopped fragments output.
  // Vector of SEAM* holding chopping points matching chopped_word.
  std::vector<SEAM *> seam_array;
  // Widths of blobs in chopped_word.
  std::vector<int> blob_widths;
  // Gaps between blobs in chopped_word. blob_gaps[i] is the gap between
  // blob i and blob i+1.
  std::vector<int> blob_gaps;
  // Stores the lstm choices of every timestep
  std::vector<std::vector<std::pair<const char *, float>>> timesteps;
  // Stores the lstm choices of every timestep segmented by character
  std::vector<std::vector<std::vector<std::pair<const char *, float>>>>
      segmented_timesteps;
  // Symbolchoices acquired during CTC
  std::vector<std::vector<std::pair<const char *, float>>> CTC_symbol_choices;
  // Stores if the timestep vector starts with a space
  bool leading_space = false;
  // Stores value when the word ends
  int end = 0;
  // Ratings matrix contains classifier choices for each classified combination
  // of blobs. The dimension is the same as the number of blobs in chopped_word
  // and the leading diagonal corresponds to classifier results of the blobs
  // in chopped_word. The state_ members of best_choice, raw_choice and
  // best_choices all correspond to this ratings matrix and allow extraction
  // of the blob choices for any given WERD_CHOICE.
  MATRIX *ratings = nullptr; // Owned pointer.
  // Pointer to the first WERD_CHOICE in best_choices. This is the result that
  // will be output from Tesseract. Note that this is now a borrowed pointer
  // and should NOT be deleted.
  WERD_CHOICE *best_choice = nullptr; // Borrowed pointer.
  // The best raw_choice found during segmentation search. Differs from the
  // best_choice by being the best result according to just the character
  // classifier, not taking any language model information into account.
  // Unlike best_choice, the pointer IS owned by this WERD_RES.
  WERD_CHOICE *raw_choice = nullptr; // Owned pointer.
  // Alternative results found during chopping/segmentation search stages.
  // Note that being an ELIST, best_choices owns the WERD_CHOICEs.
  WERD_CHOICE_LIST best_choices;
  // Truth bounding boxes, text and incorrect choice reason.
  BlamerBundle *blamer_bundle = nullptr;
  // --------------OUTPUT FROM RECOGNITION-------------------------------
  // --------------Not all fields are necessarily set.-------------------
  // ---best_choice, raw_choice *must* end up set, with a box_word-------
  // ---In complete output, the number of blobs in rebuild_word matches---
  // ---the number of boxes in box_word, the number of unichar_ids in---
  // ---best_choice, the number of ints in best_state, and the number---
  // ---of strings in correct_text--------------------------------------
  // ---SetupFake Sets everything to appropriate values if the word is---
  // ---known to be bad before recognition.------------------------------
  // The rebuild_word is also in BLN space, but represents the final best
  // segmentation of the word. Its length is therefore the same as box_word.
  TWERD *rebuild_word = nullptr; // BLN best segmented word.
  // The box_word is in the original image coordinate space. It is the
  // bounding boxes of the rebuild_word, after denormalization.
  // The length of box_word matches rebuild_word, best_state (if set) and
  // correct_text (if set), as well as best_choice and represents the
  // number of classified units in the output.
  tesseract::BoxWord *box_word = nullptr; // Denormalized output boxes.
  // The Tesseract that was used to recognize this word. Just a borrowed
  // pointer. Note: Tesseract's class definition is in a higher-level library.
  // We avoid introducing a cyclic dependency by not using the Tesseract
  // within WERD_RES. We are just storing it to provide access to it
  // for the top-level multi-language controller, and maybe for output of
  // the recognized language.
  // tesseract points to data owned elsewhere.
  tesseract::Tesseract *tesseract = nullptr;
  // The best_state stores the relationship between chopped_word and
  // rebuild_word. Each blob[i] in rebuild_word is composed of best_state[i]
  // adjacent blobs in chopped_word. The seams in seam_array are hidden
  // within a rebuild_word blob and revealed between them.
  std::vector<int> best_state; // Number of blobs in each best blob.
  // The correct_text is used during training and adaption to carry the
  // text to the training system without the need for a unicharset. There
  // is one entry in the vector for each blob in rebuild_word and box_word.
  std::vector<std::string> correct_text;
  // Less-well documented members.
  // TODO(rays) Add more documentation here.
  WERD_CHOICE *ep_choice = nullptr; // ep text TODO(rays) delete this.
  REJMAP reject_map;                // best_choice rejects
  bool tess_failed = false;
  /*
    If tess_failed is true, one of the following tests failed when Tess
    returned:
    - The outword blob list was not the same length as the best_choice string;
    - The best_choice string contained ALL blanks;
    - The best_choice string was zero length
  */
  bool tess_accepted = false;    // Tess thinks its ok?
  bool tess_would_adapt = false; // Tess would adapt?
  bool done = false;             // ready for output?
  bool small_caps = false;       // word appears to be small caps
  bool odd_size = false;         // word is bigger than line or leader dots.
  // The fontinfos are pointers to data owned by the classifier.
  const FontInfo *fontinfo = nullptr;
  const FontInfo *fontinfo2 = nullptr;
  int8_t fontinfo_id_count = 0;  // number of votes
  int8_t fontinfo_id2_count = 0; // number of votes
  bool guessed_x_ht = true;
  bool guessed_caps_ht = true;
  CRUNCH_MODE unlv_crunch_mode = CR_NONE;
  float x_height = 0.0f;          // post match estimate
  float caps_height = 0.0f;       // post match estimate
  float baseline_shift = 0.0f;    // post match estimate.
  // Certainty score for the spaces either side of this word (LSTM mode).
  // MIN this value with the actual word certainty.
  float space_certainty = 0.0f;
  /*
    To deal with fuzzy spaces we need to be able to combine "words" to form
    combinations when we suspect that the gap is a non-space. The (new) text
    ord code generates separate words for EVERY fuzzy gap - flags in the word
    indicate whether the gap is below the threshold (fuzzy kern) and is thus
    NOT a real word break by default, or above the threshold (fuzzy space) and
    this is a real word break by default.
    The WERD_RES list contains all these words PLUS "combination" words built
    out of (copies of) the words split by fuzzy kerns. The separate parts have
    their "part_of_combo" flag set true and should be IGNORED on a default
    reading of the list.
    Combination words are FOLLOWED by the sequence of part_of_combo words
    which they combine.
  */
  bool combination = false;   // of two fuzzy gap wds
  bool part_of_combo = false; // part of a combo
  bool reject_spaces = false; // Reject spacing?
  WERD_RES() = default;
  WERD_RES(WERD *the_word) {
    word = the_word;
  }
  // Deep copies everything except the ratings MATRIX.
  // To get that use deep_copy below.
  WERD_RES(const WERD_RES &source) : ELIST_LINK(source) {
    // combination is used in function Clear which is called from operator=.
    combination = false;
    *this = source; // see operator=
  }
  ~WERD_RES();
  // Returns the UTF-8 string for the given blob index in the best_choice word,
  // given that we know whether we are in a right-to-left reading context.
  // This matters for mirrorable characters such as parentheses. We recognize
  // characters purely based on their shape on the page, and by default produce
  // the corresponding unicode for a left-to-right context.
  // Returns nullptr if best_choice/uch_set is unset, the index is out of
  // range, or the id is not in the unicharset.
  const char *BestUTF8(unsigned blob_index, bool in_rtl_context) const {
    if (best_choice == nullptr || blob_index >= best_choice->length()) {
      return nullptr;
    }
    UNICHAR_ID id = best_choice->unichar_id(blob_index);
    // Guard uch_set as well: it is nullptr until SetupFor*Recognition runs.
    if (uch_set == nullptr || static_cast<unsigned>(id) >= uch_set->size()) {
      return nullptr;
    }
    UNICHAR_ID mirrored = uch_set->get_mirror(id);
    if (in_rtl_context && mirrored > 0) {
      id = mirrored;
    }
    return uch_set->id_to_unichar_ext(id);
  }
  // Returns the UTF-8 string for the given blob index in the raw_choice word.
  // Returns nullptr if raw_choice/uch_set is unset or the index is out of
  // range (consistent with BestUTF8 above).
  const char *RawUTF8(unsigned blob_index) const {
    if (raw_choice == nullptr || blob_index >= raw_choice->length()) {
      return nullptr;
    }
    UNICHAR_ID id = raw_choice->unichar_id(blob_index);
    if (uch_set == nullptr || static_cast<unsigned>(id) >= uch_set->size()) {
      return nullptr;
    }
    return uch_set->id_to_unichar(id);
  }
  // Returns the bidi direction class of the given symbol of best_choice,
  // or U_OTHER_NEUTRAL if best_choice/uch_set is unset or the index is
  // out of range.
  UNICHARSET::Direction SymbolDirection(unsigned blob_index) const {
    if (uch_set == nullptr || best_choice == nullptr ||
        blob_index >= best_choice->length()) {
      return UNICHARSET::U_OTHER_NEUTRAL;
    }
    return uch_set->get_direction(best_choice->unichar_id(blob_index));
  }
  // Returns true if any unichar of best_choice has a right-to-left direction.
  bool AnyRtlCharsInWord() const {
    if (uch_set == nullptr || best_choice == nullptr ||
        best_choice->length() < 1) {
      return false;
    }
    for (unsigned id = 0; id < best_choice->length(); id++) {
      unsigned unichar_id = best_choice->unichar_id(id);
      if (unichar_id >= uch_set->size()) {
        continue; // Ignore illegal chars.
      }
      UNICHARSET::Direction dir = uch_set->get_direction(unichar_id);
      if (dir == UNICHARSET::U_RIGHT_TO_LEFT ||
          dir == UNICHARSET::U_RIGHT_TO_LEFT_ARABIC) {
        return true;
      }
    }
    return false;
  }
  // Returns true if any unichar of best_choice has a left-to-right direction.
  bool AnyLtrCharsInWord() const {
    if (uch_set == nullptr || best_choice == nullptr ||
        best_choice->length() < 1) {
      return false;
    }
    for (unsigned id = 0; id < best_choice->length(); id++) {
      unsigned unichar_id = best_choice->unichar_id(id);
      if (unichar_id >= uch_set->size()) {
        continue; // Ignore illegal chars.
      }
      UNICHARSET::Direction dir = uch_set->get_direction(unichar_id);
      if (dir == UNICHARSET::U_LEFT_TO_RIGHT ||
          dir == UNICHARSET::U_ARABIC_NUMBER) {
        return true;
      }
    }
    return false;
  }
  // Return whether the blobs in this WERD_RES 0, 1,... come from an engine
  // that gave us the unichars in reading order (as opposed to strict left
  // to right). Returns false if best_choice is unset.
  bool UnicharsInReadingOrder() const {
    return best_choice != nullptr && best_choice->unichars_in_script_order();
  }
  void Clear();
  void ClearResults();
  void ClearWordChoices();
  void ClearRatings();
  // Deep copies everything except the ratings MATRIX.
  // To get that use deep_copy below.
  WERD_RES &operator=(const WERD_RES &source); // from this
  void CopySimpleFields(const WERD_RES &source);
  // Initializes a blank (default constructed) WERD_RES from one that has
  // already been recognized.
  // Use SetupFor*Recognition afterwards to complete the setup and make
  // it ready for a retry recognition.
  void InitForRetryRecognition(const WERD_RES &source);
  // Sets up the members used in recognition: bln_boxes, chopped_word,
  // seam_array, denorm. Returns false if
  // the word is empty and sets up fake results. If use_body_size is
  // true and row->body_size is set, then body_size will be used for
  // blob normalization instead of xheight + ascrise. This flag is for
  // those languages that are using CJK pitch model and thus it has to
  // be true if and only if tesseract->textord_use_cjk_fp_model is
  // true.
  // If allow_detailed_fx is true, the feature extractor will receive fine
  // precision outline information, allowing smoother features and better
  // features on low resolution images.
  // The norm_mode sets the default mode for normalization in absence
  // of any of the above flags. It should really be a tesseract::OcrEngineMode
  // but is declared as int for ease of use with tessedit_ocr_engine_mode.
  // Returns false if the word is empty and sets up fake results.
  bool SetupForRecognition(const UNICHARSET &unicharset_in,
                           tesseract::Tesseract *tesseract, Image pix,
                           int norm_mode, const TBOX *norm_box,
                           bool numeric_mode, bool use_body_size,
                           bool allow_detailed_fx, ROW *row,
                           const BLOCK *block);
  // Set up the seam array, bln_boxes, best_choice, and raw_choice to empty
  // accumulators from a made chopped word. We presume the fields are already
  // empty.
  void SetupBasicsFromChoppedWord(const UNICHARSET &unicharset_in);
  // Sets up the members used in recognition for an empty recognition result:
  // bln_boxes, chopped_word, seam_array, denorm, best_choice, raw_choice.
  void SetupFake(const UNICHARSET &uch);
  // Set the word as having the script of the input unicharset.
  void SetupWordScript(const UNICHARSET &unicharset_in);
  // Sets up the blamer_bundle if it is not null, using the initialized denorm.
  void SetupBlamerBundle();
  // Computes the blob_widths and blob_gaps from the chopped_word.
  void SetupBlobWidthsAndGaps();
  // Updates internal data to account for a new SEAM (chop) at the given
  // blob_number. Fixes the ratings matrix and states in the choices, as well
  // as the blob widths and gaps.
  void InsertSeam(int blob_number, SEAM *seam);
  // Returns true if all the word choices except the first have adjust_factors
  // worse than the given threshold.
  bool AlternativeChoiceAdjustmentsWorseThan(float threshold) const;
  // Returns true if the current word is ambiguous (by number of answers or
  // by dangerous ambigs.)
  bool IsAmbiguous();
  // Returns true if the ratings matrix size matches the sum of each of the
  // segmentation states.
  bool StatesAllValid();
  // Prints a list of words found if debug is true or the word result matches
  // the word_to_debug.
  void DebugWordChoices(bool debug, const char *word_to_debug);
  // Prints the top choice along with the accepted/done flags.
  void DebugTopChoice(const char *msg) const;
  // Removes from best_choices all choices which are not within a reasonable
  // range of the best choice.
  void FilterWordChoices(int debug_level);
  // Computes a set of distance thresholds used to control adaption.
  // Compares the best choice for the current word to the best raw choice
  // to determine which characters were classified incorrectly by the
  // classifier. Then places a separate threshold into thresholds for each
  // character in the word. If the classifier was correct, max_rating is placed
  // into thresholds. If the classifier was incorrect, the mean match rating
  // (error percentage) of the classifier's incorrect choice minus some margin
  // is placed into thresholds. This can then be used by the caller to try to
  // create a new template for the desired class that will classify the
  // character with a rating better than the threshold value. The match rating
  // placed into thresholds is never allowed to be below min_rating in order to
  // prevent trying to make overly tight templates.
  // min_rating limits how tight to make a template.
  // max_rating limits how loose to make a template.
  // rating_margin denotes the amount of margin to put in template.
  void ComputeAdaptionThresholds(float certainty_scale, float min_rating,
                                 float max_rating, float rating_margin,
                                 float *thresholds);
  // Saves a copy of the word_choice if it has the best unadjusted rating.
  // Returns true if the word_choice was the new best.
  bool LogNewRawChoice(WERD_CHOICE *word_choice);
  // Consumes word_choice by adding it to best_choices, (taking ownership) if
  // the certainty for word_choice is some distance of the best choice in
  // best_choices, or by deleting the word_choice and returning false.
  // The best_choices list is kept in sorted order by rating. Duplicates are
  // removed, and the list is kept no longer than max_num_choices in length.
  // Returns true if the word_choice is still a valid pointer.
  bool LogNewCookedChoice(int max_num_choices, bool debug,
                          WERD_CHOICE *word_choice);
  // Prints a brief list of all the best choices.
  void PrintBestChoices() const;
  // Returns the sum of the widths of the blob between start_blob and last_blob
  // inclusive.
  int GetBlobsWidth(int start_blob, int last_blob) const;
  // Returns the width of a gap between the specified blob and the next one.
  int GetBlobsGap(unsigned blob_index) const;
  // Returns the BLOB_CHOICE corresponding to the given index in the
  // best choice word taken from the appropriate cell in the ratings MATRIX.
  // Borrowed pointer, so do not delete. May return nullptr if there is no
  // BLOB_CHOICE matching the unichar_id at the given index.
  BLOB_CHOICE *GetBlobChoice(unsigned index) const;
  // Returns the BLOB_CHOICE_LIST corresponding to the given index in the
  // best choice word taken from the appropriate cell in the ratings MATRIX.
  // Borrowed pointer, so do not delete.
  BLOB_CHOICE_LIST *GetBlobChoices(int index) const;
  // Moves the results fields from word to this. This takes ownership of all
  // the data, so src can be destructed.
  // word1.ConsumeWordResult(word);
  // delete word;
  // is simpler and faster than:
  // word1 = *word;
  // delete word;
  // as it doesn't need to copy and reallocate anything.
  void ConsumeWordResults(WERD_RES *word);
  // Replace the best choice and rebuild box word.
  // choice must be from the current best_choices list.
  void ReplaceBestChoice(WERD_CHOICE *choice);
  // Builds the rebuild_word and sets the best_state from the chopped_word and
  // the best_choice->state.
  void RebuildBestState();
  // Copies the chopped_word to the rebuild_word, faking a best_state as well.
  // Also sets up the output box_word.
  void CloneChoppedToRebuild();
  // Sets/replaces the box_word with one made from the rebuild_word.
  void SetupBoxWord();
  // Sets up the script positions in the best_choice using the best_choice
  // to get the unichars, and the unicharset to get the target positions.
  void SetScriptPositions();
  // Sets all the blobs in all the words (best choice and alternates) to be
  // the given position. (When a sub/superscript is recognized as a separate
  // word, it falls victim to the rule that a whole word cannot be sub or
  // superscript, so this function overrides that problem.)
  void SetAllScriptPositions(tesseract::ScriptPos position);
  // Classifies the word with some already-calculated BLOB_CHOICEs.
  // The choices are an array of blob_count pointers to BLOB_CHOICE,
  // providing a single classifier result for each blob.
  // The BLOB_CHOICEs are consumed and the word takes ownership.
  // The number of blobs in the box_word must match blob_count.
  void FakeClassifyWord(unsigned blob_count, BLOB_CHOICE **choices);
  // Creates a WERD_CHOICE for the word using the top choices from the leading
  // diagonal of the ratings matrix.
  void FakeWordFromRatings(PermuterType permuter);
  // Copies the best_choice strings to the correct_text for adaption/training.
  void BestChoiceToCorrectText();
  // Merges 2 adjacent blobs in the result if the permanent callback
  // class_cb returns other than INVALID_UNICHAR_ID, AND the permanent
  // callback box_cb is nullptr or returns true, setting the merged blob
  // result to the class returned from class_cb.
  // Returns true if anything was merged.
  bool ConditionalBlobMerge(
      const std::function<UNICHAR_ID(UNICHAR_ID, UNICHAR_ID)> &class_cb,
      const std::function<bool(const TBOX &, const TBOX &)> &box_cb);
  // Merges 2 adjacent blobs in the result (index and index+1) and corrects
  // all the data to account for the change.
  void MergeAdjacentBlobs(unsigned index);
  // Callback helper for fix_quotes returns a double quote if both
  // arguments are quote, otherwise INVALID_UNICHAR_ID.
  UNICHAR_ID BothQuotes(UNICHAR_ID id1, UNICHAR_ID id2);
  void fix_quotes();
  // Callback helper for fix_hyphens returns UNICHAR_ID of - if both
  // arguments are hyphen, otherwise INVALID_UNICHAR_ID.
  UNICHAR_ID BothHyphens(UNICHAR_ID id1, UNICHAR_ID id2);
  // Callback helper for fix_hyphens returns true if box1 and box2 overlap
  // (assuming both on the same textline, are in order and a chopped em dash.)
  bool HyphenBoxesOverlap(const TBOX &box1, const TBOX &box2);
  void fix_hyphens();
  // Callback helper for merge_tess_fails returns a space if both
  // arguments are space, otherwise INVALID_UNICHAR_ID.
  UNICHAR_ID BothSpaces(UNICHAR_ID id1, UNICHAR_ID id2);
  void merge_tess_fails();
  // Returns a really deep copy of *src, including the ratings MATRIX.
  static WERD_RES *deep_copy(const WERD_RES *src) {
    auto *result = new WERD_RES(*src);
    // That didn't copy the ratings, but we want a copy if there is one to
    // begin with.
    if (src->ratings != nullptr) {
      result->ratings = src->ratings->DeepCopy();
    }
    return result;
  }
  // Copy blobs from word_res onto this word (eliminating spaces between).
  // Since this may be called bidirectionally OR both the BOL and EOL flags.
  void copy_on(WERD_RES *word_res) { // from this word
    word->set_flag(W_BOL, word->flag(W_BOL) || word_res->word->flag(W_BOL));
    word->set_flag(W_EOL, word->flag(W_EOL) || word_res->word->flag(W_EOL));
    word->copy_on(word_res->word);
  }
  // Returns true if the collection of count pieces, starting at start, are all
  // natural connected components, ie there are no real chops involved.
  bool PiecesAllNatural(int start, int count) const;
};
/*************************************************************************
* PAGE_RES_IT - Page results iterator
*************************************************************************/
class TESS_API PAGE_RES_IT {
public:
  PAGE_RES *page_res; // page being iterated
  PAGE_RES_IT() = default;
  PAGE_RES_IT(PAGE_RES *the_page_res) { // page result
    page_res = the_page_res;
    restart_page(); // ready to scan
  }
  // Do two PAGE_RES_ITs point at the same word?
  // This is much cheaper than cmp().
  bool operator==(const PAGE_RES_IT &other) const {
    return word_res == other.word_res && row_res == other.row_res &&
           block_res == other.block_res;
  }
  bool operator!=(const PAGE_RES_IT &other) const {
    return !(*this == other);
  }
  // Given another PAGE_RES_IT to the same page,
  // this before other: -1
  // this equal to other: 0
  // this later than other: 1
  int cmp(const PAGE_RES_IT &other) const;
  WERD_RES *restart_page() {
    return start_page(false); // Skip empty blocks.
  }
  WERD_RES *restart_page_with_empties() {
    return start_page(true); // Allow empty blocks.
  }
  // Moves to the first word of the page; empty_ok selects whether empty
  // blocks appear (as nullptr words) or are skipped.
  WERD_RES *start_page(bool empty_ok);
  WERD_RES *restart_row();
  // ============ Methods that mutate the underlying structures ===========
  // Note that these methods will potentially invalidate other PAGE_RES_ITs
  // and are intended to be used only while a single PAGE_RES_IT is active.
  // This problem needs to be taken into account if these mutation operators
  // are ever provided to PageIterator or its subclasses.
  // Inserts the new_word and a corresponding WERD_RES before the current
  // position. The simple fields of the WERD_RES are copied from clone_res and
  // the resulting WERD_RES is returned for further setup with best_choice etc.
  WERD_RES *InsertSimpleCloneWord(const WERD_RES &clone_res, WERD *new_word);
  // Replaces the current WERD/WERD_RES with the given words. The given words
  // contain fake blobs that indicate the position of the characters. These are
  // replaced with real blobs from the current word as much as possible.
  void ReplaceCurrentWord(PointerVector<WERD_RES> *words);
  // Deletes the current WERD_RES and its underlying WERD.
  void DeleteCurrentWord();
  // Makes the current word a fuzzy space if not already fuzzy. Updates
  // corresponding part of combo if required.
  void MakeCurrentWordFuzzy();
  WERD_RES *forward() { // Get next word.
    return internal_forward(false, false);
  }
  // Move forward, but allow empty blocks to show as single nullptr words.
  WERD_RES *forward_with_empties() {
    return internal_forward(false, true);
  }
  WERD_RES *forward_paragraph(); // get first word in next non-empty paragraph
  WERD_RES *forward_block();     // get first word in next non-empty block
  // ---- Accessors for the 3-word window (previous/current/next) ----
  WERD_RES *prev_word() const { // previous word
    return prev_word_res;
  }
  ROW_RES *prev_row() const { // row of prev word
    return prev_row_res;
  }
  BLOCK_RES *prev_block() const { // block of prev word
    return prev_block_res;
  }
  WERD_RES *word() const { // current word
    return word_res;
  }
  ROW_RES *row() const { // row of current word
    return row_res;
  }
  BLOCK_RES *block() const { // block of cur. word
    return block_res;
  }
  WERD_RES *next_word() const { // next word
    return next_word_res;
  }
  ROW_RES *next_row() const { // row of next word
    return next_row_res;
  }
  BLOCK_RES *next_block() const { // block of next word
    return next_block_res;
  }
  void rej_stat_word(); // for page/block/row
  void ResetWordIterator();
private:
  // Advances the window one word; empty_ok allows empty blocks to surface
  // as nullptr words. (new_block semantics defined in pageres.cpp.)
  WERD_RES *internal_forward(bool new_block, bool empty_ok);
  // The iterator keeps a sliding window of previous/current/next word
  // (plus their rows and blocks), exposed via the accessors above.
  WERD_RES *prev_word_res;    // previous word
  ROW_RES *prev_row_res;      // row of prev word
  BLOCK_RES *prev_block_res;  // block of prev word
  WERD_RES *word_res;         // current word
  ROW_RES *row_res;           // row of current word
  BLOCK_RES *block_res;       // block of cur. word
  WERD_RES *next_word_res;    // next word
  ROW_RES *next_row_res;      // row of next word
  BLOCK_RES *next_block_res;  // block of next word
  BLOCK_RES_IT block_res_it;  // iterators
  ROW_RES_IT row_res_it;
  WERD_RES_IT word_res_it;
  // Iterators used to get the state of word_res_it for the current word.
  // Since word_res_it is 2 words further on, this is otherwise hard to do.
  WERD_RES_IT wr_it_of_current_word;
  WERD_RES_IT wr_it_of_next_word;
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/pageres.h
|
C++
|
apache-2.0
| 33,470
|
///////////////////////////////////////////////////////////////////////
// File: params_training_featdef.cpp
// Description: Utility functions for params training features.
// Author: David Eger
// Created: Mon Jun 11 11:26:42 PDT 2012
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <cstring>
#include "params_training_featdef.h"
namespace tesseract {
// Looks up a params-training feature by its string name.
// Returns the feature's index, or -1 for a nullptr or unknown name.
int ParamsTrainingFeatureByName(const char *name) {
  if (name != nullptr) {
    constexpr int kNumNames =
        sizeof(kParamsTrainingFeatureTypeName) / sizeof(kParamsTrainingFeatureTypeName[0]);
    for (int index = 0; index < kNumNames; ++index) {
      const char *candidate = kParamsTrainingFeatureTypeName[index];
      // Skip any holes in the name table.
      if (candidate != nullptr && strcmp(name, candidate) == 0) {
        return index;
      }
    }
  }
  return -1;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/params_training_featdef.cpp
|
C++
|
apache-2.0
| 1,447
|
///////////////////////////////////////////////////////////////////////
// File: params_training_featdef.h
// Description: Feature definitions for params training.
// Author: Rika Antonova
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_WORDREC_PARAMS_TRAINING_FEATDEF_H_
#define TESSERACT_WORDREC_PARAMS_TRAINING_FEATDEF_H_
#include <cstring> // for memset
#include <string>
#include <vector>
namespace tesseract {
// Maximum number of unichars in the small and medium sized words
static const int kMaxSmallWordUnichars = 3;
static const int kMaxMediumWordUnichars = 6;
// Raw features extracted from a single OCR hypothesis.
// The features are normalized (by outline length or number of unichars as
// appropriate) real-valued quantities with unbounded range and
// unknown distribution.
// Normalization / binarization of these features is done at a later stage.
// Note: when adding new fields to this enum make sure to modify
// kParamsTrainingFeatureTypeName
// The integer value of each enumerator is an index into
// kParamsTrainingFeatureTypeName below - keep the two in sync.
enum kParamsTrainingFeatureType {
  // Digits
  PTRAIN_DIGITS_SHORT, // 0
  PTRAIN_DIGITS_MED,   // 1
  PTRAIN_DIGITS_LONG,  // 2
  // Number or pattern (NUMBER_PERM, USER_PATTERN_PERM)
  PTRAIN_NUM_SHORT, // 3
  PTRAIN_NUM_MED,   // 4
  PTRAIN_NUM_LONG,  // 5
  // Document word (DOC_DAWG_PERM)
  PTRAIN_DOC_SHORT, // 6
  PTRAIN_DOC_MED,   // 7
  PTRAIN_DOC_LONG,  // 8
  // Word (SYSTEM_DAWG_PERM, USER_DAWG_PERM, COMPOUND_PERM)
  PTRAIN_DICT_SHORT, // 9
  PTRAIN_DICT_MED,   // 10
  PTRAIN_DICT_LONG,  // 11
  // Frequent word (FREQ_DAWG_PERM)
  PTRAIN_FREQ_SHORT,          // 12
  PTRAIN_FREQ_MED,            // 13
  PTRAIN_FREQ_LONG,           // 14
  PTRAIN_SHAPE_COST_PER_CHAR, // 15
  PTRAIN_NGRAM_COST_PER_CHAR, // 16
  PTRAIN_NUM_BAD_PUNC,        // 17
  PTRAIN_NUM_BAD_CASE,        // 18
  PTRAIN_XHEIGHT_CONSISTENCY, // 19
  PTRAIN_NUM_BAD_CHAR_TYPE,   // 20
  PTRAIN_NUM_BAD_SPACING,     // 21
  PTRAIN_NUM_BAD_FONT,        // 22
  PTRAIN_RATING_PER_CHAR,     // 23
  // Number of feature types; also sizes ParamsTrainingHypothesis::features.
  PTRAIN_NUM_FEATURE_TYPES
};
static const char *const kParamsTrainingFeatureTypeName[] = {
"PTRAIN_DIGITS_SHORT", // 0
"PTRAIN_DIGITS_MED", // 1
"PTRAIN_DIGITS_LONG", // 2
"PTRAIN_NUM_SHORT", // 3
"PTRAIN_NUM_MED", // 4
"PTRAIN_NUM_LONG", // 5
"PTRAIN_DOC_SHORT", // 6
"PTRAIN_DOC_MED", // 7
"PTRAIN_DOC_LONG", // 8
"PTRAIN_DICT_SHORT", // 9
"PTRAIN_DICT_MED", // 10
"PTRAIN_DICT_LONG", // 11
"PTRAIN_FREQ_SHORT", // 12
"PTRAIN_FREQ_MED", // 13
"PTRAIN_FREQ_LONG", // 14
"PTRAIN_SHAPE_COST_PER_CHAR", // 15
"PTRAIN_NGRAM_COST_PER_CHAR", // 16
"PTRAIN_NUM_BAD_PUNC", // 17
"PTRAIN_NUM_BAD_CASE", // 18
"PTRAIN_XHEIGHT_CONSISTENCY", // 19
"PTRAIN_NUM_BAD_CHAR_TYPE", // 20
"PTRAIN_NUM_BAD_SPACING", // 21
"PTRAIN_NUM_BAD_FONT", // 22
"PTRAIN_RATING_PER_CHAR", // 23
};
// Returns the index of the given feature (by name),
// or -1 meaning the feature is unknown.
int ParamsTrainingFeatureByName(const char *name);
// Entry with features extracted from a single OCR hypothesis for a word.
// This is a plain value type: the compiler-generated copy/move/assign
// perform exactly what the previous hand-written members did (memberwise
// copy of str, features and cost), so the Rule of Zero applies. Dropping
// the user-declared copy operations additionally re-enables implicit move
// construction/assignment (the hand-written copy ctor suppressed them).
struct ParamsTrainingHypothesis {
  ParamsTrainingHypothesis() = default;

  // String corresponding to the word hypothesis (for debugging).
  std::string str;
  // Normalized feature values; zero-initialized (equivalent to the old
  // memset, since all-zero bytes are 0.0f for IEEE floats).
  float features[PTRAIN_NUM_FEATURE_TYPES] = {};
  // Path cost computed by segsearch.
  float cost = 0.0f;
};
// A list of hypotheses explored during one run of segmentation search.
using ParamsTrainingHypothesisList = std::vector<ParamsTrainingHypothesis>;
// A bundle that accumulates all of the hypothesis lists explored during all
// of the runs of segmentation search on a word (e.g. a list of hypotheses
// explored on PASS1, PASS2, fix xheight pass, etc).
class ParamsTrainingBundle {
public:
  ParamsTrainingBundle() = default;
  // Starts a new (empty) hypothesis list.
  // Should be called at the beginning of a new run of the segmentation search.
  void StartHypothesisList() {
    hyp_list_vec.emplace_back();
  }
  // Adds a copy of other to the current hypothesis list (creating a list
  // first if none exists) and returns a reference to the newly added entry.
  ParamsTrainingHypothesis &AddHypothesis(const ParamsTrainingHypothesis &other) {
    if (hyp_list_vec.empty()) {
      StartHypothesisList();
    }
    // push_back copies directly from other; the previous explicit
    // ParamsTrainingHypothesis(other) temporary was redundant.
    hyp_list_vec.back().push_back(other);
    return hyp_list_vec.back().back();
  }

  std::vector<ParamsTrainingHypothesisList> hyp_list_vec;
};
} // namespace tesseract
#endif // TESSERACT_WORDREC_PARAMS_TRAINING_FEATDEF_H_
|
2301_81045437/tesseract
|
src/ccstruct/params_training_featdef.h
|
C++
|
apache-2.0
| 5,674
|
/**********************************************************************
* File: pdblock.cpp
* Description: PDBLK member functions and iterator functions.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "pdblock.h"
#include <allheaders.h>
#include <cinttypes> // for PRId32
#include <cstdlib>
#include <memory> // std::unique_ptr
namespace tesseract {
#define BLOCK_LABEL_HEIGHT 150 // char height of block id
constexpr ERRCODE BADBLOCKLINE("Y coordinate in block out of bounds");
constexpr ERRCODE LOSTBLOCKLINE("Can't find rectangle for line");
/**********************************************************************
 * PDBLK::PDBLK
 *
 * Constructor for a simple rectangular block: the outline defaults to
 * the rectangle itself (two vertices per side).
 **********************************************************************/
PDBLK::PDBLK(              // rectangular block
    TDimension xmin,       // bottom left
    TDimension ymin,
    TDimension xmax,       // top right
    TDimension ymax)
    // Initialize scalar members in the init list instead of the body.
    : hand_poly(nullptr), box(ICOORD(xmin, ymin), ICOORD(xmax, ymax)), index_(0) {
  // Attach iterators to the side lists. (The iterator constructor already
  // binds to the list, so the old redundant set_to_list calls are gone.)
  ICOORDELT_IT left_it = &leftside;
  ICOORDELT_IT right_it = &rightside;
  // Build the default rectangular outline.
  left_it.add_to_end(new ICOORDELT(xmin, ymin));
  left_it.add_to_end(new ICOORDELT(xmin, ymax));
  right_it.add_to_end(new ICOORDELT(xmax, ymin));
  right_it.add_to_end(new ICOORDELT(xmax, ymax));
}
/**********************************************************************
 * PDBLK::set_sides
 *
 * Replace the existing boundary outlines with the supplied vertex lists.
 **********************************************************************/
void PDBLK::set_sides(    // set vertex lists
    ICOORDELT_LIST *left, // left vertices
    ICOORDELT_LIST *right // right vertices
) {
  // Handle each side in turn: empty the old list, then splice in the
  // caller's vertices.
  ICOORDELT_IT left_it = &leftside;
  leftside.clear();
  left_it.move_to_first();
  left_it.add_list_before(left);

  ICOORDELT_IT right_it = &rightside;
  rightside.clear();
  right_it.move_to_first();
  right_it.add_list_before(right);
}
/**********************************************************************
 * PDBLK::contains
 *
 * Return true if the given point lies within the block, i.e. within any
 * of the rectangles that the block decomposes into.
 **********************************************************************/
bool PDBLK::contains( // test containment
    ICOORD pt         // point to test
) {
  BLOCK_RECT_IT rect_it = this; // iterate component rectangles
  ICOORD bottom_left;           // corners of current rectangle
  ICOORD top_right;
  for (rect_it.start_block(); !rect_it.cycled_rects(); rect_it.forward()) {
    rect_it.bounding_box(bottom_left, top_right);
    // Inclusive containment test against this rectangle.
    const bool in_x = pt.x() >= bottom_left.x() && pt.x() <= top_right.x();
    const bool in_y = pt.y() >= bottom_left.y() && pt.y() <= top_right.y();
    if (in_x && in_y) {
      return true; // inside this rectangle
    }
  }
  return false; // outside every rectangle
}
/**********************************************************************
* PDBLK::move
*
* Reposition block
**********************************************************************/
void PDBLK::move( // reposition block
const ICOORD vec // by vector
) {
ICOORDELT_IT it(&leftside);
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
*(it.data()) += vec;
}
it.set_to_list(&rightside);
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
*(it.data()) += vec;
}
box.move(vec);
}
// Returns a binary Pix mask with a 1 pixel for every pixel within the
// block. Rotates the coordinate system by rerotation prior to rendering.
// If mask_box is not nullptr, it is filled with the position box of the
// returned mask image.
Image PDBLK::render_mask(const FCOORD &rerotation, TBOX *mask_box) {
  TBOX rotated_box(box);
  rotated_box.rotate(rerotation);
  Image pix = pixCreate(rotated_box.width(), rotated_box.height(), 1);
  if (hand_poly != nullptr) {
    // We are going to rotate, so get a deep copy of the points and
    // make a new POLY_BLOCK with it.
    ICOORDELT_LIST polygon;
    polygon.deep_copy(hand_poly->points(), ICOORDELT::deep_copy);
    POLY_BLOCK image_block(&polygon, hand_poly->isA());
    image_block.rotate(rerotation);
    // Block outline is a polygon, so use a PB_LINE_IT to get the
    // rasterized interior. (Runs of interior pixels on a line.)
    // Stack allocation replaces the previous new/delete pair: same
    // lifetime, but exception-safe and no risk of a leak (RAII).
    PB_LINE_IT lines(&image_block);
    for (int y = box.bottom(); y < box.top(); ++y) {
      const std::unique_ptr</*non-const*/ ICOORDELT_LIST> segments(lines.get_line(y));
      if (!segments->empty()) {
        ICOORDELT_IT s_it(segments.get());
        // Each element of segments is a start x and x size of the
        // run of interior pixels.
        for (s_it.mark_cycle_pt(); !s_it.cycled_list(); s_it.forward()) {
          int start = s_it.data()->x();
          int xext = s_it.data()->y();
          // Set the run of pixels to 1. Note the y flip: Pix rows run
          // top-down while block coordinates run bottom-up.
          pixRasterop(pix, start - rotated_box.left(),
                      rotated_box.height() - 1 - (y - rotated_box.bottom()), xext, 1, PIX_SET,
                      nullptr, 0, 0);
        }
      }
    }
  } else {
    // Just fill the whole block as there is only a bounding box.
    pixRasterop(pix, 0, 0, rotated_box.width(), rotated_box.height(), PIX_SET, nullptr, 0, 0);
  }
  if (mask_box != nullptr) {
    *mask_box = rotated_box;
  }
  return pix;
}
/**********************************************************************
* PDBLK::plot
*
* Plot the outline of a block in the given colour.
**********************************************************************/
#ifndef GRAPHICS_DISABLED
void PDBLK::plot(            // draw outline
    ScrollView *window,      // window to draw in
    int32_t serial,          // serial number, rendered as a text label
    ScrollView::Color colour // colour to draw in
) {
  ICOORD startpt;              // start of outline
  ICOORD endpt;                // end of outline
  ICOORD prevpt;               // previous point
  ICOORDELT_IT it = &leftside; // iterator

  // set the colour and label font
  window->Pen(colour);
  window->TextAttributes("Times", BLOCK_LABEL_HEIGHT, false, false, false);

  if (hand_poly != nullptr) {
    // Polygonal outline available: delegate drawing to the POLY_BLOCK.
    hand_poly->plot(window, serial);
  } else if (!leftside.empty()) {
    startpt = *(it.data()); // bottom left corner
    // tprintf("Block %d bottom left is (%d,%d)\n",
    // serial,startpt.x(),startpt.y());
    char temp_buff[34];
# if !defined(_WIN32) || defined(__MINGW32__)
    snprintf(temp_buff, sizeof(temp_buff), "%" PRId32, serial);
# else
    // MSVC path: _ultoa formats the serial number in base 10.
    _ultoa(serial, temp_buff, 10);
# endif
    // Label the block with its serial number at the bottom-left corner.
    window->Text(startpt.x(), startpt.y(), temp_buff);

    window->SetCursor(startpt.x(), startpt.y());
    // Trace the left boundary bottom-to-top. Each step draws an
    // axis-aligned "round corner": vertical segment, then horizontal.
    do {
      prevpt = *(it.data()); // previous point
      it.forward();          // move to next point
      // draw round corner
      window->DrawTo(prevpt.x(), it.data()->y());
      window->DrawTo(it.data()->x(), it.data()->y());
    } while (!it.at_last()); // until end of list
    endpt = *(it.data());    // end point

    // Now trace the right boundary the same way, starting again from
    // the bottom-left corner.
    window->SetCursor(startpt.x(), startpt.y());
    it.set_to_list(&rightside);
    prevpt = startpt;
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      // draw round corner
      window->DrawTo(prevpt.x(), it.data()->y());
      window->DrawTo(it.data()->x(), it.data()->y());
      prevpt = *(it.data()); // previous point
    }

    // close boundary back to where the left side ended
    window->DrawTo(endpt.x(), endpt.y());
  }
}
#endif
/**********************************************************************
 * PDBLK::operator=
 *
 * Assignment - duplicate the side vertex lists and bounding box.
 * NOTE(review): hand_poly and index_ are deliberately(?) not copied —
 * confirm against callers before changing that.
 **********************************************************************/
PDBLK &PDBLK::operator=( // assignment
    const PDBLK &source  // from this
) {
  // this->ELIST_LINK::operator=(source);
  // Self-assignment guard: without it, clearing our own side lists below
  // would destroy the very vertices we are about to deep-copy, leaving
  // the block with empty outlines.
  if (this != &source) {
    if (!leftside.empty()) {
      leftside.clear();
    }
    if (!rightside.empty()) {
      rightside.clear();
    }
    leftside.deep_copy(&source.leftside, &ICOORDELT::deep_copy);
    rightside.deep_copy(&source.rightside, &ICOORDELT::deep_copy);
    box = source.box;
  }
  return *this;
}
/**********************************************************************
* BLOCK_RECT_IT::BLOCK_RECT_IT
*
* Construct a block rectangle iterator.
**********************************************************************/
BLOCK_RECT_IT::BLOCK_RECT_IT(
    // iterate rectangles
    PDBLK *blkptr // from block
)
    : left_it(&blkptr->leftside), right_it(&blkptr->rightside) {
  block = blkptr; // remember block
  // Only prime the iterators when the block already has an outline;
  // otherwise iteration must wait for a later set_to_block() call.
  if (!blkptr->leftside.empty()) {
    start_block(); // ready for iteration
  }
}
/**********************************************************************
 * BLOCK_RECT_IT::set_to_block
 *
 * Re-target the iterator at a (new) block and prime it if possible.
 **********************************************************************/
void BLOCK_RECT_IT::set_to_block( // start (new) block
    PDBLK *blkptr) {              // block to start
  // Point both side iterators at the new block's vertex lists.
  left_it.set_to_list(&blkptr->leftside);
  right_it.set_to_list(&blkptr->rightside);
  block = blkptr; // remember block for later
  // An empty outline cannot be iterated yet.
  if (!blkptr->leftside.empty()) {
    start_block(); // ready for iteration
  }
}
/**********************************************************************
* BLOCK_RECT_IT::start_block
*
* Restart a block.
**********************************************************************/
void BLOCK_RECT_IT::start_block() { // start (new) block
left_it.move_to_first();
right_it.move_to_first();
left_it.mark_cycle_pt();
right_it.mark_cycle_pt();
ymin = left_it.data()->y(); // bottom of first box
ymax = left_it.data_relative(1)->y();
if (right_it.data_relative(1)->y() < ymax) {
// smallest step
ymax = right_it.data_relative(1)->y();
}
}
/**********************************************************************
* BLOCK_RECT_IT::forward
*
* Move to the next rectangle in the block.
**********************************************************************/
void BLOCK_RECT_IT::forward() { // next rectangle
  if (!left_it.empty()) { // non-empty list
    // Advance whichever side (possibly both) ends exactly at the top of
    // the current rectangle.
    if (left_it.data_relative(1)->y() == ymax) {
      left_it.forward(); // move to meet top
    }
    if (right_it.data_relative(1)->y() == ymax) {
      right_it.forward();
    }
    // last is special: reaching the end of either side wraps the
    // iteration back to the bottom of the block.
    if (left_it.at_last() || right_it.at_last()) {
      left_it.move_to_first(); // restart
      right_it.move_to_first();
      // now at bottom
      ymin = left_it.data()->y();
    } else {
      ymin = ymax; // new bottom
    }
    // The new top is the nearer of the two sides' next vertices, so the
    // rectangle never spans a vertex on either side.
    ymax = left_it.data_relative(1)->y();
    if (right_it.data_relative(1)->y() < ymax) {
      // least step forward
      ymax = right_it.data_relative(1)->y();
    }
  }
}
/**********************************************************************
* BLOCK_LINE_IT::get_line
*
* Get the start and width of a line in the block.
**********************************************************************/
TDimension BLOCK_LINE_IT::get_line( // get a line
    TDimension y,                   // line to get
    TDimension &xext                // output extent
) {
  ICOORD bleft;  // bounding box
  ICOORD tright; // of block & rect

  // get block box and validate y against it; out-of-range is fatal.
  block->bounding_box(bleft, tright);
  if (y < bleft.y() || y >= tright.y()) {
    // block->print(stderr,false);
    BADBLOCKLINE.error("BLOCK_LINE_IT::get_line", ABORT, "Y=%d", y);
  }

  // Fast path: first test the rectangle the iterator is already on —
  // presumably an optimization for successive calls with nearby y values.
  // get rectangle box
  rect_it.bounding_box(bleft, tright);
  // inside rectangle
  if (y >= bleft.y() && y < tright.y()) {
    // width of line
    xext = tright.x() - bleft.x();
    return bleft.x(); // start of line
  }

  // Slow path: restart and scan all rectangles for the one containing y.
  for (rect_it.start_block(); !rect_it.cycled_rects(); rect_it.forward()) {
    // get rectangle box
    rect_it.bounding_box(bleft, tright);
    // inside rectangle
    if (y >= bleft.y() && y < tright.y()) {
      // width of line
      xext = tright.x() - bleft.x();
      return bleft.x(); // start of line
    }
  }
  // y was inside the block's box but in none of its rectangles.
  LOSTBLOCKLINE.error("BLOCK_LINE_IT::get_line", ABORT, "Y=%d", y);
  return 0; // dummy to stop warning
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/pdblock.cpp
|
C++
|
apache-2.0
| 12,713
|
/**********************************************************************
* File: pdblock.h (Formerly pdblk.h)
* Description: Page block class definition.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef PDBLOCK_H
#define PDBLOCK_H
#include "clst.h"
#include "polyblk.h"
struct Pix;
namespace tesseract {
class PDBLK; // forward decl
CLISTIZEH(PDBLK)
/// page block: a region of the page described either by a rectangle (the
/// default outline built by the simple constructor) or by an explicit
/// polygon (hand_poly).
/// NOTE(review): PDBLK owns hand_poly (deleted in the destructor) but has
/// no user-declared copy constructor, so a compiler-generated copy would
/// share the pointer and double-delete; operator= (in the .cpp) does not
/// copy hand_poly at all. Confirm callers only copy via operator=.
class PDBLK {
  friend class BLOCK_RECT_IT; ///< block iterator
  friend class BLOCK;         ///< Page Block

public:
  /// empty constructor: no outline, no polygon, index 0
  PDBLK() {
    hand_poly = nullptr;
    index_ = 0;
  }
  /// simple constructor: rectangular block
  PDBLK(TDimension xmin, ///< bottom left
        TDimension ymin,
        TDimension xmax, ///< top right
        TDimension ymax);

  /// set vertex lists
  ///@param left list of left vertices
  ///@param right list of right vertices
  void set_sides(ICOORDELT_LIST *left, ICOORDELT_LIST *right);

  /// destructor: releases the owned polygon, if any
  ~PDBLK() {
    delete hand_poly;
  }

  /// optional polygon outline; may be nullptr
  POLY_BLOCK *poly_block() const {
    return hand_poly;
  }
  /// set the poly block; takes ownership of blk.
  /// NOTE(review): a previously set polygon is not freed here — confirm
  /// callers never replace a non-null hand_poly.
  void set_poly_block(POLY_BLOCK *blk) {
    hand_poly = blk;
  }
  /// get box corners
  void bounding_box(ICOORD &bottom_left,      // bottom left
                    ICOORD &top_right) const { // topright
    bottom_left = box.botleft();
    top_right = box.topright();
  }
  /// get real box
  const TBOX &bounding_box() const {
    return box;
  }

  /// serial number of this block
  int index() const {
    return index_;
  }
  void set_index(int value) {
    index_ = value;
  }

  /// is pt inside block (any of its component rectangles)
  bool contains(ICOORD pt);

  /// reposition block
  void move(const ICOORD vec); // by vector

  // Returns a binary Pix mask with a 1 pixel for every pixel within the
  // block. Rotates the coordinate system by rerotation prior to rendering.
  // If not nullptr, mask_box is filled with the position box of the returned
  // mask image.
  Image render_mask(const FCOORD &rerotation, TBOX *mask_box);

#ifndef GRAPHICS_DISABLED
  /// draw outline and serial-number label
  ///@param window window to draw in
  ///@param serial serial number
  ///@param colour colour to draw in
  void plot(ScrollView *window, int32_t serial, ScrollView::Color colour);
#endif // !GRAPHICS_DISABLED

  /// assignment: copies outlines and box only (not hand_poly or index_)
  ///@param source from this
  PDBLK &operator=(const PDBLK &source);

protected:
  POLY_BLOCK *hand_poly;   ///< owned polygon outline, or nullptr
  ICOORDELT_LIST leftside; ///< left side vertices
  ICOORDELT_LIST rightside; ///< right side vertices
  TBOX box;                ///< bounding box
  int index_;              ///< Serial number of this block.
};
class BLOCK_RECT_IT // rectangle iterator
// Iterates the horizontal bands (rectangles) that a PDBLK's left/right
// vertex lists decompose the block into: each rectangle spans from one
// side vertex y to the next on whichever side steps first.
{
public:
  /// constructor
  ///@param blkptr block to iterate
  BLOCK_RECT_IT(PDBLK *blkptr);

  /// start (new) block
  void set_to_block(PDBLK *blkptr); // block to iterate

  /// start iteration at the bottom rectangle
  void start_block();

  /// move to the next rectangle
  void forward();

  /// test end: both side iterators have completed a full cycle
  bool cycled_rects() const {
    return left_it.cycled_list() && right_it.cycled_list();
  }

  /// current rectangle
  ///@param bleft bottom left
  ///@param tright top right
  void bounding_box(ICOORD &bleft, ICOORD &tright) {
    // bottom left: current left vertex x, bottom of current band
    bleft = ICOORD(left_it.data()->x(), ymin);
    // top right: current right vertex x, top of current band
    tright = ICOORD(right_it.data()->x(), ymax);
  }

private:
  TDimension ymin = 0;    ///< bottom of rectangle
  TDimension ymax = 0;    ///< top of rectangle
  PDBLK *block = nullptr; ///< block to iterate
  ICOORDELT_IT left_it;   ///< boundary iterators
  ICOORDELT_IT right_it;
};
/// line iterator: yields the start x and width of each raster line within
/// a block, via an internal BLOCK_RECT_IT over the block's rectangles.
class BLOCK_LINE_IT {
public:
  /// constructor
  ///@param blkptr from block
  BLOCK_LINE_IT(PDBLK *blkptr) : rect_it(blkptr) {
    block = blkptr; // remember block
  }

  /// start (new) block
  ///@param blkptr block to start
  void set_to_block(PDBLK *blkptr) {
    block = blkptr; // remember block
    // set iterator
    rect_it.set_to_block(blkptr);
  }

  /// get a line: returns the start x; aborts if y is outside the block
  ///@param y line to get
  ///@param xext output extent (width of the line)
  TDimension get_line(TDimension y, TDimension &xext);

private:
  PDBLK *block;           ///< block to iterate
  BLOCK_RECT_IT rect_it;  ///< rectangle iterator
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/pdblock.h
|
C++
|
apache-2.0
| 4,808
|
/**********************************************************************
* File: points.cpp (Formerly coords.c)
* Description: Member functions for coordinate classes.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#define _USE_MATH_DEFINES // for M_PI
#include "points.h"
#include "helpers.h"
#include "serialis.h"
#include <algorithm>
#include <cmath> // for M_PI
#include <cstdlib>
namespace tesseract {
// Scale this vector to unit length. Returns false (leaving the vector
// untouched) when the length is too close to zero to divide safely.
bool FCOORD::normalise() {
  const float len = length();
  if (len < 0.0000000001) {
    return false; // degenerate vector: nothing sensible to do
  }
  xcoord /= len;
  ycoord /= len;
  return true;
}
// Read both coordinates from the TFile; false on the first failure.
bool ICOORD::DeSerialize(TFile *f) {
  if (!f->DeSerialize(&xcoord)) {
    return false;
  }
  return f->DeSerialize(&ycoord);
}
// Write both coordinates to the TFile; false on the first failure.
bool ICOORD::Serialize(TFile *f) const {
  if (!f->Serialize(&xcoord)) {
    return false;
  }
  return f->Serialize(&ycoord);
}
// Set from the given x,y, shrinking the vector to fit if needed.
void ICOORD::set_with_shrink(int x, int y) {
  // ICOORD stores 16-bit coordinates: divide both components by the same
  // factor so the larger one fits, preserving direction.
  const int max_extent = std::max(abs(x), abs(y));
  int factor = 1;
  if (max_extent > INT16_MAX) {
    factor = max_extent / INT16_MAX + 1;
  }
  xcoord = x / factor;
  ycoord = y / factor;
}
// The fortran/basic sgn function returns -1, 0, 1 if x < 0, x == 0, x > 0
// respectively. Branch-free form: each comparison contributes 0 or 1.
static int sign(int x) {
  return (x > 0) - (x < 0);
}
// Writes to the given file. Returns false in case of error.
bool ICOORD::Serialize(FILE *fp) const {
  if (!tesseract::Serialize(fp, &xcoord)) {
    return false;
  }
  return tesseract::Serialize(fp, &ycoord);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool ICOORD::DeSerialize(bool swap, FILE *fp) {
  // Read x then y; && short-circuits so y is not read after a failed x.
  const bool ok =
      tesseract::DeSerialize(fp, &xcoord) && tesseract::DeSerialize(fp, &ycoord);
  if (!ok) {
    return false;
  }
  if (swap) {
    // Byte-swap both coordinates in place.
    ReverseN(&xcoord, sizeof(xcoord));
    ReverseN(&ycoord, sizeof(ycoord));
  }
  return true;
}
// Setup for iterating over the pixels in a vector by the well-known
// Bresenham rendering algorithm.
// Starting with major/2 in the accumulator, on each step add major_step,
// and then add minor to the accumulator. When the accumulator >= major
// subtract major and step a minor step.
void ICOORD::setup_render(ICOORD *major_step, ICOORD *minor_step, int *major, int *minor) const {
  const int abs_x = abs(xcoord);
  const int abs_y = abs(ycoord);
  // The major axis is the one with the larger extent (x wins ties).
  const bool x_major = abs_x >= abs_y;
  major_step->xcoord = x_major ? sign(xcoord) : 0;
  major_step->ycoord = x_major ? 0 : sign(ycoord);
  minor_step->xcoord = x_major ? 0 : sign(xcoord);
  minor_step->ycoord = x_major ? sign(ycoord) : 0;
  *major = x_major ? abs_x : abs_y;
  *minor = x_major ? abs_y : abs_x;
}
// Returns the standard feature direction corresponding to this vector:
// its polar angle quantized to 256ths of a circle.
// See binary_angle_plus_pi below for a description of the direction.
uint8_t FCOORD::to_direction() const {
  return binary_angle_plus_pi(angle());
}
// Sets this with a unit vector in the given standard feature direction.
void FCOORD::from_direction(uint8_t direction) {
  // Convert the quantized direction back to radians, then take the
  // (cos, sin) point on the unit circle.
  const double radians = angle_from_direction(direction);
  xcoord = std::cos(radians);
  ycoord = std::sin(radians);
}
// Converts an angle in radians (from ICOORD::angle or FCOORD::angle) to a
// standard feature direction as an unsigned angle in 256ths of a circle
// measured anticlockwise from (-1, 0).
uint8_t FCOORD::binary_angle_plus_pi(double radians) {
  // Shift [-pi, pi) to [0, 2*pi), scale to [0, 256), round, then wrap.
  const int scaled = IntCastRounded((radians + M_PI) * 128.0 / M_PI);
  return Modulo(scaled, 256);
}
// Inverse of binary_angle_plus_pi returns an angle in radians for the
// given standard feature direction: 256 units map to a full circle, with
// direction 0 corresponding to -pi (i.e. the (-1, 0) axis).
double FCOORD::angle_from_direction(uint8_t direction) {
  return direction * M_PI / 128.0 - M_PI;
}
// Returns the point on the given line nearest to this, ie the point such
// that the vector point->this is perpendicular to the line.
// The line is defined as a line_point and a dir_vector for its direction;
// dir_vector need not be a unit vector.
FCOORD FCOORD::nearest_pt_on_line(const FCOORD &line_point, const FCOORD &dir_vector) const {
  // Vector from the line's anchor to this point.
  FCOORD offset(*this - line_point);
  // The dot product (%) is |dir_vector||offset|cos(theta); dividing by
  // dir_vector's squared length gives the fraction of dir_vector to
  // travel from line_point to reach the foot of the perpendicular.
  const double fraction = offset % dir_vector / dir_vector.sqlength();
  return line_point + (dir_vector * fraction);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/points.cpp
|
C++
|
apache-2.0
| 5,214
|
/**********************************************************************
* File: points.h (Formerly coords.h)
* Description: Coordinate class definitions.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef POINTS_H
#define POINTS_H
#include "elst.h"
#include "errcode.h" // for ASSERT_HOST
#include "tesstypes.h" // for TDimension
#include <tesseract/export.h> // for DLLSYM
#include <cmath> // for sqrt, atan2
#include <cstdio>
namespace tesseract {
class FCOORD;
/// integer coordinate: a 2-D point/vector with TDimension components.
class ICOORD {
  friend class FCOORD;

public:
  /// empty constructor: initializes to the origin
  ICOORD() {
    xcoord = ycoord = 0; // default zero
  }
  /// constructor
  ///@param xin x value
  ///@param yin y value
  ICOORD(TDimension xin, TDimension yin) {
    xcoord = xin;
    ycoord = yin;
  }
  /// destructor
  ~ICOORD() = default;

  bool DeSerialize(TFile *f);
  bool Serialize(TFile *f) const;

  /// access function
  TDimension x() const {
    return xcoord;
  }
  /// access function
  TDimension y() const {
    return ycoord;
  }

  /// rewrite function
  void set_x(TDimension xin) {
    xcoord = xin; // write new value
  }
  /// rewrite function
  void set_y(TDimension yin) { // value to set
    ycoord = yin;
  }

  /// Set from the given x,y, shrinking the vector to fit if needed.
  void set_with_shrink(int x, int y);

  /// find sq length (computed in int arithmetic, returned as float)
  float sqlength() const {
    return static_cast<float>(xcoord * xcoord + ycoord * ycoord);
  }

  /// find length
  float length() const {
    return std::sqrt(sqlength());
  }

  /// sq dist between pts
  float pt_to_pt_sqdist(const ICOORD &pt) const {
    ICOORD gap;
    gap.xcoord = xcoord - pt.xcoord;
    gap.ycoord = ycoord - pt.ycoord;
    return gap.sqlength();
  }

  /// Distance between pts
  float pt_to_pt_dist(const ICOORD &pt) const {
    return std::sqrt(pt_to_pt_sqdist(pt));
  }

  /// find angle (radians, anticlockwise from the +x axis)
  float angle() const {
    return std::atan2(static_cast<float>(ycoord), static_cast<float>(xcoord));
  }

  /// test equality
  bool operator==(const ICOORD &other) const {
    return xcoord == other.xcoord && ycoord == other.ycoord;
  }
  /// test inequality
  bool operator!=(const ICOORD &other) const {
    return xcoord != other.xcoord || ycoord != other.ycoord;
  }
  /// rotate 90 deg anti
  friend ICOORD operator!(const ICOORD &);
  /// unary minus
  friend ICOORD operator-(const ICOORD &);
  /// add
  friend ICOORD operator+(const ICOORD &, const ICOORD &);
  /// add
  friend ICOORD &operator+=(ICOORD &, const ICOORD &);
  /// subtract
  friend ICOORD operator-(const ICOORD &, const ICOORD &);
  /// subtract
  friend ICOORD &operator-=(ICOORD &, const ICOORD &);
  /// scalar product
  friend int32_t operator%(const ICOORD &, const ICOORD &);
  /// cross product
  friend int32_t operator*(const ICOORD &, const ICOORD &);
  /// multiply
  friend ICOORD operator*(const ICOORD &, TDimension);
  /// multiply
  friend ICOORD operator*(TDimension, const ICOORD &);
  /// multiply
  friend ICOORD &operator*=(ICOORD &, TDimension);
  /// divide
  friend ICOORD operator/(const ICOORD &, TDimension);
  /// divide
  friend ICOORD &operator/=(ICOORD &, TDimension);
  /// rotate
  ///@param vec by vector
  void rotate(const FCOORD &vec);

  /// Setup for iterating over the pixels in a vector by the well-known
  /// Bresenham rendering algorithm.
  /// Starting with major/2 in the accumulator, on each step move by
  /// major_step, and then add minor to the accumulator. When
  /// accumulator >= major subtract major and also move by minor_step.
  void setup_render(ICOORD *major_step, ICOORD *minor_step, int *major, int *minor) const;

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE *fp);

protected:
  TDimension xcoord; ///< x value
  TDimension ycoord; ///< y value
};
class ICOORDELT : public ELIST_LINK,
                  public ICOORD
// embedded coord list: an ICOORD that can live in an ELIST.
{
public:
  /// Default constructor: coordinates start at (0,0) via ICOORD's default.
  ICOORDELT() = default;
  /// Wrap an existing ICOORD as a list element.
  ICOORDELT(ICOORD icoord) : ICOORD(icoord) {}
  /// Construct from explicit coordinates.
  ///@param xin x value
  ///@param yin y value
  ICOORDELT(TDimension xin, TDimension yin) : ICOORD(xin, yin) {}

  /// Allocate and return a copy of src (used for deep list copies).
  static ICOORDELT *deep_copy(const ICOORDELT *src) {
    auto *copy = new ICOORDELT;
    *copy = *src;
    return copy;
  }
};
ELISTIZEH(ICOORDELT)
/// floating-point coordinate: a 2-D point/vector with float components.
class TESS_API FCOORD {
public:
  /// empty constructor: coordinates are left uninitialized
  FCOORD() = default;
  /// constructor
  ///@param xvalue x value
  ///@param yvalue y value
  FCOORD(float xvalue, float yvalue) {
    xcoord = xvalue; // set coords
    ycoord = yvalue;
  }
  FCOORD(            // make from ICOORD
      ICOORD icoord) { // coords to set
    xcoord = icoord.xcoord;
    ycoord = icoord.ycoord;
  }

  float x() const { // get coords
    return xcoord;
  }
  float y() const {
    return ycoord;
  }
  /// rewrite function
  void set_x(float xin) {
    xcoord = xin; // write new value
  }
  /// rewrite function
  void set_y(float yin) { // value to set
    ycoord = yin;
  }

  /// find sq length
  float sqlength() const {
    return xcoord * xcoord + ycoord * ycoord;
  }

  /// find length
  float length() const {
    return std::sqrt(sqlength());
  }

  /// sq dist between pts
  float pt_to_pt_sqdist(const FCOORD &pt) const {
    FCOORD gap;
    gap.xcoord = xcoord - pt.xcoord;
    gap.ycoord = ycoord - pt.ycoord;
    return gap.sqlength();
  }

  /// Distance between pts
  float pt_to_pt_dist(const FCOORD &pt) const {
    return std::sqrt(pt_to_pt_sqdist(pt));
  }

  /// find angle (radians, anticlockwise from the +x axis)
  float angle() const {
    return std::atan2(ycoord, xcoord);
  }
  // Returns the standard feature direction corresponding to this.
  // See binary_angle_plus_pi below for a description of the direction.
  uint8_t to_direction() const;
  // Sets this with a unit vector in the given standard feature direction.
  void from_direction(uint8_t direction);

  // Converts an angle in radians (from ICOORD::angle or FCOORD::angle) to a
  // standard feature direction as an unsigned angle in 256ths of a circle
  // measured anticlockwise from (-1, 0).
  static uint8_t binary_angle_plus_pi(double angle);
  // Inverse of binary_angle_plus_pi returns an angle in radians for the
  // given standard feature direction.
  static double angle_from_direction(uint8_t direction);
  // Returns the point on the given line nearest to this, ie the point such
  // that the vector point->this is perpendicular to the line.
  // The line is defined as a line_point and a dir_vector for its direction.
  // dir_vector need not be a unit vector.
  FCOORD nearest_pt_on_line(const FCOORD &line_point, const FCOORD &dir_vector) const;

  /// Convert to unit vec; returns false for (near-)zero-length vectors
  bool normalise();

  /// test equality (exact float comparison)
  bool operator==(const FCOORD &other) const {
    return xcoord == other.xcoord && ycoord == other.ycoord;
  }
  /// test inequality
  bool operator!=(const FCOORD &other) const {
    return xcoord != other.xcoord || ycoord != other.ycoord;
  }
  /// rotate 90 deg anti
  friend FCOORD operator!(const FCOORD &);
  /// unary minus
  friend FCOORD operator-(const FCOORD &);
  /// add
  friend FCOORD operator+(const FCOORD &, const FCOORD &);
  /// add
  friend FCOORD &operator+=(FCOORD &, const FCOORD &);
  /// subtract
  friend FCOORD operator-(const FCOORD &, const FCOORD &);
  /// subtract
  friend FCOORD &operator-=(FCOORD &, const FCOORD &);
  /// scalar product
  friend float operator%(const FCOORD &, const FCOORD &);
  /// cross product
  friend float operator*(const FCOORD &, const FCOORD &);
  /// multiply
  friend FCOORD operator*(const FCOORD &, float);
  /// multiply
  friend FCOORD operator*(float, const FCOORD &);
  /// multiply
  friend FCOORD &operator*=(FCOORD &, float);
  /// divide
  friend FCOORD operator/(const FCOORD &, float);
  /// rotate
  ///@param vec by vector
  void rotate(const FCOORD vec);
  // unrotate - undo a rotate(vec)
  // @param vec by vector
  void unrotate(const FCOORD &vec);
  /// divide
  friend FCOORD &operator/=(FCOORD &, float);

private:
  float xcoord; // 2 floating coords
  float ycoord;
};
/**********************************************************************
 * operator!
 *
 * Rotate an ICOORD 90 degrees anticlockwise.
 **********************************************************************/
inline ICOORD operator!( // rotate 90 deg anti
    const ICOORD &src // thing to rotate
) {
  ICOORD rotated; // output point
  // A quarter turn anticlockwise maps (x, y) -> (-y, x).
  rotated.xcoord = -src.ycoord;
  rotated.ycoord = src.xcoord;
  return rotated;
}
/**********************************************************************
 * operator-
 *
 * Unary minus of an ICOORD.
 **********************************************************************/
inline ICOORD operator-( // unary minus
    const ICOORD &src // thing to minus
) {
  ICOORD negated; // output point
  // Negate both components (180 degree rotation about the origin).
  negated.ycoord = -src.ycoord;
  negated.xcoord = -src.xcoord;
  return negated;
}
/**********************************************************************
 * operator+
 *
 * Add 2 ICOORDS.
 **********************************************************************/
inline ICOORD operator+( // sum vectors
    const ICOORD &op1, // operands
    const ICOORD &op2) {
  ICOORD total; // componentwise sum
  total.ycoord = op1.ycoord + op2.ycoord;
  total.xcoord = op1.xcoord + op2.xcoord;
  return total;
}
/**********************************************************************
 * operator+=
 *
 * Add 2 ICOORDS.
 **********************************************************************/
inline ICOORD &operator+=( // sum vectors
    ICOORD &op1, // operands
    const ICOORD &op2) {
  // Accumulate componentwise in place and return the target.
  op1.ycoord += op2.ycoord;
  op1.xcoord += op2.xcoord;
  return op1;
}
/**********************************************************************
 * operator-
 *
 * Subtract 2 ICOORDS.
 **********************************************************************/
inline ICOORD operator-( // subtract vectors
    const ICOORD &op1, // operands
    const ICOORD &op2) {
  ICOORD diff; // componentwise difference
  diff.ycoord = op1.ycoord - op2.ycoord;
  diff.xcoord = op1.xcoord - op2.xcoord;
  return diff;
}
/**********************************************************************
 * operator-=
 *
 * Subtract 2 ICOORDS.
 **********************************************************************/
inline ICOORD &operator-=( // subtract vectors
    ICOORD &op1, // operands
    const ICOORD &op2) {
  // Subtract componentwise in place and return the target.
  op1.ycoord -= op2.ycoord;
  op1.xcoord -= op2.xcoord;
  return op1;
}
/**********************************************************************
 * operator%
 *
 * Scalar product of 2 ICOORDS.
 **********************************************************************/
inline int32_t operator%( // scalar product
    const ICOORD &op1, // operands
    const ICOORD &op2) {
  // Dot product: x1*x2 + y1*y2.
  const int32_t xpart = op1.xcoord * op2.xcoord;
  const int32_t ypart = op1.ycoord * op2.ycoord;
  return xpart + ypart;
}
/**********************************************************************
 * operator*
 *
 * Cross product of 2 ICOORDS.
 **********************************************************************/
inline int32_t operator*( // cross product
    const ICOORD &op1, // operands
    const ICOORD &op2) {
  // 2D cross (z component): x1*y2 - y1*x2.
  const int32_t pos_part = op1.xcoord * op2.ycoord;
  const int32_t neg_part = op1.ycoord * op2.xcoord;
  return pos_part - neg_part;
}
/**********************************************************************
 * operator*
 *
 * Scalar multiply of an ICOORD.
 **********************************************************************/
inline ICOORD operator*( // scalar multiply
    const ICOORD &op1, // operands
    TDimension scale) {
  ICOORD scaled; // output point
  scaled.ycoord = op1.ycoord * scale;
  scaled.xcoord = op1.xcoord * scale;
  return scaled;
}
inline ICOORD operator*( // scalar multiply
    TDimension scale,
    const ICOORD &op1 // operands
) {
  // Multiplication commutes: delegate to the (vector, scalar) overload.
  return op1 * scale;
}
/**********************************************************************
 * operator*=
 *
 * Scalar multiply of an ICOORD.
 **********************************************************************/
inline ICOORD &operator*=( // scalar multiply
    ICOORD &op1, // operands
    TDimension scale) {
  // Scale both components in place and return the target.
  op1.ycoord *= scale;
  op1.xcoord *= scale;
  return op1;
}
/**********************************************************************
 * operator/
 *
 * Scalar divide of an ICOORD.
 **********************************************************************/
inline ICOORD operator/( // scalar divide
    const ICOORD &op1, // operands
    TDimension scale) {
  ICOORD result; // output
  // Integer division by zero is undefined behaviour: guard it, matching
  // the ASSERT_HOST in the FCOORD operator/ overloads below.
  ASSERT_HOST(scale != 0);
  result.xcoord = op1.xcoord / scale;
  result.ycoord = op1.ycoord / scale;
  return result;
}
/**********************************************************************
 * operator/=
 *
 * Scalar divide of an ICOORD.
 **********************************************************************/
inline ICOORD &operator/=( // scalar divide
    ICOORD &op1, // operands
    TDimension scale) {
  // Guard undefined integer division by zero (see FCOORD operator/=).
  ASSERT_HOST(scale != 0);
  op1.xcoord /= scale;
  op1.ycoord /= scale;
  return op1;
}
/**********************************************************************
 * ICOORD::rotate
 *
 * Rotate an ICOORD by the given (normalized) (cos,sin) vector.
 **********************************************************************/
inline void ICOORD::rotate( // rotate by vector
    const FCOORD &vec) {
  // Compute both rotated components before writing either, then round
  // to the nearest integer coordinate (+0.5 and floor).
  const float rotated_x = xcoord * vec.x() - ycoord * vec.y();
  const float rotated_y = ycoord * vec.x() + xcoord * vec.y();
  xcoord = static_cast<TDimension>(std::floor(rotated_x + 0.5f));
  ycoord = static_cast<TDimension>(std::floor(rotated_y + 0.5f));
}
/**********************************************************************
 * operator!
 *
 * Rotate an FCOORD 90 degrees anticlockwise.
 **********************************************************************/
inline FCOORD operator!( // rotate 90 deg anti
    const FCOORD &src // thing to rotate
) {
  FCOORD rotated; // output point
  // A quarter turn anticlockwise maps (x, y) -> (-y, x).
  rotated.xcoord = -src.ycoord;
  rotated.ycoord = src.xcoord;
  return rotated;
}
/**********************************************************************
 * operator-
 *
 * Unary minus of an FCOORD.
 **********************************************************************/
inline FCOORD operator-( // unary minus
    const FCOORD &src // thing to minus
) {
  FCOORD negated; // output point
  // Negate both components (180 degree rotation about the origin).
  negated.ycoord = -src.ycoord;
  negated.xcoord = -src.xcoord;
  return negated;
}
/**********************************************************************
 * operator+
 *
 * Add 2 FCOORDS.
 **********************************************************************/
inline FCOORD operator+( // sum vectors
    const FCOORD &op1, // operands
    const FCOORD &op2) {
  FCOORD total; // componentwise sum
  total.ycoord = op1.ycoord + op2.ycoord;
  total.xcoord = op1.xcoord + op2.xcoord;
  return total;
}
/**********************************************************************
 * operator+=
 *
 * Add 2 FCOORDS.
 **********************************************************************/
inline FCOORD &operator+=( // sum vectors
    FCOORD &op1, // operands
    const FCOORD &op2) {
  // Accumulate componentwise in place and return the target.
  op1.ycoord += op2.ycoord;
  op1.xcoord += op2.xcoord;
  return op1;
}
/**********************************************************************
 * operator-
 *
 * Subtract 2 FCOORDS.
 **********************************************************************/
inline FCOORD operator-( // subtract vectors
    const FCOORD &op1, // operands
    const FCOORD &op2) {
  FCOORD diff; // componentwise difference
  diff.ycoord = op1.ycoord - op2.ycoord;
  diff.xcoord = op1.xcoord - op2.xcoord;
  return diff;
}
/**********************************************************************
 * operator-=
 *
 * Subtract 2 FCOORDS.
 **********************************************************************/
inline FCOORD &operator-=( // subtract vectors
    FCOORD &op1, // operands
    const FCOORD &op2) {
  // Subtract componentwise in place and return the target.
  op1.ycoord -= op2.ycoord;
  op1.xcoord -= op2.xcoord;
  return op1;
}
/**********************************************************************
 * operator%
 *
 * Scalar product of 2 FCOORDS.
 **********************************************************************/
inline float operator%( // scalar product
    const FCOORD &op1, // operands
    const FCOORD &op2) {
  // Dot product: x1*x2 + y1*y2.
  const float xpart = op1.xcoord * op2.xcoord;
  const float ypart = op1.ycoord * op2.ycoord;
  return xpart + ypart;
}
/**********************************************************************
 * operator*
 *
 * Cross product of 2 FCOORDS.
 **********************************************************************/
inline float operator*( // cross product
    const FCOORD &op1, // operands
    const FCOORD &op2) {
  // 2D cross (z component): x1*y2 - y1*x2.
  const float pos_part = op1.xcoord * op2.ycoord;
  const float neg_part = op1.ycoord * op2.xcoord;
  return pos_part - neg_part;
}
/**********************************************************************
 * operator*
 *
 * Scalar multiply of an FCOORD.
 **********************************************************************/
inline FCOORD operator*( // scalar multiply
    const FCOORD &op1, // operands
    float scale) {
  FCOORD scaled; // output point
  scaled.ycoord = op1.ycoord * scale;
  scaled.xcoord = op1.xcoord * scale;
  return scaled;
}
inline FCOORD operator*( // scalar multiply
    float scale,
    const FCOORD &op1 // operands
) {
  // Multiplication commutes: delegate to the (vector, scalar) overload.
  return op1 * scale;
}
/**********************************************************************
 * operator*=
 *
 * Scalar multiply of an FCOORD.
 **********************************************************************/
inline FCOORD &operator*=( // scalar multiply
    FCOORD &op1, // operands
    float scale) {
  // Scale both components in place and return the target.
  op1.ycoord *= scale;
  op1.xcoord *= scale;
  return op1;
}
/**********************************************************************
 * operator/
 *
 * Scalar divide of an FCOORD.
 **********************************************************************/
inline FCOORD operator/( // scalar divide
    const FCOORD &op1, // operands
    float scale) {
  // A zero divisor would produce inf/nan: treat it as a fatal error.
  ASSERT_HOST(scale != 0.0f);
  FCOORD quotient; // output point
  quotient.ycoord = op1.ycoord / scale;
  quotient.xcoord = op1.xcoord / scale;
  return quotient;
}
/**********************************************************************
 * operator/=
 *
 * Scalar divide of an FCOORD.
 **********************************************************************/
inline FCOORD &operator/=( // scalar divide
    FCOORD &op1, // operands
    float scale) {
  // A zero divisor would produce inf/nan: treat it as a fatal error.
  ASSERT_HOST(scale != 0.0f);
  op1.ycoord /= scale;
  op1.xcoord /= scale;
  return op1;
}
/**********************************************************************
 * rotate
 *
 * Rotate an FCOORD by the given (normalized) (cos,sin) vector.
 **********************************************************************/
inline void FCOORD::rotate( // rotate by vector
    const FCOORD vec) {
  // Compute both rotated components before writing either.
  const float rotated_x = xcoord * vec.x() - ycoord * vec.y();
  const float rotated_y = ycoord * vec.x() + xcoord * vec.y();
  xcoord = rotated_x;
  ycoord = rotated_y;
}
inline void FCOORD::unrotate(const FCOORD &vec) {
  // Rotating by the conjugate (cos, -sin) undoes rotate(vec).
  rotate(FCOORD(vec.x(), -vec.y()));
}
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/points.h
|
C++
|
apache-2.0
| 19,735
|
/**********************************************************************
* File: polyaprx.cpp
* Description: Code for polygonal approximation from old edgeprog.
* Author: Ray Smith
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "polyaprx.h"
#include "blobs.h" // for EDGEPT, TPOINT, VECTOR, TESSLINE
#include "coutln.h" // for C_OUTLINE
#include "errcode.h" // for ASSERT_HOST
#include "mod128.h" // for DIR128
#include "params.h" // for BoolParam, BOOL_VAR
#include "points.h" // for ICOORD
#include "rect.h" // for TBOX
#include "tprintf.h" // for tprintf
#include <cstdint> // for INT16_MAX, int8_t
namespace tesseract {
#define FASTEDGELENGTH 256
static BOOL_VAR(poly_debug, false, "Debug old poly");
static BOOL_VAR(poly_wide_objects_better, true,
"More accurate approx on wide things");
#define fixed_dist 20 // really an int_variable
#define approx_dist 15 // really an int_variable
const int par1 = 4500 / (approx_dist * approx_dist);
const int par2 = 6750 / (approx_dist * approx_dist);
/**********************************************************************
 *cutline(first,last,area) straightens out a line by partitioning
 *and joining the ends by a straight line*
 **********************************************************************/
// Recursively refines the outline section between first and last.
// Measures each intermediate point's squared deviation from the chord
// first->last (via cross products, scaled to 8.8 fixed point); if the
// worst or mean squared deviation is too large relative to the blob
// area, the worst point is fixed and both halves are refined.
static void cutline( // recursive refine
    EDGEPT *first, // ends of line
    EDGEPT *last, int area // area of object
) {
  EDGEPT *edge; // current edge
  TPOINT vecsum; // vector sum
  int vlen; // approx length of vecsum
  TPOINT vec; // accumulated vector
  EDGEPT *maxpoint; // worst point
  int maxperp; // max deviation
  int perp; // perp distance
  int ptcount; // no of points
  int squaresum; // sum of perps
  edge = first; // start of line
  if (edge->next == last) {
    return; // simple line
  }
  // vector sum
  vecsum.x = last->pos.x - edge->pos.x;
  vecsum.y = last->pos.y - edge->pos.y;
  if (vecsum.x == 0 && vecsum.y == 0) {
    // special case: closed section, use the reversed incoming vector
    vecsum.x = -edge->prev->vec.x;
    vecsum.y = -edge->prev->vec.y;
  }
  // absolute value
  vlen = vecsum.x > 0 ? vecsum.x : -vecsum.x;
  if (vecsum.y > vlen) {
    vlen = vecsum.y; // maximum
  } else if (-vecsum.y > vlen) {
    vlen = -vecsum.y; // absolute value
  }
  vec.x = edge->vec.x; // accumulated vector
  vec.y = edge->vec.y;
  maxperp = 0; // none yet
  squaresum = ptcount = 0;
  edge = edge->next; // move to actual point
  maxpoint = edge; // in case there isn't one
  do {
    // The cross product of the accumulated vector with the chord is
    // proportional to the perpendicular distance from the chord.
    perp = vec.cross(vecsum); // get perp distance
    if (perp != 0) {
      perp *= perp; // squared deviation
    }
    squaresum += perp; // sum squares
    ptcount++; // count points
    if (poly_debug) {
      tprintf("Cutline:Final perp=%d\n", perp);
    }
    if (perp > maxperp) {
      maxperp = perp;
      maxpoint = edge; // find greatest deviation
    }
    vec.x += edge->vec.x; // accumulate vectors
    vec.y += edge->vec.y;
    edge = edge->next;
  } while (edge != last); // test all line
  perp = vecsum.length2();
  ASSERT_HOST(perp != 0);
  // Normalize by the squared chord length into 8.8 fixed point,
  // ordering shift vs divide to avoid integer overflow.
  if (maxperp < 256 * INT16_MAX) {
    maxperp <<= 8;
    maxperp /= perp; // true max perp
  } else {
    maxperp /= perp;
    maxperp <<= 8; // avoid overflow
  }
  if (squaresum < 256 * INT16_MAX) {
    // mean squared perp
    perp = (squaresum << 8) / (perp * ptcount);
  } else {
    // avoid overflow
    perp = (squaresum / perp << 8) / ptcount;
  }
  if (poly_debug) {
    tprintf("Cutline:A=%d, max=%.2f(%.2f%%), msd=%.2f(%.2f%%)\n", area,
            maxperp / 256.0, maxperp * 200.0 / area, perp / 256.0,
            perp * 300.0 / area);
  }
  // Split if the worst or mean deviation is large relative to the blob
  // area, or the section is long (vlen >= 126); recurse on both halves.
  if (maxperp * par1 >= 10 * area || perp * par2 >= 10 * area || vlen >= 126) {
    maxpoint->fixed = true;
    // partitions
    cutline(first, maxpoint, area);
    cutline(maxpoint, last, area);
  }
}
/**********************************************************************
 * edgesteps_to_edgepts
 *
 * Convert a C_OUTLINE to EDGEPTs.
 **********************************************************************/
// Collapses the outline's chain-code steps into runs of equal direction
// and writes one EDGEPT per run into edgepts[], linking them into a
// circular doubly-linked list.  Each EDGEPT also records the source
// outline and its step range for later detailed feature extraction.
// Returns the head of the list (&edgepts[0]).
static EDGEPT *edgesteps_to_edgepts( // convert outline
    C_OUTLINE *c_outline, // input
    EDGEPT edgepts[] // output is array
) {
  int32_t length; // steps in path
  ICOORD pos; // current coords
  int32_t stepindex; // current step
  int32_t stepinc; // increment
  int32_t epindex; // current EDGEPT
  ICOORD vec; // for this 8 step
  ICOORD prev_vec;
  int8_t epdir; // of this step
  DIR128 prevdir; // previous dir
  DIR128 dir; // of this step
  pos = c_outline->start_pos(); // start of loop
  length = c_outline->pathlength();
  stepindex = 0;
  epindex = 0;
  prevdir = -1;
  // repeated steps
  uint32_t count = 0;
  int prev_stepindex = 0;
  do {
    dir = c_outline->step_dir(stepindex);
    vec = c_outline->step(stepindex);
    // Fuse a pair of adjacent steps whose directions differ by -32
    // (a quarter turn in 128-dir units) into a single diagonal step
    // whose direction (dir - 16) lies midway between them.
    if (stepindex < length - 1 &&
        c_outline->step_dir(stepindex + 1) - dir == -32) {
      dir += 128 - 16;
      vec += c_outline->step(stepindex + 1);
      stepinc = 2;
    } else {
      stepinc = 1;
    }
    if (count == 0) {
      // First step of a new run.
      prevdir = dir;
      prev_vec = vec;
    }
    if (prevdir.get_dir() != dir.get_dir()) {
      // Direction changed: flush the accumulated run as one EDGEPT.
      edgepts[epindex].pos.x = pos.x();
      edgepts[epindex].pos.y = pos.y();
      prev_vec *= count;
      edgepts[epindex].vec.x = prev_vec.x();
      edgepts[epindex].vec.y = prev_vec.y();
      pos += prev_vec;
      edgepts[epindex].runlength = count;
      edgepts[epindex].prev = &edgepts[epindex - 1];
      // TODO: reset is_hidden, too?
      edgepts[epindex].fixed = false;
      edgepts[epindex].next = &edgepts[epindex + 1];
      // Reduce the 128-direction code to the 3-bit (0..7) EDGEPT dir.
      prevdir += 64;
      epdir = DIR128(0) - prevdir;
      epdir >>= 4;
      epdir &= 7;
      edgepts[epindex].dir = epdir;
      edgepts[epindex].src_outline = c_outline;
      edgepts[epindex].start_step = prev_stepindex;
      edgepts[epindex].step_count = stepindex - prev_stepindex;
      epindex++;
      prevdir = dir;
      prev_vec = vec;
      count = 1;
      prev_stepindex = stepindex;
    } else {
      count++;
    }
    stepindex += stepinc;
  } while (stepindex < length);
  // Flush the final run; its next pointer closes the loop to edgepts[0].
  edgepts[epindex].pos.x = pos.x();
  edgepts[epindex].pos.y = pos.y();
  prev_vec *= count;
  edgepts[epindex].vec.x = prev_vec.x();
  edgepts[epindex].vec.y = prev_vec.y();
  pos += prev_vec;
  edgepts[epindex].runlength = count;
  // TODO: reset is_hidden, too?
  edgepts[epindex].fixed = false;
  edgepts[epindex].src_outline = c_outline;
  edgepts[epindex].start_step = prev_stepindex;
  edgepts[epindex].step_count = stepindex - prev_stepindex;
  edgepts[epindex].prev = &edgepts[epindex - 1];
  edgepts[epindex].next = &edgepts[0];
  prevdir += 64;
  epdir = DIR128(0) - prevdir;
  epdir >>= 4;
  epdir &= 7;
  edgepts[epindex].dir = epdir;
  edgepts[0].prev = &edgepts[epindex];
  // A closed outline must end back at its start position.
  ASSERT_HOST(pos.x() == c_outline->start_pos().x() &&
              pos.y() == c_outline->start_pos().y());
  return &edgepts[0];
}
/**********************************************************************
 *fix2(start,area) fixes points on the outline according to a trial method*
 **********************************************************************/
// First-stage point selection: walks the closed EDGEPT loop and marks
// ("fixes") the points that must survive the polygonal approximation -
// sharp bends, ends of long runs, and ends of dominant direction pairs -
// then unfixes fixed points that sit too close together relative to the
// blob area (gapmin).
static void fix2( // polygonal approx
    EDGEPT *start, // loop to approximate
    int area) {
  EDGEPT *edgept; // current point
  EDGEPT *edgept1;
  EDGEPT *loopstart; // modified start of loop
  EDGEPT *linestart; // start of line segment
  int fixed_count; // no of fixed points
  int8_t dir;
  int d01, d12, d23, gapmin;
  TPOINT d01vec, d12vec, d23vec;
  EDGEPT *edgefix, *startfix;
  EDGEPT *edgefix0, *edgefix1, *edgefix2, *edgefix3;
  edgept = start; // start of loop
  // Advance to a point with a significant direction change so the scan
  // below starts at a stable corner.  Direction arithmetic is mod 8.
  while (((edgept->dir - edgept->prev->dir + 1) & 7) < 3 &&
         (dir = (edgept->prev->dir - edgept->next->dir) & 7) != 2 && dir != 6) {
    edgept = edgept->next; // find suitable start
  }
  loopstart = edgept; // remember start
  // completed flag
  bool stopped = false;
  edgept->fixed = true; // fix it
  // Pass 1: fix the boundary points of each alternating two-direction
  // section and every sharp bend.
  do {
    linestart = edgept; // possible start of line
    auto dir1 = edgept->dir; // first direction
    // length of dir1
    auto sum1 = edgept->runlength;
    edgept = edgept->next;
    auto dir2 = edgept->dir; // 2nd direction
    // length in dir2
    auto sum2 = edgept->runlength;
    if (((dir1 - dir2 + 1) & 7) < 3) {
      // Mild bend: extend while the chain alternates between dir1 and
      // dir2, accumulating the length spent in each direction.
      while (edgept->prev->dir == edgept->next->dir) {
        edgept = edgept->next; // look at next
        if (edgept->dir == dir1) {
          // sum lengths
          sum1 += edgept->runlength;
        } else {
          sum2 += edgept->runlength;
        }
      }
      if (edgept == loopstart) {
        // finished
        stopped = true;
      }
      if (sum2 + sum1 > 2 && linestart->prev->dir == dir2 &&
          (linestart->prev->runlength > linestart->runlength || sum2 > sum1)) {
        // start is back one
        linestart = linestart->prev;
        linestart->fixed = true;
      }
      if (((edgept->next->dir - edgept->dir + 1) & 7) >= 3 ||
          (edgept->dir == dir1 && sum1 >= sum2) ||
          ((edgept->prev->runlength < edgept->runlength ||
            (edgept->dir == dir2 && sum2 >= sum1)) &&
           linestart->next != edgept)) {
        edgept = edgept->next;
      }
    }
    // sharp bend
    edgept->fixed = true;
  }
  // do whole loop
  while (edgept != loopstart && !stopped);
  // Pass 2: fix both ends of every long run (8 steps or more).
  edgept = start;
  do {
    if (((edgept->runlength >= 8) && (edgept->dir != 2) &&
         (edgept->dir != 6)) ||
        ((edgept->runlength >= 8) &&
         ((edgept->dir == 2) || (edgept->dir == 6)))) {
      edgept->fixed = true;
      edgept1 = edgept->next;
      edgept1->fixed = true;
    }
    edgept = edgept->next;
  } while (edgept != start);
  // Pass 3: unfix isolated single-step fixed pairs that merely wiggle
  // within an otherwise repeating direction pattern.
  edgept = start;
  do {
    // single fixed step
    if (edgept->fixed &&
        edgept->runlength == 1
        // and neighbours free
        && edgept->next->fixed &&
        !edgept->prev->fixed
        // same pair of dirs
        && !edgept->next->next->fixed &&
        edgept->prev->dir == edgept->next->dir &&
        edgept->prev->prev->dir == edgept->next->next->dir &&
        ((edgept->prev->dir - edgept->dir + 1) & 7) < 3) {
      // unfix it
      edgept->fixed = false;
      edgept->next->fixed = false;
    }
    edgept = edgept->next; // do all points
  } while (edgept != start); // until finished
  // Pass 4: thin out fixed points closer together than gapmin, keeping
  // whichever point borders the longer neighbouring gap.  Needs a
  // sliding window of four consecutive fixed points.
  stopped = false;
  if (area < 450) {
    area = 450;
  }
  gapmin = area * fixed_dist * fixed_dist / 44000;
  edgept = start;
  fixed_count = 0;
  do {
    if (edgept->fixed) {
      fixed_count++;
    }
    edgept = edgept->next;
  } while (edgept != start);
  while (!edgept->fixed) {
    edgept = edgept->next;
  }
  edgefix0 = edgept;
  edgept = edgept->next;
  while (!edgept->fixed) {
    edgept = edgept->next;
  }
  edgefix1 = edgept;
  edgept = edgept->next;
  while (!edgept->fixed) {
    edgept = edgept->next;
  }
  edgefix2 = edgept;
  edgept = edgept->next;
  while (!edgept->fixed) {
    edgept = edgept->next;
  }
  edgefix3 = edgept;
  startfix = edgefix2;
  do {
    if (fixed_count <= 3) {
      break; // already too few
    }
    d12vec.diff(edgefix1->pos, edgefix2->pos);
    d12 = d12vec.length2();
    // TODO(rays) investigate this change:
    // Only unfix a point if it is part of a low-curvature section
    // of outline and the total angle change of the outlines is
    // less than 90 degrees, ie the scalar product is positive.
    // if (d12 <= gapmin && edgefix0->vec.dot(edgefix2->vec) > 0) {
    if (d12 <= gapmin) {
      d01vec.diff(edgefix0->pos, edgefix1->pos);
      d01 = d01vec.length2();
      d23vec.diff(edgefix2->pos, edgefix3->pos);
      d23 = d23vec.length2();
      if (d01 > d23) {
        edgefix2->fixed = false;
        fixed_count--;
      } else {
        edgefix1->fixed = false;
        fixed_count--;
        edgefix1 = edgefix2;
      }
    } else {
      edgefix0 = edgefix1;
      edgefix1 = edgefix2;
    }
    edgefix2 = edgefix3;
    edgept = edgept->next;
    while (!edgept->fixed) {
      if (edgept == startfix) {
        stopped = true;
      }
      edgept = edgept->next;
    }
    edgefix3 = edgept;
    edgefix = edgefix2;
  } while ((edgefix != startfix) && (!stopped));
}
/**********************************************************************
 *poly2(startpt,area,path) applies a second approximation to the outline
 *using the points which have been fixed by the first approximation*
 **********************************************************************/
// Second-stage approximation: refines each span between fixed points
// with cutline(), relaxing the area threshold until at least 3 points
// stay fixed, then splices out all non-fixed points so the loop
// contains only the surviving vertices.  Returns the new loop start.
static EDGEPT *poly2( // second poly
    EDGEPT *startpt, // start of loop
    int area // area of blob box
) {
  EDGEPT *edgept; // current outline point
  EDGEPT *loopstart; // starting point
  EDGEPT *linestart; // start of line
  int edgesum; // correction count
  if (area < 1200) {
    area = 1200; // minimum value
  }
  loopstart = nullptr; // not found it yet
  edgept = startpt; // start of loop
  // Find a fixed point followed by a non-fixed one to start from.
  do {
    // current point fixed and next not
    if (edgept->fixed && !edgept->next->fixed) {
      loopstart = edgept; // start of repoly
      break;
    }
    edgept = edgept->next; // next point
  } while (edgept != startpt); // until found or finished
  if (loopstart == nullptr && !startpt->fixed) {
    // fixed start of loop
    startpt->fixed = true;
    loopstart = startpt; // or start of loop
  }
  if (loopstart) {
    // Keep refining until at least 3 fixed points survive, halving the
    // area threshold after each failed pass to force more splits.
    do {
      edgept = loopstart; // first to do
      do {
        linestart = edgept;
        edgesum = 0; // sum of lengths
        do {
          // sum lengths
          edgesum += edgept->runlength;
          edgept = edgept->next; // move on
        } while (!edgept->fixed && edgept != loopstart && edgesum < 126);
        if (poly_debug) {
          tprintf("Poly2:starting at (%d,%d)+%d=(%d,%d),%d to (%d,%d)\n",
                  linestart->pos.x, linestart->pos.y, linestart->dir,
                  linestart->vec.x, linestart->vec.y, edgesum, edgept->pos.x,
                  edgept->pos.y);
        }
        // reapproximate
        cutline(linestart, edgept, area);
        while (edgept->next->fixed && edgept != loopstart) {
          edgept = edgept->next; // look for next non-fixed
        }
      }
      // do all the loop
      while (edgept != loopstart);
      edgesum = 0;
      do {
        if (edgept->fixed) {
          edgesum++;
        }
        edgept = edgept->next;
      }
      // count fixed pts
      while (edgept != loopstart);
      if (edgesum < 3) {
        area /= 2; // must have 3 pts
      }
    } while (edgesum < 3);
    // Splice out every non-fixed point: link each fixed point straight
    // to the next fixed one and recompute the connecting vector.
    do {
      linestart = edgept;
      do {
        edgept = edgept->next;
      } while (!edgept->fixed);
      linestart->next = edgept;
      edgept->prev = linestart;
      linestart->vec.x = edgept->pos.x - linestart->pos.x;
      linestart->vec.y = edgept->pos.y - linestart->pos.y;
    } while (edgept != loopstart);
  } else {
    edgept = startpt; // start of loop
  }
  loopstart = edgept; // new start
  return loopstart; // correct exit
}
/**********************************************************************
 * tesspoly_outline
 *
 * Approximate an outline from chain codes form using the old tess algorithm.
 * If allow_detailed_fx is true, the EDGEPTs in the returned TBLOB
 * contain pointers to the input C_OUTLINEs that enable higher-resolution
 * feature extraction that does not use the polygonal approximation.
 **********************************************************************/
TESSLINE *ApproximateOutline(bool allow_detailed_fx, C_OUTLINE *c_outline) {
  EDGEPT stack_edgepts[FASTEDGELENGTH]; // converted path
  EDGEPT *edgepts = stack_edgepts;
  // Use heap memory if the stack buffer is not big enough.
  if (c_outline->pathlength() > FASTEDGELENGTH) {
    edgepts = new EDGEPT[c_outline->pathlength()];
  }
  // The "area" driving the approximation thresholds is the square of the
  // bounding-box height (or of the larger dimension when
  // poly_wide_objects_better is disabled).
  const auto &loop_box = c_outline->bounding_box();
  int32_t area = loop_box.height();
  if (!poly_wide_objects_better && loop_box.width() > area) {
    area = loop_box.width();
  }
  area *= area;
  edgesteps_to_edgepts(c_outline, edgepts);
  fix2(edgepts, area);
  EDGEPT *edgept = poly2(edgepts, area); // 2nd approximation.
  EDGEPT *startpt = edgept;
  EDGEPT *result = nullptr;
  EDGEPT *prev_result = nullptr;
  // Copy the surviving points into a fresh heap-allocated list; the
  // workspace (possibly stack memory) cannot be handed to the caller.
  do {
    auto *new_pt = new EDGEPT;
    new_pt->pos = edgept->pos;
    // prev is nullptr for the first point and patched after the loop.
    // (A redundant second assignment in the else branch was removed.)
    new_pt->prev = prev_result;
    if (prev_result == nullptr) {
      result = new_pt;
    } else {
      prev_result->next = new_pt;
    }
    if (allow_detailed_fx) {
      new_pt->src_outline = edgept->src_outline;
      new_pt->start_step = edgept->start_step;
      new_pt->step_count = edgept->step_count;
    }
    prev_result = new_pt;
    edgept = edgept->next;
  } while (edgept != startpt);
  // Close the circular doubly-linked list.
  prev_result->next = result;
  result->prev = prev_result;
  if (edgepts != stack_edgepts) {
    delete[] edgepts;
  }
  return TESSLINE::BuildFromOutlineList(result);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/polyaprx.cpp
|
C++
|
apache-2.0
| 17,535
|
/**********************************************************************
* File: polyaprx.h
* Description: Code for polygonal approximation from old edgeprog.
* Author: Ray Smith
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef POLYAPRX_H
#define POLYAPRX_H
namespace tesseract {
// Forward declarations: defined in coutln.h and blobs.h respectively.
class C_OUTLINE;
struct TESSLINE;
// convert a chain-coded input to the old OUTLINE approximation
// If allow_detailed_fx is true, the returned EDGEPTs keep pointers to
// the source C_OUTLINE for higher-resolution feature extraction.
TESSLINE *ApproximateOutline(bool allow_detailed_fx, C_OUTLINE *c_outline);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/polyaprx.h
|
C++
|
apache-2.0
| 1,142
|
/**********************************************************************
* File: polyblk.cpp (Formerly poly_block.c)
* Description: Polygonal blocks
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "polyblk.h"
#include "elst.h"
#include <cctype>
#include <cinttypes> // PRId32
#include <cmath>
#include <cstdio>
#include <memory> // std::unique_ptr
namespace tesseract {
#define INTERSECTING INT16_MAX
int lessthan(const void *first, const void *second);
// Construct from an existing vertex list, taking its elements, and a
// semantic block type; derives the bounding box from the vertices.
POLY_BLOCK::POLY_BLOCK(ICOORDELT_LIST *points, PolyBlockType t) {
  vertices.clear();
  ICOORDELT_IT it = &vertices;
  it.move_to_first();
  // Splice the caller's points into our (emptied) vertex list.
  it.add_list_before(points);
  type = t;
  compute_bb();
}
// Initialize from box coordinates.
POLY_BLOCK::POLY_BLOCK(const TBOX &tbox, PolyBlockType t) {
  vertices.clear();
  ICOORDELT_IT it = &vertices;
  it.move_to_first();
  // Build a rectangle from the four box corners.
  it.add_to_end(new ICOORDELT(tbox.left(), tbox.top()));
  it.add_to_end(new ICOORDELT(tbox.left(), tbox.bottom()));
  it.add_to_end(new ICOORDELT(tbox.right(), tbox.bottom()));
  it.add_to_end(new ICOORDELT(tbox.right(), tbox.top()));
  type = t;
  compute_bb();
}
/**
* @name POLY_BLOCK::compute_bb
*
* Compute the bounding box from the outline points.
*/
void POLY_BLOCK::compute_bb() { // constructor
ICOORD ibl, itr; // integer bb
ICOORD botleft; // bounding box
ICOORD topright;
ICOORD pos; // current pos;
ICOORDELT_IT pts = &vertices; // iterator
botleft = *pts.data();
topright = botleft;
do {
pos = *pts.data();
if (pos.x() < botleft.x()) {
// get bounding box
botleft = ICOORD(pos.x(), botleft.y());
}
if (pos.y() < botleft.y()) {
botleft = ICOORD(botleft.x(), pos.y());
}
if (pos.x() > topright.x()) {
topright = ICOORD(pos.x(), topright.y());
}
if (pos.y() > topright.y()) {
topright = ICOORD(topright.x(), pos.y());
}
pts.forward();
} while (!pts.at_first());
ibl = ICOORD(botleft.x(), botleft.y());
itr = ICOORD(topright.x(), topright.y());
box = TBOX(ibl, itr);
}
/**
 * @name POLY_BLOCK::winding_number
 *
 * Return the winding number of the outline around the given point.
 * @param point point to wind around
 */
// Counts signed y-crossings of the polygon relative to point, using the
// cross product to decide which side the edge passes on.  Returns
// INTERSECTING if the point lies exactly on an edge or vertex.
int16_t POLY_BLOCK::winding_number(const ICOORD &point) {
  int16_t count; // winding count
  ICOORD pt; // current point
  ICOORD vec; // point to current point
  ICOORD vvec; // current point to next point
  int32_t cross; // cross product
  ICOORDELT_IT it = &vertices; // iterator
  count = 0;
  do {
    pt = *it.data();
    vec = pt - point; // from the query point to this vertex
    vvec = *it.data_relative(1) - pt; // edge to the following vertex
    // crossing the line
    if (vec.y() <= 0 && vec.y() + vvec.y() > 0) {
      // Edge crosses upward through the point's y level.
      cross = vec * vvec; // cross product
      if (cross > 0) {
        count++; // crossing right half
      } else if (cross == 0) {
        return INTERSECTING; // going through point
      }
    } else if (vec.y() > 0 && vec.y() + vvec.y() <= 0) {
      // Edge crosses downward through the point's y level.
      cross = vec * vvec;
      if (cross < 0) {
        count--; // crossing back
      } else if (cross == 0) {
        return INTERSECTING; // illegal
      }
    } else if (vec.y() == 0 && vec.x() == 0) {
      // The point coincides with this vertex.
      return INTERSECTING;
    }
    it.forward();
  } while (!it.at_first());
  return count; // winding number
}
/// @return true if other is inside this.
bool POLY_BLOCK::contains(POLY_BLOCK *other) {
  if (!box.overlap(*(other->bounding_box()))) {
    return false; // Disjoint bounding boxes: cannot be contained.
  }
  // No vertex of this polygon may wind inside other, or the outlines
  // cross and neither contains the other.
  ICOORDELT_IT it = &vertices;
  do {
    const ICOORD vertex = *it.data();
    const int16_t count = other->winding_number(vertex);
    if (count != INTERSECTING && count != 0) {
      return false;
    }
    it.forward();
  } while (!it.at_first());
  // Every vertex of other must wind inside (or touch) this polygon.
  it.set_to_list(other->points());
  do {
    const ICOORD vertex = *it.data();
    const int16_t count = winding_number(vertex);
    if (count != INTERSECTING && count == 0) {
      return false;
    }
    it.forward();
  } while (!it.at_first());
  return true;
}
/**
 * @name POLY_BLOCK::rotate
 *
 * Rotate the POLY_BLOCK.
 * @param rotation cos, sin of angle
 */
void POLY_BLOCK::rotate(FCOORD rotation) {
  ICOORDELT_IT pts = &vertices; // vertex iterator
  do {
    ICOORDELT *vertex = pts.data();
    // Rotate in floating point, then round back to integer coords.
    FCOORD fpos(vertex->x(), vertex->y());
    fpos.rotate(rotation);
    vertex->set_x(static_cast<TDimension>(floor(fpos.x() + 0.5)));
    vertex->set_y(static_cast<TDimension>(floor(fpos.y() + 0.5)));
    pts.forward();
  } while (!pts.at_first());
  compute_bb();
}
/**
* @name POLY_BLOCK::reflect_in_y_axis
*
* Reflect the coords of the polygon in the y-axis. (Flip the sign of x.)
*/
void POLY_BLOCK::reflect_in_y_axis() {
ICOORDELT *pt; // current point
ICOORDELT_IT pts = &vertices; // Iterator.
do {
pt = pts.data();
pt->set_x(-pt->x());
pts.forward();
} while (!pts.at_first());
compute_bb();
}
/**
 * POLY_BLOCK::move
 *
 * Translate the POLY_BLOCK.
 * @param shift x,y translation vector
 */
void POLY_BLOCK::move(ICOORD shift) {
  ICOORDELT_IT it = &vertices; // iterator over vertices
  do {
    *it.data() += shift; // shift each vertex by the vector
    it.forward();
  } while (!it.at_first());
  compute_bb(); // bounding box moves with the vertices
}
#ifndef GRAPHICS_DISABLED
// Draw the polygon outline in the window, in the colour associated with
// this block's type. If num > 0, it is also drawn as a text label at the
// first vertex.
void POLY_BLOCK::plot(ScrollView *window, int32_t num) {
  ICOORDELT_IT v = &vertices;
  window->Pen(ColorForPolyBlockType(type));
  v.move_to_first();
  if (num > 0) {
    // Label the polygon with its number at the first vertex.
    window->TextAttributes("Times", 80, false, false, false);
    char temp_buff[34];
# if !defined(_WIN32) || defined(__MINGW32__)
    snprintf(temp_buff, sizeof(temp_buff), "%" PRId32, num);
# else
    // MSVC path: _ltoa formats the long directly into the buffer.
    _ltoa(num, temp_buff, 10);
# endif
    window->Text(v.data()->x(), v.data()->y(), temp_buff);
  }
  // Trace the outline vertex by vertex ...
  window->SetCursor(v.data()->x(), v.data()->y());
  for (v.mark_cycle_pt(); !v.cycled_list(); v.forward()) {
    window->DrawTo(v.data()->x(), v.data()->y());
  }
  // ... and close the polygon back to the first vertex.
  v.move_to_first();
  window->DrawTo(v.data()->x(), v.data()->y());
}
// Fill the interior of the polygon in the window with the given colour by
// painting every horizontal run returned by PB_LINE_IT for each scanline of
// the bounding box.
void POLY_BLOCK::fill(ScrollView *window, ScrollView::Color colour) {
  ICOORDELT_IT s_it;
  std::unique_ptr<PB_LINE_IT> lines(new PB_LINE_IT(this));
  window->Pen(colour);
  for (auto y = this->bounding_box()->bottom(); y <= this->bounding_box()->top(); y++) {
    // get_line returns a freshly allocated list of runs for scanline y.
    const std::unique_ptr</*non-const*/ ICOORDELT_LIST> segments(lines->get_line(y));
    if (!segments->empty()) {
      s_it.set_to_list(segments.get());
      for (s_it.mark_cycle_pt(); !s_it.cycled_list(); s_it.forward()) {
        // Note different use of ICOORDELT, x coord is x coord of pixel
        // at the start of line segment, y coord is length of line segment
        // Last pixel is start pixel + length.
        auto width = s_it.data()->y();
        window->SetCursor(s_it.data()->x(), y);
        window->DrawTo(s_it.data()->x() + static_cast<float>(width), y);
      }
    }
  }
}
#endif
/// @return true if the polygons of other and this overlap.
bool POLY_BLOCK::overlap(POLY_BLOCK *other) {
  // Fast reject: disjoint bounding boxes cannot overlap.
  if (!box.overlap(*(other->bounding_box()))) {
    return false;
  }
  ICOORDELT_IT it = &vertices; // iterator over this polygon's vertices
  /* Any vertex of this inside other proves overlap. */
  do {
    const ICOORD vertex = *it.data();
    // Winding number of other around this vertex.
    const int16_t winding = other->winding_number(vertex);
    if (winding != INTERSECTING && winding != 0) {
      return true;
    }
    it.forward();
  } while (!it.at_first());
  /* Any vertex of other inside this also proves overlap. */
  it.set_to_list(other->points()); // switch to other's vertex list
  do {
    const ICOORD vertex = *it.data();
    // Try the other way round.
    const int16_t winding = winding_number(vertex);
    if (winding != INTERSECTING && winding != 0) {
      return true;
    }
    it.forward();
  } while (!it.at_first());
  return false;
}
// Returns a list of runs of pixels for the given y coord of the block's
// polygon. Each element of the returned list holds the start x of a run in
// its x coord and the run length in its y coord. Caller owns the list.
ICOORDELT_LIST *PB_LINE_IT::get_line(TDimension y) {
  ICOORDELT_IT v, r;
  ICOORDELT_LIST *result;
  ICOORDELT *x, *current, *previous;
  float fy = y + 0.5f; // sample the scanline at pixel centre
  result = new ICOORDELT_LIST();
  r.set_to_list(result);
  v.set_to_list(block->points());
  // Collect the x coordinate of every polygon edge that crosses the
  // scanline, by linear interpolation along the edge.
  for (v.mark_cycle_pt(); !v.cycled_list(); v.forward()) {
    if (((v.data_relative(-1)->y() > y) && (v.data()->y() <= y)) ||
        ((v.data_relative(-1)->y() <= y) && (v.data()->y() > y))) {
      previous = v.data_relative(-1);
      current = v.data();
      float fx =
          0.5f + previous->x() +
          (current->x() - previous->x()) * (fy - previous->y()) / (current->y() - previous->y());
      x = new ICOORDELT(static_cast<TDimension>(fx), 0);
      r.add_to_end(x);
    }
  }
  if (!r.empty()) {
    r.sort(lessthan);
    // Pair the sorted crossings: each even element starts a run and the
    // following odd element ends it. Store the run length in the start
    // element's y coord and delete the end element.
    // (A dead debugging loop that merely read each element was removed.)
    for (r.mark_cycle_pt(); !r.cycled_list(); r.forward()) {
      r.data()->set_y(r.data_relative(1)->x() - r.data()->x());
      r.forward();
      delete (r.extract());
    }
  }
  return result;
}
int lessthan(const void *first, const void *second) {
const ICOORDELT *p1 = *reinterpret_cast<const ICOORDELT *const *>(first);
const ICOORDELT *p2 = *reinterpret_cast<const ICOORDELT *const *>(second);
if (p1->x() < p2->x()) {
return (-1);
} else if (p1->x() > p2->x()) {
return (1);
} else {
return (0);
}
}
#ifndef GRAPHICS_DISABLED
/// Returns a color to draw the given type.
/// Returns a color to draw the given type, falling back to WHITE for any
/// out-of-range value.
ScrollView::Color POLY_BLOCK::ColorForPolyBlockType(PolyBlockType type) {
  // Keep kPBColors in sync with PolyBlockType.
  const ScrollView::Color kPBColors[PT_COUNT] = {
      ScrollView::WHITE, // Type is not yet known. Keep as the 1st element.
      ScrollView::BLUE, // Text that lives inside a column.
      ScrollView::CYAN, // Text that spans more than one column.
      ScrollView::MEDIUM_BLUE, // Text that is in a cross-column pull-out
      // region.
      ScrollView::AQUAMARINE, // Partition belonging to an equation region.
      ScrollView::SKY_BLUE, // Partition belonging to an inline equation
      // region.
      ScrollView::MAGENTA, // Partition belonging to a table region.
      ScrollView::GREEN, // Text-line runs vertically.
      ScrollView::LIGHT_BLUE, // Text that belongs to an image.
      ScrollView::RED, // Image that lives inside a column.
      ScrollView::YELLOW, // Image that spans more than one column.
      ScrollView::ORANGE, // Image in a cross-column pull-out region.
      ScrollView::BROWN, // Horizontal Line.
      ScrollView::DARK_GREEN, // Vertical Line.
      ScrollView::GREY // Lies outside of any column.
  };
  if (type < PT_COUNT) {
    return kPBColors[type];
  }
  // Defensive default for unexpected enum values.
  return ScrollView::WHITE;
}
#endif // !GRAPHICS_DISABLED
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/polyblk.cpp
|
C++
|
apache-2.0
| 11,834
|
/**********************************************************************
* File: polyblk.h (Formerly poly_block.h)
* Description: Polygonal blocks
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef POLYBLK_H
#define POLYBLK_H
#include "elst.h"
#include "points.h"
#include "rect.h"
#include "scrollview.h"
#include <tesseract/publictypes.h>
namespace tesseract {
// A polygonal region of a page, described by its vertex list, with a cached
// bounding box and a PolyBlockType classifying the region's content.
class TESS_API POLY_BLOCK {
public:
  POLY_BLOCK() = default;
  // Initialize from box coordinates.
  POLY_BLOCK(const TBOX &tbox, PolyBlockType type);
  // Initialize from an explicit vertex list.
  POLY_BLOCK(ICOORDELT_LIST *points, PolyBlockType type);
  ~POLY_BLOCK() = default;
  // Access the cached bounding box (valid after compute_bb()).
  TBOX *bounding_box() { // access function
    return &box;
  }
  // Access the vertex list.
  ICOORDELT_LIST *points() { // access function
    return &vertices;
  }
  // Recompute the bounding box from the current vertices.
  void compute_bb();
  // The region type of this block.
  PolyBlockType isA() const {
    return type;
  }
  // True if the type is one of the text region types.
  bool IsText() const {
    return PTIsTextType(type);
  }
  // Rotate about the origin by the given rotation. (Analogous to
  // multiplying by a complex number.
  void rotate(FCOORD rotation);
  // Reflect the coords of the polygon in the y-axis. (Flip the sign of x.)
  void reflect_in_y_axis();
  // Move by adding shift to all coordinates.
  void move(ICOORD shift);
#ifndef GRAPHICS_DISABLED
  // Draw the outline; label it with num if num > 0.
  void plot(ScrollView *window, int32_t num);
  // Fill the interior scanline by scanline.
  void fill(ScrollView *window, ScrollView::Color colour);
#endif // !GRAPHICS_DISABLED
  // Returns true if other is inside this.
  bool contains(POLY_BLOCK *other);
  // Returns true if the polygons of other and this overlap.
  bool overlap(POLY_BLOCK *other);
  // Returns the winding number of this around the test_pt.
  // Positive for anticlockwise, negative for clockwise, and zero for
  // test_pt outside this.
  int16_t winding_number(const ICOORD &test_pt);
#ifndef GRAPHICS_DISABLED
  // Static utility functions to handle the PolyBlockType.
  // Returns a color to draw the given type.
  static ScrollView::Color ColorForPolyBlockType(PolyBlockType type);
#endif // !GRAPHICS_DISABLED

private:
  ICOORDELT_LIST vertices; // vertices
  TBOX box;                // bounding box
  PolyBlockType type;      // Type of this region.
};
// Class to iterate the scanlines of a polygon.
// Class to iterate the scanlines of a polygon, producing for each y coord
// the horizontal runs of pixels that lie inside the POLY_BLOCK.
class PB_LINE_IT {
public:
  PB_LINE_IT(POLY_BLOCK *blkptr) {
    block = blkptr;
  }
  // Retarget the iterator at a different block.
  void set_to_block(POLY_BLOCK *blkptr) {
    block = blkptr;
  }
  // Returns a list of runs of pixels for the given y coord.
  // Each element of the returned list is the start (x) and extent(y) of
  // a run inside the region.
  // Delete the returned list after use.
  ICOORDELT_LIST *get_line(TDimension y);

private:
  POLY_BLOCK *block; // Not owned.
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/polyblk.h
|
C++
|
apache-2.0
| 3,278
|
/**********************************************************************
* File: quadlsq.cpp (Formerly qlsq.c)
* Description: Code for least squares approximation of quadratics.
* Author: Ray Smith
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "quadlsq.h"
#include "tprintf.h"
#include <cmath>
#include <cstdio>
namespace tesseract {
// Minimum variance in least squares before backing off to a lower degree.
const long double kMinVariance = 1.0L / 1024;
/**********************************************************************
 * QLSQ::clear
 *
 * Reset the accumulator and the fitted coefficients to zero.
 **********************************************************************/
void QLSQ::clear() { // initialize
  a = b = c = 0.0;   // No fit yet.
  n = 0;             // No elements.
  sigx = sigy = 0.0; // Zero all accumulators.
  sigxx = sigxy = sigyy = 0.0;
  sigxxx = sigxxy = 0.0;
  sigxxxx = 0.0;
}
/**********************************************************************
 * QLSQ::add
 *
 * Add an element (x, y) to the accumulator sums.
 **********************************************************************/
void QLSQ::add(double x, double y) {
  // x*x widened once; higher powers reuse it with the same evaluation
  // order as the original expressions.
  const long double xx = static_cast<long double>(x) * x;
  n++; // Count elements.
  sigx += x;
  sigy += y;
  sigxx += x * x;
  sigxy += x * y;
  sigyy += y * y;
  sigxxx += xx * x;
  sigxxy += xx * y;
  sigxxxx += xx * x * x;
}
/**********************************************************************
 * QLSQ::remove
 *
 * Subtract a previously added element (x, y) from the accumulator sums.
 **********************************************************************/
void QLSQ::remove(double x, double y) {
  if (n <= 0) {
    // Guard against underflow of the element count.
    tprintf("Can't remove an element from an empty QLSQ accumulator!\n");
    return;
  }
  const long double xx = static_cast<long double>(x) * x;
  n--; // Count elements.
  sigx -= x;
  sigy -= y;
  sigxx -= x * x;
  sigxy -= x * y;
  sigyy -= y * y;
  sigxxx -= xx * x;
  sigxxy -= xx * y;
  sigxxxx -= xx * x * x;
}
/**********************************************************************
* QLSQ::fit
*
* Fit the given degree of polynomial and store the result.
* This creates a quadratic of the form axx + bx + c, but limited to
* the given degree.
**********************************************************************/
void QLSQ::fit(int degree) {
  // Unnormalized variance of x: n*sigxx - sigx^2 (scale n^2).
  long double x_variance =
      static_cast<long double>(sigxx) * n - static_cast<long double>(sigx) * sigx;
  // Note: for computational efficiency, we do not normalize the variance,
  // covariance and cube variance here as they are in the same order in both
  // nominators and denominators. However, we need be careful in value range
  // check.
  if (x_variance < kMinVariance * n * n || degree < 1 || n < 2) {
    // We cannot calculate b reliably so forget a and b, and just work on c.
    a = b = 0.0;
    if (n >= 1 && degree >= 0) {
      c = sigy / n; // Constant fit: mean of y.
    } else {
      c = 0.0;
    }
    return;
  }
  long double top96 = 0.0;    // Accurate top.
  long double bottom96 = 0.0; // Accurate bottom.
  // Unnormalized third central moment of x and x/y covariance.
  long double cubevar = sigxxx * n - static_cast<long double>(sigxx) * sigx;
  long double covariance =
      static_cast<long double>(sigxy) * n - static_cast<long double>(sigx) * sigy;
  if (n >= 4 && degree >= 2) {
    // Numerator/denominator of the least-squares solution for the
    // quadratic coefficient a, kept in long double for accuracy.
    top96 = cubevar * covariance;
    top96 += x_variance * (static_cast<long double>(sigxx) * sigy - sigxxy * n);
    bottom96 = cubevar * cubevar;
    bottom96 -= x_variance * (sigxxxx * n - static_cast<long double>(sigxx) * sigxx);
  }
  if (bottom96 >= kMinVariance * n * n * n * n) {
    // Denominators looking good
    a = top96 / bottom96;
    top96 = covariance - cubevar * a;
    b = top96 / x_variance;
  } else {
    // Forget a, and concentrate on b.
    a = 0.0;
    b = covariance / x_variance;
  }
  // Back-substitute the means to get the constant term.
  c = (sigy - a * sigxx - b * sigx) / n;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/quadlsq.cpp
|
C++
|
apache-2.0
| 4,588
|
/**********************************************************************
* File: quadlsq.h (Formerly qlsq.h)
* Description: Code for least squares approximation of quadratics.
* Author: Ray Smith
*
* (C) Copyright 1993, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef QUADLSQ_H
#define QUADLSQ_H
#include "points.h"
namespace tesseract {
// Least-squares fitter for a quadratic y = a*x^2 + b*x + c, accumulating
// the power sums needed by fit() incrementally via add()/remove().
class QLSQ {
public:
  QLSQ() {   // constructor
    clear(); // set to zeros
  }
  void clear(); // initialize
  void add(     // add element
      double x, // coords to add
      double y);
  void remove(  // delete element
      double x, // coords to delete
      double y);
  int32_t count() { // no of elements
    return n;
  }
  void fit(        // fit the given
      int degree); // return actual
  double get_a() const { // get x squared coefficient
    return a;
  }
  double get_b() const { // get x coefficient
    return b;
  }
  double get_c() const { // get constant term
    return c;
  }

private:
  int32_t n;      // no of elements
  double a, b, c; // result
  double sigx;    // sum of x
  double sigy;    // sum of y
  double sigxx;   // sum x squared
  double sigxy;   // sum of xy
  double sigyy;   // sum y squared
  // Higher powers kept in long double to limit rounding error in fit().
  long double sigxxx;  // sum x cubed
  long double sigxxy;  // sum xsquared y
  long double sigxxxx; // sum x fourth
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/quadlsq.h
|
C++
|
apache-2.0
| 1,982
|
/**********************************************************************
* File: quadratc.h (Formerly quadrtic.h)
* Description: Code for the QUAD_COEFFS class.
* Author: Ray Smith
* Created: Tue Oct 08 17:24:40 BST 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef QUADRATC_H
#define QUADRATC_H
#include "points.h"
namespace tesseract {
// Coefficients of a single quadratic y = a*x^2 + b*x + c, with evaluation
// and translation support.
class QUAD_COEFFS {
public:
  QUAD_COEFFS() = default;
  QUAD_COEFFS(    // constructor
      double xsq, // coefficients
      float x, float constant) {
    a = xsq;
    b = x;
    c = constant;
  }
  // Evaluate the quadratic at x (Horner form).
  float y(               // evaluate
      float x) const {   // at x
    return static_cast<float>((a * x + b) * x + c);
  }
  // Translate the curve by vec so that the new curve passes through the
  // shifted points.
  void move(         // reposition word
      ICOORD vec) {  // by vector
    /************************************************************
    y - q = a (x - p)^2 + b (x - p) + c
    y - q = ax^2 - 2apx + ap^2 + bx - bp + c
    y = ax^2 + (b - 2ap)x + (c - bp + ap^2 + q)
    ************************************************************/
    int16_t p = vec.x();
    int16_t q = vec.y();
    // Order matters: c uses the old b before b is updated.
    c = static_cast<float>(c - b * p + a * p * p + q);
    b = static_cast<float>(b - 2 * a * p);
  }

  double a; // x squared
  float b;  // x
  float c;  // constant

private:
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/quadratc.h
|
C++
|
apache-2.0
| 1,922
|
/**********************************************************************
* File: quspline.cpp (Formerly qspline.c)
* Description: Code for the QSPLINE class.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "quspline.h"
#include "points.h" // for ICOORD
#include "quadlsq.h" // for QLSQ
#include "quadratc.h" // for QUAD_COEFFS
#include <allheaders.h> // for pixRenderPolyline, pixGetDepth, pixGetHeight
#include "pix.h" // for L_CLEAR_PIXELS, L_SET_PIXELS, Pix (ptr only)
namespace tesseract {
#define QSPLINE_PRECISION 16 // no of steps to draw
/**********************************************************************
* QSPLINE::QSPLINE
*
* Constructor to build a QSPLINE given the components used in the old code.
**********************************************************************/
// Constructor to build a QSPLINE given the components used in the old code.
// Copies count+1 segment boundaries from xstarts and count triples of
// coefficients (a, b, c) from coeffs.
QSPLINE::QSPLINE( // constructor
    int32_t count,    // no of segments
    int32_t *xstarts, // start coords
    double *coeffs    // coefficients
) {
  segments = count;
  xcoords = new int32_t[count + 1];
  quadratics = new QUAD_COEFFS[count];
  for (int32_t i = 0; i < count; ++i) {
    xcoords[i] = xstarts[i];
    quadratics[i] = QUAD_COEFFS(coeffs[i * 3], coeffs[i * 3 + 1], coeffs[i * 3 + 2]);
  }
  // The final boundary is the right edge of the last segment.
  xcoords[count] = xstarts[count];
}
/**********************************************************************
* QSPLINE::QSPLINE
*
* Constructor to build a QSPLINE by appproximation of points.
**********************************************************************/
// Constructor to build a QSPLINE by least-squares approximation of points.
// xstarts gives the segcount+1 segment boundaries; (xpts, ypts) are the
// pointcount samples to fit; degree is the polynomial degree per segment.
QSPLINE::QSPLINE( // constructor
    int xstarts[], // spline boundaries
    int segcount,  // no of segments
    int xpts[],    // points to fit
    int ypts[], int pointcount, // no of pts
    int degree // fit required
) {
  int pointindex;    /*no along text line */
  int segment;       /*segment no */
  int32_t *ptcounts; // cumulative count of points per segment
  QLSQ qlsq;         /*accumulator */

  segments = segcount;
  xcoords = new int32_t[segcount + 1];
  ptcounts = new int32_t[segcount + 1];
  quadratics = new QUAD_COEFFS[segcount];
  memmove(xcoords, xstarts, (segcount + 1) * sizeof(int32_t));
  ptcounts[0] = 0; /*none in any yet */
  // Build cumulative counts: ptcounts[s] = index of first point in segment s.
  // Assumes xpts is sorted in ascending x order — TODO confirm with callers.
  for (segment = 0, pointindex = 0; pointindex < pointcount; pointindex++) {
    while (segment < segcount && xpts[pointindex] >= xstarts[segment]) {
      segment++; /*try next segment */
      /*cumulative counts */
      ptcounts[segment] = ptcounts[segment - 1];
    }
    ptcounts[segment]++; /*no in previous partition */
  }
  while (segment < segcount) {
    segment++;
    /*zero the rest */
    ptcounts[segment] = ptcounts[segment - 1];
  }
  // Fit each segment independently, adding interpolated anchor points at
  // the segment boundaries so adjacent fits meet more smoothly.
  for (segment = 0; segment < segcount; segment++) {
    qlsq.clear();
    /*first blob */
    pointindex = ptcounts[segment];
    if (pointindex > 0 && xpts[pointindex] != xpts[pointindex - 1] &&
        xpts[pointindex] != xstarts[segment]) {
      // Linear interpolation of y at the left boundary.
      qlsq.add(xstarts[segment],
               ypts[pointindex - 1] + (ypts[pointindex] - ypts[pointindex - 1]) *
                                          (xstarts[segment] - xpts[pointindex - 1]) /
                                          (xpts[pointindex] - xpts[pointindex - 1]));
    }
    for (; pointindex < ptcounts[segment + 1]; pointindex++) {
      qlsq.add(xpts[pointindex], ypts[pointindex]);
    }
    // NOTE(review): this boundary interpolation divides by
    // xpts[pointindex] - xpts[pointindex - 1] without the inequality guard
    // the left-boundary case has — presumably callers never supply
    // duplicate x at a right boundary; verify.
    if (pointindex > 0 && pointindex < pointcount && xpts[pointindex] != xstarts[segment + 1]) {
      qlsq.add(xstarts[segment + 1],
               ypts[pointindex - 1] + (ypts[pointindex] - ypts[pointindex - 1]) *
                                          (xstarts[segment + 1] - xpts[pointindex - 1]) /
                                          (xpts[pointindex] - xpts[pointindex - 1]));
    }
    qlsq.fit(degree);
    quadratics[segment].a = qlsq.get_a();
    quadratics[segment].b = qlsq.get_b();
    quadratics[segment].c = qlsq.get_c();
  }
  delete[] ptcounts;
}
/**********************************************************************
 * QSPLINE::QSPLINE
 *
 * Copy constructor: start empty, then reuse the assignment operator.
 **********************************************************************/
QSPLINE::QSPLINE( // constructor
    const QSPLINE &src)
    : segments(0), xcoords(nullptr), quadratics(nullptr) {
  *this = src; // deep copy via operator=
}
/**********************************************************************
 * QSPLINE::~QSPLINE
 *
 * Destroy a QSPLINE, releasing the owned boundary and coefficient arrays.
 **********************************************************************/
QSPLINE::~QSPLINE() {
  delete[] xcoords;
  delete[] quadratics;
}
/**********************************************************************
 * QSPLINE::operator=
 *
 * Deep-copy a QSPLINE: replaces the owned arrays with copies of source's.
 **********************************************************************/
QSPLINE &QSPLINE::operator=( // assignment
    const QSPLINE &source) {
  if (this == &source) {
    // Self-assignment guard: without it the deletes below would free the
    // arrays we are about to copy from.
    return *this;
  }
  delete[] xcoords;
  delete[] quadratics;
  segments = source.segments;
  xcoords = new int32_t[segments + 1];
  quadratics = new QUAD_COEFFS[segments];
  memmove(xcoords, source.xcoords, (segments + 1) * sizeof(int32_t));
  memmove(quadratics, source.quadratics, segments * sizeof(QUAD_COEFFS));
  return *this;
}
/**********************************************************************
* QSPLINE::step
*
* Return the total of the step functions between the given coords.
**********************************************************************/
double QSPLINE::step( // find step functions
double x1, // between coords
double x2) {
int index1, index2; // indices of coords
double total; /*total steps */
index1 = spline_index(x1);
index2 = spline_index(x2);
total = 0;
while (index1 < index2) {
total += static_cast<double>(quadratics[index1 + 1].y(static_cast<float>(xcoords[index1 + 1])));
total -= static_cast<double>(quadratics[index1].y(static_cast<float>(xcoords[index1 + 1])));
index1++; /*next segment */
}
return total; /*total steps */
}
/**********************************************************************
 * QSPLINE::y
 *
 * Return the y value at the given x value, evaluated on the segment
 * that contains x.
 **********************************************************************/
double QSPLINE::y( // evaluate
    double x       // coord to evaluate at
) const {
  return quadratics[spline_index(x)].y(x); // in correct segment
}
/**********************************************************************
 * QSPLINE::spline_index
 *
 * Binary search: return the index of the largest xcoord not greater
 * than x (clamped to [0, segments - 1] by the search bounds).
 **********************************************************************/
int32_t QSPLINE::spline_index( // evaluate
    double x                   // coord to evaluate at
) const {
  int32_t lo = 0;        // bottom of range
  int32_t hi = segments; // top of range
  while (hi - lo > 1) {
    const int32_t mid = (hi + lo) / 2; // centre of range
    if (x >= xcoords[mid]) {
      lo = mid; // new min
    } else {
      hi = mid; // new max
    }
  }
  return lo;
}
/**********************************************************************
 * QSPLINE::move
 *
 * Reposition spline by vector: shift every segment boundary by vec.x()
 * and translate every quadratic piece by vec.
 **********************************************************************/
void QSPLINE::move( // reposition spline
    ICOORD vec      // by vector
) {
  const int16_t x_shift = vec.x();
  for (int32_t seg = 0; seg < segments; ++seg) {
    xcoords[seg] += x_shift;
    quadratics[seg].move(vec);
  }
  xcoords[segments] += x_shift; // right edge of the last segment
}
/**********************************************************************
 * QSPLINE::overlap
 *
 * Return true if spline2 overlaps this by no more than fraction less
 * than the bounds of this.
 **********************************************************************/
bool QSPLINE::overlap( // test overlap
    QSPLINE *spline2,  // 2 cannot be smaller
    double fraction    // by more than this
) {
  int leftlimit = xcoords[1];             /*common left limit */
  int rightlimit = xcoords[segments - 1]; /*common right limit */
  const double margin = fraction * (rightlimit - leftlimit);
  if (spline2->segments < 3) {
    return false; // too few segments to compare
  }
  if (spline2->xcoords[1] > leftlimit + margin) {
    return false; // starts too far right
  }
  if (spline2->xcoords[spline2->segments - 1] < rightlimit - margin) {
    return false; // ends too far left
  }
  return true;
}
/**********************************************************************
 * extrapolate_spline
 *
 * Extrapolates the spline linearly using the same gradient as the
 * quadratic has at either end. Rebuilds the boundary and coefficient
 * arrays with up to two extra linear segments.
 **********************************************************************/
void QSPLINE::extrapolate( // linear extrapolation
    double gradient,       // gradient to use
    int xmin,              // new left edge
    int xmax               // new right edge
) {
  int segment;      /*current segment of spline */
  int dest_segment; // dest index
  int32_t *xstarts; // new boundaries
  QUAD_COEFFS *quads; // new ones
  int increment;    // number of segments to add (0, 1 or 2)

  increment = xmin < xcoords[0] ? 1 : 0;
  if (xmax > xcoords[segments]) {
    increment++;
  }
  if (increment == 0) {
    return; // already covers [xmin, xmax]; nothing to do
  }
  xstarts = new int32_t[segments + 1 + increment];
  quads = new QUAD_COEFFS[segments + increment];
  if (xmin < xcoords[0]) {
    // Prepend a linear piece (a=0) through the old left endpoint.
    xstarts[0] = xmin;
    quads[0].a = 0;
    quads[0].b = gradient;
    quads[0].c = y(xcoords[0]) - quads[0].b * xcoords[0];
    dest_segment = 1;
  } else {
    dest_segment = 0;
  }
  // Copy the existing segments unchanged.
  for (segment = 0; segment < segments; segment++) {
    xstarts[dest_segment] = xcoords[segment];
    quads[dest_segment] = quadratics[segment];
    dest_segment++;
  }
  xstarts[dest_segment] = xcoords[segment];
  if (xmax > xcoords[segments]) {
    // Append a linear piece (a=0) through the old right endpoint.
    quads[dest_segment].a = 0;
    quads[dest_segment].b = gradient;
    quads[dest_segment].c = y(xcoords[segments]) - quads[dest_segment].b * xcoords[segments];
    dest_segment++;
    xstarts[dest_segment] = xmax + 1;
  }
  // Swap in the new arrays and free the old ones.
  segments = dest_segment;
  delete[] xcoords;
  delete[] quadratics;
  xcoords = xstarts;
  quadratics = quads;
}
/**********************************************************************
* QSPLINE::plot
*
* Draw the QSPLINE in the given colour.
**********************************************************************/
#ifndef GRAPHICS_DISABLED
// Draw the QSPLINE in the given colour, approximating each quadratic
// segment by QSPLINE_PRECISION straight-line steps.
void QSPLINE::plot(            // draw it
    ScrollView *window,        // window to draw in
    ScrollView::Color colour   // colour to draw in
) const {
  window->Pen(colour);
  for (int32_t seg = 0; seg < segments; ++seg) {
    // Step size for this segment.
    const double dx =
        static_cast<double>(xcoords[seg + 1] - xcoords[seg]) / QSPLINE_PRECISION;
    double x = xcoords[seg];
    for (int16_t i = 0; i <= QSPLINE_PRECISION; ++i) {
      if (seg == 0 && i == 0) {
        window->SetCursor(x, quadratics[seg].y(x)); // first point: move only
      } else {
        window->DrawTo(x, quadratics[seg].y(x));
      }
      x += dx;
    }
  }
}
#endif
// Paint the baseline over pix as a polyline. If pix has depth of 32 the
// line is painted in red, depth 1 sets pixels, any other depth clears them.
void QSPLINE::plot(Image pix) const {
  if (pix == nullptr) {
    return;
  }

  int32_t segment;  // Index of segment
  int16_t step;     // Index of poly piece
  double increment; // x increment
  double x;         // x coord
  auto height = static_cast<double>(pixGetHeight(pix));
  Pta *points = ptaCreate(QSPLINE_PRECISION * segments);
  const int kLineWidth = 5;
  // Sample each quadratic segment at QSPLINE_PRECISION+1 points.
  for (segment = 0; segment < segments; segment++) {
    increment = static_cast<double>((xcoords[segment + 1] - xcoords[segment])) / QSPLINE_PRECISION;
    x = xcoords[segment];
    for (step = 0; step <= QSPLINE_PRECISION; step++) {
      // Flip y: image coordinates grow downwards.
      double y = height - quadratics[segment].y(x);
      ptaAddPt(points, x, y);
      x += increment;
    }
  }

  switch (pixGetDepth(pix)) {
    case 1:
      pixRenderPolyline(pix, points, kLineWidth, L_SET_PIXELS, 1);
      break;
    case 32:
      pixRenderPolylineArb(pix, points, kLineWidth, 255, 0, 0, 1);
      break;
    default:
      pixRenderPolyline(pix, points, kLineWidth, L_CLEAR_PIXELS, 1);
      break;
  }
  ptaDestroy(&points);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/quspline.cpp
|
C++
|
apache-2.0
| 13,050
|
/**********************************************************************
* File: quspline.h (Formerly qspline.h)
* Description: Code for the QSPLINE class.
* Author: Ray Smith
* Created: Tue Oct 08 17:16:12 BST 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef QUSPLINE_H
#define QUSPLINE_H
#include "scrollview.h" // for ScrollView, ScrollView::Color
#include <cstdint> // for int32_t
struct Pix;
namespace tesseract {
class ICOORD;
class QUAD_COEFFS;
class ROW;
class TBOX;
// Piecewise-quadratic spline: `segments` quadratics over boundaries held
// in `xcoords` (segments + 1 entries). Owns both arrays.
class TESS_API QSPLINE {
  friend void make_first_baseline(TBOX *, int, int *, int *, QSPLINE *, QSPLINE *, float);
  friend void make_holed_baseline(TBOX *, int, QSPLINE *, QSPLINE *, float);
  friend void tweak_row_baseline(ROW *, double, double);

public:
  QSPLINE() {          // empty constructor
    segments = 0;
    xcoords = nullptr; // everything empty
    quadratics = nullptr;
  }
  QSPLINE( // copy constructor
      const QSPLINE &src);
  QSPLINE(              // constructor
      int32_t count,    // number of segments
      int32_t *xstarts, // segment starts
      double *coeffs);  // coefficients
  ~QSPLINE();           // destructor
  QSPLINE(        // least squares fit
      int xstarts[],    // spline boundaries
      int segcount,     // no of segments
      int xcoords[],    // points to fit
      int ycoords[], int blobcount, // no of coords
      int degree);      // function
  double step(  // step change
      double x1, // between coords
      double x2);
  double y(              // evaluate
      double x) const;   // at x
  void move(       // reposition spline
      ICOORD vec); // by vector
  bool overlap(         // test overlap
      QSPLINE *spline2, // 2 cannot be smaller
      double fraction); // by more than this
  void extrapolate(    // linear extrapolation
      double gradient, // gradient to use
      int left,        // new left edge
      int right);      // new right edge
#ifndef GRAPHICS_DISABLED
  void plot(                           // draw it
      ScrollView *window,              // in window
      ScrollView::Color colour) const; // in colour
#endif
  // Paint the baseline over pix. If pix has depth of 32, then the line will
  // be painted in red. Otherwise it will be painted in black.
  void plot(Image pix) const;
  QSPLINE &operator=(const QSPLINE &source); // from this

private:
  int32_t spline_index( // binary search
      double x) const;  // for x
  int32_t segments;         // no of segments
  int32_t *xcoords;         // no of coords
  QUAD_COEFFS *quadratics;  // spline pieces
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/quspline.h
|
C++
|
apache-2.0
| 3,390
|
/**********************************************************************
* File: ratngs.cpp (Formerly ratings.c)
* Description: Code to manipulate the BLOB_CHOICE and WERD_CHOICE classes.
* Author: Ray Smith
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "ratngs.h"
#include "blobs.h"
#include "matrix.h"
#include "normalis.h" // kBlnBaselineOffset.
#include "unicharset.h"
#include <algorithm>
#include <cmath>
#include <string>
#include <vector>
namespace tesseract {
// Sentinel rating far above any real classifier rating; marks a word
// choice as unusable.
const float WERD_CHOICE::kBadRating = 100000.0;
// Min offset in baseline-normalized coords to make a character a subscript.
const int kMinSubscriptOffset = 20;
// Min offset in baseline-normalized coords to make a character a superscript.
const int kMinSuperscriptOffset = 20;
// Max y of bottom of a drop-cap blob.
const int kMaxDropCapBottom = -128;
// Max fraction of x-height to use as denominator in measuring x-height overlap.
const double kMaxOverlapDenominator = 0.125;
// Min fraction of x-height range that should be in agreement for matching
// x-heights.
const double kMinXHeightMatch = 0.5;
// Max tolerance on baseline position as a fraction of x-height for matching
// baselines.
const double kMaxBaselineDrift = 0.0625;
// Display names for the permuter codes below; indexed by the numeric value
// of enum PermuterType (ratngs.h) via kPermuterTypeNames.
static const char kPermuterTypeNoPerm[] = "None";
static const char kPermuterTypePuncPerm[] = "Punctuation";
static const char kPermuterTypeTopPerm[] = "Top Choice";
static const char kPermuterTypeLowerPerm[] = "Top Lower Case";
static const char kPermuterTypeUpperPerm[] = "Top Upper Case";
static const char kPermuterTypeNgramPerm[] = "Ngram";
static const char kPermuterTypeNumberPerm[] = "Number";
static const char kPermuterTypeUserPatPerm[] = "User Pattern";
static const char kPermuterTypeSysDawgPerm[] = "System Dictionary";
static const char kPermuterTypeDocDawgPerm[] = "Document Dictionary";
static const char kPermuterTypeUserDawgPerm[] = "User Dictionary";
static const char kPermuterTypeFreqDawgPerm[] = "Frequent Words Dictionary";
static const char kPermuterTypeCompoundPerm[] = "Compound";
// Order must stay in sync with enum PermuterType in ratngs.h.
static const char *const kPermuterTypeNames[] = {
    kPermuterTypeNoPerm,       // 0
    kPermuterTypePuncPerm,     // 1
    kPermuterTypeTopPerm,      // 2
    kPermuterTypeLowerPerm,    // 3
    kPermuterTypeUpperPerm,    // 4
    kPermuterTypeNgramPerm,    // 5
    kPermuterTypeNumberPerm,   // 6
    kPermuterTypeUserPatPerm,  // 7
    kPermuterTypeSysDawgPerm,  // 8
    kPermuterTypeDocDawgPerm,  // 9
    kPermuterTypeUserDawgPerm, // 10
    kPermuterTypeFreqDawgPerm, // 11
    kPermuterTypeCompoundPerm  // 12
};
/**
 * BLOB_CHOICE::BLOB_CHOICE
 *
 * Constructor to build a BLOB_CHOICE from a char, rating and certainty.
 * Font information is unknown at this point and left at -1;
 * matrix_cell_ stays default-constructed until set_matrix_cell() is called.
 */
BLOB_CHOICE::BLOB_CHOICE(UNICHAR_ID src_unichar_id, // character id
                         float src_rating,          // rating
                         float src_cert,            // certainty
                         int src_script_id,         // script
                         float min_xheight,         // min xheight allowed
                         float max_xheight,         // max xheight by this char
                         float yshift,              // yshift out of position
                         BlobChoiceClassifier c)    // adapted match or other
    : unichar_id_(src_unichar_id)
    , fontinfo_id_(-1)
    , fontinfo_id2_(-1)
    , rating_(src_rating)
    , certainty_(src_cert)
    , script_id_(src_script_id)
    , min_xheight_(min_xheight)
    , max_xheight_(max_xheight)
    , yshift_(yshift)
    , classifier_(c) {}
/**
 * BLOB_CHOICE::BLOB_CHOICE
 *
 * Constructor to build a BLOB_CHOICE from another BLOB_CHOICE.
 * Delegates to the copy assignment operator so the member-by-member copy
 * logic exists in exactly one place; previously the two were duplicated and
 * had to be kept in sync by hand.
 */
BLOB_CHOICE::BLOB_CHOICE(const BLOB_CHOICE &other) : ELIST_LINK(other) {
  *this = other;
}
// Copy assignment operator: replicates every data member of other,
// delegating base-class (list link) state to ELIST_LINK::operator=.
BLOB_CHOICE &BLOB_CHOICE::operator=(const BLOB_CHOICE &other) {
  ELIST_LINK::operator=(other);
  unichar_id_ = other.unichar_id();
#ifndef DISABLED_LEGACY_ENGINE
  fonts_ = other.fonts_;
#endif // ndef DISABLED_LEGACY_ENGINE
  fontinfo_id_ = other.fontinfo_id();
  fontinfo_id2_ = other.fontinfo_id2();
  rating_ = other.rating();
  certainty_ = other.certainty();
  script_id_ = other.script_id();
  matrix_cell_ = other.matrix_cell_;
  min_xheight_ = other.min_xheight_;
  max_xheight_ = other.max_xheight_;
  yshift_ = other.yshift();
  classifier_ = other.classifier_;
  return *this;
}
// Returns true if *this and other agree on the baseline and x-height
// to within some tolerance based on a given estimate of the x-height.
bool BLOB_CHOICE::PosAndSizeAgree(const BLOB_CHOICE &other, float x_height, bool debug) const {
  // Reject immediately if the baselines drift apart by more than the
  // allowed fraction of the x-height.
  const double shift_delta = std::fabs(yshift() - other.yshift());
  if (shift_delta > kMaxBaselineDrift * x_height) {
    if (debug) {
      tprintf("Baseline diff %g for %d v %d\n", shift_delta, unichar_id_, other.unichar_id_);
    }
    return false;
  }
  // Measure how much of the smaller supported x-height range is shared.
  const double range_a = max_xheight() - min_xheight();
  const double range_b = other.max_xheight() - other.min_xheight();
  const double denom =
      ClipToRange(std::min(range_a, range_b), 1.0, kMaxOverlapDenominator * x_height);
  double shared = std::min(max_xheight(), other.max_xheight()) -
                  std::max(min_xheight(), other.min_xheight());
  shared /= denom;
  if (debug) {
    tprintf("PosAndSize for %d v %d: bl diff = %g, ranges %g, %g / %g ->%g\n", unichar_id_,
            other.unichar_id_, shift_delta, range_a, range_b, denom, shared);
  }
  return shared >= kMinXHeightMatch;
}
// Helper to find the BLOB_CHOICE in the bc_list that matches the given
// unichar_id, or nullptr if there is no match.
BLOB_CHOICE *FindMatchingChoice(UNICHAR_ID char_id, BLOB_CHOICE_LIST *bc_list) {
  BLOB_CHOICE_IT it(bc_list);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    if (it.data()->unichar_id() == char_id) {
      return it.data();
    }
  }
  return nullptr;
}
// Static: maps a permuter code to its display name.
// Note: no range check is performed; callers must pass a valid
// PermuterType value (< NUM_PERMUTER_TYPES).
const char *WERD_CHOICE::permuter_name(uint8_t permuter) {
  return kPermuterTypeNames[permuter];
}
// Returns a short, fixed string naming the given script position,
// or "SP_UNKNOWN" for a value outside the enum.
const char *ScriptPosToString(enum ScriptPos script_pos) {
  switch (script_pos) {
    case SP_NORMAL:
      return "NORM";
    case SP_SUBSCRIPT:
      return "SUB";
    case SP_SUPERSCRIPT:
      return "SUPER";
    case SP_DROPCAP:
      return "DROPC";
  }
  return "SP_UNKNOWN";
}
/**
 * WERD_CHOICE::WERD_CHOICE
 *
 * Constructor to build a WERD_CHOICE from the given string.
 * The function assumes that src_string is not nullptr.
 * If the string cannot be encoded with this unicharset, the word is
 * initialized empty and marked bad instead.
 */
WERD_CHOICE::WERD_CHOICE(const char *src_string, const UNICHARSET &unicharset)
    : unicharset_(&unicharset) {
  std::vector<UNICHAR_ID> encoding;
  std::vector<char> lengths;
  std::string cleaned = unicharset.CleanupString(src_string);
  if (unicharset.encode_string(cleaned.c_str(), true, &encoding, &lengths, nullptr)) {
    // NUL-terminate so lengths can be passed directly as a C string; this
    // avoids the needless copy into a temporary std::string the old code made.
    lengths.push_back('\0');
    this->init(cleaned.c_str(), &lengths[0], 0.0, 0.0, NO_PERM);
  } else { // There must have been an invalid unichar in the string.
    this->init(8);
    this->make_bad();
  }
}
/**
 * WERD_CHOICE::init
 *
 * Helper function to build a WERD_CHOICE from the given string,
 * fragment lengths, rating, certainty and permuter.
 *
 * The function assumes that src_string is not nullptr.
 * src_lengths argument could be nullptr, in which case the unichars
 * in src_string are assumed to all be of length 1.
 */
void WERD_CHOICE::init(const char *src_string, const char *src_lengths, float src_rating,
                       float src_certainty, uint8_t src_permuter) {
  int src_string_len = strlen(src_string);
  if (src_string_len == 0) {
    // Empty string: allocate a default-sized but zero-length word.
    this->init(8);
  } else {
    this->init(src_lengths ? strlen(src_lengths) : src_string_len);
    // init(n) presumably sets reserved_ to n, so the word is filled exactly
    // to capacity here — TODO confirm against the init(int) overload.
    length_ = reserved_;
    int offset = 0; // byte offset of the current unichar within src_string
    for (unsigned i = 0; i < length_; ++i) {
      int unichar_length = src_lengths ? src_lengths[i] : 1;
      unichar_ids_[i] = unicharset_->unichar_to_id(src_string + offset, unichar_length);
      state_[i] = 1; // each unichar initially spans a single blob
      certainties_[i] = src_certainty;
      offset += unichar_length;
    }
  }
  adjust_factor_ = 1.0f;
  rating_ = src_rating;
  certainty_ = src_certainty;
  permuter_ = src_permuter;
  dangerous_ambig_found_ = false;
}
/**
 * WERD_CHOICE::~WERD_CHOICE
 */
WERD_CHOICE::~WERD_CHOICE() = default;
// Returns the display name of this word's own permuter code.
const char *WERD_CHOICE::permuter_name() const {
  return kPermuterTypeNames[permuter_];
}
// Returns the BLOB_CHOICE_LIST corresponding to the given index in the word,
// taken from the appropriate cell in the ratings MATRIX.
// Borrowed pointer, so do not delete. An empty list is created and stored
// in the matrix if the cell was previously empty.
BLOB_CHOICE_LIST *WERD_CHOICE::blob_choices(unsigned index, MATRIX *ratings) const {
  const MATRIX_COORD coord = MatrixCoord(index);
  auto *choices = ratings->get(coord.col, coord.row);
  if (choices != nullptr) {
    return choices;
  }
  choices = new BLOB_CHOICE_LIST;
  ratings->put(coord.col, coord.row, choices);
  return choices;
}
// Returns the MATRIX_COORD corresponding to the location in the ratings
// MATRIX for the given index into the word.
MATRIX_COORD WERD_CHOICE::MatrixCoord(unsigned index) const {
  // The column is the index of the first chopped blob covered by the unichar,
  // i.e. the sum of the blob counts (state_) of all preceding unichars.
  int first_blob = 0;
  for (unsigned i = 0; i < index; ++i) {
    first_blob += state_[i];
  }
  // The row is the index of the last blob covered by this unichar.
  return MATRIX_COORD(first_blob, first_blob + state_[index] - 1);
}
// Sets the entries for the given index from the BLOB_CHOICE, assuming
// unit fragment lengths, but setting the state for this index to blob_count.
// The script position is reset to SP_NORMAL.
void WERD_CHOICE::set_blob_choice(unsigned index, int blob_count, const BLOB_CHOICE *blob_choice) {
  unichar_ids_[index] = blob_choice->unichar_id();
  script_pos_[index] = tesseract::SP_NORMAL;
  state_[index] = blob_count;
  certainties_[index] = blob_choice->certainty();
}
/**
 * contains_unichar_id
 *
 * Returns true if unichar_ids_ contain the given unichar_id, false otherwise.
 * Only the first length_ entries are searched; the rest of the array is
 * spare capacity.
 */
bool WERD_CHOICE::contains_unichar_id(UNICHAR_ID unichar_id) const {
  for (unsigned i = 0; i < length_; ++i) {
    if (unichar_ids_[i] == unichar_id) {
      return true;
    }
  }
  return false;
}
/**
 * remove_unichar_ids
 *
 * Removes num unichar ids starting from index start from unichar_ids_
 * and updates length_ and fragment_lengths_ to reflect this change.
 * Note: this function does not modify rating_ and certainty_.
 */
void WERD_CHOICE::remove_unichar_ids(unsigned start, int num) {
  ASSERT_HOST(start + num <= length_);
  // Accumulate the states to account for the merged blobs.
  // The blobs of the removed unichars are credited to the preceding
  // surviving unichar if there is one, otherwise to the following one,
  // so TotalOfStates() is preserved.
  for (int i = 0; i < num; ++i) {
    if (start > 0) {
      state_[start - 1] += state_[start + i];
    } else if (start + num < length_) {
      state_[start + num] += state_[start + i];
    }
  }
  // Shift the tails of all parallel arrays down over the removed entries.
  for (unsigned i = start; i + num < length_; ++i) {
    unichar_ids_[i] = unichar_ids_[i + num];
    script_pos_[i] = script_pos_[i + num];
    state_[i] = state_[i + num];
    certainties_[i] = certainties_[i + num];
  }
  length_ -= num;
}
/**
 * reverse_and_mirror_unichar_ids
 *
 * Reverses and mirrors unichars in unichar_ids: the order of the first
 * length_ ids is flipped and every id is replaced by its mirror glyph.
 */
void WERD_CHOICE::reverse_and_mirror_unichar_ids() {
  // Reverse the in-use prefix, then mirror each element exactly once —
  // equivalent to the classic swap-and-mirror pairwise loop.
  std::reverse(unichar_ids_.begin(), unichar_ids_.begin() + length_);
  for (unsigned i = 0; i < length_; ++i) {
    unichar_ids_[i] = unicharset_->get_mirror(unichar_ids_[i]);
  }
}
/**
 * punct_stripped
 *
 * Returns the half-open interval of unichar_id indices [start, end) which
 * enclose the core portion of this word -- the part after stripping
 * punctuation from the left and right.
 */
void WERD_CHOICE::punct_stripped(unsigned *start, unsigned *end) const {
  // Scan forward over leading punctuation.
  unsigned first = 0;
  while (first < length() && unicharset()->get_ispunctuation(unichar_id(first))) {
    ++first;
  }
  // Scan backward over trailing punctuation.
  unsigned past_last = length();
  while (past_last > 0 && unicharset()->get_ispunctuation(unichar_id(past_last - 1))) {
    --past_last;
  }
  *start = first;
  *end = past_last;
}
// Returns in [*pstart, *pend) the span of the word that remains after
// trimming superscript digits from both ends.
void WERD_CHOICE::GetNonSuperscriptSpan(int *pstart, int *pend) const {
  // Trim superscript digits off the right end first...
  int last = length();
  while (last > 0 && unicharset_->get_isdigit(unichar_ids_[last - 1]) &&
         BlobPosition(last - 1) == tesseract::SP_SUPERSCRIPT) {
    --last;
  }
  // ...then off the left end, never crossing the new right edge.
  int first = 0;
  while (first < last && unicharset_->get_isdigit(unichar_ids_[first]) &&
         BlobPosition(first) == tesseract::SP_SUPERSCRIPT) {
    ++first;
  }
  *pstart = first;
  *pend = last;
}
// Returns a new WERD_CHOICE over the sub-range [start, end) of this word.
// Ratings are zeroed in the copy; states and certainties are preserved.
// An inverted range is clamped to empty.
WERD_CHOICE WERD_CHOICE::shallow_copy(unsigned start, unsigned end) const {
  ASSERT_HOST(start <= length_);
  ASSERT_HOST(end <= length_);
  end = std::max(end, start); // clamp inverted range to empty
  WERD_CHOICE sub_word(unicharset_, end - start);
  for (unsigned idx = start; idx < end; ++idx) {
    sub_word.append_unichar_id_space_allocated(unichar_ids_[idx], state_[idx], 0.0f,
                                               certainties_[idx]);
  }
  return sub_word;
}
/**
* has_rtl_unichar_id
*
* Returns true if unichar_ids contain at least one "strongly" RTL unichar.
*/
bool WERD_CHOICE::has_rtl_unichar_id() const {
for (unsigned i = 0; i < length_; ++i) {
UNICHARSET::Direction dir = unicharset_->get_direction(unichar_ids_[i]);
if (dir == UNICHARSET::U_RIGHT_TO_LEFT || dir == UNICHARSET::U_RIGHT_TO_LEFT_ARABIC) {
return true;
}
}
return false;
}
/**
 * string_and_lengths
 *
 * Populates the given word_str with unichars from unichar_ids and
 * word_lengths_str (if non-null) with the corresponding unichar byte lengths.
 */
void WERD_CHOICE::string_and_lengths(std::string *word_str, std::string *word_lengths_str) const {
  word_str->clear();
  if (word_lengths_str != nullptr) {
    word_lengths_str->clear();
  }
  for (unsigned i = 0; i < length_; ++i) {
    const char *utf8 = unicharset_->id_to_unichar_ext(unichar_ids_[i]);
    *word_str += utf8;
    if (word_lengths_str != nullptr) {
      // Each "length" character encodes the byte length of one unichar.
      *word_lengths_str += static_cast<char>(strlen(utf8));
    }
  }
}
/**
 * append_unichar_id
 *
 * Make sure there is enough space in the word for the new unichar id
 * and call append_unichar_id_space_allocated().
 */
void WERD_CHOICE::append_unichar_id(UNICHAR_ID unichar_id, int blob_count, float rating,
                                    float certainty) {
  if (length_ == reserved_) {
    // At capacity: grow the backing arrays before appending.
    this->double_the_size();
  }
  this->append_unichar_id_space_allocated(unichar_id, blob_count, rating, certainty);
}
/**
 * WERD_CHOICE::operator+=
 *
 * Cat a second word rating on the end of this current one.
 * The ratings are added and the confidence is the min.
 * If the permuters are NOT the same the permuter is set to COMPOUND_PERM
 */
WERD_CHOICE &WERD_CHOICE::operator+=(const WERD_CHOICE &second) {
  // Unichar ids are only comparable within a single unicharset.
  ASSERT_HOST(unicharset_ == second.unicharset_);
  while (reserved_ < length_ + second.length()) {
    this->double_the_size();
  }
  // Append second's per-unichar parallel arrays after our own.
  const std::vector<UNICHAR_ID> &other_unichar_ids = second.unichar_ids();
  for (unsigned i = 0; i < second.length(); ++i) {
    unichar_ids_[length_ + i] = other_unichar_ids[i];
    state_[length_ + i] = second.state_[i];
    certainties_[length_ + i] = second.certainties_[i];
    script_pos_[length_ + i] = second.BlobPosition(i);
  }
  length_ += second.length();
  // Keep the larger adjustment factor of the two words.
  if (second.adjust_factor_ > adjust_factor_) {
    adjust_factor_ = second.adjust_factor_;
  }
  rating_ += second.rating();          // add ratings
  if (second.certainty() < certainty_) { // take min
    certainty_ = second.certainty();
  }
  if (second.dangerous_ambig_found_) {
    dangerous_ambig_found_ = true;
  }
  // Combine permuters: keep a single non-NO_PERM permuter if they agree,
  // otherwise the result is a compound word.
  if (permuter_ == NO_PERM) {
    permuter_ = second.permuter();
  } else if (second.permuter() != NO_PERM && second.permuter() != permuter_) {
    permuter_ = COMPOUND_PERM;
  }
  return *this;
}
/**
 * WERD_CHOICE::operator=
 *
 * Allocate enough memory to hold a copy of source and copy over
 * all the information from source to this WERD_CHOICE.
 */
WERD_CHOICE &WERD_CHOICE::operator=(const WERD_CHOICE &source) {
  // Grow capacity first so the element copies below cannot overflow.
  while (reserved_ < source.length()) {
    this->double_the_size();
  }
  unicharset_ = source.unicharset_;
  const std::vector<UNICHAR_ID> &other_unichar_ids = source.unichar_ids();
  for (unsigned i = 0; i < source.length(); ++i) {
    unichar_ids_[i] = other_unichar_ids[i];
    state_[i] = source.state_[i];
    certainties_[i] = source.certainties_[i];
    script_pos_[i] = source.BlobPosition(i);
  }
  length_ = source.length();
  adjust_factor_ = source.adjust_factor_;
  rating_ = source.rating();
  certainty_ = source.certainty();
  min_x_height_ = source.min_x_height();
  max_x_height_ = source.max_x_height();
  permuter_ = source.permuter();
  dangerous_ambig_found_ = source.dangerous_ambig_found_;
  return *this;
}
// Sets up the script_pos_ member using the blobs_list to get the bln
// bounding boxes, *this to get the unichars, and this->unicharset
// to get the target positions. If small_caps is true, sub/super are not
// considered, but dropcaps are.
// NOTE: blobs_list should be the chopped_word blobs. (Fully segmented.)
void WERD_CHOICE::SetScriptPositions(bool small_caps, TWERD *word, int debug) {
  // Initialize to normal.
  for (unsigned i = 0; i < length_; ++i) {
    script_pos_[i] = tesseract::SP_NORMAL;
  }
  // Bail out if the blob count disagrees with our segmentation state.
  if (word->blobs.empty() || word->NumBlobs() != TotalOfStates()) {
    return;
  }
  // Per-ScriptPos tallies, indexed by the ScriptPos enum values.
  unsigned position_counts[4] = {0, 0, 0, 0};
  int chunk_index = 0;
  for (unsigned blob_index = 0; blob_index < length_; ++blob_index, ++chunk_index) {
    TBLOB *tblob = word->blobs[chunk_index];
    int uni_id = unichar_id(blob_index);
    TBOX blob_box = tblob->bounding_box();
    if (!state_.empty()) {
      // Union the boxes of all chopped blobs this unichar spans.
      for (int i = 1; i < state_[blob_index]; ++i) {
        ++chunk_index;
        tblob = word->blobs[chunk_index];
        blob_box += tblob->bounding_box();
      }
    }
    script_pos_[blob_index] = ScriptPositionOf(false, *unicharset_, blob_box, uni_id);
    if (small_caps && script_pos_[blob_index] != tesseract::SP_DROPCAP) {
      script_pos_[blob_index] = tesseract::SP_NORMAL;
    }
    position_counts[script_pos_[blob_index]]++;
  }
  // If almost everything looks like a superscript or subscript,
  // we most likely just got the baseline wrong.
  if (4 * position_counts[tesseract::SP_SUBSCRIPT] > 3 * length_ ||
      4 * position_counts[tesseract::SP_SUPERSCRIPT] > 3 * length_) {
    if (debug >= 2) {
      tprintf(
          "Most characters of %s are subscript or superscript.\n"
          "That seems wrong, so I'll assume we got the baseline wrong\n",
          unichar_string().c_str());
    }
    // Demote all sub/superscripts back to normal, keeping the tallies
    // consistent for the debug report below.
    for (unsigned i = 0; i < length_; i++) {
      ScriptPos sp = script_pos_[i];
      if (sp == tesseract::SP_SUBSCRIPT || sp == tesseract::SP_SUPERSCRIPT) {
        ASSERT_HOST(position_counts[sp] > 0);
        position_counts[sp]--;
        position_counts[tesseract::SP_NORMAL]++;
        script_pos_[i] = tesseract::SP_NORMAL;
      }
    }
  }
  if ((debug >= 1 && position_counts[tesseract::SP_NORMAL] < length_) || debug >= 2) {
    tprintf("SetScriptPosition on %s\n", unichar_string().c_str());
    // Fresh scan over the blobs just for printing (shadows the counter above
    // deliberately).
    int chunk_index = 0;
    for (unsigned blob_index = 0; blob_index < length_; ++blob_index) {
      if (debug >= 2 || script_pos_[blob_index] != tesseract::SP_NORMAL) {
        TBLOB *tblob = word->blobs[chunk_index];
        ScriptPositionOf(true, *unicharset_, tblob->bounding_box(), unichar_id(blob_index));
      }
      chunk_index += state_.empty() ? 1 : state_[blob_index];
    }
  }
}
// Sets all the script_pos_ positions to the given position.
void WERD_CHOICE::SetAllScriptPositions(tesseract::ScriptPos position) {
  // Only entries below length_ are in use; the rest is spare capacity.
  for (unsigned i = 0; i < length_; ++i) {
    script_pos_[i] = position;
  }
}
/* static */
// Classifies where blob_box sits relative to the expected position of
// unichar_id: drop-cap, subscript, superscript or normal. All coordinates
// are in baseline-normalized (bln) space. If print_debug is true, the
// decision and the thresholds used are logged.
ScriptPos WERD_CHOICE::ScriptPositionOf(bool print_debug, const UNICHARSET &unicharset,
                                        const TBOX &blob_box, UNICHAR_ID unichar_id) {
  ScriptPos retval = tesseract::SP_NORMAL;
  int top = blob_box.top();
  int bottom = blob_box.bottom();
  // Expected top/bottom ranges of this unichar per the unicharset.
  int min_bottom, max_bottom, min_top, max_top;
  unicharset.get_top_bottom(unichar_id, &min_bottom, &max_bottom, &min_top, &max_top);
  int sub_thresh_top = min_top - kMinSubscriptOffset;
  int sub_thresh_bot = kBlnBaselineOffset - kMinSubscriptOffset;
  int sup_thresh_bot = max_bottom + kMinSuperscriptOffset;
  if (bottom <= kMaxDropCapBottom) {
    retval = tesseract::SP_DROPCAP;
  } else if (top < sub_thresh_top && bottom < sub_thresh_bot) {
    // Both top and bottom are well below where they should be.
    retval = tesseract::SP_SUBSCRIPT;
  } else if (bottom > sup_thresh_bot) {
    retval = tesseract::SP_SUPERSCRIPT;
  }
  if (print_debug) {
    const char *pos = ScriptPosToString(retval);
    tprintf(
        "%s Character %s[bot:%d top: %d] "
        "bot_range[%d,%d] top_range[%d, %d] "
        "sub_thresh[bot:%d top:%d] sup_thresh_bot %d\n",
        pos, unicharset.id_to_unichar(unichar_id), bottom, top, min_bottom, max_bottom, min_top,
        max_top, sub_thresh_bot, sub_thresh_top, sup_thresh_bot);
  }
  return retval;
}
// Returns the script-id (eg Han) of the dominant script in the word.
// Hiragana/Katakana counts are folded into Han; returns null_sid if no
// script accounts for at least half the characters.
int WERD_CHOICE::GetTopScriptID() const {
  unsigned max_script = unicharset_->get_script_table_size();
  // Histogram of character counts per script id.
  std::vector<unsigned> sid(max_script);
  for (unsigned x = 0; x < length_; ++x) {
    int script_id = unicharset_->get_script(unichar_id(x));
    sid[script_id]++;
  }
  if (unicharset_->han_sid() != unicharset_->null_sid()) {
    // Add the Hiragana & Katakana counts to Han and zero them out.
    if (unicharset_->hiragana_sid() != unicharset_->null_sid()) {
      sid[unicharset_->han_sid()] += sid[unicharset_->hiragana_sid()];
      sid[unicharset_->hiragana_sid()] = 0;
    }
    if (unicharset_->katakana_sid() != unicharset_->null_sid()) {
      sid[unicharset_->han_sid()] += sid[unicharset_->katakana_sid()];
      sid[unicharset_->katakana_sid()] = 0;
    }
  }
  // Note that high script ID overrides lower one on a tie, thus biasing
  // towards non-Common script (if sorted that way in unicharset file).
  unsigned max_sid = 0;
  for (unsigned x = 1; x < max_script; x++) {
    if (sid[x] >= sid[max_sid]) {
      max_sid = x;
    }
  }
  // Require an absolute majority-ish share, else report "no script".
  if (sid[max_sid] < length_ / 2) {
    max_sid = unicharset_->null_sid();
  }
  return max_sid;
}
// Fixes the state_ for a chop at the given blob_position: the unichar whose
// chunk range contains blob_position now spans one more chopped blob.
void WERD_CHOICE::UpdateStateForSplit(int blob_position) {
  int total_chunks = 0;
  for (unsigned i = 0; i < length_; ++i) {
    total_chunks += state_[i];
    if (total_chunks > blob_position) {
      ++state_[i];
      return;
    }
  }
}
// Returns the sum of all the state elements, being the total number of blobs.
unsigned WERD_CHOICE::TotalOfStates() const {
  unsigned blob_total = 0;
  for (unsigned idx = 0; idx < length_; ++idx) {
    blob_total += state_[idx];
  }
  return blob_total;
}
/**
 * WERD_CHOICE::print
 *
 * Print WERD_CHOICE to stdout: the word text, its summary scores, and one
 * column per unichar giving script position, string, state (blob count)
 * and certainty.
 */
void WERD_CHOICE::print(const char *msg) const {
  tprintf("%s : ", msg);
  for (unsigned i = 0; i < length_; ++i) {
    tprintf("%s", unicharset_->id_to_unichar(unichar_ids_[i]));
  }
  tprintf(" : R=%g, C=%g, F=%g, Perm=%d, xht=[%g,%g], ambig=%d\n", rating_, certainty_,
          adjust_factor_, permuter_, min_x_height_, max_x_height_, dangerous_ambig_found_);
  tprintf("pos");
  for (unsigned i = 0; i < length_; ++i) {
    tprintf("\t%s", ScriptPosToString(script_pos_[i]));
  }
  tprintf("\nstr");
  for (unsigned i = 0; i < length_; ++i) {
    tprintf("\t%s", unicharset_->id_to_unichar(unichar_ids_[i]));
  }
  tprintf("\nstate:");
  for (unsigned i = 0; i < length_; ++i) {
    tprintf("\t%d ", state_[i]);
  }
  tprintf("\nC");
  for (unsigned i = 0; i < length_; ++i) {
    tprintf("\t%.3f", certainties_[i]);
  }
  tprintf("\n");
}
// Prints the segmentation state with an introductory message.
// One number per unichar: the count of chopped blobs it spans.
void WERD_CHOICE::print_state(const char *msg) const {
  tprintf("%s", msg);
  for (unsigned i = 0; i < length_; ++i) {
    tprintf(" %d", state_[i]);
  }
  tprintf("\n");
}
#ifndef GRAPHICS_DISABLED
// Displays the segmentation state of *this (if not the same as the last
// one displayed) and waits for a click in the window.
// NOTE: uses function-local statics for the window and the previously drawn
// state, so this is not thread-safe.
void WERD_CHOICE::DisplaySegmentation(TWERD *word) {
  // Number of different colors to draw with.
  const int kNumColors = 6;
  static ScrollView *segm_window = nullptr;
  // Check the state against the static prev_drawn_state.
  static std::vector<int> prev_drawn_state;
  bool already_done = prev_drawn_state.size() == length_;
  if (!already_done) {
    prev_drawn_state.clear();
    prev_drawn_state.resize(length_);
  }
  // Record the current state and detect any difference from last time.
  for (unsigned i = 0; i < length_; ++i) {
    if (prev_drawn_state[i] != state_[i]) {
      already_done = false;
    }
    prev_drawn_state[i] = state_[i];
  }
  if (already_done || word->blobs.empty()) {
    return;
  }
  // Create the window if needed.
  if (segm_window == nullptr) {
    segm_window = new ScrollView("Segmentation", 5, 10, 500, 256, 2000.0, 256.0, true);
  } else {
    segm_window->Clear();
  }
  // Draw each unichar's blobs in its own (cycling) color.
  TBOX bbox;
  int blob_index = 0;
  for (unsigned c = 0; c < length_; ++c) {
    auto color = static_cast<ScrollView::Color>(c % kNumColors + 3);
    for (int i = 0; i < state_[c]; ++i, ++blob_index) {
      TBLOB *blob = word->blobs[blob_index];
      bbox += blob->bounding_box();
      blob->plot(segm_window, color, color);
    }
  }
  segm_window->ZoomToRectangle(bbox.left(), bbox.top(), bbox.right(), bbox.bottom());
  segm_window->Update();
  segm_window->Wait();
}
#endif // !GRAPHICS_DISABLED
// Returns true if the two words share a unicharset and their cores (the
// parts left after stripping leading/trailing punctuation) match
// character-for-character ignoring case.
bool EqualIgnoringCaseAndTerminalPunct(const WERD_CHOICE &word1, const WERD_CHOICE &word2) {
  const UNICHARSET *uchset = word1.unicharset();
  if (word2.unicharset() != uchset) {
    return false;
  }
  unsigned w1start, w1end;
  word1.punct_stripped(&w1start, &w1end);
  unsigned w2start, w2end;
  word2.punct_stripped(&w2start, &w2end);
  // punct_stripped yields an inverted interval for an all-punctuation word;
  // clamp it to an empty core so the unsigned subtractions below cannot
  // wrap around and produce a huge bogus loop bound.
  if (w1end < w1start) {
    w1end = w1start;
  }
  if (w2end < w2start) {
    w2end = w2start;
  }
  if (w1end - w1start != w2end - w2start) {
    return false;
  }
  for (unsigned i = 0; i < w1end - w1start; i++) {
    if (uchset->to_lower(word1.unichar_id(w1start + i)) !=
        uchset->to_lower(word2.unichar_id(w2start + i))) {
      return false;
    }
  }
  return true;
}
/**
 * print_ratings_list
 *
 * Send all the ratings out to the logfile.
 *
 * @param msg intro message (skipped if empty)
 * @param ratings list of ratings
 * @param current_unicharset unicharset that can be used
 * for id-to-unichar conversion
 */
void print_ratings_list(const char *msg, BLOB_CHOICE_LIST *ratings,
                        const UNICHARSET &current_unicharset) {
  if (ratings->empty()) {
    tprintf("%s:<none>\n", msg);
    return;
  }
  if (*msg != '\0') {
    tprintf("%s\n", msg);
  }
  BLOB_CHOICE_IT c_it;
  c_it.set_to_list(ratings);
  for (c_it.mark_cycle_pt(); !c_it.cycled_list(); c_it.forward()) {
    c_it.data()->print(&current_unicharset);
    if (!c_it.at_last()) {
      tprintf("\n");
    }
  }
  tprintf("\n");
  fflush(stdout);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/ratngs.cpp
|
C++
|
apache-2.0
| 27,614
|
/**********************************************************************
* File: ratngs.h (Formerly ratings.h)
* Description: Definition of the WERD_CHOICE and BLOB_CHOICE classes.
* Author: Ray Smith
*
* (C) Copyright 1992, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef RATNGS_H
#define RATNGS_H
#ifdef HAVE_CONFIG_H
# include "config_auto.h" // DISABLED_LEGACY_ENGINE
#endif
#include "clst.h"
#include "elst.h"
#ifndef DISABLED_LEGACY_ENGINE
# include "fontinfo.h"
#endif // undef DISABLED_LEGACY_ENGINE
#include "matrix.h"
#include "unicharset.h"
#include "werd.h"
#include <tesseract/unichar.h>
#include <cassert>
#include <cfloat> // for FLT_MAX
namespace tesseract {
class MATRIX;
struct TBLOB;
struct TWERD;
// Enum to describe the source of a BLOB_CHOICE to make it possible to determine
// whether a blob has been classified by inspecting the BLOB_CHOICEs.
enum BlobChoiceClassifier {
  BCC_STATIC_CLASSIFIER, // From the char_norm classifier.
  BCC_ADAPTED_CLASSIFIER, // From the adaptive classifier.
  BCC_SPECKLE_CLASSIFIER, // Backup for failed classification.
  BCC_AMBIG, // Generated by ambiguity detection.
  BCC_FAKE, // From some other process.
};
// A single classification candidate for one blob: the unichar, its scores,
// font choices and the x-height/baseline geometry it implies. Stored in
// BLOB_CHOICE_LISTs inside the ratings MATRIX.
class BLOB_CHOICE : public ELIST_LINK {
public:
  // Default: a fake (BCC_FAKE) space choice with worst-case scores.
  BLOB_CHOICE() {
    unichar_id_ = UNICHAR_SPACE;
    fontinfo_id_ = -1;
    fontinfo_id2_ = -1;
    rating_ = 10.0f;
    certainty_ = -1.0f;
    script_id_ = -1;
    min_xheight_ = 0.0f;
    max_xheight_ = 0.0f;
    yshift_ = 0.0f;
    classifier_ = BCC_FAKE;
  }
  BLOB_CHOICE(UNICHAR_ID src_unichar_id, // character id
              float src_rating, // rating
              float src_cert, // certainty
              int script_id, // script
              float min_xheight, // min xheight in image pixel units
              float max_xheight, // max xheight allowed by this char
              float yshift, // the larger of y shift (top or bottom)
              BlobChoiceClassifier c); // adapted match or other
  BLOB_CHOICE(const BLOB_CHOICE &other);
  ~BLOB_CHOICE() = default;
  // --- Accessors ---
  UNICHAR_ID unichar_id() const {
    return unichar_id_;
  }
  float rating() const {
    return rating_;
  }
  float certainty() const {
    return certainty_;
  }
  int16_t fontinfo_id() const {
    return fontinfo_id_;
  }
  int16_t fontinfo_id2() const {
    return fontinfo_id2_;
  }
#ifndef DISABLED_LEGACY_ENGINE
  const std::vector<ScoredFont> &fonts() const {
    return fonts_;
  }
  // Stores the scored fonts and caches the ids of the two best-scoring
  // entries in fontinfo_id_/fontinfo_id2_ (-1 if absent).
  void set_fonts(const std::vector<ScoredFont> &fonts) {
    fonts_ = fonts;
    int score1 = 0, score2 = 0;
    fontinfo_id_ = -1;
    fontinfo_id2_ = -1;
    for (auto &f : fonts_) {
      if (f.score > score1) {
        score2 = score1;
        fontinfo_id2_ = fontinfo_id_;
        score1 = f.score;
        fontinfo_id_ = f.fontinfo_id;
      } else if (f.score > score2) {
        score2 = f.score;
        fontinfo_id2_ = f.fontinfo_id;
      }
    }
  }
#endif // ndef DISABLED_LEGACY_ENGINE
  int script_id() const {
    return script_id_;
  }
  const MATRIX_COORD &matrix_cell() {
    return matrix_cell_;
  }
  float min_xheight() const {
    return min_xheight_;
  }
  float max_xheight() const {
    return max_xheight_;
  }
  float yshift() const {
    return yshift_;
  }
  BlobChoiceClassifier classifier() const {
    return classifier_;
  }
  bool IsAdapted() const {
    return classifier_ == BCC_ADAPTED_CLASSIFIER;
  }
  // True if this choice came from a real classifier (incl. speckle backup),
  // as opposed to ambiguity detection or a fake placeholder.
  bool IsClassified() const {
    return classifier_ == BCC_STATIC_CLASSIFIER || classifier_ == BCC_ADAPTED_CLASSIFIER ||
           classifier_ == BCC_SPECKLE_CLASSIFIER;
  }
  // --- Mutators ---
  void set_unichar_id(UNICHAR_ID newunichar_id) {
    unichar_id_ = newunichar_id;
  }
  void set_rating(float newrat) {
    rating_ = newrat;
  }
  void set_certainty(float newrat) {
    certainty_ = newrat;
  }
  void set_script(int newscript_id) {
    script_id_ = newscript_id;
  }
  void set_matrix_cell(int col, int row) {
    matrix_cell_.col = col;
    matrix_cell_.row = row;
  }
  void set_classifier(BlobChoiceClassifier classifier) {
    classifier_ = classifier;
  }
  // Allocates a new copy of src; caller owns the result.
  static BLOB_CHOICE *deep_copy(const BLOB_CHOICE *src) {
    auto *choice = new BLOB_CHOICE;
    *choice = *src;
    return choice;
  }
  // Returns true if *this and other agree on the baseline and x-height
  // to within some tolerance based on a given estimate of the x-height.
  bool PosAndSizeAgree(const BLOB_CHOICE &other, float x_height, bool debug) const;
  void print(const UNICHARSET *unicharset) const {
    tprintf("r%.2f c%.2f x[%g,%g]: %d %s",
            static_cast<double>(rating_),
            static_cast<double>(certainty_),
            static_cast<double>(min_xheight_),
            static_cast<double>(max_xheight_),
            unichar_id_, (unicharset == nullptr) ? "" : unicharset->debug_str(unichar_id_).c_str());
  }
  void print_full() const {
    print(nullptr);
    tprintf(" script=%d, font1=%d, font2=%d, yshift=%g, classifier=%d\n", script_id_, fontinfo_id_,
            fontinfo_id2_, static_cast<double>(yshift_), classifier_);
  }
  // Sort function for sorting BLOB_CHOICEs in increasing order of rating.
  // Note: never returns 0, so equal-rated elements keep qsort's
  // (unspecified) relative order.
  static int SortByRating(const void *p1, const void *p2) {
    const BLOB_CHOICE *bc1 = *static_cast<const BLOB_CHOICE *const *>(p1);
    const BLOB_CHOICE *bc2 = *static_cast<const BLOB_CHOICE *const *>(p2);
    return (bc1->rating_ < bc2->rating_) ? -1 : 1;
  }
private:
  // Copy assignment operator.
  BLOB_CHOICE &operator=(const BLOB_CHOICE &other);
  UNICHAR_ID unichar_id_; // unichar id
#ifndef DISABLED_LEGACY_ENGINE
  // Fonts and scores. Allowed to be empty.
  std::vector<ScoredFont> fonts_;
#endif // ndef DISABLED_LEGACY_ENGINE
  int16_t fontinfo_id_; // char font information
  int16_t fontinfo_id2_; // 2nd choice font information
  // Rating is the classifier distance weighted by the length of the outline
  // in the blob. In terms of probability, classifier distance is -klog p such
  // that the resulting distance is in the range [0, 1] and then
  // rating = w (-k log p) where w is the weight for the length of the outline.
  // Sums of ratings may be compared meaningfully for words of different
  // segmentation.
  float rating_; // size related
  // Certainty is a number in [-20, 0] indicating the classifier certainty
  // of the choice. In terms of probability, certainty = 20 (k log p) where
  // k is defined as above to normalize -klog p to the range [0, 1].
  float certainty_; // absolute
  int script_id_;
  // Holds the position of this choice in the ratings matrix.
  // Used to location position in the matrix during path backtracking.
  MATRIX_COORD matrix_cell_;
  // X-height range (in image pixels) that this classification supports.
  float min_xheight_;
  float max_xheight_;
  // yshift_ - The vertical distance (in image pixels) the character is
  // shifted (up or down) from an acceptable y position.
  float yshift_;
  BlobChoiceClassifier classifier_; // What generated *this.
};
// Make BLOB_CHOICE listable.
ELISTIZEH(BLOB_CHOICE)
// Return the BLOB_CHOICE in bc_list matching a given unichar_id,
// or nullptr if there is no match.
BLOB_CHOICE *FindMatchingChoice(UNICHAR_ID char_id, BLOB_CHOICE_LIST *bc_list);
// Permuter codes used in WERD_CHOICEs.
enum PermuterType {
NO_PERM, // 0
PUNC_PERM, // 1
TOP_CHOICE_PERM, // 2
LOWER_CASE_PERM, // 3
UPPER_CASE_PERM, // 4
NGRAM_PERM, // 5
NUMBER_PERM, // 6
USER_PATTERN_PERM, // 7
SYSTEM_DAWG_PERM, // 8
DOC_DAWG_PERM, // 9
USER_DAWG_PERM, // 10
FREQ_DAWG_PERM, // 11
COMPOUND_PERM, // 12
NUM_PERMUTER_TYPES
};
// ScriptPos tells whether a character is subscript, superscript or normal.
// SP_DROPCAP marks an oversized initial capital (drop cap).
enum ScriptPos { SP_NORMAL, SP_SUBSCRIPT, SP_SUPERSCRIPT, SP_DROPCAP };
const char *ScriptPosToString(ScriptPos script_pos);
// WERD_CHOICE holds one recognition hypothesis for a whole word: a sequence
// of UNICHAR_IDs plus, for each unichar, the number of chopped blobs that
// were combined to produce it (state_), its certainty, and its
// sub/superscript position. The word also carries an aggregate rating
// (sum of per-unichar ratings), an aggregate certainty (minimum of
// per-unichar certainties) and the PermuterType code of its producer.
class TESS_API WERD_CHOICE : public ELIST_LINK {
public:
  // Rating assigned by make_bad() to mark a word as (as yet) impossible.
  static const float kBadRating;
  // Returns a printable name for the given PermuterType code.
  static const char *permuter_name(uint8_t permuter);

  // Constructs an empty word with a default reservation of 8 slots.
  WERD_CHOICE(const UNICHARSET *unicharset) : unicharset_(unicharset) {
    this->init(8);
  }
  // Constructs an empty word reserving the given number of slots.
  WERD_CHOICE(const UNICHARSET *unicharset, int reserved) : unicharset_(unicharset) {
    this->init(reserved);
  }
  // Builds a word from a UTF-8 string. src_lengths gives the byte length
  // of each unichar, or nullptr if every unichar is a single byte.
  WERD_CHOICE(const char *src_string, const char *src_lengths, float src_rating,
              float src_certainty, uint8_t src_permuter, const UNICHARSET &unicharset)
      : unicharset_(&unicharset) {
    this->init(src_string, src_lengths, src_rating, src_certainty, src_permuter);
  }
  WERD_CHOICE(const char *src_string, const UNICHARSET &unicharset);
  // Copy constructor: copies the list link, then all word data via operator=.
  WERD_CHOICE(const WERD_CHOICE &word) : ELIST_LINK(word), unicharset_(word.unicharset_) {
    this->init(word.length());
    this->operator=(word);
  }
  ~WERD_CHOICE();

  const UNICHARSET *unicharset() const {
    return unicharset_;
  }
  bool empty() const {
    return length_ == 0;
  }
  // Number of unichars in the word.
  inline unsigned length() const {
    return length_;
  }
  float adjust_factor() const {
    return adjust_factor_;
  }
  void set_adjust_factor(float factor) {
    adjust_factor_ = factor;
  }
  inline const std::vector<UNICHAR_ID> &unichar_ids() const {
    return unichar_ids_;
  }
  // Returns the unichar id at index; asserts index < length().
  inline UNICHAR_ID unichar_id(unsigned index) const {
    assert(index < length_);
    return unichar_ids_[index];
  }
  // Number of chopped blobs merged to make the unichar at index.
  // NOTE(review): unlike unichar_id(), index is not bounds-checked here.
  inline unsigned state(unsigned index) const {
    return state_[index];
  }
  // Sub/super/normal position of the unichar at index.
  // Out-of-range indices safely return SP_NORMAL.
  ScriptPos BlobPosition(unsigned index) const {
    if (index >= length_) {
      return SP_NORMAL;
    }
    return script_pos_[index];
  }
  inline float rating() const {
    return rating_;
  }
  inline float certainty() const {
    return certainty_;
  }
  inline float certainty(unsigned index) const {
    return certainties_[index];
  }
  inline float min_x_height() const {
    return min_x_height_;
  }
  inline float max_x_height() const {
    return max_x_height_;
  }
  inline void set_x_heights(float min_height, float max_height) {
    min_x_height_ = min_height;
    max_x_height_ = max_height;
  }
  inline uint8_t permuter() const {
    return permuter_;
  }
  const char *permuter_name() const;
  // Returns the BLOB_CHOICE_LIST corresponding to the given index in the word,
  // taken from the appropriate cell in the ratings MATRIX.
  // Borrowed pointer, so do not delete.
  BLOB_CHOICE_LIST *blob_choices(unsigned index, MATRIX *ratings) const;
  // Returns the MATRIX_COORD corresponding to the location in the ratings
  // MATRIX for the given index into the word.
  MATRIX_COORD MatrixCoord(unsigned index) const;
  // Replaces the unichar id at index without touching rating/certainty/state.
  inline void set_unichar_id(UNICHAR_ID unichar_id, unsigned index) {
    assert(index < length_);
    unichar_ids_[index] = unichar_id;
  }
  bool dangerous_ambig_found() const {
    return dangerous_ambig_found_;
  }
  // Note the trailing underscore in the name: kept as-is for existing callers.
  void set_dangerous_ambig_found_(bool value) {
    dangerous_ambig_found_ = value;
  }
  inline void set_rating(float new_val) {
    rating_ = new_val;
  }
  inline void set_certainty(float new_val) {
    certainty_ = new_val;
  }
  inline void set_permuter(uint8_t perm) {
    permuter_ = perm;
  }
  // Note: this function should only be used if all the fields
  // are populated manually with set_* functions (rather than
  // (copy)constructors and append_* functions).
  inline void set_length(unsigned len) {
    ASSERT_HOST(reserved_ >= len);
    length_ = len;
  }

  /// Make more space in unichar_id_ and fragment_lengths_ arrays.
  /// Doubles the reservation (1 if it was 0); length_ is unchanged.
  inline void double_the_size() {
    if (reserved_ > 0) {
      reserved_ *= 2;
    } else {
      reserved_ = 1;
    }
    unichar_ids_.resize(reserved_);
    script_pos_.resize(reserved_);
    state_.resize(reserved_);
    certainties_.resize(reserved_);
  }

  /// Initializes WERD_CHOICE - reserves length slots in unichar_ids_ and
  /// fragment_length_ arrays. Sets other values to default (blank) values.
  inline void init(unsigned reserved) {
    reserved_ = reserved;
    if (reserved > 0) {
      unichar_ids_.resize(reserved);
      script_pos_.resize(reserved);
      state_.resize(reserved);
      certainties_.resize(reserved);
    } else {
      unichar_ids_.clear();
      script_pos_.clear();
      state_.clear();
      certainties_.clear();
    }
    length_ = 0;
    adjust_factor_ = 1.0f;
    rating_ = 0.0;
    // FLT_MAX so the first appended certainty always becomes the minimum.
    certainty_ = FLT_MAX;
    min_x_height_ = 0.0f;
    max_x_height_ = FLT_MAX;
    permuter_ = NO_PERM;
    unichars_in_script_order_ = false; // Tesseract is strict left-to-right.
    dangerous_ambig_found_ = false;
  }

  /// Helper function to build a WERD_CHOICE from the given string,
  /// fragment lengths, rating, certainty and permuter.
  /// The function assumes that src_string is not nullptr.
  /// src_lengths argument could be nullptr, in which case the unichars
  /// in src_string are assumed to all be of length 1.
  void init(const char *src_string, const char *src_lengths, float src_rating, float src_certainty,
            uint8_t src_permuter);

  /// Set the fields in this choice to be default (bad) values.
  inline void make_bad() {
    length_ = 0;
    rating_ = kBadRating;
    certainty_ = -FLT_MAX;
  }

  /// This function assumes that there is enough space reserved
  /// in the WERD_CHOICE for adding another unichar.
  /// This is an efficient alternative to append_unichar_id().
  inline void append_unichar_id_space_allocated(UNICHAR_ID unichar_id, int blob_count, float rating,
                                                float certainty) {
    assert(reserved_ > length_);
    length_++;
    this->set_unichar_id(unichar_id, blob_count, rating, certainty, length_ - 1);
  }

  void append_unichar_id(UNICHAR_ID unichar_id, int blob_count, float rating, float certainty);

  // Overwrites all per-unichar data at index and folds rating/certainty
  // into the word aggregates (rating_ accumulates; certainty_ tracks min).
  inline void set_unichar_id(UNICHAR_ID unichar_id, int blob_count, float rating, float certainty,
                             unsigned index) {
    assert(index < length_);
    unichar_ids_[index] = unichar_id;
    state_[index] = blob_count;
    certainties_[index] = certainty;
    script_pos_[index] = SP_NORMAL;
    rating_ += rating;
    if (certainty < certainty_) {
      certainty_ = certainty;
    }
  }

  // Sets the entries for the given index from the BLOB_CHOICE, assuming
  // unit fragment lengths, but setting the state for this index to blob_count.
  void set_blob_choice(unsigned index, int blob_count, const BLOB_CHOICE *blob_choice);

  bool contains_unichar_id(UNICHAR_ID unichar_id) const;
  void remove_unichar_ids(unsigned index, int num);
  // Drops the trailing unichar by shrinking length_.
  // NOTE(review): length_ is unsigned, so calling this on an empty word
  // would wrap; presumably callers guard with empty() — verify.
  inline void remove_last_unichar_id() {
    --length_;
  }
  inline void remove_unichar_id(unsigned index) {
    this->remove_unichar_ids(index, 1);
  }
  bool has_rtl_unichar_id() const;
  void reverse_and_mirror_unichar_ids();

  // Returns the half-open interval of unichar_id indices [start, end) which
  // enclose the core portion of this word -- the part after stripping
  // punctuation from the left and right.
  void punct_stripped(unsigned *start_core, unsigned *end_core) const;

  // Returns the indices [start, end) containing the core of the word, stripped
  // of any superscript digits on either side. (i.e., the non-footnote part
  // of the word). There is no guarantee that the output range is non-empty.
  void GetNonSuperscriptSpan(int *start, int *end) const;

  // Return a copy of this WERD_CHOICE with the choices [start, end).
  // The result is useful only for checking against a dictionary.
  WERD_CHOICE shallow_copy(unsigned start, unsigned end) const;

  void string_and_lengths(std::string *word_str, std::string *word_lengths_str) const;
  // Space-separated debug representation of every unichar in the word.
  std::string debug_string() const {
    std::string word_str;
    for (unsigned i = 0; i < length_; ++i) {
      word_str += unicharset_->debug_str(unichar_ids_[i]);
      word_str += " ";
    }
    return word_str;
  }
  // Returns true if any unichar_id in the word is a non-space-delimited char.
  bool ContainsAnyNonSpaceDelimited() const {
    for (unsigned i = 0; i < length_; ++i) {
      if (!unicharset_->IsSpaceDelimited(unichar_ids_[i])) {
        return true;
      }
    }
    return false;
  }
  // Returns true if the word is all spaces.
  bool IsAllSpaces() const {
    for (unsigned i = 0; i < length_; ++i) {
      if (unichar_ids_[i] != UNICHAR_SPACE) {
        return false;
      }
    }
    return true;
  }

  // Call this to override the default (strict left to right graphemes)
  // with the fact that some engine produces a "reading order" set of
  // Graphemes for each word.
  bool set_unichars_in_script_order(bool in_script_order) {
    return unichars_in_script_order_ = in_script_order;
  }

  bool unichars_in_script_order() const {
    return unichars_in_script_order_;
  }

  // Returns a UTF-8 string equivalent to the current choice
  // of UNICHAR IDs. (Refreshes the cached unichar_string_ on every call.)
  std::string &unichar_string() {
    this->string_and_lengths(&unichar_string_, &unichar_lengths_);
    return unichar_string_;
  }

  // Returns a UTF-8 string equivalent to the current choice
  // of UNICHAR IDs.
  const std::string &unichar_string() const {
    this->string_and_lengths(&unichar_string_, &unichar_lengths_);
    return unichar_string_;
  }

  // Returns the lengths, one byte each, representing the number of bytes
  // required in the unichar_string for each UNICHAR_ID.
  const std::string &unichar_lengths() const {
    this->string_and_lengths(&unichar_string_, &unichar_lengths_);
    return unichar_lengths_;
  }

  // Sets up the script_pos_ member using the blobs_list to get the bln
  // bounding boxes, *this to get the unichars, and this->unicharset
  // to get the target positions. If small_caps is true, sub/super are not
  // considered, but dropcaps are.
  // NOTE: blobs_list should be the chopped_word blobs. (Fully segmented.)
  void SetScriptPositions(bool small_caps, TWERD *word, int debug = 0);
  // Sets all the script_pos_ positions to the given position.
  void SetAllScriptPositions(ScriptPos position);

  static ScriptPos ScriptPositionOf(bool print_debug, const UNICHARSET &unicharset,
                                    const TBOX &blob_box, UNICHAR_ID unichar_id);

  // Returns the "dominant" script ID for the word.  By "dominant", the script
  // must account for at least half the characters.  Otherwise, it returns 0.
  // Note that for Japanese, Hiragana and Katakana are simply treated as Han.
  int GetTopScriptID() const;

  // Fixes the state_ for a chop at the given blob_posiiton.
  void UpdateStateForSplit(int blob_position);

  // Returns the sum of all the state elements, being the total number of blobs.
  unsigned TotalOfStates() const;

  void print() const {
    this->print("");
  }
  void print(const char *msg) const;
  // Prints the segmentation state with an introductory message.
  void print_state(const char *msg) const;

  // Displays the segmentation state of *this (if not the same as the last
  // one displayed) and waits for a click in the window.
  void DisplaySegmentation(TWERD *word);

  WERD_CHOICE &operator+=(     // concatanate
      const WERD_CHOICE &second); // second on first

  WERD_CHOICE &operator=(const WERD_CHOICE &source);

private:
  const UNICHARSET *unicharset_;
  // TODO(rays) Perhaps replace the multiple arrays with an array of structs?
  // unichar_ids_ is an array of classifier "results" that make up a word.
  // For each unichar_ids_[i], script_pos_[i] has the sub/super/normal position
  // of each unichar_id.
  // state_[i] indicates the number of blobs in WERD_RES::chopped_word that
  // were put together to make the classification results in the ith position
  // in unichar_ids_, and certainties_[i] is the certainty of the choice that
  // was used in this word.
  // == Change from before ==
  // Previously there was fragment_lengths_ that allowed a word to be
  // artificially composed of multiple fragment results. Since the new
  // segmentation search doesn't do fragments, treatment of fragments has
  // been moved to a lower level, augmenting the ratings matrix with the
  // combined fragments, and allowing the language-model/segmentation-search
  // to deal with only the combined unichar_ids.
  std::vector<UNICHAR_ID> unichar_ids_; // unichar ids that represent the text of the word
  std::vector<ScriptPos> script_pos_;   // Normal/Sub/Superscript of each unichar.
  std::vector<int> state_;              // Number of blobs in each unichar.
  std::vector<float> certainties_;      // Certainty of each unichar.
  unsigned reserved_;                   // size of the above arrays
  unsigned length_;                     // word length
  // Factor that was used to adjust the rating.
  float adjust_factor_;
  // Rating is the sum of the ratings of the individual blobs in the word.
  float rating_; // size related
  // certainty is the min (worst) certainty of the individual blobs in the word.
  float certainty_; // absolute
  // xheight computed from the result, or 0 if inconsistent.
  float min_x_height_;
  float max_x_height_;
  uint8_t permuter_; // permuter code
  // Normally, the ratings_ matrix represents the recognition results in order
  // from left-to-right.  However, some engines (say Cube) may return
  // recognition results in the order of the script's major reading direction
  // (for Arabic, that is right-to-left).
  bool unichars_in_script_order_;
  // True if NoDangerousAmbig found an ambiguity.
  bool dangerous_ambig_found_;
  // The following variables are populated and passed by reference any
  // time unichar_string() or unichar_lengths() are called.
  mutable std::string unichar_string_;
  mutable std::string unichar_lengths_;
};
// Make WERD_CHOICE listable.
ELISTIZEH(WERD_CHOICE)
using BLOB_CHOICE_LIST_VECTOR = std::vector<BLOB_CHOICE_LIST *>;
// Utilities for comparing WERD_CHOICEs
bool EqualIgnoringCaseAndTerminalPunct(const WERD_CHOICE &word1, const WERD_CHOICE &word2);
// Utilities for debug printing.
void print_ratings_list(const char *msg, // intro message
BLOB_CHOICE_LIST *ratings, // list of results
const UNICHARSET ¤t_unicharset // unicharset that can be used
// for id-to-unichar conversion
);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/ratngs.h
|
C++
|
apache-2.0
| 22,751
|
/**********************************************************************
* File: rect.cpp (Formerly box.c)
* Description: Bounding box class definition.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "rect.h"
#include "serialis.h" // for TFile
namespace tesseract {
/**********************************************************************
* TBOX::TBOX() Constructor from 2 ICOORDS
*
**********************************************************************/
TBOX::TBOX( // constructor
    const ICOORD pt1, // one corner
    const ICOORD pt2  // the other corner
) {
  // The corners may be given in any order: take the per-axis minima as
  // the bottom-left and the per-axis maxima as the top-right.
  bot_left = ICOORD(std::min(pt1.x(), pt2.x()), std::min(pt1.y(), pt2.y()));
  top_right = ICOORD(std::max(pt1.x(), pt2.x()), std::max(pt1.y(), pt2.y()));
}
// Reads both corners from the TFile; stops early if the first read fails.
bool TBOX::DeSerialize(TFile *f) {
  if (!bot_left.DeSerialize(f)) {
    return false;
  }
  return top_right.DeSerialize(f);
}
// Writes both corners to the TFile; stops early if the first write fails.
bool TBOX::Serialize(TFile *f) const {
  if (!bot_left.Serialize(f)) {
    return false;
  }
  return top_right.Serialize(f);
}
// rotate_large constructs the containing bounding box of all 4
// corners after rotating them. It therefore guarantees that all
// original content is contained within, but also slightly enlarges the box.
void TBOX::rotate_large(const FCOORD &vec) {
ICOORD top_left(bot_left.x(), top_right.y());
ICOORD bottom_right(top_right.x(), bot_left.y());
top_left.rotate(vec);
bottom_right.rotate(vec);
rotate(vec);
TBOX box2(top_left, bottom_right);
*this += box2;
}
/**********************************************************************
* TBOX::intersection() Build the largest box contained in both boxes
*
**********************************************************************/
TBOX TBOX::intersection( // shared area box
    const TBOX &box) const {
  if (!overlap(box)) {
    // Disjoint boxes intersect in the canonical null box.
    return TBOX(INT16_MAX, INT16_MAX, -INT16_MAX, -INT16_MAX);
  }
  // The shared region is bounded by the innermost edge on each side.
  const TDimension left = std::max(bot_left.x(), box.bot_left.x());
  const TDimension right = std::min(top_right.x(), box.top_right.x());
  const TDimension bottom = std::max(bot_left.y(), box.bot_left.y());
  const TDimension top = std::min(top_right.y(), box.top_right.y());
  return TBOX(left, bottom, right, top);
}
/**********************************************************************
* TBOX::bounding_union() Build the smallest box containing both boxes
*
**********************************************************************/
TBOX TBOX::bounding_union( // box enclosing both
    const TBOX &box) const {
  // The enclosing region is bounded by the outermost edge on each side.
  ICOORD bl(std::min(bot_left.x(), box.bot_left.x()),
            std::min(bot_left.y(), box.bot_left.y()));
  ICOORD tr(std::max(top_right.x(), box.top_right.x()),
            std::max(top_right.y(), box.top_right.y()));
  return TBOX(bl, tr);
}
/**********************************************************************
* TBOX::plot() Paint a box using specified settings
*
**********************************************************************/
#ifndef GRAPHICS_DISABLED
// Paints the box with the given fill (Brush) and border (Pen) colours,
// then delegates the actual rectangle draw to the plain plot overload.
void TBOX::plot(ScrollView *fd, ScrollView::Color fill_colour,
                ScrollView::Color border_colour) const {
  fd->Brush(fill_colour);
  fd->Pen(border_colour);
  plot(fd);
}
#endif
// Appends the bounding box as (%d,%d)->(%d,%d) to a string.
void TBOX::print_to_str(std::string &str) const {
// "(%d,%d)->(%d,%d)", left(), bottom(), right(), top()
str += "(" + std::to_string(left());
str += "," + std::to_string(bottom());
str += ")->(" + std::to_string(right());
str += "," + std::to_string(top());
str += ')';
}
// Writes to the given file. Returns false in case of error.
bool TBOX::Serialize(FILE *fp) const {
if (!bot_left.Serialize(fp)) {
return false;
}
if (!top_right.Serialize(fp)) {
return false;
}
return true;
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool TBOX::DeSerialize(bool swap, FILE *fp) {
  // Short-circuit keeps the original read order: bottom-left first.
  return bot_left.DeSerialize(swap, fp) && top_right.DeSerialize(swap, fp);
}
/**********************************************************************
* operator+=
*
* Extend one box to include the other (In place union)
**********************************************************************/
// In-place union: grows op1 so that it also encloses op2.
TBOX &operator+=( // bounding bounding bx
    TBOX &op1,    // operands
    const TBOX &op2) {
  op1.bot_left.set_x(std::min(op1.bot_left.x(), op2.bot_left.x()));
  op1.bot_left.set_y(std::min(op1.bot_left.y(), op2.bot_left.y()));
  op1.top_right.set_x(std::max(op1.top_right.x(), op2.top_right.x()));
  op1.top_right.set_y(std::max(op1.top_right.y(), op2.top_right.y()));
  return op1;
}
/**********************************************************************
* operator&=
*
* Reduce one box to intersection with the other (In place intersection)
**********************************************************************/
// In-place intersection: shrinks op1 to the region shared with op2,
// or to the canonical null box if they do not overlap.
TBOX &operator&=(TBOX &op1, const TBOX &op2) {
  if (!op1.overlap(op2)) {
    op1.bot_left.set_x(INT16_MAX);
    op1.bot_left.set_y(INT16_MAX);
    op1.top_right.set_x(-INT16_MAX);
    op1.top_right.set_y(-INT16_MAX);
    return op1;
  }
  op1.bot_left.set_x(std::max(op1.bot_left.x(), op2.bot_left.x()));
  op1.bot_left.set_y(std::max(op1.bot_left.y(), op2.bot_left.y()));
  op1.top_right.set_x(std::min(op1.top_right.x(), op2.top_right.x()));
  op1.top_right.set_y(std::min(op1.top_right.y(), op2.top_right.y()));
  return op1;
}
// True if left and right edges are each within tolerance of box's edges.
bool TBOX::x_almost_equal(const TBOX &box, int tolerance) const {
  const bool left_close = abs(left() - box.left()) <= tolerance;
  const bool right_close = abs(right() - box.right()) <= tolerance;
  return left_close && right_close;
}
// True if all four edges are each within tolerance of box's edges.
bool TBOX::almost_equal(const TBOX &box, int tolerance) const {
  const bool x_close = abs(left() - box.left()) <= tolerance &&
                       abs(right() - box.right()) <= tolerance;
  const bool y_close = abs(top() - box.top()) <= tolerance &&
                       abs(bottom() - box.bottom()) <= tolerance;
  return x_close && y_close;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/rect.cpp
|
C++
|
apache-2.0
| 7,827
|
/**********************************************************************
* File: rect.h (Formerly box.h)
* Description: Bounding box class definition.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef RECT_H
#define RECT_H
#include "points.h" // for ICOORD, FCOORD
#include "scrollview.h" // for ScrollView, ScrollView::Color
#include "tesstypes.h" // for TDimension
#include "tprintf.h" // for tprintf
#include <tesseract/export.h> // for DLLSYM
#include <algorithm> // for std::max, std::min
#include <cmath> // for std::ceil, std::floor
#include <cstdint> // for INT16_MAX
#include <cstdio> // for FILE
#include <string> // for std::string
namespace tesseract {
// Axis-aligned bounding box stored as bottom-left and top-right ICOORDs.
// The default-constructed box is the canonical "null" box
// (INT16_MAX,INT16_MAX)->(-INT16_MAX,-INT16_MAX); null_box() reports any
// box with zero (or negative) width or height as null, and width(),
// height() and area() all return 0 for such a box.
class TESS_API TBOX { // bounding box
public:
  TBOX()
      : // empty constructor making a null box
      bot_left(INT16_MAX, INT16_MAX)
      , top_right(-INT16_MAX, -INT16_MAX) {}

  TBOX( // constructor
      const ICOORD pt1, // one corner
      const ICOORD pt2); // the other corner

  //*********************************************************************
  // TBOX::TBOX()  Constructor from 4 integer values.
  //  Note: It is caller's responsibility to provide values
  //        in the right order.
  //*********************************************************************
  TBOX( // constructor
      TDimension left, TDimension bottom, TDimension right, TDimension top)
      : bot_left(left, bottom), top_right(right, top) {}

  TBOX( // box around FCOORD
      const FCOORD pt);

  // Note: strict inequalities, so a zero-width or zero-height box is null.
  bool null_box() const { // Is box null
    return ((left() >= right()) || (top() <= bottom()));
  }

  bool operator==(const TBOX &other) const {
    return bot_left == other.bot_left && top_right == other.top_right;
  }

  TDimension top() const { // coord of top
    return top_right.y();
  }
  // NOTE(review): the edge setters take int while the getters return
  // TDimension; the value is forwarded to ICOORD's setter unchanged.
  void set_top(int y) {
    top_right.set_y(y);
  }

  TDimension bottom() const { // coord of bottom
    return bot_left.y();
  }
  void set_bottom(int y) {
    bot_left.set_y(y);
  }

  TDimension left() const { // coord of left
    return bot_left.x();
  }
  void set_left(int x) {
    bot_left.set_x(x);
  }

  TDimension right() const { // coord of right
    return top_right.x();
  }
  void set_right(int x) {
    top_right.set_x(x);
  }
  // Midpoints use truncating integer division.
  int x_middle() const {
    return (bot_left.x() + top_right.x()) / 2;
  }
  int y_middle() const {
    return (bot_left.y() + top_right.y()) / 2;
  }

  const ICOORD &botleft() const { // access function
    return bot_left;
  }

  ICOORD botright() const { // ~ access function
    return ICOORD(top_right.x(), bot_left.y());
  }

  ICOORD topleft() const { // ~ access function
    return ICOORD(bot_left.x(), top_right.y());
  }

  const ICOORD &topright() const { // access function
    return top_right;
  }

  TDimension height() const { // how high is it?
    if (!null_box()) {
      return top_right.y() - bot_left.y();
    } else {
      return 0;
    }
  }

  TDimension width() const { // how high is it?
    if (!null_box()) {
      return top_right.x() - bot_left.x();
    } else {
      return 0;
    }
  }

  int32_t area() const { // what is the area?
    if (!null_box()) {
      return width() * height();
    } else {
      return 0;
    }
  }

  // Pads the box on either side by the supplied x,y pad amounts.
  // NO checks for exceeding any bounds like 0 or an image size.
  void pad(int xpad, int ypad) {
    ICOORD pad(xpad, ypad);
    bot_left -= pad;
    top_right += pad;
  }

  void move_bottom_edge( // move one edge
      const TDimension y) { // by +/- y
    bot_left += ICOORD(0, y);
  }

  void move_left_edge( // move one edge
      const TDimension x) { // by +/- x
    bot_left += ICOORD(x, 0);
  }

  void move_right_edge( // move one edge
      const TDimension x) { // by +/- x
    top_right += ICOORD(x, 0);
  }

  void move_top_edge( // move one edge
      const TDimension y) { // by +/- y
    top_right += ICOORD(0, y);
  }

  void move( // move box
      const ICOORD vec) { // by vector
    bot_left += vec;
    top_right += vec;
  }

  // Fractional moves and scales round outward (floor for bottom-left,
  // ceil for top-right), so the result never shrinks past the content.
  void move( // move box
      const FCOORD vec) { // by float vector
    bot_left.set_x(static_cast<TDimension>(std::floor(bot_left.x() + vec.x())));
    // round left
    bot_left.set_y(static_cast<TDimension>(std::floor(bot_left.y() + vec.y())));
    // round down
    top_right.set_x(static_cast<TDimension>(std::ceil(top_right.x() + vec.x())));
    // round right
    top_right.set_y(static_cast<TDimension>(std::ceil(top_right.y() + vec.y())));
    // round up
  }

  void scale( // scale box
      const float f) { // by multiplier
    // round left
    bot_left.set_x(static_cast<TDimension>(std::floor(bot_left.x() * f)));
    // round down
    bot_left.set_y(static_cast<TDimension>(std::floor(bot_left.y() * f)));
    // round right
    top_right.set_x(static_cast<TDimension>(std::ceil(top_right.x() * f)));
    // round up
    top_right.set_y(static_cast<TDimension>(std::ceil(top_right.y() * f)));
  }
  void scale( // scale box
      const FCOORD vec) { // by float vector
    bot_left.set_x(static_cast<TDimension>(std::floor(bot_left.x() * vec.x())));
    bot_left.set_y(static_cast<TDimension>(std::floor(bot_left.y() * vec.y())));
    top_right.set_x(static_cast<TDimension>(std::ceil(top_right.x() * vec.x())));
    top_right.set_y(static_cast<TDimension>(std::ceil(top_right.y() * vec.y())));
  }

  // rotate doesn't enlarge the box - it just rotates the bottom-left
  // and top-right corners. Use rotate_large if you want to guarantee
  // that all content is contained within the rotated box.
  // (The TBOX reconstruction re-sorts the rotated corners per axis.)
  void rotate(const FCOORD &vec) { // by vector
    bot_left.rotate(vec);
    top_right.rotate(vec);
    *this = TBOX(bot_left, top_right);
  }

  // rotate_large constructs the containing bounding box of all 4
  // corners after rotating them. It therefore guarantees that all
  // original content is contained within, but also slightly enlarges the box.
  void rotate_large(const FCOORD &vec);

  bool contains( // is pt inside box
      const FCOORD pt) const;

  bool contains( // is box inside box
      const TBOX &box) const;

  bool overlap( // do boxes overlap
      const TBOX &box) const;

  bool major_overlap( // do boxes overlap more than half
      const TBOX &box) const;

  // Do boxes overlap on x axis.
  bool x_overlap(const TBOX &box) const;

  // Return the horizontal gap between the boxes. If the boxes
  // overlap horizontally then the return value is negative, indicating
  // the amount of the overlap.
  int x_gap(const TBOX &box) const {
    return std::max(bot_left.x(), box.bot_left.x()) - std::min(top_right.x(), box.top_right.x());
  }

  // Return the vertical gap between the boxes. If the boxes
  // overlap vertically then the return value is negative, indicating
  // the amount of the overlap.
  int y_gap(const TBOX &box) const {
    return std::max(bot_left.y(), box.bot_left.y()) - std::min(top_right.y(), box.top_right.y());
  }

  // Do boxes overlap on x axis by more than
  // half of the width of the narrower box.
  bool major_x_overlap(const TBOX &box) const;

  // Do boxes overlap on y axis.
  bool y_overlap(const TBOX &box) const;

  // Do boxes overlap on y axis by more than
  // half of the height of the shorter box.
  bool major_y_overlap(const TBOX &box) const;

  // fraction of current box's area covered by other
  double overlap_fraction(const TBOX &box) const;

  // fraction of the current box's projected area covered by the other's
  double x_overlap_fraction(const TBOX &box) const;

  // fraction of the current box's projected area covered by the other's
  double y_overlap_fraction(const TBOX &box) const;

  // Returns true if the boxes are almost equal on x axis.
  bool x_almost_equal(const TBOX &box, int tolerance) const;

  // Returns true if the boxes are almost equal
  bool almost_equal(const TBOX &box, int tolerance) const;

  TBOX intersection( // shared area box
      const TBOX &box) const;

  TBOX bounding_union( // box enclosing both
      const TBOX &box) const;

  // Sets the box boundaries to the given coordinates.
  void set_to_given_coords(int x_min, int y_min, int x_max, int y_max) {
    bot_left.set_x(x_min);
    bot_left.set_y(y_min);
    top_right.set_x(x_max);
    top_right.set_y(y_max);
  }

  void print() const { // print
    tprintf("Bounding box=(%d,%d)->(%d,%d)\n", left(), bottom(), right(), top());
  }
  // Appends the bounding box as (%d,%d)->(%d,%d) to a string.
  void print_to_str(std::string &str) const;

#ifndef GRAPHICS_DISABLED
  void plot( // use current settings
      ScrollView *fd) const { // where to paint
    fd->Rectangle(bot_left.x(), bot_left.y(), top_right.x(), top_right.y());
  }

  void plot( // paint box
      ScrollView *fd, // where to paint
      ScrollView::Color fill_colour, // colour for inside
      ScrollView::Color border_colour) const; // colour for border
#endif

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  bool Serialize(TFile *fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE *fp);
  bool DeSerialize(TFile *fp);

  friend TBOX &operator+=(TBOX &, const TBOX &);
  // in place union
  friend TBOX &operator&=(TBOX &, const TBOX &);
  // in place intersection

private:
  ICOORD bot_left;  // bottom left corner
  ICOORD top_right; // top right corner
};
/**********************************************************************
* TBOX::TBOX() Constructor from 1 FCOORD
*
**********************************************************************/
// Smallest integer box containing the single floating-point point:
// floor for the bottom-left corner, ceil for the top-right.
inline TBOX::TBOX( // constructor
    const FCOORD pt // floating centre
) {
  const auto xlo = static_cast<TDimension>(std::floor(pt.x()));
  const auto ylo = static_cast<TDimension>(std::floor(pt.y()));
  const auto xhi = static_cast<TDimension>(std::ceil(pt.x()));
  const auto yhi = static_cast<TDimension>(std::ceil(pt.y()));
  bot_left = ICOORD(xlo, ylo);
  top_right = ICOORD(xhi, yhi);
}
/**********************************************************************
* TBOX::contains() Is point within box
*
**********************************************************************/
// Point-in-box test; boundary points count as inside.
inline bool TBOX::contains(const FCOORD pt) const {
  const bool in_x = bot_left.x() <= pt.x() && pt.x() <= top_right.x();
  const bool in_y = bot_left.y() <= pt.y() && pt.y() <= top_right.y();
  return in_x && in_y;
}
/**********************************************************************
* TBOX::contains() Is box within box
*
**********************************************************************/
// A box lies inside iff both of its extreme corners do.
inline bool TBOX::contains(const TBOX &box) const {
  return contains(box.bot_left) && contains(box.top_right);
}
/**********************************************************************
* TBOX::overlap() Do two boxes overlap?
*
**********************************************************************/
inline bool TBOX::overlap( // do boxes overlap
const TBOX &box) const {
return ((box.bot_left.x() <= top_right.x()) && (box.top_right.x() >= bot_left.x()) &&
(box.bot_left.y() <= top_right.y()) && (box.top_right.y() >= bot_left.y()));
}
/**********************************************************************
* TBOX::major_overlap() Do two boxes overlap by at least half of the smallest?
*
**********************************************************************/
inline bool TBOX::major_overlap( // Do boxes overlap more that half.
    const TBOX &box) const {
  // X axis: the shared extent must be at least half the narrower width.
  int shared = std::min(box.top_right.x(), top_right.x()) -
               std::max(box.bot_left.x(), bot_left.x());
  if (2 * shared < std::min(box.width(), width())) {
    return false;
  }
  // Y axis: the shared extent must be at least half the shorter height.
  shared = std::min(box.top_right.y(), top_right.y()) -
           std::max(box.bot_left.y(), bot_left.y());
  return 2 * shared >= std::min(box.height(), height());
}
/**********************************************************************
* TBOX::overlap_fraction() Fraction of area covered by the other box
*
**********************************************************************/
// Fraction of this box's area covered by box; 0 for a zero-area box.
inline double TBOX::overlap_fraction(const TBOX &box) const {
  const int32_t own_area = this->area();
  if (own_area == 0) {
    return 0.0;
  }
  return static_cast<double>(this->intersection(box).area()) / own_area;
}
/**********************************************************************
* TBOX::x_overlap() Do two boxes overlap on x-axis
*
**********************************************************************/
// Do the boxes overlap when projected onto the x-axis?
inline bool TBOX::x_overlap(const TBOX &box) const {
  return box.bot_left.x() <= top_right.x() && box.top_right.x() >= bot_left.x();
}
/**********************************************************************
* TBOX::major_x_overlap() Do two boxes overlap by more than half the
* width of the narrower box on the x-axis
*
**********************************************************************/
inline bool TBOX::major_x_overlap(const TBOX &box) const {
TDimension overlap = box.width();
if (this->left() > box.left()) {
overlap -= this->left() - box.left();
}
if (this->right() < box.right()) {
overlap -= box.right() - this->right();
}
return (overlap >= box.width() / 2 || overlap >= this->width() / 2);
}
/**********************************************************************
* TBOX::y_overlap() Do two boxes overlap on y-axis
*
**********************************************************************/
// Do the boxes overlap when projected onto the y-axis?
inline bool TBOX::y_overlap(const TBOX &box) const {
  return box.bot_left.y() <= top_right.y() && box.top_right.y() >= bot_left.y();
}
/**********************************************************************
* TBOX::major_y_overlap() Do two boxes overlap by more than half the
* height of the shorter box on the y-axis
*
**********************************************************************/
inline bool TBOX::major_y_overlap(const TBOX &box) const {
TDimension overlap = box.height();
if (this->bottom() > box.bottom()) {
overlap -= this->bottom() - box.bottom();
}
if (this->top() < box.top()) {
overlap -= box.top() - this->top();
}
return (overlap >= box.height() / 2 || overlap >= this->height() / 2);
}
/**********************************************************************
* TBOX::x_overlap_fraction() Calculates the horizontal overlap of the
* given boxes as a fraction of this boxes
* width.
*
**********************************************************************/
// Horizontal overlap of the two boxes as a fraction of THIS box's width.
// A zero-width box counts as fully overlapped iff its x lies within other.
inline double TBOX::x_overlap_fraction(const TBOX &other) const {
  const int span = right() - left();
  if (span == 0) {
    const int x = left();
    return (other.left() <= x && x <= other.right()) ? 1.0 : 0.0;
  }
  const int lo = std::max(left(), other.left());
  const int hi = std::min(right(), other.right());
  return std::max(0.0, static_cast<double>(hi - lo) / span);
}
/**********************************************************************
* TBOX::y_overlap_fraction() Calculates the vertical overlap of the
* given boxes as a fraction of this boxes
* height.
*
**********************************************************************/
// Vertical overlap of the two boxes as a fraction of THIS box's height.
// A zero-height box counts as fully overlapped iff its y lies within other.
inline double TBOX::y_overlap_fraction(const TBOX &other) const {
  const int span = top() - bottom();
  if (span == 0) {
    const int y = bottom();
    return (other.bottom() <= y && y <= other.top()) ? 1.0 : 0.0;
  }
  const int lo = std::max(bottom(), other.bottom());
  const int hi = std::min(top(), other.top());
  return std::max(0.0, static_cast<double>(hi - lo) / span);
}
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/rect.h
|
C++
|
apache-2.0
| 16,539
|
/**********************************************************************
* File: rejctmap.cpp (Formerly rejmap.c)
* Description: REJ and REJMAP class functions.
* Author: Phil Cheatle
*
* (C) Copyright 1994, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "rejctmap.h"
#include <memory>
#include "params.h"
namespace tesseract {
void REJ::full_print(FILE *fp) const {
fprintf(fp, "R_TESS_FAILURE: %s\n", flag(R_TESS_FAILURE) ? "T" : "F");
fprintf(fp, "R_SMALL_XHT: %s\n", flag(R_SMALL_XHT) ? "T" : "F");
fprintf(fp, "R_EDGE_CHAR: %s\n", flag(R_EDGE_CHAR) ? "T" : "F");
fprintf(fp, "R_1IL_CONFLICT: %s\n", flag(R_1IL_CONFLICT) ? "T" : "F");
fprintf(fp, "R_POSTNN_1IL: %s\n", flag(R_POSTNN_1IL) ? "T" : "F");
fprintf(fp, "R_REJ_CBLOB: %s\n", flag(R_REJ_CBLOB) ? "T" : "F");
fprintf(fp, "R_MM_REJECT: %s\n", flag(R_MM_REJECT) ? "T" : "F");
fprintf(fp, "R_BAD_REPETITION: %s\n", flag(R_BAD_REPETITION) ? "T" : "F");
fprintf(fp, "R_POOR_MATCH: %s\n", flag(R_POOR_MATCH) ? "T" : "F");
fprintf(fp, "R_NOT_TESS_ACCEPTED: %s\n",
flag(R_NOT_TESS_ACCEPTED) ? "T" : "F");
fprintf(fp, "R_CONTAINS_BLANKS: %s\n", flag(R_CONTAINS_BLANKS) ? "T" : "F");
fprintf(fp, "R_BAD_PERMUTER: %s\n", flag(R_BAD_PERMUTER) ? "T" : "F");
fprintf(fp, "R_HYPHEN: %s\n", flag(R_HYPHEN) ? "T" : "F");
fprintf(fp, "R_DUBIOUS: %s\n", flag(R_DUBIOUS) ? "T" : "F");
fprintf(fp, "R_NO_ALPHANUMS: %s\n", flag(R_NO_ALPHANUMS) ? "T" : "F");
fprintf(fp, "R_MOSTLY_REJ: %s\n", flag(R_MOSTLY_REJ) ? "T" : "F");
fprintf(fp, "R_XHT_FIXUP: %s\n", flag(R_XHT_FIXUP) ? "T" : "F");
fprintf(fp, "R_BAD_QUALITY: %s\n", flag(R_BAD_QUALITY) ? "T" : "F");
fprintf(fp, "R_DOC_REJ: %s\n", flag(R_DOC_REJ) ? "T" : "F");
fprintf(fp, "R_BLOCK_REJ: %s\n", flag(R_BLOCK_REJ) ? "T" : "F");
fprintf(fp, "R_ROW_REJ: %s\n", flag(R_ROW_REJ) ? "T" : "F");
fprintf(fp, "R_UNLV_REJ: %s\n", flag(R_UNLV_REJ) ? "T" : "F");
fprintf(fp, "R_HYPHEN_ACCEPT: %s\n", flag(R_HYPHEN_ACCEPT) ? "T" : "F");
fprintf(fp, "R_NN_ACCEPT: %s\n", flag(R_NN_ACCEPT) ? "T" : "F");
fprintf(fp, "R_MM_ACCEPT: %s\n", flag(R_MM_ACCEPT) ? "T" : "F");
fprintf(fp, "R_QUALITY_ACCEPT: %s\n", flag(R_QUALITY_ACCEPT) ? "T" : "F");
fprintf(fp, "R_MINIMAL_REJ_ACCEPT: %s\n",
flag(R_MINIMAL_REJ_ACCEPT) ? "T" : "F");
}
// Deep-copies the reject map from source.
// Guards against self-assignment: initialise() reallocates ptr, which
// would destroy the very data we are about to copy when &source == this.
REJMAP &REJMAP::operator=(const REJMAP &source) {
  if (this != &source) {
    initialise(source.len);
    for (unsigned i = 0; i < len; i++) {
      ptr[i] = source.ptr[i];
    }
  }
  return *this;
}
// Discards any previous map and allocates a fresh one of the given
// length, with every REJ default-constructed (all flags clear).
void REJMAP::initialise(uint16_t length) {
  len = length;
  ptr = std::make_unique<REJ[]>(len);
}
// Counts the characters currently in the accepted state.
int16_t REJMAP::accept_count() const {
  int16_t total = 0;
  for (unsigned pos = 0; pos < len; ++pos) {
    total += ptr[pos].accepted() ? 1 : 0;
  }
  return total;
}
// Is there any rejection that is not permanent?
bool REJMAP::recoverable_rejects() const {
  unsigned pos = 0;
  while (pos < len && !ptr[pos].recoverable()) {
    ++pos;
  }
  return pos < len;
}
// Is there any reject that could be accepted in a good-quality document?
bool REJMAP::quality_recoverable_rejects() const {
  unsigned pos = 0;
  while (pos < len && !ptr[pos].accept_if_good_quality()) {
    ++pos;
  }
  return pos < len;
}
// Cuts the element at pos out of the map: everything after it shifts
// down one slot and the map shrinks by one.
void REJMAP::remove_pos(uint16_t pos) {
  ASSERT_HOST(pos < len);
  ASSERT_HOST(len > 0);
  --len;
  for (uint16_t i = pos; i < len; ++i) {
    ptr[i] = ptr[i + 1];
  }
}
// Emits the map as a double-quoted string of per-character display
// codes (see the MAP_* defines).
void REJMAP::print(FILE *fp) const {
  fputc('"', fp);
  for (unsigned pos = 0; pos < len; ++pos) {
    const char code = ptr[pos].display_char();
    fputc(code, fp);
  }
  fputc('"', fp);
}
// Dumps every character's full flag listing, blank-line separated.
void REJMAP::full_print(FILE *fp) const {
  for (unsigned pos = 0; pos < len; ++pos) {
    ptr[pos].full_print(fp);
    fprintf(fp, "\n");
  }
}
// Marks every character in the word as a small-xheight (PERM) reject.
void REJMAP::rej_word_small_xht() {
  for (unsigned pos = 0; pos < len; ++pos) {
    ptr[pos].setrej_small_xht();
  }
}
// Marks every character in the word as a Tess-failure (PERM) reject.
void REJMAP::rej_word_tess_failure() {
  for (unsigned pos = 0; pos < len; ++pos) {
    ptr[pos].setrej_tess_failure();
  }
}
// Rejects the whole word; characters already rejected keep their
// original (more specific) reject reason.
void REJMAP::rej_word_not_tess_accepted() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_not_tess_accepted();
  }
}
// Rejects the whole word (contains-blanks); only currently accepted
// characters are downgraded.
void REJMAP::rej_word_contains_blanks() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_contains_blanks();
  }
}
// Rejects the whole word (bad-permuter POTENTIAL reject); only currently
// accepted characters are downgraded.
void REJMAP::rej_word_bad_permuter() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_bad_permuter();
  }
}
// Rejects the whole word (xheight-fixup); only currently accepted
// characters are downgraded.
void REJMAP::rej_word_xht_fixup() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_xht_fixup();
  }
}
// Rejects the whole word (no alphanumerics); only currently accepted
// characters are downgraded.
void REJMAP::rej_word_no_alphanums() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_no_alphanums();
  }
}
// Rejects the whole word (mostly rejected already); only currently
// accepted characters are downgraded.
void REJMAP::rej_word_mostly_rej() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_mostly_rej();
  }
}
// Rejects the whole word (bad quality metrics); only currently accepted
// characters are downgraded.
void REJMAP::rej_word_bad_quality() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_bad_quality();
  }
}
// Rejects the whole word (document-level rejection); only currently
// accepted characters are downgraded.
void REJMAP::rej_word_doc_rej() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_doc_rej();
  }
}
// Rejects the whole word (block-level rejection); only currently
// accepted characters are downgraded.
void REJMAP::rej_word_block_rej() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_block_rej();
  }
}
// Rejects the whole word (row-level rejection); only currently accepted
// characters are downgraded.
void REJMAP::rej_word_row_rej() {
  for (unsigned pos = 0; pos < len; ++pos) {
    if (!ptr[pos].accepted()) {
      continue;
    }
    ptr[pos].setrej_row_rej();
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/rejctmap.cpp
|
C++
|
apache-2.0
| 6,338
|
/**********************************************************************
* File: rejctmap.h (Formerly rejmap.h)
* Description: REJ and REJMAP class functions.
* Author: Phil Cheatle
*
* (C) Copyright 1994, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
This module may look unnecessarily verbose, but here's the philosophy...
ALL processing of the reject map is done in this module. There are lots of
separate calls to set reject/accept flags. These have DELIBERATELY been kept
distinct so that this module can decide what to do.
Basically, there is a flag for each sort of rejection or acceptance. This
provides a history of what has happened to EACH character.
Determining whether a character is CURRENTLY rejected depends on implicit
understanding of the SEQUENCE of possible calls. The flags are defined and
grouped in the REJ_FLAGS enum. These groupings are used in determining a
characters CURRENT rejection status. Basically, a character is ACCEPTED if
none of the permanent rej flags are set
AND ( the character has never been rejected
OR an accept flag is set which is LATER than the latest reject flag )
IT IS FUNDAMENTAL THAT ANYONE HACKING THIS CODE UNDERSTANDS THE SIGNIFICANCE
OF THIS IMPLIED TEMPORAL ORDERING OF THE FLAGS!!!!
**********************************************************************/
#ifndef REJCTMAP_H
#define REJCTMAP_H
#include "errcode.h"
#include "params.h"
#include <bitset>
#include <memory>
namespace tesseract {
// One flag per accept/reject event. Categories (in the per-flag comments):
// PERM = never overridden; TEMP = can be overridden by a later accept flag;
// POTENTIAL = may become an accept if the document is of good quality.
// The group ordering below defines the temporal ordering the REJ class
// relies on - do not reorder.
enum REJ_FLAGS {
  /* Reject modes which are NEVER overridden */
  R_TESS_FAILURE,   // PERM Tess didn't classify
  R_SMALL_XHT,      // PERM Xht too small
  R_EDGE_CHAR,      // PERM Too close to edge of image
  R_1IL_CONFLICT,   // PERM 1Il confusion
  R_POSTNN_1IL,     // PERM 1Il unrejected by NN
  R_REJ_CBLOB,      // PERM Odd blob
  R_MM_REJECT,      // PERM Matrix match rejection (m's)
  R_BAD_REPETITION, // TEMP Repeated char which doesn't match trend

  /* Initial reject modes (pre NN_ACCEPT) */
  R_POOR_MATCH,        // TEMP Ray's original heuristic (Not used)
  R_NOT_TESS_ACCEPTED, // TEMP Tess didn't accept WERD
  R_CONTAINS_BLANKS,   // TEMP Tess failed on other chs in WERD
  R_BAD_PERMUTER,      // POTENTIAL Bad permuter for WERD

  /* Reject modes generated after NN_ACCEPT but before MM_ACCEPT */
  R_HYPHEN,       // TEMP Post NN dodgy hyphen or full stop
  R_DUBIOUS,      // TEMP Post NN dodgy chars
  R_NO_ALPHANUMS, // TEMP No alphanumerics in word after NN
  R_MOSTLY_REJ,   // TEMP Most of word rejected so rej the rest
  R_XHT_FIXUP,    // TEMP Xht tests unsure

  /* Reject modes generated after MM_ACCEPT but before QUALITY_ACCEPT */
  R_BAD_QUALITY, // TEMP Quality metrics bad for WERD

  /* Reject modes generated after QUALITY_ACCEPT but before MINIMAL_REJ accep*/
  R_DOC_REJ,   // TEMP Document rejection
  R_BLOCK_REJ, // TEMP Block rejection
  R_ROW_REJ,   // TEMP Row rejection
  R_UNLV_REJ,  // TEMP ~ turned to - or ^ turned to space

  /* Accept modes which occur between the above rejection groups */
  R_NN_ACCEPT,         // NN acceptance
  R_HYPHEN_ACCEPT,     // Hyphen acceptance
  R_MM_ACCEPT,         // Matrix match acceptance
  R_QUALITY_ACCEPT,    // Accept word in good quality doc
  R_MINIMAL_REJ_ACCEPT // Accept EVERYTHING except tess failures
};
/* REJECT MAP VALUES */
#define MAP_ACCEPT '1'
#define MAP_REJECT_PERM '0'
#define MAP_REJECT_TEMP '2'
#define MAP_REJECT_POTENTIAL '3'
// Per-character reject record: a bitset of REJ_FLAGS recording every
// accept/reject event that has happened to the character. The CURRENT
// accept/reject status is derived from the flags using the implied
// temporal ordering of the flag groups (see the file header comment):
// a character is accepted if no PERM flag is set AND either it was never
// rejected or an accept flag later than the latest reject flag is set.
class REJ {
  std::bitset<32> flags;

  void set_flag(REJ_FLAGS rej_flag) {
    flags.set(rej_flag);
  }

public:
  REJ() = default;

  REJ( // classwise copy
      const REJ &source) {
    flags = source.flags;
  }

  REJ &operator=( // assign REJ
      const REJ &source) = default;

  bool flag(REJ_FLAGS rej_flag) const {
    return flags[rej_flag];
  }

  // Single character code for REJMAP::print - see the MAP_* defines.
  char display_char() const {
    if (perm_rejected()) {
      return MAP_REJECT_PERM;
    } else if (accept_if_good_quality()) {
      return MAP_REJECT_POTENTIAL;
    } else if (rejected()) {
      return MAP_REJECT_TEMP;
    } else {
      return MAP_ACCEPT;
    }
  }

  bool perm_rejected() const { // Is char perm reject?
    return (flag(R_TESS_FAILURE) || flag(R_SMALL_XHT) || flag(R_EDGE_CHAR) ||
            flag(R_1IL_CONFLICT) || flag(R_POSTNN_1IL) || flag(R_REJ_CBLOB) ||
            flag(R_BAD_REPETITION) || flag(R_MM_REJECT));
  }

private:
  // The helpers below test each group of reject flags, named for the
  // accept stages between which the flags can be set.
  bool rej_before_nn_accept() const {
    return flag(R_POOR_MATCH) || flag(R_NOT_TESS_ACCEPTED) ||
           flag(R_CONTAINS_BLANKS) || flag(R_BAD_PERMUTER);
  }

  bool rej_between_nn_and_mm() const {
    return flag(R_HYPHEN) || flag(R_DUBIOUS) || flag(R_NO_ALPHANUMS) ||
           flag(R_MOSTLY_REJ) || flag(R_XHT_FIXUP);
  }

  bool rej_between_mm_and_quality_accept() const {
    return flag(R_BAD_QUALITY);
  }

  bool rej_between_quality_and_minimal_rej_accept() const {
    return flag(R_DOC_REJ) || flag(R_BLOCK_REJ) || flag(R_ROW_REJ) ||
           flag(R_UNLV_REJ);
  }

  // An early reject stands at MM time unless an NN/hyphen accept
  // (which comes later than the early rejects) cleared it.
  bool rej_before_mm_accept() const {
    return rej_between_nn_and_mm() ||
           (rej_before_nn_accept() && !flag(R_NN_ACCEPT) &&
            !flag(R_HYPHEN_ACCEPT));
  }

  bool rej_before_quality_accept() const {
    return rej_between_mm_and_quality_accept() ||
           (!flag(R_MM_ACCEPT) && rej_before_mm_accept());
  }

public:
  // Current rejection status, derived from the whole flag history.
  bool rejected() const { // Is char rejected?
    if (flag(R_MINIMAL_REJ_ACCEPT)) {
      return false;
    } else {
      return (perm_rejected() || rej_between_quality_and_minimal_rej_accept() ||
              (!flag(R_QUALITY_ACCEPT) && rej_before_quality_accept()));
    }
  }

  // A "potential" reject: currently rejected ONLY because of
  // R_BAD_PERMUTER, so it may be flipped to accept in a good-quality doc.
  bool accept_if_good_quality() const { // potential rej?
    return (rejected() && !perm_rejected() && flag(R_BAD_PERMUTER) &&
            !flag(R_POOR_MATCH) && !flag(R_NOT_TESS_ACCEPTED) &&
            !flag(R_CONTAINS_BLANKS) &&
            (!rej_between_nn_and_mm() && !rej_between_mm_and_quality_accept() &&
             !rej_between_quality_and_minimal_rej_accept()));
  }

  // Setters: each records one accept/reject event by setting its flag.
  void setrej_tess_failure() { // Tess generated blank
    set_flag(R_TESS_FAILURE);
  }

  void setrej_small_xht() { // Small xht char/wd
    set_flag(R_SMALL_XHT);
  }

  void setrej_edge_char() { // Close to image edge
    set_flag(R_EDGE_CHAR);
  }

  void setrej_1Il_conflict() { // Initial reject map
    set_flag(R_1IL_CONFLICT);
  }

  void setrej_postNN_1Il() { // 1Il after NN
    set_flag(R_POSTNN_1IL);
  }

  void setrej_rej_cblob() { // Insert duff blob
    set_flag(R_REJ_CBLOB);
  }

  void setrej_mm_reject() { // Matrix matcher
    set_flag(R_MM_REJECT);
  }

  void setrej_bad_repetition() { // Odd repeated char
    set_flag(R_BAD_REPETITION);
  }

  void setrej_poor_match() { // Failed Rays heuristic
    set_flag(R_POOR_MATCH);
  }

  void setrej_not_tess_accepted() {
    // TEMP reject_word
    set_flag(R_NOT_TESS_ACCEPTED);
  }

  void setrej_contains_blanks() {
    // TEMP reject_word
    set_flag(R_CONTAINS_BLANKS);
  }

  void setrej_bad_permuter() { // POTENTIAL reject_word
    set_flag(R_BAD_PERMUTER);
  }

  void setrej_hyphen() { // PostNN dubious hyphen or .
    set_flag(R_HYPHEN);
  }

  void setrej_dubious() { // PostNN dubious limit
    set_flag(R_DUBIOUS);
  }

  void setrej_no_alphanums() { // TEMP reject_word
    set_flag(R_NO_ALPHANUMS);
  }

  void setrej_mostly_rej() { // TEMP reject_word
    set_flag(R_MOSTLY_REJ);
  }

  void setrej_xht_fixup() { // xht fixup
    set_flag(R_XHT_FIXUP);
  }

  void setrej_bad_quality() { // TEMP reject_word
    set_flag(R_BAD_QUALITY);
  }

  void setrej_doc_rej() { // TEMP reject_word
    set_flag(R_DOC_REJ);
  }

  void setrej_block_rej() { // TEMP reject_word
    set_flag(R_BLOCK_REJ);
  }

  void setrej_row_rej() { // TEMP reject_word
    set_flag(R_ROW_REJ);
  }

  void setrej_unlv_rej() { // TEMP reject_word
    set_flag(R_UNLV_REJ);
  }

  void setrej_hyphen_accept() { // NN Flipped a char
    set_flag(R_HYPHEN_ACCEPT);
  }

  void setrej_nn_accept() { // NN Flipped a char
    set_flag(R_NN_ACCEPT);
  }

  void setrej_mm_accept() { // Matrix matcher
    set_flag(R_MM_ACCEPT);
  }

  void setrej_quality_accept() { // Quality flip a char
    set_flag(R_QUALITY_ACCEPT);
  }

  void setrej_minimal_rej_accept() {
    // Accept all except blank
    set_flag(R_MINIMAL_REJ_ACCEPT);
  }

  bool accepted() const { // Is char accepted?
    return !rejected();
  }

  bool recoverable() const {
    return (rejected() && !perm_rejected());
  }

  // Dumps every flag's state to fp; defined in rejctmap.cpp.
  void full_print(FILE *fp) const;
};
// A reject map: one REJ record per character of a word, plus whole-word
// reject operations. Owns its storage via unique_ptr.
class REJMAP {
  std::unique_ptr<REJ[]> ptr; // ptr to the chars
  uint16_t len = 0;           // Number of chars

public:
  REJMAP() = default;

  // Copy constructor delegates to the deep-copying operator=.
  REJMAP(const REJMAP &rejmap) {
    *this = rejmap;
  }

  REJMAP &operator=(const REJMAP &source);

  // Sets up the ptr array to length, whatever it was before.
  void initialise(uint16_t length);

  REJ &operator[](        // access function
      uint16_t index) const // map index
  {
    ASSERT_HOST(index < len);
    return ptr[index]; // no bounds checks
  }

  uint16_t length() const { // map length
    return len;
  }

  int16_t accept_count() const; // How many accepted?

  int16_t reject_count() const { // How many rejects?
    return len - accept_count();
  }

  // Cut out an element.
  void remove_pos(uint16_t pos);

  // Compact one-line display ("...") of the map's display chars.
  void print(FILE *fp) const;

  // Verbose per-character flag dump.
  void full_print(FILE *fp) const;

  bool recoverable_rejects() const; // Any non perm rejs?

  bool quality_recoverable_rejects() const;
  // Any potential rejs?

  // The rej_word_* operations below reject the whole word; those that
  // check accepted() first preserve each char's existing reject reason.
  void rej_word_small_xht(); // Reject whole word
  // Reject whole word
  void rej_word_tess_failure();
  void rej_word_not_tess_accepted();
  // Reject whole word
  // Reject whole word
  void rej_word_contains_blanks();
  // Reject whole word
  void rej_word_bad_permuter();
  void rej_word_xht_fixup(); // Reject whole word
  // Reject whole word
  void rej_word_no_alphanums();
  void rej_word_mostly_rej();  // Reject whole word
  void rej_word_bad_quality(); // Reject whole word
  void rej_word_doc_rej();     // Reject whole word
  void rej_word_block_rej();   // Reject whole word
  void rej_word_row_rej();     // Reject whole word
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/rejctmap.h
|
C++
|
apache-2.0
| 10,681
|
/******************************************************************************
*
* File: seam.cpp (Formerly seam.c)
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
/*----------------------------------------------------------------------
I n c l u d e s
----------------------------------------------------------------------*/
#include "seam.h"
#include "blobs.h"
#include "tprintf.h"
namespace tesseract {
/*----------------------------------------------------------------------
Public Function Code
----------------------------------------------------------------------*/
// Returns the bounding box of all the points in the seam.
// Returns the bounding box of all the points in the seam: start from the
// seam midpoint and grow to cover every split.
TBOX SEAM::bounding_box() const {
  TBOX result(location_.x, location_.y, location_.x, location_.y);
  for (int i = 0; i < num_splits_; ++i) {
    result += splits_[i].bounding_box();
  }
  return result;
}
// Returns true if the splits in *this SEAM appear OK in the sense that they
// do not cross any outlines and do not chop off any ridiculously small
// pieces.
// Returns true if the splits in *this SEAM appear OK in the sense that they
// do not cross any outlines and do not chop off any ridiculously small
// pieces.
// TODO(rays) Try testing all the splits. Duplicating original code for now,
// which tested only the first.
bool SEAM::IsHealthy(const TBLOB &blob, int min_points, int min_area) const {
  if (num_splits_ == 0) {
    return true;
  }
  return splits_[0].IsHealthy(blob, min_points, min_area);
}
// Computes the widthp_/widthn_ range for all existing SEAMs and for *this
// seam, which is about to be inserted at insert_index. Returns false if
// any of the computations fails, as this indicates an invalid chop.
// widthn_/widthp_ are only changed if modify is true.
bool SEAM::PrepareToInsertSeam(const std::vector<SEAM *> &seams,
                               const std::vector<TBLOB *> &blobs, int insert_index, bool modify) {
  // Seams before the insertion point keep their blob indices.
  for (int s = 0; s < insert_index; ++s) {
    if (!seams[s]->FindBlobWidth(blobs, s, modify)) {
      return false;
    }
  }
  // *this will occupy insert_index itself.
  if (!FindBlobWidth(blobs, insert_index, modify)) {
    return false;
  }
  // Seams at or beyond the insertion point are shifted up by one blob.
  for (unsigned s = insert_index; s < seams.size(); ++s) {
    if (!seams[s]->FindBlobWidth(blobs, s + 1, modify)) {
      return false;
    }
  }
  return true;
}
// Computes the widthp_/widthn_ range. Returns false if not all the splits
// are accounted for. widthn_/widthp_ are only changed if modify is true.
// Checks that every split is contained by some blob at or near index and,
// if modify is true, records in widthn_/widthp_ how far left/right of
// index the containing blobs reach.
bool SEAM::FindBlobWidth(const std::vector<TBLOB *> &blobs, int index, bool modify) {
  int num_found = 0;
  if (modify) {
    widthp_ = 0;
    widthn_ = 0;
  }
  for (int s = 0; s < num_splits_; ++s) {
    const SPLIT &split = splits_[s];
    // First try the blob at index itself.
    bool found_split = split.ContainedByBlob(*blobs[index]);
    // Look right.
    for (unsigned b = index + 1; !found_split && b < blobs.size(); ++b) {
      found_split = split.ContainedByBlob(*blobs[b]);
      if (found_split && b - index > widthp_ && modify) {
        widthp_ = b - index;
      }
    }
    // Look left.
    for (int b = index - 1; !found_split && b >= 0; --b) {
      found_split = split.ContainedByBlob(*blobs[b]);
      if (found_split && index - b > widthn_ && modify) {
        widthn_ = index - b;
      }
    }
    if (found_split) {
      ++num_found;
    }
  }
  // Valid only if every split was accounted for by some blob.
  return num_found == num_splits_;
}
// Splits this blob into two blobs by applying the splits included in
// *this SEAM
// Splits blob into two by applying every split in this SEAM: cut the
// outlines, then partition them between blob and other_blob around
// location_, and tidy up both results.
void SEAM::ApplySeam(bool italic_blob, TBLOB *blob, TBLOB *other_blob) const {
  for (int s = 0; s < num_splits_; ++s) {
    splits_[s].SplitOutlineList(blob->outlines);
  }
  blob->ComputeBoundingBoxes();

  divide_blobs(blob, other_blob, italic_blob, location_);

  blob->EliminateDuplicateOutlines();
  other_blob->EliminateDuplicateOutlines();

  blob->CorrectBlobOrder(other_blob);
}
// Undoes ApplySeam by removing the seam between these two blobs.
// Produces one blob as a result, and deletes other_blob.
// Removes the seam between the two blobs, merging other_blob's outlines
// back into blob, un-cutting the splits, and deleting other_blob.
void SEAM::UndoSeam(TBLOB *blob, TBLOB *other_blob) const {
  // If blob ended up with no outlines, adopt other_blob's list first.
  if (blob->outlines == nullptr) {
    blob->outlines = other_blob->outlines;
    other_blob->outlines = nullptr;
  }

  // Append other_blob's outline chain to the tail of blob's chain.
  TESSLINE *outline = blob->outlines;
  while (outline->next) {
    outline = outline->next;
  }
  outline->next = other_blob->outlines;
  // The outlines now belong to blob, so disown them before deleting.
  other_blob->outlines = nullptr;
  delete other_blob;

  // Rejoin the outlines along each split and refresh the geometry.
  for (int s = 0; s < num_splits_; ++s) {
    splits_[s].UnsplitOutlineList(blob);
  }
  blob->ComputeBoundingBoxes();
  blob->EliminateDuplicateOutlines();
}
// Prints everything in *this SEAM.
// Prints everything in *this SEAM: label, priority, location, widths,
// then the comma-separated splits.
void SEAM::Print(const char *label) const {
  tprintf("%s", label);
  tprintf(" %6.2f @ (%d,%d), p=%u, n=%u ", priority_, location_.x, location_.y, widthp_, widthn_);
  for (int i = 0; i < num_splits_; ++i) {
    if (i > 0) {
      tprintf(", ");
    }
    splits_[i].Print();
  }
  tprintf("\n");
}
// Prints a collection of SEAMs.
/* static */
void SEAM::PrintSeams(const char *label, const std::vector<SEAM *> &seams) {
if (!seams.empty()) {
tprintf("%s\n", label);
for (unsigned x = 0; x < seams.size(); ++x) {
tprintf("%2u: ", x);
seams[x]->Print("");
}
tprintf("\n");
}
}
#ifndef GRAPHICS_DISABLED
// Draws the seam in the given window.
// Draws every split of the seam in the given window.
void SEAM::Mark(ScrollView *window) const {
  for (int i = 0; i < num_splits_; ++i) {
    splits_[i].Mark(window);
  }
}
#endif
// Break up the blobs in this chain so that they are all independent.
// This operation should undo the effect of join_pieces.
/* static */
void SEAM::BreakPieces(const std::vector<SEAM *> &seams, const std::vector<TBLOB *> &blobs,
                       int first, int last) {
  // Make the splits visible again on every seam in the range.
  for (int x = first; x < last; ++x) {
    seams[x]->Reveal();
  }
  // Walk the joined outline chain, cutting the link that precedes each
  // blob's first outline to restore per-blob outline lists.
  TESSLINE *outline = blobs[first]->outlines;
  int next_blob = first + 1;

  while (outline != nullptr && next_blob <= last) {
    if (outline->next == blobs[next_blob]->outlines) {
      outline->next = nullptr;
      outline = blobs[next_blob]->outlines;
      ++next_blob;
    } else {
      outline = outline->next;
    }
  }
}
// Join a group of base level pieces into a single blob that can then
// be classified.
/* static */
void SEAM::JoinPieces(const std::vector<SEAM *> &seams, const std::vector<TBLOB *> &blobs,
                      int first, int last) {
  TESSLINE *outline = blobs[first]->outlines;
  if (!outline) {
    return;
  }

  for (int x = first; x < last; ++x) {
    SEAM *seam = seams[x];
    // Hide the seam only if its full width range lies inside the join.
    if (x - seam->widthn_ >= first && x + seam->widthp_ < last) {
      seam->Hide();
    }
    // Advance to the chain's tail and link in the next blob's outlines.
    while (outline->next) {
      outline = outline->next;
    }
    outline->next = blobs[x + 1]->outlines;
  }
}
// Hides the seam so the outlines appear not to be cut by it.
// Hides every split so the outlines appear not to be cut by the seam.
void SEAM::Hide() const {
  for (int i = 0; i < num_splits_; ++i) {
    splits_[i].Hide();
  }
}
// Undoes hide, so the outlines are cut by the seam.
// Undoes Hide: makes every split cut the outlines again.
void SEAM::Reveal() const {
  for (int i = 0; i < num_splits_; ++i) {
    splits_[i].Reveal();
  }
}
// Computes and returns, but does not set, the full priority of *this SEAM.
// Computes and returns, but does not set, the full priority of *this SEAM.
float SEAM::FullPriority(int xmin, int xmax, double overlap_knob, int centered_maxwidth,
                         double center_knob, double width_change_knob) const {
  if (num_splits_ == 0) {
    return 0.0f;
  }
  // Temporarily apply all the secondary splits so the first split's
  // priority is evaluated in the context of the fully-chopped outline.
  for (int s = 1; s < num_splits_; ++s) {
    splits_[s].SplitOutline();
  }
  float full_priority =
      priority_ + splits_[0].FullPriority(xmin, xmax, overlap_knob, centered_maxwidth, center_knob,
                                          width_change_knob);
  // Undo the temporary splits in reverse order of application.
  for (int s = num_splits_ - 1; s >= 1; --s) {
    splits_[s].UnsplitOutlines();
  }
  return full_priority;
}
/**
* @name start_seam_list
*
* Initialize a list of seams that match the original number of blobs
* present in the starting segmentation. Each of the seams created
* by this routine have location information only.
*/
/**
 * @name start_seam_list
 *
 * Initialize a list of seams that match the original number of blobs
 * present in the starting segmentation: one location-only seam between
 * each adjacent pair of blobs, halfway between them.
 */
void start_seam_list(TWERD *word, std::vector<SEAM *> *seam_array) {
  seam_array->clear();
  for (unsigned b = 1; b < word->NumBlobs(); ++b) {
    const TBOX prev_box = word->blobs[b - 1]->bounding_box();
    const TBOX next_box = word->blobs[b]->bounding_box();
    TPOINT location;
    location.x = (prev_box.right() + next_box.left()) / 2;
    location.y = (prev_box.bottom() + prev_box.top() + next_box.bottom() + next_box.top()) / 4;
    seam_array->push_back(new SEAM(0.0f, location));
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/seam.cpp
|
C++
|
apache-2.0
| 8,758
|
/******************************************************************************
*
* File: seam.h
* Author: Mark Seaman, SW Productivity
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef SEAM_H
#define SEAM_H
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "blobs.h"
#include "split.h"
namespace tesseract {
using PRIORITY = float; /* PRIORITY */
// A SEAM is a candidate cut through a blob: up to kMaxNumSplits SPLITs
// plus a priority (lower is better) and a representative location.
class SEAM {
public:
  // A seam with no splits
  SEAM(float priority, const TPOINT &location)
      : priority_(priority), location_(location), num_splits_(0) {}
  // A seam with a single split point.
  SEAM(float priority, const TPOINT &location, const SPLIT &split)
      : priority_(priority), location_(location), num_splits_(1) {
    splits_[0] = split;
  }
  // Default copy constructor, operator= and destructor are OK!

  // Accessors.
  float priority() const {
    return priority_;
  }
  void set_priority(float priority) {
    priority_ = priority;
  }
  bool HasAnySplits() const {
    return num_splits_ > 0;
  }

  // Returns the bounding box of all the points in the seam.
  TBOX bounding_box() const;

  // Returns true if other can be combined into *this: close enough
  // horizontally, room for the combined splits, combined priority under
  // budget, and no vertically overlapping or position-sharing splits.
  bool CombineableWith(const SEAM &other, int max_x_dist, float max_total_priority) const {
    int dist = location_.x - other.location_.x;
    return -max_x_dist < dist && dist < max_x_dist &&
           num_splits_ + other.num_splits_ <= kMaxNumSplits &&
           priority_ + other.priority_ < max_total_priority && !OverlappingSplits(other) &&
           !SharesPosition(other);
  }
  // Combines other into *this. Only works if CombinableWith returned true.
  // Priorities add; the location becomes the midpoint of the two seams.
  void CombineWith(const SEAM &other) {
    priority_ += other.priority_;
    location_ += other.location_;
    location_ /= 2;

    for (uint8_t s = 0; s < other.num_splits_ && num_splits_ < kMaxNumSplits; ++s) {
      splits_[num_splits_++] = other.splits_[s];
    }
  }

  // Returns true if the given blob contains all splits of *this SEAM.
  bool ContainedByBlob(const TBLOB &blob) const {
    for (int s = 0; s < num_splits_; ++s) {
      if (!splits_[s].ContainedByBlob(blob)) {
        return false;
      }
    }
    return true;
  }

  // Returns true if the given EDGEPT is used by this SEAM, checking only
  // the EDGEPT pointer, not the coordinates.
  bool UsesPoint(const EDGEPT *point) const {
    for (int s = 0; s < num_splits_; ++s) {
      if (splits_[s].UsesPoint(point)) {
        return true;
      }
    }
    return false;
  }
  // Returns true if *this and other share any common point, by coordinates.
  bool SharesPosition(const SEAM &other) const {
    for (int s = 0; s < num_splits_; ++s) {
      for (int t = 0; t < other.num_splits_; ++t) {
        if (splits_[s].SharesPosition(other.splits_[t])) {
          return true;
        }
      }
    }
    return false;
  }
  // Returns true if *this and other have any vertically overlapping splits.
  bool OverlappingSplits(const SEAM &other) const {
    for (int s = 0; s < num_splits_; ++s) {
      TBOX split1_box = splits_[s].bounding_box();
      for (int t = 0; t < other.num_splits_; ++t) {
        TBOX split2_box = other.splits_[t].bounding_box();
        if (split1_box.y_overlap(split2_box)) {
          return true;
        }
      }
    }
    return false;
  }

  // Marks the edgepts used by the seam so the segments made by the cut
  // never get split further by another seam in the future.
  void Finalize() {
    for (int s = 0; s < num_splits_; ++s) {
      splits_[s].point1->MarkChop();
      splits_[s].point2->MarkChop();
    }
  }

  // Returns true if the splits in *this SEAM appear OK in the sense that they
  // do not cross any outlines and do not chop off any ridiculously small
  // pieces.
  bool IsHealthy(const TBLOB &blob, int min_points, int min_area) const;

  // Computes the widthp_/widthn_ range for all existing SEAMs and for *this
  // seam, which is about to be inserted at insert_index. Returns false if
  // any of the computations fails, as this indicates an invalid chop.
  // widthn_/widthp_ are only changed if modify is true.
  bool PrepareToInsertSeam(const std::vector<SEAM *> &seams, const std::vector<TBLOB *> &blobs,
                           int insert_index, bool modify);
  // Computes the widthp_/widthn_ range. Returns false if not all the splits
  // are accounted for. widthn_/widthp_ are only changed if modify is true.
  bool FindBlobWidth(const std::vector<TBLOB *> &blobs, int index, bool modify);

  // Splits this blob into two blobs by applying the splits included in
  // *this SEAM
  void ApplySeam(bool italic_blob, TBLOB *blob, TBLOB *other_blob) const;
  // Undoes ApplySeam by removing the seam between these two blobs.
  // Produces one blob as a result, and deletes other_blob.
  void UndoSeam(TBLOB *blob, TBLOB *other_blob) const;

  // Prints everything in *this SEAM.
  void Print(const char *label) const;
  // Prints a collection of SEAMs.
  static void PrintSeams(const char *label, const std::vector<SEAM *> &seams);
#ifndef GRAPHICS_DISABLED
  // Draws the seam in the given window.
  void Mark(ScrollView *window) const;
#endif

  // Break up the blobs in this chain so that they are all independent.
  // This operation should undo the effect of join_pieces.
  static void BreakPieces(const std::vector<SEAM *> &seams, const std::vector<TBLOB *> &blobs,
                          int first, int last);
  // Join a group of base level pieces into a single blob that can then
  // be classified.
  static void JoinPieces(const std::vector<SEAM *> &seams, const std::vector<TBLOB *> &blobs,
                         int first, int last);

  // Hides the seam so the outlines appear not to be cut by it.
  void Hide() const;
  // Undoes hide, so the outlines are cut by the seam.
  void Reveal() const;

  // Computes and returns, but does not set, the full priority of *this SEAM.
  // The arguments here are config parameters defined in Wordrec. Add chop_
  // to the beginning of the name.
  float FullPriority(int xmin, int xmax, double overlap_knob, int centered_maxwidth,
                     double center_knob, double width_change_knob) const;

private:
  // Maximum number of splits that a SEAM can hold.
  static const uint8_t kMaxNumSplits = 3;
  // Priority of this split. Lower is better.
  float priority_;
  // Position of the middle of the seam.
  TPOINT location_;
  // A range such that all splits in *this SEAM are contained within blobs in
  // the range [index - widthn_,index + widthp_] where index is the index of
  // this SEAM in the seams vector.
  uint8_t widthp_ = 0;
  uint8_t widthn_ = 0;
  // Number of splits_ that are used.
  uint8_t num_splits_;
  // Set of pairs of points that are the ends of each split in the SEAM.
  SPLIT splits_[kMaxNumSplits];
};
void start_seam_list(TWERD *word, std::vector<SEAM *> *seam_array);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/seam.h
|
C++
|
apache-2.0
| 7,601
|
/******************************************************************************
*
* File: split.cpp (Formerly split.c)
* Author: Mark Seaman, OCR Technology
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*************************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "split.h"
#include "coutln.h"
#include "tprintf.h"
#include <algorithm>
namespace tesseract {
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
// Limit on the amount of penalty for the chop being off-center.
const int kCenterGradeCap = 25;
// Ridiculously large priority for splits that are no use.
const double kBadPriority = 999.0;
BOOL_VAR(wordrec_display_splits, 0, "Display splits");
// Hides the SPLIT so the outlines appear not to be cut by it.
void SPLIT::Hide() const {
EDGEPT *edgept = point1;
do {
edgept->Hide();
edgept = edgept->next;
} while (!edgept->EqualPos(*point2) && edgept != point1);
edgept = point2;
do {
edgept->Hide();
edgept = edgept->next;
} while (!edgept->EqualPos(*point1) && edgept != point2);
}
// Undoes hide, so the outlines are cut by the SPLIT.
void SPLIT::Reveal() const {
EDGEPT *edgept = point1;
do {
edgept->Reveal();
edgept = edgept->next;
} while (!edgept->EqualPos(*point2) && edgept != point1);
edgept = point2;
do {
edgept->Reveal();
edgept = edgept->next;
} while (!edgept->EqualPos(*point1) && edgept != point2);
}
// Compute a split priority based on the bounding boxes of the parts.
// The arguments here are config parameters defined in Wordrec. Add chop_
// to the beginning of the name.
// Lower returned values are better; kBadPriority marks a useless split.
float SPLIT::FullPriority(int xmin, int xmax, double overlap_knob, int centered_maxwidth,
                          double center_knob, double width_change_knob) const {
  TBOX box1 = Box12();
  TBOX box2 = Box21();
  int min_left = std::min(box1.left(), box2.left());
  int max_right = std::max(box1.right(), box2.right());
  // NOTE(review): a split whose two pieces lie strictly inside (xmin, xmax)
  // is rejected outright — presumably it cannot separate anything at the
  // blob boundary; confirm against the caller in wordrec.
  if (xmin < min_left && xmax > max_right) {
    return kBadPriority;
  }
  float grade = 0.0f;
  // grade_overlap: penalize horizontal overlap between the two pieces.
  int width1 = box1.width();
  int width2 = box2.width();
  int min_width = std::min(width1, width2);
  // x_gap is negative when the boxes overlap, so negate to get the overlap.
  int overlap = -box1.x_gap(box2);
  if (overlap == min_width) {
    grade += 100.0f; // Total overlap.
  } else {
    // Over half the smaller piece overlapped: amplify the penalty.
    if (2 * overlap > min_width) {
      overlap += 2 * overlap - min_width;
    }
    if (overlap > 0) {
      grade += overlap_knob * overlap;
    }
  }
  // grade_center_of_blob: penalize off-center cuts of narrow pieces,
  // capped at kCenterGradeCap.
  if (width1 <= centered_maxwidth || width2 <= centered_maxwidth) {
    grade += std::min(static_cast<double>(kCenterGradeCap), center_knob * abs(width1 - width2));
  }
  // grade_width_change: penalize cuts that change the overall width little,
  // i.e. the pieces mostly stack on top of each other.
  float width_change_grade = 20 - (max_right - min_left - std::max(width1, width2));
  if (width_change_grade > 0.0f) {
    grade += width_change_grade * width_change_knob;
  }
  return grade;
}
// Returns true if *this SPLIT appears OK in the sense that it does not cross
// any outlines and does not chop off any ridiculously small pieces.
bool SPLIT::IsHealthy(const TBLOB &blob, int min_points, int min_area) const {
  // Reject splits that would leave a tiny fragment.
  if (IsLittleChunk(min_points, min_area)) {
    return false;
  }
  // Reject splits whose cut segment crosses an outline of the blob.
  return !blob.SegmentCrossesOutline(point1->pos, point2->pos);
}
// Returns true if the split generates a small chunk in terms of either area
// or number of points.
bool SPLIT::IsLittleChunk(int min_points, int min_area) const {
  // A side is "little" only when it is short (few points, non-circular)
  // AND encloses less than min_area. Check both sides of the cut.
  return (point1->ShortNonCircularSegment(min_points, point2) &&
          point1->SegmentArea(point2) < min_area) ||
         (point2->ShortNonCircularSegment(min_points, point1) &&
          point2->SegmentArea(point1) < min_area);
}
/**********************************************************************
 * make_edgept
 *
 * Create an EDGEPT and hook it into an existing list of edge points.
 * The new point is inserted between prev and next at position (x, y).
 * If prev carries source-outline step information and prev/next are
 * adjacent, the steps are divided between prev and the new point so the
 * polygonal approximation stays linked to the original C_OUTLINE.
 **********************************************************************/
EDGEPT *make_edgept(TDimension x, TDimension y, EDGEPT *next, EDGEPT *prev) {
  EDGEPT *this_edgept;
  /* Create point */
  this_edgept = new EDGEPT;
  this_edgept->pos.x = x;
  this_edgept->pos.y = y;
  // Now deal with the src_outline steps.
  C_OUTLINE *prev_ol = prev->src_outline;
  if (prev_ol != nullptr && prev->next == next) {
    // Compute the fraction of the segment that is being cut.
    FCOORD segment_vec(next->pos.x - prev->pos.x, next->pos.y - prev->pos.y);
    FCOORD target_vec(x - prev->pos.x, y - prev->pos.y);
    double cut_fraction = target_vec.length() / segment_vec.length();
    // Get the start and end at the step level.
    ICOORD step_start = prev_ol->position_at_index(prev->start_step);
    int end_step = prev->start_step + prev->step_count;
    // Steps index modulo the outline path length (the outline is closed).
    int step_length = prev_ol->pathlength();
    ICOORD step_end = prev_ol->position_at_index(end_step % step_length);
    ICOORD step_vec = step_end - step_start;
    double target_length = step_vec.length() * cut_fraction;
    // Find the point on the segment that gives the length nearest to target.
    int best_step = prev->start_step;
    ICOORD total_step(0, 0);
    double best_dist = target_length;
    for (int s = prev->start_step; s < end_step; ++s) {
      total_step += prev_ol->step(s % step_length);
      double dist = fabs(target_length - total_step.length());
      if (dist < best_dist) {
        best_dist = dist;
        // +1: the new point starts AFTER the step that got us closest.
        best_step = s + 1;
      }
    }
    // The new point is an intermediate point: it takes the tail of prev's
    // step range and prev keeps the head.
    this_edgept->src_outline = prev_ol;
    this_edgept->step_count = end_step - best_step;
    this_edgept->start_step = best_step % step_length;
    prev->step_count = best_step - prev->start_step;
  } else {
    // The new point is poly only.
    this_edgept->src_outline = nullptr;
    this_edgept->step_count = 0;
    this_edgept->start_step = 0;
  }
  /* Hook it up */
  this_edgept->next = next;
  this_edgept->prev = prev;
  prev->next = this_edgept;
  next->prev = this_edgept;
  /* Set up vec entries (direction vectors to the following point) */
  this_edgept->vec.x = this_edgept->next->pos.x - x;
  this_edgept->vec.y = this_edgept->next->pos.y - y;
  this_edgept->prev->vec.x = x - this_edgept->prev->pos.x;
  this_edgept->prev->vec.y = y - this_edgept->prev->pos.y;
  return this_edgept;
}
/**********************************************************************
 * remove_edgept
 *
 * Remove a given EDGEPT from its list and delete it.
 * Neighbouring links and the predecessor's direction vector are fixed up,
 * and the removed point's outline steps are merged into the predecessor
 * when both originate from the same source outline.
 **********************************************************************/
void remove_edgept(EDGEPT *point) {
  EDGEPT *before = point->prev;
  EDGEPT *after = point->next;
  // Add point's steps onto the predecessor's if they share an outline.
  if (before->src_outline != nullptr && before->src_outline == point->src_outline) {
    before->step_count += point->step_count;
  }
  // Unlink the point from the doubly-linked loop.
  before->next = after;
  after->prev = before;
  // Recompute the predecessor's direction vector to its new successor.
  before->vec.x = after->pos.x - before->pos.x;
  before->vec.y = after->pos.y - before->pos.y;
  delete point;
}
/**********************************************************************
 * Print
 *
 * Shows the coordinates of both points in a split.
 * Emits "(x1,y1)--(x2,y2)" with no trailing newline.
 **********************************************************************/
void SPLIT::Print() const {
  tprintf("(%d,%d)--(%d,%d)", point1->pos.x, point1->pos.y, point2->pos.x, point2->pos.y);
}
#ifndef GRAPHICS_DISABLED
// Draws the split in the given window.
// Draws a green line between the two endpoints and flushes the window so
// the line is immediately visible.
void SPLIT::Mark(ScrollView *window) const {
  window->Pen(ScrollView::GREEN);
  window->Line(point1->pos.x, point1->pos.y, point2->pos.x, point2->pos.y);
  window->UpdateWindow();
}
#endif
// Creates two outlines out of one by splitting the original one in half.
// Inserts the resulting outlines into the given list.
void SPLIT::SplitOutlineList(TESSLINE *outlines) const {
  // Cut the edge-point loop; after this, point1 and point2 each head a
  // separate closed loop.
  SplitOutline();
  // Advance to the tail of the outline list.
  while (outlines->next != nullptr) {
    outlines = outlines->next;
  }
  // Append a new outline for each of the two loops created by the cut.
  outlines->next = new TESSLINE;
  outlines->next->loop = point1;
  outlines->next->ComputeBoundingBox();
  outlines = outlines->next;
  outlines->next = new TESSLINE;
  outlines->next->loop = point2;
  outlines->next->ComputeBoundingBox();
  // Terminate the list.
  outlines->next->next = nullptr;
}
// Makes a split between these two edge points, but does not affect the
// outlines to which they belong.
// Inserts a duplicate of each endpoint and rewires the loop so that the
// single edge-point loop becomes two loops joined at the cut.
void SPLIT::SplitOutline() const {
  EDGEPT *temp2 = point2->next;
  EDGEPT *temp1 = point1->next;
  /* Create two new points: new_point1 duplicates point1 (linking temp1 back
     to point2) and new_point2 duplicates point2 (linking temp2 back to
     point1). */
  EDGEPT *new_point1 = make_edgept(point1->pos.x, point1->pos.y, temp1, point2);
  EDGEPT *new_point2 = make_edgept(point2->pos.x, point2->pos.y, temp2, point1);
  // point1 and 2 are now cross-over points, so they must have nullptr
  // src_outlines and give their src_outline information their new
  // replacements.
  new_point1->src_outline = point1->src_outline;
  new_point1->start_step = point1->start_step;
  new_point1->step_count = point1->step_count;
  new_point2->src_outline = point2->src_outline;
  new_point2->start_step = point2->start_step;
  new_point2->step_count = point2->step_count;
  point1->src_outline = nullptr;
  point1->start_step = 0;
  point1->step_count = 0;
  point2->src_outline = nullptr;
  point2->start_step = 0;
  point2->step_count = 0;
}
// Undoes the effect of SplitOutlineList, correcting the outlines for undoing
// the split, but possibly leaving some duplicate outlines.
// The rejoined loop is re-registered by prepending two new outlines headed
// by point1 and point2; stale outlines from the split are not removed here.
void SPLIT::UnsplitOutlineList(TBLOB *blob) const {
  /* Modify edge points */
  UnsplitOutlines();
  auto *outline1 = new TESSLINE;
  outline1->next = blob->outlines;
  blob->outlines = outline1;
  outline1->loop = point1;
  auto *outline2 = new TESSLINE;
  outline2->next = blob->outlines;
  blob->outlines = outline2;
  outline2->loop = point2;
}
// Removes the split that was put between these two points.
// Deletes the duplicate edge points created by SplitOutline and rewires
// point1/point2 to take their places, restoring a single loop.
void SPLIT::UnsplitOutlines() const {
  EDGEPT *tmp1 = point1->next;
  EDGEPT *tmp2 = point2->next;
  // Bypass the duplicates in the prev direction first.
  tmp1->next->prev = point2;
  tmp2->next->prev = point1;
  // tmp2 is coincident with point1. point1 takes tmp2's place as tmp2 is
  // deleted.
  point1->next = tmp2->next;
  point1->src_outline = tmp2->src_outline;
  point1->start_step = tmp2->start_step;
  point1->step_count = tmp2->step_count;
  // Likewise point2 takes tmp1's place.
  point2->next = tmp1->next;
  point2->src_outline = tmp1->src_outline;
  point2->start_step = tmp1->start_step;
  point2->step_count = tmp1->step_count;
  delete tmp1;
  delete tmp2;
  // Recompute direction vectors for the rejoined neighbours.
  point1->vec.x = point1->next->pos.x - point1->pos.x;
  point1->vec.y = point1->next->pos.y - point1->pos.y;
  point2->vec.x = point2->next->pos.x - point2->pos.x;
  point2->vec.y = point2->next->pos.y - point2->pos.y;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/split.cpp
|
C++
|
apache-2.0
| 11,200
|
/******************************************************************************
*
* File: split.h
* Author: Mark Seaman, SW Productivity
* Status: Reusable Software Component
*
* (c) Copyright 1987, Hewlett-Packard Company.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef SPLIT_H
#define SPLIT_H
#include "blobs.h" // for EDGEPT, TBLOB, TESSLINE
#include "params.h" // for BOOL_VAR_H, BoolParam
#include "rect.h" // for TBOX
namespace tesseract {
class ScrollView;
/*----------------------------------------------------------------------
T y p e s
----------------------------------------------------------------------*/
struct SPLIT {
  SPLIT() : point1(nullptr), point2(nullptr) {}
  SPLIT(EDGEPT *pt1, EDGEPT *pt2) : point1(pt1), point2(pt2) {}
  // Returns the bounding box of all the points in the split.
  TBOX bounding_box() const {
    return TBOX(std::min(point1->pos.x, point2->pos.x), std::min(point1->pos.y, point2->pos.y),
                std::max(point1->pos.x, point2->pos.x), std::max(point1->pos.y, point2->pos.y));
  }
  // Returns the bounding box of the outline from point1 to point2.
  TBOX Box12() const {
    return point1->SegmentBox(point2);
  }
  // Returns the bounding box of the outline from point2 to point1.
  TBOX Box21() const {
    return point2->SegmentBox(point1);
  }
  // Hides the SPLIT so the outlines appear not to be cut by it.
  void Hide() const;
  // Undoes hide, so the outlines are cut by the SPLIT.
  void Reveal() const;
  // Returns true if the given EDGEPT is used by this SPLIT, checking only
  // the EDGEPT pointer, not the coordinates.
  bool UsesPoint(const EDGEPT *point) const {
    return point1 == point || point2 == point;
  }
  // Returns true if the other SPLIT has any position shared with *this.
  bool SharesPosition(const SPLIT &other) const {
    return point1->EqualPos(*other.point1) || point1->EqualPos(*other.point2) ||
           point2->EqualPos(*other.point1) || point2->EqualPos(*other.point2);
  }
  // Returns true if both points are contained within the blob.
  bool ContainedByBlob(const TBLOB &blob) const {
    return blob.Contains(point1->pos) && blob.Contains(point2->pos);
  }
  // Returns true if both points are contained within the outline.
  bool ContainedByOutline(const TESSLINE &outline) const {
    return outline.Contains(point1->pos) && outline.Contains(point2->pos);
  }
  // Compute a split priority based on the bounding boxes of the parts.
  // The arguments here are config parameters defined in Wordrec. Add chop_
  // to the beginning of the name.
  float FullPriority(int xmin, int xmax, double overlap_knob, int centered_maxwidth,
                     double center_knob, double width_change_knob) const;
  // Returns true if *this SPLIT appears OK in the sense that it does not cross
  // any outlines and does not chop off any ridiculously small pieces.
  bool IsHealthy(const TBLOB &blob, int min_points, int min_area) const;
  // Returns true if the split generates a small chunk in terms of either area
  // or number of points.
  bool IsLittleChunk(int min_points, int min_area) const;
  // Prints both endpoints as "(x1,y1)--(x2,y2)".
  void Print() const;
#ifndef GRAPHICS_DISABLED
  // Draws the split in the given window.
  void Mark(ScrollView *window) const;
#endif
  // Creates two outlines out of one by splitting the original one in half.
  // Inserts the resulting outlines into the given list.
  void SplitOutlineList(TESSLINE *outlines) const;
  // Makes a split between these two edge points, but does not affect the
  // outlines to which they belong.
  void SplitOutline() const;
  // Undoes the effect of SplitOutlineList, correcting the outlines for undoing
  // the split, but possibly leaving some duplicate outlines.
  void UnsplitOutlineList(TBLOB *blob) const;
  // Removes the split that was put between these two points.
  void UnsplitOutlines() const;
  // The two edge points that the split runs between.
  EDGEPT *point1;
  EDGEPT *point2;
};
/*----------------------------------------------------------------------
V a r i a b l e s
----------------------------------------------------------------------*/
extern BOOL_VAR_H(wordrec_display_splits);
/*----------------------------------------------------------------------
F u n c t i o n s
----------------------------------------------------------------------*/
EDGEPT *make_edgept(TDimension x, TDimension y, EDGEPT *next, EDGEPT *prev);
void remove_edgept(EDGEPT *point);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/split.h
|
C++
|
apache-2.0
| 5,114
|
/**********************************************************************
* File: statistc.cpp (Formerly stats.c)
* Description: Simple statistical package for integer values.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "statistc.h"
#include "errcode.h"
#include "scrollview.h"
#include "tprintf.h"
#include "helpers.h"
#include <cmath>
#include <cstdlib>
#include <cstring>
namespace tesseract {
/**********************************************************************
 * STATS::STATS
 *
 * Construct a new stats element by allocating and zeroing the memory.
 * An invalid range (max < min) falls back to the minimal range [0, 1].
 **********************************************************************/
STATS::STATS(int32_t min_bucket_value, int32_t max_bucket_value) {
  if (max_bucket_value < min_bucket_value) {
    // Degenerate range: use a minimal valid one instead.
    min_bucket_value = 0;
    max_bucket_value = 1;
  }
  rangemin_ = min_bucket_value;
  rangemax_ = max_bucket_value;
  const int32_t num_buckets = 1 + rangemax_ - rangemin_;
  buckets_ = new int32_t[num_buckets];
  clear();
}
/**********************************************************************
 * STATS::set_range
 *
 * Alter the range on an existing stats element.
 * Returns false (leaving the object untouched) if max < min.
 * The buckets are reallocated only if the range size changes, and are
 * always zeroed on success.
 **********************************************************************/
bool STATS::set_range(int32_t min_bucket_value, int32_t max_bucket_value) {
  if (max_bucket_value < min_bucket_value) {
    return false;
  }
  const int32_t new_size = 1 + max_bucket_value - min_bucket_value;
  if (1 + rangemax_ - rangemin_ != new_size) {
    // Size changed: the old allocation cannot be reused.
    delete[] buckets_;
    buckets_ = new int32_t[new_size];
  }
  rangemin_ = min_bucket_value;
  rangemax_ = max_bucket_value;
  clear(); // zero it
  return true;
}
/**********************************************************************
 * STATS::clear
 *
 * Clear out the STATS class by zeroing all the buckets.
 * Also resets the running total of samples.
 **********************************************************************/
void STATS::clear() {
  total_count_ = 0;
  if (buckets_ == nullptr) {
    return;
  }
  memset(buckets_, 0, (1 + rangemax_ - rangemin_) * sizeof(buckets_[0]));
}
/**********************************************************************
 * STATS::~STATS
 *
 * Destructor for a stats class. Releases the bucket array.
 **********************************************************************/
STATS::~STATS() {
  delete[] buckets_;
}
/**********************************************************************
 * STATS::add
 *
 * Add a set of samples to (or delete from, with negative count) a pile.
 * Values outside [rangemin_, rangemax_] are clipped to the edge buckets.
 **********************************************************************/
void STATS::add(int32_t value, int32_t count) {
  if (buckets_ == nullptr) {
    return;
  }
  const int32_t index = ClipToRange(value, rangemin_, rangemax_) - rangemin_;
  buckets_[index] += count;
  total_count_ += count; // keep count of total
}
/**********************************************************************
 * STATS::mode
 *
 * Find the mode of a stats class, i.e. the value of the fullest bucket.
 **********************************************************************/
int32_t STATS::mode() const {
  if (buckets_ == nullptr) {
    return rangemin_;
  }
  // Seed with bucket 0 and scan from the top down with a strict comparison:
  // bucket 0 wins any tie, otherwise the highest tied index wins.
  int32_t best_count = buckets_[0];
  int32_t best_index = 0;
  for (int index = rangemax_ - rangemin_; index > 0; --index) {
    if (buckets_[index] > best_count) {
      best_count = buckets_[index];
      best_index = index;
    }
  }
  return best_index + rangemin_; // value of fullest bucket
}
/**********************************************************************
 * STATS::mean
 *
 * Find the mean of a stats class.
 * Returns rangemin_ when there are no samples.
 **********************************************************************/
double STATS::mean() const {
  if (buckets_ == nullptr || total_count_ <= 0) {
    return static_cast<double>(rangemin_);
  }
  // Weighted sum of bucket indices; converted back to a value at the end.
  int64_t weighted_sum = 0;
  const int last = rangemax_ - rangemin_;
  for (int index = 0; index <= last; ++index) {
    weighted_sum += static_cast<int64_t>(index) * buckets_[index];
  }
  return static_cast<double>(weighted_sum) / total_count_ + rangemin_;
}
/**********************************************************************
 * STATS::sd
 *
 * Find the standard deviation of a stats class.
 * Uses the one-pass E[x^2] - E[x]^2 formula over bucket indices (the
 * rangemin_ offset cancels in the variance). Returns 0.0 when empty or
 * when rounding drives the computed variance non-positive.
 **********************************************************************/
double STATS::sd() const { // standard deviation
  if (buckets_ == nullptr || total_count_ <= 0) {
    return 0.0;
  }
  int64_t sum = 0;
  double sqsum = 0.0;
  for (int index = rangemax_ - rangemin_; index >= 0; --index) {
    sum += static_cast<int64_t>(index) * buckets_[index];
    sqsum += static_cast<double>(index) * index * buckets_[index];
  }
  double variance = static_cast<double>(sum) / total_count_;
  variance = sqsum / total_count_ - variance * variance;
  if (variance > 0.0) {
    return sqrt(variance);
  }
  return 0.0;
}
/**********************************************************************
 * STATS::ile
 *
 * Returns the fractile value such that frac fraction (in [0,1]) of samples
 * has a value less than the return value.
 * The result is interpolated within the bucket that crosses the target
 * cumulative count, so it is generally fractional.
 **********************************************************************/
double STATS::ile(double frac) const {
  if (buckets_ == nullptr || total_count_ == 0) {
    return static_cast<double>(rangemin_);
  }
#if 0
  // TODO(rays) The existing code doesn't seem to be doing the right thing
  // with target a double but this substitute crashes the code that uses it.
  // Investigate and fix properly.
  int target = IntCastRounded(frac * total_count_);
  target = ClipToRange(target, 1, total_count_);
#else
  double target = frac * total_count_;
  target = ClipToRange(target, 1.0, static_cast<double>(total_count_));
#endif
  // Accumulate bucket counts until the cumulative sum reaches the target.
  int sum = 0;
  int index = 0;
  for (index = 0; index <= rangemax_ - rangemin_ && sum < target; sum += buckets_[index++]) {
    ;
  }
  if (index > 0) {
    ASSERT_HOST(buckets_[index - 1] > 0);
    // Interpolate within the crossing bucket: back off by the overshoot
    // fraction (sum - target) / bucket_count.
    return rangemin_ + index - static_cast<double>(sum - target) / buckets_[index - 1];
  } else {
    return static_cast<double>(rangemin_);
  }
}
/**********************************************************************
 * STATS::min_bucket
 *
 * Find REAL minimum bucket - ile(0.0) isn't necessarily correct
 * Returns the value of the lowest non-empty bucket.
 **********************************************************************/
int32_t STATS::min_bucket() const {
  if (buckets_ == nullptr || total_count_ == 0) {
    return rangemin_;
  }
  const int32_t last = rangemax_ - rangemin_;
  int32_t index = 0;
  // Scan upwards for the first non-empty bucket.
  while (index <= last && buckets_[index] == 0) {
    ++index;
  }
  return rangemin_ + index;
}
/**********************************************************************
 * STATS::max_bucket
 *
 * Find REAL maximum bucket - ile(1.0) isn't necessarily correct
 * Returns the value of the highest non-empty bucket.
 **********************************************************************/
int32_t STATS::max_bucket() const {
  if (buckets_ == nullptr || total_count_ == 0) {
    return rangemin_;
  }
  int32_t index = rangemax_ - rangemin_;
  // Scan downwards for the last non-empty bucket.
  while (index > 0 && buckets_[index] == 0) {
    --index;
  }
  return rangemin_ + index;
}
/**********************************************************************
 * STATS::median
 *
 * Finds a more useful estimate of median than ile(0.5).
 *
 * Overcomes a problem with ile() - if the samples are, for example,
 * 6,6,13,14 ile(0.5) return 7.0 - when a more useful value would be midway
 * between 6 and 13 = 9.5
 **********************************************************************/
double STATS::median() const { // get median
  if (buckets_ == nullptr) {
    return static_cast<double>(rangemin_);
  }
  double median = ile(0.5);
  int median_pile = static_cast<int>(floor(median));
  // If ile() landed in an empty pile, replace it with the midpoint of the
  // nearest non-empty piles on either side.
  if ((total_count_ > 1) && (pile_count(median_pile) == 0)) {
    int32_t min_pile;
    int32_t max_pile;
    /* Find preceding non zero pile */
    // NOTE(review): these scans have no explicit range guard; presumably
    // pile_count() clips out-of-range values to a non-empty edge bucket
    // (total_count_ > 1 guarantees some bucket is non-empty) — confirm
    // against statistc.h.
    for (min_pile = median_pile; pile_count(min_pile) == 0; min_pile--) {
      ;
    }
    /* Find following non zero pile */
    for (max_pile = median_pile; pile_count(max_pile) == 0; max_pile++) {
      ;
    }
    median = (min_pile + max_pile) / 2.0;
  }
  return median;
}
/**********************************************************************
 * STATS::local_min
 *
 * Return true if this point is a local min.
 * Plateaus of equal counts are skipped: x is a local min only if the
 * nearest differing neighbour on each side is not smaller.
 **********************************************************************/
bool STATS::local_min(int32_t x) const {
  if (buckets_ == nullptr) {
    return false;
  }
  x = ClipToRange(x, rangemin_, rangemax_) - rangemin_;
  if (buckets_[x] == 0) {
    return true; // An empty bucket is trivially a minimum.
  }
  const int32_t last = rangemax_ - rangemin_;
  // Skip the plateau of equal counts to the left.
  int32_t left = x - 1;
  while (left >= 0 && buckets_[left] == buckets_[x]) {
    --left;
  }
  if (left >= 0 && buckets_[left] < buckets_[x]) {
    return false;
  }
  // Skip the plateau of equal counts to the right.
  int32_t right = x + 1;
  while (right <= last && buckets_[right] == buckets_[x]) {
    ++right;
  }
  return !(right <= last && buckets_[right] < buckets_[x]);
}
/**********************************************************************
 * STATS::smooth
 *
 * Apply a triangular smoothing filter to the stats.
 * This makes the modes a bit more useful.
 * The factor gives the height of the triangle, i.e. the weight of the
 * centre.
 * Note: the smoothed counts are scaled up (weights are not normalized),
 * so absolute counts change but relative shape is preserved.
 **********************************************************************/
void STATS::smooth(int32_t factor) {
  if (buckets_ == nullptr || factor < 2) {
    return;
  }
  // Build the smoothed histogram in a temporary, then copy back.
  STATS result(rangemin_, rangemax_);
  int entrycount = 1 + rangemax_ - rangemin_;
  for (int entry = 0; entry < entrycount; entry++) {
    // centre weight
    int count = buckets_[entry] * factor;
    // Neighbours at distance `offset` get linearly decreasing weight.
    for (int offset = 1; offset < factor; offset++) {
      if (entry - offset >= 0) {
        count += buckets_[entry - offset] * (factor - offset);
      }
      if (entry + offset < entrycount) {
        count += buckets_[entry + offset] * (factor - offset);
      }
    }
    result.add(entry + rangemin_, count);
  }
  total_count_ = result.total_count_;
  memcpy(buckets_, result.buckets_, entrycount * sizeof(buckets_[0]));
}
/**********************************************************************
 * STATS::cluster
 *
 * Cluster the samples into max_cluster clusters.
 * Each call runs one iteration. The array of clusters must be
 * max_clusters+1 in size as cluster 0 is used to indicate which samples
 * have been used.
 * The return value is the current number of clusters.
 **********************************************************************/
int32_t STATS::cluster(float lower, // thresholds
                       float upper,
                       float multiple, // distance threshold
                       int32_t max_clusters, // max no to make
                       STATS *clusters) { // array of clusters
  bool new_cluster; // added one
  float *centres; // cluster centres
  int32_t entry; // bucket index
  int32_t cluster; // cluster index
  int32_t best_cluster; // one to assign to
  int32_t new_centre = 0; // residual mode
  int32_t new_mode; // pile count of new_centre
  int32_t count; // pile to place
  float dist; // from cluster
  float min_dist; // from best_cluster
  int32_t cluster_count; // no of clusters
  if (buckets_ == nullptr || max_clusters < 1) {
    return 0;
  }
  centres = new float[max_clusters + 1];
  // First pass: re-grow any clusters that already exist from a previous
  // call, absorbing neighbouring samples downhill from each mode while
  // they stay within `lower` of the centre.
  for (cluster_count = 1;
       cluster_count <= max_clusters && clusters[cluster_count].buckets_ != nullptr &&
       clusters[cluster_count].total_count_ > 0;
       cluster_count++) {
    centres[cluster_count] = static_cast<float>(clusters[cluster_count].ile(0.5));
    new_centre = clusters[cluster_count].mode();
    // Absorb unclaimed samples just below the mode (monotonically
    // non-increasing counts only).
    for (entry = new_centre - 1; centres[cluster_count] - entry < lower && entry >= rangemin_ &&
                                 pile_count(entry) <= pile_count(entry + 1);
         entry--) {
      count = pile_count(entry) - clusters[0].pile_count(entry);
      if (count > 0) {
        clusters[cluster_count].add(entry, count);
        clusters[0].add(entry, count);
      }
    }
    // Absorb unclaimed samples just above the mode, symmetrically.
    for (entry = new_centre + 1; entry - centres[cluster_count] < lower && entry <= rangemax_ &&
                                 pile_count(entry) <= pile_count(entry - 1);
         entry++) {
      count = pile_count(entry) - clusters[0].pile_count(entry);
      if (count > 0) {
        clusters[cluster_count].add(entry, count);
        clusters[0].add(entry, count);
      }
    }
  }
  cluster_count--;
  // Cluster 0 tracks which samples have been claimed by any cluster.
  if (cluster_count == 0) {
    clusters[0].set_range(rangemin_, rangemax_);
  }
  // Second pass: repeatedly seed a new cluster at the biggest unclaimed
  // pile that is far enough from all existing centres.
  do {
    new_cluster = false;
    new_mode = 0;
    for (entry = 0; entry <= rangemax_ - rangemin_; entry++) {
      count = buckets_[entry] - clusters[0].buckets_[entry];
      // remaining pile
      if (count > 0) { // any to handle
        min_dist = static_cast<float>(INT32_MAX);
        best_cluster = 0;
        // Find the nearest existing cluster centre.
        for (cluster = 1; cluster <= cluster_count; cluster++) {
          dist = entry + rangemin_ - centres[cluster];
          // find distance
          if (dist < 0) {
            dist = -dist;
          }
          if (dist < min_dist) {
            min_dist = dist; // find least
            best_cluster = cluster;
          }
        }
        // A pile seeds a new cluster only if it is beyond `upper` of the
        // nearest centre AND outside its multiplicative band.
        if (min_dist > upper // far enough for new
            && (best_cluster == 0 || entry + rangemin_ > centres[best_cluster] * multiple ||
                entry + rangemin_ < centres[best_cluster] / multiple)) {
          if (count > new_mode) {
            new_mode = count;
            new_centre = entry + rangemin_;
          }
        }
      }
    }
    // need new and room
    if (new_mode > 0 && cluster_count < max_clusters) {
      cluster_count++;
      new_cluster = true;
      if (!clusters[cluster_count].set_range(rangemin_, rangemax_)) {
        delete[] centres;
        return 0;
      }
      centres[cluster_count] = static_cast<float>(new_centre);
      clusters[cluster_count].add(new_centre, new_mode);
      clusters[0].add(new_centre, new_mode);
      // Grow the new cluster downhill on both sides, as in the first pass.
      for (entry = new_centre - 1; centres[cluster_count] - entry < lower && entry >= rangemin_ &&
                                   pile_count(entry) <= pile_count(entry + 1);
           entry--) {
        count = pile_count(entry) - clusters[0].pile_count(entry);
        if (count > 0) {
          clusters[cluster_count].add(entry, count);
          clusters[0].add(entry, count);
        }
      }
      for (entry = new_centre + 1; entry - centres[cluster_count] < lower && entry <= rangemax_ &&
                                   pile_count(entry) <= pile_count(entry - 1);
           entry++) {
        count = pile_count(entry) - clusters[0].pile_count(entry);
        if (count > 0) {
          clusters[cluster_count].add(entry, count);
          clusters[0].add(entry, count);
        }
      }
      // Re-centre on the median of what was gathered.
      centres[cluster_count] = static_cast<float>(clusters[cluster_count].ile(0.5));
    }
  } while (new_cluster && cluster_count < max_clusters);
  delete[] centres;
  return cluster_count;
}
// Helper tests that the current index is still part of the peak and gathers
// the data into the peak, returning false when the peak is ended.
// src_buckets[index] - used_buckets[index] is the unused part of the histogram.
// prev_count is the histogram count of the previous index on entry and is
// updated to the current index on return.
// total_count and total_value are accumulating the mean of the peak.
static bool GatherPeak(int index, const int *src_buckets, int *used_buckets, int *prev_count,
                       int *total_count, double *total_value) {
  const int unused = src_buckets[index] - used_buckets[index];
  // The peak ends when nothing is left here or the count rises again.
  if (unused <= 0 || unused > *prev_count) {
    return false;
  }
  // Accumulate count and index.count product for the peak mean.
  *total_count += unused;
  *total_value += index * unused;
  // Mark this index as fully consumed.
  used_buckets[index] = src_buckets[index];
  *prev_count = unused;
  return true;
}
// Finds (at most) the top max_modes modes, well actually the whole peak around
// each mode, returning them in the given modes vector as a <mean of peak,
// total count of peak> pair in order of decreasing total count.
// Since the mean is the key and the count the data in the pair, a single call
// to sort on the output will re-sort by increasing mean of peak if that is
// more useful than decreasing total count.
// Returns the actual number of modes found.
int STATS::top_n_modes(int max_modes, std::vector<KDPairInc<float, int>> &modes) const {
  if (max_modes <= 0) {
    return 0;
  }
  int src_count = 1 + rangemax_ - rangemin_;
  // Used copies the counts in buckets_ as they get used.
  STATS used(rangemin_, rangemax_);
  modes.clear();
  // Total count of the smallest peak found so far.
  int least_count = 1;
  // Mode that is used as a seed for each peak
  int max_count = 0;
  do {
    // Find an unused mode: the largest remaining (unconsumed) bucket.
    max_count = 0;
    int max_index = 0;
    for (int src_index = 0; src_index < src_count; src_index++) {
      int pile_count = buckets_[src_index] - used.buckets_[src_index];
      if (pile_count > max_count) {
        max_count = pile_count;
        max_index = src_index;
      }
    }
    if (max_count > 0) {
      // Copy the bucket count to used so it doesn't get found again.
      used.buckets_[max_index] = max_count;
      // Get the entire peak.
      double total_value = max_index * max_count;
      int total_count = max_count;
      int prev_pile = max_count;
      // Walk right from the seed while counts are non-increasing.
      for (int offset = 1; max_index + offset < src_count; ++offset) {
        if (!GatherPeak(max_index + offset, buckets_, used.buckets_, &prev_pile, &total_count,
                        &total_value)) {
          break;
        }
      }
      // Walk left likewise, restarting from the seed's full count.
      prev_pile = buckets_[max_index];
      for (int offset = 1; max_index - offset >= 0; ++offset) {
        if (!GatherPeak(max_index - offset, buckets_, used.buckets_, &prev_pile, &total_count,
                        &total_value)) {
          break;
        }
      }
      if (total_count > least_count || modes.size() < static_cast<size_t>(max_modes)) {
        // We definitely want this mode, so if we have enough discard the least.
        if (modes.size() == static_cast<size_t>(max_modes)) {
          modes.resize(max_modes - 1);
        }
        size_t target_index = 0;
        // Linear search for the target insertion point.
        while (target_index < modes.size() && modes[target_index].data() >= total_count) {
          ++target_index;
        }
        // Mean of the peak, mapped back into the real value range.
        auto peak_mean = static_cast<float>(total_value / total_count + rangemin_);
        modes.insert(modes.begin() + target_index, KDPairInc<float, int>(peak_mean, total_count));
        least_count = modes.back().data();
      }
    }
  } while (max_count > 0);
  return modes.size();
}
/**********************************************************************
* STATS::print
*
* Prints a summary and table of the histogram.
**********************************************************************/
void STATS::print() const {
if (buckets_ == nullptr) {
return;
}
int32_t min = min_bucket() - rangemin_;
int32_t max = max_bucket() - rangemin_;
int num_printed = 0;
for (int index = min; index <= max; index++) {
if (buckets_[index] != 0) {
tprintf("%4d:%-3d ", rangemin_ + index, buckets_[index]);
if (++num_printed % 8 == 0) {
tprintf("\n");
}
}
}
tprintf("\n");
print_summary();
}
/**********************************************************************
* STATS::print_summary
*
* Print a summary of the stats.
**********************************************************************/
void STATS::print_summary() const {
if (buckets_ == nullptr) {
return;
}
int32_t min = min_bucket();
int32_t max = max_bucket();
tprintf("Total count=%d\n", total_count_);
tprintf("Min=%.2f Really=%d\n", ile(0.0), min);
tprintf("Lower quartile=%.2f\n", ile(0.25));
tprintf("Median=%.2f, ile(0.5)=%.2f\n", median(), ile(0.5));
tprintf("Upper quartile=%.2f\n", ile(0.75));
tprintf("Max=%.2f Really=%d\n", ile(1.0), max);
tprintf("Range=%d\n", max + 1 - min);
tprintf("Mean= %.2f\n", mean());
tprintf("SD= %.2f\n", sd());
}
/**********************************************************************
 * STATS::plot
 *
 * Draw a histogram of the stats table.
 **********************************************************************/
#ifndef GRAPHICS_DISABLED
void STATS::plot(ScrollView *window, // to draw in
                 float xorigin,      // bottom left
                 float yorigin,
                 float xscale, // one x unit
                 float yscale, // one y unit
                 ScrollView::Color colour) const { // colour to draw in
  if (buckets_ == nullptr) {
    return;
  }
  window->Pen(colour);
  // One bar per bucket; bar height is the bucket count scaled by yscale.
  const int num_buckets = rangemax_ - rangemin_ + 1;
  for (int i = 0; i < num_buckets; i++) {
    window->Rectangle(xorigin + xscale * i, yorigin, xorigin + xscale * (i + 1),
                      yorigin + yscale * buckets_[i]);
  }
}
#endif
/**********************************************************************
 * STATS::plotline
 *
 * Draw a histogram of the stats table. (Line only)
 **********************************************************************/
#ifndef GRAPHICS_DISABLED
void STATS::plotline(ScrollView *window, // to draw in
                     float xorigin,      // bottom left
                     float yorigin,
                     float xscale, // one x unit
                     float yscale, // one y unit
                     ScrollView::Color colour) const { // colour to draw in
  if (buckets_ == nullptr) {
    return;
  }
  window->Pen(colour);
  // Start the polyline at bucket 0, then connect each bucket in turn.
  window->SetCursor(xorigin, yorigin + yscale * buckets_[0]);
  const int num_buckets = rangemax_ - rangemin_ + 1;
  for (int i = 0; i < num_buckets; i++) {
    window->DrawTo(xorigin + xscale * i, yorigin + yscale * buckets_[i]);
  }
}
#endif
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/statistc.cpp
|
C++
|
apache-2.0
| 22,644
|
/**********************************************************************
* File: statistc.h (Formerly stats.h)
* Description: Class description for STATS class.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCSTRUCT_STATISTC_H_
#define TESSERACT_CCSTRUCT_STATISTC_H_
#include <cstdio>
#include "kdpair.h"
#include "scrollview.h"
namespace tesseract {
// Simple histogram-based statistics for integer values in a known
// range, such that the range is small compared to the number of samples.
class TESS_API STATS {
public:
  // The histogram buckets are in the range
  // [min_bucket_value, max_bucket_value].
  // Any data under min_bucket value is silently mapped to min_bucket_value,
  // and likewise, any data over max_bucket_value is silently mapped to
  // max_bucket_value.
  // In the internal array, min_bucket_value maps to 0 and
  // 1 + max_bucket_value - min_bucket_value to the array size.
  STATS(int32_t min_bucket_value, int32_t max_bucket_value);
  STATS() = default; // empty for arrays
  ~STATS();
  // (Re)Sets the range and clears the counts.
  // See the constructor for info on max and min values.
  bool set_range(int32_t min_bucket_value, int32_t max_bucket_value);
  void clear(); // empty buckets
  // Adds count samples of the given value to the histogram.
  // Per the class comment above, out-of-range values are mapped to the
  // nearest end of the range.
  void add(int32_t value, int32_t count);
  // "Accessors" return various statistics on the data.
  int32_t mode() const; // get mode of samples
  double mean() const; // get mean of samples
  double sd() const; // standard deviation
  // Returns the fractile value such that frac fraction (in [0,1]) of samples
  // has a value less than the return value.
  double ile(double frac) const;
  // Returns the minimum used entry in the histogram (ie the minimum of the
  // data, NOT the minimum of the supplied range, nor is it an index.)
  // Would normally be called min(), but that is a reserved word in VC++.
  int32_t min_bucket() const; // Find min
  // Returns the maximum used entry in the histogram (ie the maximum of the
  // data, NOT the maximum of the supplied range, nor is it an index.)
  int32_t max_bucket() const; // Find max
  // Finds a more useful estimate of median than ile(0.5).
  // Overcomes a problem with ile() - if the samples are, for example,
  // 6,6,13,14 ile(0.5) return 7.0 - when a more useful value would be midway
  // between 6 and 13 = 9.5
  double median() const; // get median of samples
  // Returns the count of the given value.
  // Out-of-range values return the count of the nearest end bucket;
  // an unallocated histogram returns 0.
  int32_t pile_count(int32_t value) const {
    if (buckets_ == nullptr) {
      return 0;
    }
    if (value <= rangemin_) {
      return buckets_[0];
    }
    if (value >= rangemax_) {
      return buckets_[rangemax_ - rangemin_];
    }
    return buckets_[value - rangemin_];
  }
  // Returns the total count of all buckets.
  int32_t get_total() const {
    return total_count_; // total of all piles
  }
  // Returns true if x is a local min.
  bool local_min(int32_t x) const;
  // Apply a triangular smoothing filter to the stats.
  // This makes the modes a bit more useful.
  // The factor gives the height of the triangle, i.e. the weight of the
  // centre.
  void smooth(int32_t factor);
  // Cluster the samples into max_cluster clusters.
  // Each call runs one iteration. The array of clusters must be
  // max_clusters+1 in size as cluster 0 is used to indicate which samples
  // have been used.
  // The return value is the current number of clusters.
  int32_t cluster(float lower, // thresholds
                  float upper,
                  float multiple, // distance threshold
                  int32_t max_clusters, // max no to make
                  STATS *clusters); // array of clusters
  // Finds (at most) the top max_modes modes, well actually the whole peak
  // around each mode, returning them in the given modes vector as a <mean of
  // peak, total count of peak> pair in order of decreasing total count. Since
  // the mean is the key and the count the data in the pair, a single call to
  // sort on the output will re-sort by increasing mean of peak if that is more
  // useful than decreasing total count. Returns the actual number of modes
  // found.
  int top_n_modes(int max_modes, std::vector<KDPairInc<float, int>> &modes) const;
  // Prints a summary and table of the histogram.
  void print() const;
  // Prints summary stats only of the histogram.
  void print_summary() const;
#ifndef GRAPHICS_DISABLED
  // Draws the histogram as a series of rectangles.
  void plot(ScrollView *window, // window to draw in
            float xorigin, // origin of histo
            float yorigin, // gram
            float xscale, // size of one unit
            float yscale, // size of one uint
            ScrollView::Color colour) const; // colour to draw in
  // Draws a line graph of the histogram.
  void plotline(ScrollView *window, // window to draw in
                float xorigin, // origin of histo
                float yorigin, // gram
                float xscale, // size of one unit
                float yscale, // size of one uint
                ScrollView::Color colour) const; // colour to draw in
#endif // !GRAPHICS_DISABLED
private:
  int32_t rangemin_ = 0; // min of range
  int32_t rangemax_ = 0; // max of range
  int32_t total_count_ = 0; // no of samples
  int32_t *buckets_ = nullptr; // array of cells
};
} // namespace tesseract
#endif // TESSERACT_CCSTRUCT_STATISTC_H_
|
2301_81045437/tesseract
|
src/ccstruct/statistc.h
|
C++
|
apache-2.0
| 6,280
|
/**********************************************************************
* File: stepblob.cpp (Formerly cblob.c)
* Description: Code for C_BLOB class.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "stepblob.h"
#include "points.h" // for operator+=, FCOORD, ICOORD
#include <allheaders.h> // for pixCreate, pixGetDepth
#include <vector> // for std::vector
namespace tesseract {
class DENORM;
// Max perimeter to width ratio for a baseline position above box bottom.
// Blobs more convoluted than this keep the box bottom as their baseline
// (see C_BLOB::EstimateBaselinePosition).
const double kMaxPerimeterWidthRatio = 8.0;
/**********************************************************************
 * position_outline
 *
 * Position the outline in the given list at the relevant place
 * according to its nesting. If the new outline encloses existing
 * outlines they become its children; if an existing outline encloses
 * it, the insertion recurses into that outline's child list.
 **********************************************************************/
static void position_outline( // put in place
    C_OUTLINE *outline, // thing to place
    C_OUTLINE_LIST *destlist // destination list
) {
  C_OUTLINE_IT it = destlist; // iterator
  // iterator on children
  C_OUTLINE_IT child_it = outline->child();
  if (!it.empty()) {
    do {
      // outline from dest list
      C_OUTLINE *dest_outline = it.data(); // get destination
      // encloses dest
      if (*dest_outline < *outline) {
        // take off list
        dest_outline = it.extract();
        // put this in place
        it.add_after_then_move(outline);
        // make it a child
        child_it.add_to_end(dest_outline);
        // The new outline may enclose further siblings too: sweep the
        // rest of the list and demote any it contains.
        while (!it.at_last()) {
          it.forward(); // do rest of list
          // check for other children
          dest_outline = it.data();
          if (*dest_outline < *outline) {
            // take off list
            dest_outline = it.extract();
            child_it.add_to_end(dest_outline);
            // make it a child
            if (it.empty()) {
              break;
            }
          }
        }
        return; // finished
      }
      // enclosed by dest
      else if (*outline < *dest_outline) {
        position_outline(outline, dest_outline->child());
        // place in child list
        return; // finished
      }
      it.forward();
    } while (!it.at_first());
  }
  // No containment relation with any existing outline: it is a sibling.
  it.add_to_end(outline); // at outer level
}
/**********************************************************************
 * plot_outline_list
 *
 * Draw a list of outlines in the given colour and their children
 * in the child colour.
 **********************************************************************/
#ifndef GRAPHICS_DISABLED
static void plot_outline_list( // draw outlines
    C_OUTLINE_LIST *list, // outline to draw
    ScrollView *window, // window to draw in
    ScrollView::Color colour, // colour to use
    ScrollView::Color child_colour // colour of children
) {
  C_OUTLINE_IT outline_it(list);
  for (outline_it.mark_cycle_pt(); !outline_it.cycled_list(); outline_it.forward()) {
    C_OUTLINE *current = outline_it.data();
    // Draw this outline, then recurse so all descendants get child_colour.
    current->plot(window, colour);
    if (!current->child()->empty()) {
      plot_outline_list(current->child(), window, child_colour, child_colour);
    }
  }
}
// Draws the outlines in the given colour, and child_colour, normalized
// using the given denorm, making use of sub-pixel accurate information
// if available.
static void plot_normed_outline_list(const DENORM &denorm, C_OUTLINE_LIST *list,
ScrollView::Color colour, ScrollView::Color child_colour,
ScrollView *window) {
C_OUTLINE_IT it(list);
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
C_OUTLINE *outline = it.data();
outline->plot_normed(denorm, colour, window);
if (!outline->child()->empty()) {
plot_normed_outline_list(denorm, outline->child(), child_colour, child_colour, window);
}
}
}
#endif
/**********************************************************************
 * reverse_outline_list
 *
 * Reverse a list of outlines and their children.
 **********************************************************************/
static void reverse_outline_list(C_OUTLINE_LIST *list) {
  // Flip the direction of every outline on the list, mark it inverse,
  // and recurse into any children.
  C_OUTLINE_IT outline_it(list);
  for (outline_it.mark_cycle_pt(); !outline_it.cycled_list(); outline_it.forward()) {
    C_OUTLINE *current = outline_it.data();
    current->reverse();
    current->set_flag(COUT_INVERSE, true);
    if (!current->child()->empty()) {
      reverse_outline_list(current->child());
    }
  }
}
/**********************************************************************
 * C_BLOB::C_BLOB
 *
 * Constructor to build a C_BLOB from a list of C_OUTLINEs.
 * The C_OUTLINEs are not copied so the source list is emptied.
 * The C_OUTLINEs are nested correctly in the blob.
 **********************************************************************/
C_BLOB::C_BLOB(C_OUTLINE_LIST *outline_list) {
  // Move every outline off the source list and slot it into the correct
  // nesting position in this blob's hierarchy.
  C_OUTLINE_IT ol_it(outline_list);
  while (!ol_it.empty()) {
    position_outline(ol_it.extract(), &outlines);
    ol_it.forward();
  }
  // Normalize directions and set the inverse flags.
  CheckInverseFlagAndDirection();
}
// Simpler constructor to build a blob from a single outline that has
// already been fully initialized.
C_BLOB::C_BLOB(C_OUTLINE *outline) {
  // The given outline becomes the sole top-level element; no nesting or
  // direction checks are performed.
  C_OUTLINE_IT outline_it(&outlines);
  outline_it.add_to_end(outline);
}
// Builds a set of one or more blobs from a list of outlines.
// Input: one outline on outline_list contains all the others, but the
// nesting and order are undefined.
// If good_blob is true, the blob is added to good_blobs_it, unless
// an illegal (generation-skipping) parent-child relationship is found.
// If so, the parent blob goes to bad_blobs_it, and the immediate children
// are promoted to the top level, recursively being sent to good_blobs_it.
// If good_blob is false, all created blobs will go to the bad_blobs_it.
// Output: outline_list is empty. One or more blobs are added to
// good_blobs_it and/or bad_blobs_it.
void C_BLOB::ConstructBlobsFromOutlines(bool good_blob, C_OUTLINE_LIST *outline_list,
                                        C_BLOB_IT *good_blobs_it, C_BLOB_IT *bad_blobs_it) {
  // List of top-level outlines with correctly nested children.
  C_OUTLINE_LIST nested_outlines;
  for (C_OUTLINE_IT ol_it(outline_list); !ol_it.empty(); ol_it.forward()) {
    C_OUTLINE *outline = ol_it.extract();
    // Position this outline in appropriate position in the hierarchy.
    position_outline(outline, &nested_outlines);
  }
  // Check for legal nesting and reassign as required.
  for (C_OUTLINE_IT ol_it(&nested_outlines); !ol_it.empty(); ol_it.forward()) {
    C_OUTLINE *outline = ol_it.extract();
    bool blob_is_good = good_blob;
    if (!outline->IsLegallyNested()) {
      // The blob is illegally nested.
      // Mark it bad, and add all its children to the top-level list.
      // The promoted children will be picked up by later iterations of
      // this same loop and made into their own (good) blobs.
      blob_is_good = false;
      ol_it.add_list_after(outline->child());
    }
    auto *blob = new C_BLOB(outline);
    // Set inverse flag and reverse if needed.
    blob->CheckInverseFlagAndDirection();
    // Put on appropriate list.
    if (!blob_is_good && bad_blobs_it != nullptr) {
      bad_blobs_it->add_after_then_move(blob);
    } else {
      good_blobs_it->add_after_then_move(blob);
    }
  }
}
// Sets the COUT_INVERSE flag appropriately on the outlines and their
// children recursively, reversing the outlines if needed so that
// everything has an anticlockwise top-level.
void C_BLOB::CheckInverseFlagAndDirection() {
C_OUTLINE_IT ol_it(&outlines);
for (ol_it.mark_cycle_pt(); !ol_it.cycled_list(); ol_it.forward()) {
C_OUTLINE *outline = ol_it.data();
if (outline->turn_direction() < 0) {
outline->reverse();
reverse_outline_list(outline->child());
outline->set_flag(COUT_INVERSE, true);
} else {
outline->set_flag(COUT_INVERSE, false);
}
}
}
// Build and return a fake blob containing a single fake outline with no
// steps.
C_BLOB *C_BLOB::FakeBlob(const TBOX &box) {
  // Make one fake outline covering box and wrap it in a fresh blob.
  C_OUTLINE_LIST fake_outlines;
  C_OUTLINE::FakeOutline(box, &fake_outlines);
  return new C_BLOB(&fake_outlines);
}
/**********************************************************************
 * C_BLOB::bounding_box
 *
 * Return the bounding box of the blob.
 **********************************************************************/
TBOX C_BLOB::bounding_box() const { // bounding box
  // Union of the bounding boxes of all top-level outlines.
  // The iteration is read-only, hence the const_cast on the list.
  C_OUTLINE_IT it(const_cast<C_OUTLINE_LIST *>(&outlines));
  TBOX result;
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    result += it.data()->bounding_box();
  }
  return result;
}
/**********************************************************************
 * C_BLOB::area
 *
 * Return the area of the blob.
 **********************************************************************/
int32_t C_BLOB::area() { // area
  // Sum the areas of all top-level outlines.
  int32_t sum = 0;
  C_OUTLINE_IT it(&outlines);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    sum += it.data()->area();
  }
  return sum;
}
/**********************************************************************
 * C_BLOB::perimeter
 *
 * Return the perimeter of the top and 2nd level outlines.
 **********************************************************************/
int32_t C_BLOB::perimeter() {
  // Sum the perimeters of all top-level outlines.
  int32_t sum = 0;
  C_OUTLINE_IT it(&outlines);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    sum += it.data()->perimeter();
  }
  return sum;
}
/**********************************************************************
 * C_BLOB::outer_area
 *
 * Return the area of the blob.
 **********************************************************************/
int32_t C_BLOB::outer_area() { // area
  // Sum the outer areas of all top-level outlines.
  int32_t sum = 0;
  C_OUTLINE_IT it(&outlines);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    sum += it.data()->outer_area();
  }
  return sum;
}
/**********************************************************************
 * C_BLOB::count_transitions
 *
 * Return the total x and y maxes and mins in the blob.
 * Child outlines are not counted.
 **********************************************************************/
int32_t C_BLOB::count_transitions( // area
    int32_t threshold // on size
) {
  // Sum transition counts over the top-level outlines only.
  int32_t sum = 0;
  C_OUTLINE_IT it(&outlines);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    sum += it.data()->count_transitions(threshold);
  }
  return sum;
}
/**********************************************************************
* C_BLOB::move
*
* Move C_BLOB by vector
**********************************************************************/
void C_BLOB::move( // reposition blob
const ICOORD vec // by vector
) {
C_OUTLINE_IT it(&outlines); // iterator
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
it.data()->move(vec); // move each outline
}
}
// Static helper for C_BLOB::rotate to allow recursion of child outlines.
// Replaces each outline on the list (in place) with a rotated copy,
// rotating children recursively and re-attaching them to the new parent.
static void RotateOutlineList(const FCOORD &rotation, C_OUTLINE_LIST *outlines) {
  C_OUTLINE_LIST new_outlines;
  C_OUTLINE_IT src_it(outlines);
  C_OUTLINE_IT dest_it(&new_outlines);
  while (!src_it.empty()) {
    C_OUTLINE *old_outline = src_it.extract();
    src_it.forward();
    // Construct a rotated copy of the old outline.
    auto *new_outline = new C_OUTLINE(old_outline, rotation);
    if (!old_outline->child()->empty()) {
      // Rotate the children in place, then move them under the new parent.
      RotateOutlineList(rotation, old_outline->child());
      C_OUTLINE_IT child_it(new_outline->child());
      child_it.add_list_after(old_outline->child());
    }
    // The old outline is fully replaced; its children were moved above.
    delete old_outline;
    dest_it.add_to_end(new_outline);
  }
  // Splice the rotated outlines back into the (now empty) source list.
  src_it.add_list_after(&new_outlines);
}
/**********************************************************************
 * C_BLOB::rotate
 *
 * Rotate C_BLOB by rotation.
 * Warning! has to rebuild all the C_OUTLINEs.
 **********************************************************************/
void C_BLOB::rotate(const FCOORD &rotation) {
  // Delegates to the recursive helper, which reconstructs every outline
  // (and its children) rather than rotating in place.
  RotateOutlineList(rotation, &outlines);
}
// Helper calls ComputeEdgeOffsets or ComputeBinaryOffsets recursively on the
// outline list and its children.
static void ComputeEdgeOffsetsOutlineList(int threshold, Image pix, C_OUTLINE_LIST *list) {
C_OUTLINE_IT it(list);
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
C_OUTLINE *outline = it.data();
if (pix != nullptr && pixGetDepth(pix) == 8) {
outline->ComputeEdgeOffsets(threshold, pix);
} else {
outline->ComputeBinaryOffsets();
}
if (!outline->child()->empty()) {
ComputeEdgeOffsetsOutlineList(threshold, pix, outline->child());
}
}
}
// Adds sub-pixel resolution EdgeOffsets for the outlines using greyscale
// if the supplied pix is 8-bit or the binary edges if nullptr.
void C_BLOB::ComputeEdgeOffsets(int threshold, Image pix) {
  // Recurses over the whole outline tree via the static helper.
  ComputeEdgeOffsetsOutlineList(threshold, pix, &outlines);
}
// Estimates and returns the baseline position based on the shape of the
// outlines.
// We first find the minimum y-coord (y_mins) at each x-coord within the blob.
// If there is a run of some y or y+1 in y_mins that is longer than the total
// number of positions at bottom or bottom+1, subject to the additional
// condition that at least one side of the y/y+1 run is higher than y+1, so it
// is not a local minimum, then y, not the bottom, makes a good candidate
// baseline position for this blob. Eg
// | ---|
// | |
// |- -----------| <= Good candidate baseline position.
// |- -|
// | -|
// |---| <= Bottom of blob
int16_t C_BLOB::EstimateBaselinePosition() {
  TBOX box = bounding_box();
  int left = box.left();
  int width = box.width();
  int bottom = box.bottom();
  // Overly convoluted blobs (high perimeter/width ratio) keep the bottom.
  if (outlines.empty() || perimeter() > width * kMaxPerimeterWidthRatio) {
    return bottom; // This is only for non-CJK blobs.
  }
  // Get the minimum y coordinate at each x-coordinate.
  std::vector<int> y_mins(width + 1, box.top());
  C_OUTLINE_IT it(&outlines);
  for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
    C_OUTLINE *outline = it.data();
    ICOORD pos = outline->start_pos();
    // Walk the outline's step chain, recording the lowest y seen per x.
    for (int s = 0; s < outline->pathlength(); ++s) {
      if (pos.y() < y_mins[pos.x() - left]) {
        y_mins[pos.x() - left] = pos.y();
      }
      pos += outline->step(s);
    }
  }
  // Find the total extent of the bottom or bottom + 1.
  int bottom_extent = 0;
  for (int x = 0; x <= width; ++x) {
    if (y_mins[x] == bottom || y_mins[x] == bottom + 1) {
      ++bottom_extent;
    }
  }
  // Find the lowest run longer than the bottom extent that is not the bottom.
  int best_min = box.top();
  int prev_run = 0;
  int prev_y = box.top();
  int prev_prev_y = box.top();
  for (int x = 0; x < width; x += prev_run) {
    // Find the length of the current run.
    int y_at_x = y_mins[x];
    int run = 1;
    while (x + run <= width && y_mins[x + run] == y_at_x) {
      ++run;
    }
    if (y_at_x > bottom + 1) {
      // Possible contender.
      int total_run = run;
      // Find extent of current value or +1 to the right of x.
      while (x + total_run <= width &&
             (y_mins[x + total_run] == y_at_x || y_mins[x + total_run] == y_at_x + 1)) {
        ++total_run;
      }
      // At least one end has to be higher so it is not a local max.
      if (prev_prev_y > y_at_x + 1 || x + total_run > width || y_mins[x + total_run] > y_at_x + 1) {
        // If the prev_run is at y + 1, then we can add that too. There cannot
        // be a suitable run at y before that or we would have found it already.
        if (prev_run > 0 && prev_y == y_at_x + 1) {
          total_run += prev_run;
        }
        // Keep the lowest qualifying run as the baseline candidate.
        if (total_run > bottom_extent && y_at_x < best_min) {
          best_min = y_at_x;
        }
      }
    }
    prev_run = run;
    prev_prev_y = prev_y;
    prev_y = y_at_x;
  }
  // No qualifying run found: fall back to the box bottom.
  return best_min == box.top() ? bottom : best_min;
}
// Recursively renders the outlines on list (and their children) into pix,
// with (left, top) as the image origin offset.
static void render_outline_list(C_OUTLINE_LIST *list, int left, int top, Image pix) {
  C_OUTLINE_IT outline_it(list);
  for (outline_it.mark_cycle_pt(); !outline_it.cycled_list(); outline_it.forward()) {
    C_OUTLINE *current = outline_it.data();
    current->render(left, top, pix);
    if (!current->child()->empty()) {
      render_outline_list(current->child(), left, top, pix);
    }
  }
}
// Renders only the outline borders on list into pix; children are not drawn.
static void render_outline_list_outline(C_OUTLINE_LIST *list, int left, int top, Image pix) {
  C_OUTLINE_IT outline_it(list);
  for (outline_it.mark_cycle_pt(); !outline_it.cycled_list(); outline_it.forward()) {
    outline_it.data()->render_outline(left, top, pix);
  }
}
// Returns a Pix rendering of the blob. pixDestroy after use.
Image C_BLOB::render() {
  // Allocate a 1-bpp image the size of the bounding box and draw the
  // whole outline tree into it.
  TBOX box = bounding_box();
  Image rendered = pixCreate(box.width(), box.height(), 1);
  render_outline_list(&outlines, box.left(), box.top(), rendered);
  return rendered;
}
// Returns a Pix rendering of the outline of the blob. (no fill).
// pixDestroy after use.
Image C_BLOB::render_outline() {
  // Allocate a 1-bpp image the size of the bounding box and draw only the
  // top-level outline borders into it.
  TBOX box = bounding_box();
  Image rendered = pixCreate(box.width(), box.height(), 1);
  render_outline_list_outline(&outlines, box.left(), box.top(), rendered);
  return rendered;
}
/**********************************************************************
 * C_BLOB::plot
 *
 * Draw the C_BLOB in the given colour.
 **********************************************************************/
#ifndef GRAPHICS_DISABLED
void C_BLOB::plot(ScrollView *window, // window to draw in
                  ScrollView::Color blob_colour, // main colour
                  ScrollView::Color child_colour) { // for holes
  // Delegates to the recursive outline plotter.
  plot_outline_list(&outlines, window, blob_colour, child_colour);
}
// Draws the blob in the given colour, and child_colour, normalized
// using the given denorm, making use of sub-pixel accurate information
// if available.
void C_BLOB::plot_normed(const DENORM &denorm, ScrollView::Color blob_colour,
                         ScrollView::Color child_colour, ScrollView *window) {
  // Delegates to the recursive normalized outline plotter.
  plot_normed_outline_list(denorm, &outlines, blob_colour, child_colour, window);
}
#endif
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/stepblob.cpp
|
C++
|
apache-2.0
| 19,119
|
/**********************************************************************
* File: stepblob.h (Formerly cblob.h)
* Description: Code for C_BLOB class.
* Author: Ray Smith
* Created: Tue Oct 08 10:41:13 BST 1991
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef STEPBLOB_H
#define STEPBLOB_H
#include "coutln.h" // for C_OUTLINE_LIST, C_OUTLINE
#include "elst.h" // for ELIST_ITERATOR, ELISTIZEH, ELIST_LINK
#include "points.h" // for FCOORD, ICOORD (ptr only)
#include "rect.h" // for TBOX
#include "scrollview.h" // for ScrollView, ScrollView::Color
#include <cstdint> // for int32_t, int16_t
struct Pix;
namespace tesseract {
class C_BLOB;
class DENORM;
ELISTIZEH(C_BLOB)
// A blob of connected components, represented as a tree of outlines:
// top-level outlines with nested children (holes).
class TESS_API C_BLOB : public ELIST_LINK {
public:
  C_BLOB() = default;
  explicit C_BLOB(C_OUTLINE_LIST *outline_list);
  // Simpler constructor to build a blob from a single outline that has
  // already been fully initialized.
  explicit C_BLOB(C_OUTLINE *outline);
  // Builds a set of one or more blobs from a list of outlines.
  // Input: one outline on outline_list contains all the others, but the
  // nesting and order are undefined.
  // If good_blob is true, the blob is added to good_blobs_it, unless
  // an illegal (generation-skipping) parent-child relationship is found.
  // If so, the parent blob goes to bad_blobs_it, and the immediate children
  // are promoted to the top level, recursively being sent to good_blobs_it.
  // If good_blob is false, all created blobs will go to the bad_blobs_it.
  // Output: outline_list is empty. One or more blobs are added to
  // good_blobs_it and/or bad_blobs_it.
  static void ConstructBlobsFromOutlines(bool good_blob, C_OUTLINE_LIST *outline_list,
                                         C_BLOB_IT *good_blobs_it, C_BLOB_IT *bad_blobs_it);
  // Sets the COUT_INVERSE flag appropriately on the outlines and their
  // children recursively, reversing the outlines if needed so that
  // everything has an anticlockwise top-level.
  void CheckInverseFlagAndDirection();
  // Build and return a fake blob containing a single fake outline with no
  // steps.
  static C_BLOB *FakeBlob(const TBOX &box);
  C_OUTLINE_LIST *out_list() { // get outline list
    return &outlines;
  }
  TBOX bounding_box() const; // compute bounding box
  int32_t area(); // compute area
  int32_t perimeter(); // Total perimeter of outlines and 1st level children.
  int32_t outer_area(); // compute area
  int32_t count_transitions( // count maxima
      int32_t threshold); // size threshold
  void move(const ICOORD vec); // reposition blob by vector
  void rotate(const FCOORD &rotation); // Rotate by given vector.
  // Adds sub-pixel resolution EdgeOffsets for the outlines using greyscale
  // if the supplied pix is 8-bit or the binary edges if nullptr.
  void ComputeEdgeOffsets(int threshold, Image pix);
  // Estimates and returns the baseline position based on the shape of the
  // outlines.
  int16_t EstimateBaselinePosition();
  // Returns a Pix rendering of the blob. pixDestroy after use.
  Image render();
  // Returns a Pix rendering of the outline of the blob. (no fill).
  // pixDestroy after use.
  Image render_outline();
#ifndef GRAPHICS_DISABLED
  void plot( // draw one
      ScrollView *window, // window to draw in
      ScrollView::Color blob_colour, // for outer bits
      ScrollView::Color child_colour); // for holes
  // Draws the blob in the given colour, and child_colour, normalized
  // using the given denorm, making use of sub-pixel accurate information
  // if available.
  void plot_normed(const DENORM &denorm, ScrollView::Color blob_colour,
                   ScrollView::Color child_colour, ScrollView *window);
#endif // !GRAPHICS_DISABLED
  // Deep-copy assignment: discards this blob's outlines and deep-copies
  // the source's outline tree.
  C_BLOB &operator=(const C_BLOB &source) {
    if (!outlines.empty()) {
      outlines.clear();
    }
    outlines.deep_copy(&source.outlines, &C_OUTLINE::deep_copy);
    return *this;
  }
  // Returns a newly allocated deep copy of src.
  static C_BLOB *deep_copy(const C_BLOB *src) {
    auto *blob = new C_BLOB;
    *blob = *src;
    return blob;
  }
  // qsort-style comparator ordering blobs by bounding-box x centre.
  static int SortByXMiddle(const void *v1, const void *v2) {
    const C_BLOB *blob1 = *static_cast<const C_BLOB *const *>(v1);
    const C_BLOB *blob2 = *static_cast<const C_BLOB *const *>(v2);
    return blob1->bounding_box().x_middle() - blob2->bounding_box().x_middle();
  }
private:
  C_OUTLINE_LIST outlines; // master elements
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/stepblob.h
|
C++
|
apache-2.0
| 5,148
|
/**********************************************************************
* File: werd.cpp (Formerly word.c)
* Description: Code for the WERD class.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "werd.h"
#include "linlsq.h"
#include "helpers.h"
namespace tesseract {
#define FIRST_COLOUR ScrollView::RED ///< first rainbow colour
#define LAST_COLOUR ScrollView::AQUAMARINE ///< last rainbow colour
#define CHILD_COLOUR ScrollView::BROWN ///< colour of children
/**
 * WERD::WERD
 *
 * Constructor to build a WERD from a list of C_BLOBs.
 * blob_list     The C_BLOBs (in word order) are not copied;
 *               we take its elements and put them in our lists.
 * blank_count   blanks in front of the word
 * text          correct text, outlives this WERD
 */
WERD::WERD(C_BLOB_LIST *blob_list, uint8_t blank_count, const char *text)
    : blanks(blank_count), flags(0), script_id_(0), correct(text ? text : "") {
  C_BLOB_IT start_it = &cblobs;
  C_BLOB_IT rej_cblob_it = &rej_cblobs;
  C_OUTLINE_IT c_outline_it;
  int16_t inverted_vote = 0;
  int16_t non_inverted_vote = 0;
  // Move blob_list's elements into cblobs.
  start_it.add_list_after(blob_list);
  /*
    Set white on black flag for the WERD, moving any duff blobs onto the
    rej_cblobs list.
    First, walk the cblobs checking the inverse flag for each outline of each
    cblob. If a cblob has inconsistent flag settings for its different
    outlines, move the blob to the reject list. Otherwise, increment the
    appropriate w-on-b or b-on-w vote for the word.
    Now set the inversion flag for the WERD by maximum vote.
    Walk the blobs again, moving any blob whose inversion flag does not agree
    with the consensus onto the reject list.
  */
  start_it.set_to_list(&cblobs);
  if (start_it.empty()) {
    return;
  }
  // Pass 1: reject blobs with mixed-inversion outlines; the rest vote.
  for (start_it.mark_cycle_pt(); !start_it.cycled_list(); start_it.forward()) {
    bool reject_blob = false;
    bool blob_inverted;
    c_outline_it.set_to_list(start_it.data()->out_list());
    // The blob's inversion is taken from its first outline; any outline
    // that disagrees marks the whole blob for rejection.
    blob_inverted = c_outline_it.data()->flag(COUT_INVERSE);
    for (c_outline_it.mark_cycle_pt(); !c_outline_it.cycled_list() && !reject_blob;
         c_outline_it.forward()) {
      reject_blob = c_outline_it.data()->flag(COUT_INVERSE) != blob_inverted;
    }
    if (reject_blob) {
      rej_cblob_it.add_after_then_move(start_it.extract());
    } else {
      if (blob_inverted) {
        inverted_vote++;
      } else {
        non_inverted_vote++;
      }
    }
  }
  // Majority vote decides the word-level inversion flag.
  flags.set(W_INVERSE, (inverted_vote > non_inverted_vote));
  start_it.set_to_list(&cblobs);
  if (start_it.empty()) {
    return;
  }
  // Pass 2: reject any surviving blob that disagrees with the word flag.
  for (start_it.mark_cycle_pt(); !start_it.cycled_list(); start_it.forward()) {
    c_outline_it.set_to_list(start_it.data()->out_list());
    if (c_outline_it.data()->flag(COUT_INVERSE) != flags[W_INVERSE]) {
      rej_cblob_it.add_after_then_move(start_it.extract());
    }
  }
}
/**
 * WERD::WERD
 *
 * Build a WERD from a list of C_BLOBs, cloning flags, script id, blank
 * count and correct text from another WERD. The source list is emptied.
 */
WERD::WERD(C_BLOB_LIST *blob_list, ///< In word order
           WERD *clone)            ///< Source of flags
    : blanks(clone->blanks),
      flags(clone->flags),
      script_id_(clone->script_id_),
      correct(clone->correct) {
  // Find the first and last elements so the entire list can be spliced
  // into cblobs in a single operation.
  C_BLOB_IT first_it(blob_list);
  C_BLOB_IT last_it(blob_list);
  while (!last_it.at_last()) {
    last_it.forward();
  }
  cblobs.assign_to_sublist(&first_it, &last_it);
}
// Construct a WERD holding only the given blob, cloning the flags from
// this word. W_BOL and W_EOL are then set from the given values.
WERD *WERD::ConstructFromSingleBlob(bool bol, bool eol, C_BLOB *blob) {
  C_BLOB_LIST single_blob_list;
  C_BLOB_IT single_it(&single_blob_list);
  single_it.add_after_then_move(blob);
  WERD *blob_word = new WERD(&single_blob_list, this);
  blob_word->set_flag(W_BOL, bol);
  blob_word->set_flag(W_EOL, eol);
  return blob_word;
}
/**
 * WERD::bounding_box
 *
 * Return the bounding box of the WERD.
 * This is quite a mess to compute!
 * ORIGINALLY, REJECT CBLOBS WERE EXCLUDED, however, this led to bugs when the
 * words on the row were re-sorted. The original words were built with reject
 * blobs included. The FUZZY SPACE flags were set accordingly. If ALL the
 * blobs in a word are rejected the BB for the word is nullptr, causing the sort
 * to screw up, leading to the erroneous possibility of the first word in a
 * row being marked as FUZZY space.
 */
TBOX WERD::bounding_box() const {
  // Both upper and lower dots/diacritics are included in the default box.
  constexpr bool kIncludeUpperDots = true;
  constexpr bool kIncludeLowerDots = true;
  return restricted_bounding_box(kIncludeUpperDots, kIncludeLowerDots);
}
// Returns the bounding box extended by the requested combination of upper
// and lower noise/diacritic elements from the reject list.
TBOX WERD::restricted_bounding_box(bool upper_dots, bool lower_dots) const {
  TBOX box = true_bounding_box();
  const int bottom = box.bottom();
  const int top = box.top();
  // Read-only scan of the rejected (noise) blobs.
  C_BLOB_IT rej_it(const_cast<C_BLOB_LIST *>(&rej_cblobs));
  for (rej_it.mark_cycle_pt(); !rej_it.cycled_list(); rej_it.forward()) {
    const TBOX dot_box = rej_it.data()->bounding_box();
    // A dot above the body counts only when upper_dots is set; a dot below
    // only when lower_dots is set.
    const bool upper_ok = upper_dots || dot_box.bottom() <= top;
    const bool lower_ok = lower_dots || dot_box.top() >= bottom;
    if (upper_ok && lower_ok) {
      box += dot_box;
    }
  }
  return box;
}
// Returns the union of the bounding boxes of the good blobs only.
TBOX WERD::true_bounding_box() const {
  TBOX total; // accumulated box
  // Read-only scan of the good blobs.
  C_BLOB_IT blob_it(const_cast<C_BLOB_LIST *>(&cblobs));
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    total += blob_it.data()->bounding_box();
  }
  return total;
}
/**
* WERD::move
*
* Reposition WERD by vector
* NOTE!! REJECT CBLOBS ARE NOT MOVED
*/
void WERD::move(const ICOORD vec) {
C_BLOB_IT cblob_it(&cblobs); // cblob iterator
for (cblob_it.mark_cycle_pt(); !cblob_it.cycled_list(); cblob_it.forward()) {
cblob_it.data()->move(vec);
}
}
/**
 * WERD::join_on
 *
 * Append the other word's good and reject blobs to this word's lists,
 * leaving other empty. The caller still owns (and should delete) other.
 */
void WERD::join_on(WERD *other) {
  C_BLOB_IT dest_it(&cblobs);
  C_BLOB_IT donor_it(&other->cblobs);
  C_BLOB_IT dest_rej_it(&rej_cblobs);
  C_BLOB_IT donor_rej_it(&other->rej_cblobs);
  // Drain the donor's good blobs onto the end of ours.
  while (!donor_it.empty()) {
    dest_it.add_to_end(donor_it.extract());
    donor_it.forward();
  }
  // Likewise for the reject blobs.
  while (!donor_rej_it.empty()) {
    dest_rej_it.add_to_end(donor_rej_it.extract());
    donor_rej_it.forward();
  }
}
/**
 * WERD::copy_on
 *
 * Deep-copy the other word's blobs onto this word, leaving other intact.
 * Copies are prepended when other lies to the left, appended otherwise.
 */
void WERD::copy_on(WERD *other) {
  // If other starts to the left of us, its blobs belong in front.
  const bool prepend = other->bounding_box().left() < bounding_box().left();
  C_BLOB_LIST good_copies;
  good_copies.deep_copy(&other->cblobs, &C_BLOB::deep_copy);
  C_BLOB_IT good_it(&cblobs);
  if (prepend) {
    good_it.add_list_before(&good_copies);
  } else {
    good_it.move_to_last();
    good_it.add_list_after(&good_copies);
  }
  if (!other->rej_cblobs.empty()) {
    // Mirror the same placement for the reject blobs.
    C_BLOB_LIST rej_copies;
    rej_copies.deep_copy(&other->rej_cblobs, &C_BLOB::deep_copy);
    C_BLOB_IT rej_it(&rej_cblobs);
    if (prepend) {
      rej_it.add_list_before(&rej_copies);
    } else {
      rej_it.move_to_last();
      rej_it.add_list_after(&rej_copies);
    }
  }
}
/**
 * WERD::print
 *
 * Dump the word's metadata (blanks, box, flags, text, script) via tprintf.
 * Blob contents are not printed.
 */
void WERD::print() const {
  tprintf("Blanks= %d\n", blanks);
  bounding_box().print();
  tprintf("Flags = %lu = 0%lo\n", flags.to_ulong(), flags.to_ulong());
  // NOTE(review): W_BOLD is not reported below — confirm whether the
  // omission is intentional before relying on this dump.
  tprintf(" W_SEGMENTED = %s\n", flags[W_SEGMENTED] ? "TRUE" : "FALSE");
  tprintf(" W_ITALIC = %s\n", flags[W_ITALIC] ? "TRUE" : "FALSE");
  tprintf(" W_BOL = %s\n", flags[W_BOL] ? "TRUE" : "FALSE");
  tprintf(" W_EOL = %s\n", flags[W_EOL] ? "TRUE" : "FALSE");
  tprintf(" W_NORMALIZED = %s\n", flags[W_NORMALIZED] ? "TRUE" : "FALSE");
  tprintf(" W_SCRIPT_HAS_XHEIGHT = %s\n", flags[W_SCRIPT_HAS_XHEIGHT] ? "TRUE" : "FALSE");
  tprintf(" W_SCRIPT_IS_LATIN = %s\n", flags[W_SCRIPT_IS_LATIN] ? "TRUE" : "FALSE");
  tprintf(" W_DONT_CHOP = %s\n", flags[W_DONT_CHOP] ? "TRUE" : "FALSE");
  tprintf(" W_REP_CHAR = %s\n", flags[W_REP_CHAR] ? "TRUE" : "FALSE");
  tprintf(" W_FUZZY_SP = %s\n", flags[W_FUZZY_SP] ? "TRUE" : "FALSE");
  tprintf(" W_FUZZY_NON = %s\n", flags[W_FUZZY_NON] ? "TRUE" : "FALSE");
  tprintf("Correct= %s\n", correct.c_str());
  tprintf("Rejected cblob count = %d\n", rej_cblobs.length());
  tprintf("Script = %d\n", script_id_);
}
/**
 * WERD::plot
 *
 * Draw the word's good blobs in one uniform colour, then its rejected
 * blobs (always grey) on top.
 */
#ifndef GRAPHICS_DISABLED
void WERD::plot(ScrollView *window, ScrollView::Color colour) {
  C_BLOB_IT blob_it(&cblobs);
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    blob_it.data()->plot(window, colour, colour);
  }
  plot_rej_blobs(window);
}
// Get the next color in the (looping) rainbow.
ScrollView::Color WERD::NextColor(ScrollView::Color colour) {
  auto candidate = static_cast<ScrollView::Color>(colour + 1);
  // Wrap back to the first rainbow colour when running off either end.
  return (candidate >= LAST_COLOUR || candidate < FIRST_COLOUR) ? FIRST_COLOUR : candidate;
}
/**
 * WERD::plot
 *
 * Draw the WERD in rainbow colours in window, one colour per blob,
 * holes in CHILD_COLOUR, followed by the rejected blobs in grey.
 */
void WERD::plot(ScrollView *window) {
  ScrollView::Color current = FIRST_COLOUR;
  C_BLOB_IT blob_it(&cblobs);
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    blob_it.data()->plot(window, current, CHILD_COLOUR);
    current = NextColor(current);
  }
  plot_rej_blobs(window);
}
/**
 * WERD::plot_rej_blobs
 *
 * Draw the WERD rejected blobs in window - ALWAYS GREY
 */
void WERD::plot_rej_blobs(ScrollView *window) {
  C_BLOB_IT rej_it(&rej_cblobs);
  for (rej_it.mark_cycle_pt(); !rej_it.cycled_list(); rej_it.forward()) {
    rej_it.data()->plot(window, ScrollView::GREY, ScrollView::GREY);
  }
}
#endif // !GRAPHICS_DISABLED
/**
 * WERD::shallow_copy()
 *
 * Make a shallow copy of a word: scalar metadata only, no blob lists.
 */
WERD *WERD::shallow_copy() {
  WERD *new_word = new WERD;
  new_word->blanks = blanks;
  new_word->flags = flags;
  // Copy the script id too, for consistency with operator=; previously it
  // was left at its default (0), silently dropping script information.
  new_word->script_id_ = script_id_;
  new_word->correct = correct;
  return new_word;
}
/**
 * WERD::operator=
 *
 * Assign a word, DEEP copying the blob lists.
 * Guards against self-assignment: without the guard, cblobs.clear() would
 * destroy the blobs before deep_copy could read them, losing all data.
 */
WERD &WERD::operator=(const WERD &source) {
  if (this == &source) {
    return *this;
  }
  this->ELIST2_LINK::operator=(source);
  blanks = source.blanks;
  flags = source.flags;
  script_id_ = source.script_id_;
  correct = source.correct;
  cblobs.clear();
  cblobs.deep_copy(&source.cblobs, &C_BLOB::deep_copy);
  rej_cblobs.clear();
  rej_cblobs.deep_copy(&source.rej_cblobs, &C_BLOB::deep_copy);
  return *this;
}
/**
 * word_comparator()
 *
 * qsort(3)-style comparator sorting an array of WERD* into increasing
 * order of bounding-box left edge.
 */
int word_comparator(const void *word1p, const void *word2p) {
  // static_cast is sufficient (and preferable to reinterpret_cast) for
  // converting const void* back to the pointer type qsort was given.
  const WERD *word1 = *static_cast<const WERD *const *>(word1p);
  const WERD *word2 = *static_cast<const WERD *const *>(word2p);
  return word1->bounding_box().left() - word2->bounding_box().left();
}
/**
 * WERD::ConstructWerdWithNewBlobs()
 *
 * This method returns a new werd constructed using the blobs in the input
 * all_blobs list, which correspond to the blobs in this werd object. The
 * blobs used to construct the new word are consumed and removed from the
 * input all_blobs list.
 * Returns nullptr if the word couldn't be constructed.
 * Returns original blobs for which no matches were found in the output list
 * orphan_blobs (appends).
 *
 * Fix: restored '&' address-of operators that had been destroyed by
 * HTML-entity mangling ("¤t_blob_list" -> "&current_blob_list",
 * "¬_found_blobs" -> "&not_found_blobs"); the mangled code did not compile.
 */
WERD *WERD::ConstructWerdWithNewBlobs(C_BLOB_LIST *all_blobs, C_BLOB_LIST *orphan_blobs) {
  C_BLOB_LIST current_blob_list;
  C_BLOB_IT werd_blobs_it(&current_blob_list);
  // Add the word's c_blobs.
  werd_blobs_it.add_list_after(cblob_list());

  // New blob list. These contain the blobs which will form the new word.
  C_BLOB_LIST new_werd_blobs;
  C_BLOB_IT new_blobs_it(&new_werd_blobs);

  // not_found_blobs contains the list of current word's blobs for which a
  // corresponding blob wasn't found in the input all_blobs list.
  C_BLOB_LIST not_found_blobs;
  C_BLOB_IT not_found_it(&not_found_blobs);
  not_found_it.move_to_last();

  werd_blobs_it.move_to_first();
  for (werd_blobs_it.mark_cycle_pt(); !werd_blobs_it.cycled_list(); werd_blobs_it.forward()) {
    C_BLOB *werd_blob = werd_blobs_it.extract();
    TBOX werd_blob_box = werd_blob->bounding_box();
    bool found = false;
    // Now find the corresponding blob for this blob in the all_blobs
    // list. For now, follow the inefficient method of pairwise
    // comparisons. Ideally, one can pre-bucket the blobs by row.
    C_BLOB_IT all_blobs_it(all_blobs);
    for (all_blobs_it.mark_cycle_pt(); !all_blobs_it.cycled_list(); all_blobs_it.forward()) {
      C_BLOB *a_blob = all_blobs_it.data();
      // Compute the overlap of the two blobs. If major, a_blob should
      // be added to the new blobs list.
      TBOX a_blob_box = a_blob->bounding_box();
      if (a_blob_box.null_box()) {
        tprintf("Bounding box couldn't be ascertained\n");
      }
      if (werd_blob_box.contains(a_blob_box) || werd_blob_box.major_overlap(a_blob_box)) {
        // Old blobs are from minimal splits, therefore are expected to be
        // bigger. The new small blobs should cover a significant portion.
        // This is it.
        all_blobs_it.extract();
        new_blobs_it.add_after_then_move(a_blob);
        found = true;
      }
    }
    if (!found) {
      not_found_it.add_after_then_move(werd_blob);
    } else {
      delete werd_blob;
    }
  }
  // Iterate over all not found blobs. Some of them may be due to
  // under-segmentation (which is OK, since the corresponding blob is already
  // in the list in that case).
  not_found_it.move_to_first();
  for (not_found_it.mark_cycle_pt(); !not_found_it.cycled_list(); not_found_it.forward()) {
    C_BLOB *not_found = not_found_it.data();
    TBOX not_found_box = not_found->bounding_box();
    C_BLOB_IT existing_blobs_it(new_blobs_it);
    for (existing_blobs_it.mark_cycle_pt(); !existing_blobs_it.cycled_list();
         existing_blobs_it.forward()) {
      C_BLOB *a_blob = existing_blobs_it.data();
      TBOX a_blob_box = a_blob->bounding_box();
      if ((not_found_box.major_overlap(a_blob_box) || a_blob_box.major_overlap(not_found_box)) &&
          not_found_box.y_overlap_fraction(a_blob_box) > 0.8) {
        // Already taken care of.
        delete not_found_it.extract();
        break;
      }
    }
  }
  if (orphan_blobs) {
    C_BLOB_IT orphan_blobs_it(orphan_blobs);
    orphan_blobs_it.move_to_last();
    orphan_blobs_it.add_list_after(&not_found_blobs);
  }

  // New blobs are ready. Create a new werd object with these.
  WERD *new_werd = nullptr;
  if (!new_werd_blobs.empty()) {
    new_werd = new WERD(&new_werd_blobs, this);
  } else {
    // Add the blobs back to this word so that it can be reused.
    C_BLOB_IT this_list_it(cblob_list());
    this_list_it.add_list_after(&not_found_blobs);
  }
  return new_werd;
}
// Removes noise from the word by moving small outlines to the rej_cblobs
// list, based on the size_threshold.
void WERD::CleanNoise(float size_threshold) {
  C_BLOB_IT blob_it(&cblobs);
  C_BLOB_IT rej_it(&rej_cblobs);
  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    C_BLOB *blob = blob_it.data();
    C_OUTLINE_IT ol_it(blob->out_list());
    for (ol_it.mark_cycle_pt(); !ol_it.cycled_list(); ol_it.forward()) {
      C_OUTLINE *outline = ol_it.data();
      TBOX ol_box = outline->bounding_box();
      // An outline's "size" is the larger of its box's width and height.
      int ol_size = ol_box.width() > ol_box.height() ? ol_box.width() : ol_box.height();
      if (ol_size < size_threshold) {
        // This outline is too small. Move it to a separate blob in the
        // reject blobs list.
        auto *rej_blob = new C_BLOB(ol_it.extract());
        rej_it.add_after_then_move(rej_blob);
      }
    }
    // Delete the blob entirely if all of its outlines were rejected above.
    if (blob->out_list()->empty()) {
      delete blob_it.extract();
    }
  }
}
// Extracts all the noise outlines and stuffs the pointers into the given
// vector of outlines. Afterwards, the outlines vector owns the pointers.
void WERD::GetNoiseOutlines(std::vector<C_OUTLINE *> *outlines) {
  C_BLOB_IT rej_it(&rej_cblobs);
  // Drain the reject list, deleting each blob after taking its outline.
  for (rej_it.mark_cycle_pt(); !rej_it.empty(); rej_it.forward()) {
    C_BLOB *blob = rej_it.extract();
    C_OUTLINE_IT ol_it(blob->out_list());
    // Only the first outline is taken — assumes each reject blob holds
    // exactly one outline (as built by CleanNoise). TODO confirm.
    outlines->push_back(ol_it.extract());
    delete blob;
  }
}
// Adds the selected outlines to the indicated real blobs, and puts the rest
// back in rej_cblobs where they came from. Where the target_blobs entry is
// nullptr, a run of wanted outlines is put into a single new blob.
// Ownership of the outlines is transferred back to the word. (Hence
// vector and not PointerVector.)
// Returns true if any new blob was added to the start of the word, which
// suggests that it might need joining to the word before it, and likewise
// sets make_next_word_fuzzy true if any new blob was added to the end.
bool WERD::AddSelectedOutlines(const std::vector<bool> &wanted,
                               const std::vector<C_BLOB *> &target_blobs,
                               const std::vector<C_OUTLINE *> &outlines,
                               bool *make_next_word_fuzzy) {
  bool outline_added_to_start = false;
  if (make_next_word_fuzzy != nullptr) {
    *make_next_word_fuzzy = false;
  }
  C_BLOB_IT rej_it(&rej_cblobs);
  for (unsigned i = 0; i < outlines.size(); ++i) {
    C_OUTLINE *outline = outlines[i];
    if (outline == nullptr) {
      continue; // Already used it.
    }
    if (wanted[i]) {
      C_BLOB *target_blob = target_blobs[i];
      TBOX noise_box = outline->bounding_box();
      if (target_blob == nullptr) {
        target_blob = new C_BLOB(outline);
        // Need to find the insertion point: before the first blob whose
        // left edge lies beyond the outline's left edge.
        C_BLOB_IT blob_it(&cblobs);
        for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
          C_BLOB *blob = blob_it.data();
          TBOX blob_box = blob->bounding_box();
          if (blob_box.left() > noise_box.left()) {
            if (blob_it.at_first() && !flag(W_FUZZY_SP) && !flag(W_FUZZY_NON)) {
              // We might want to join this word to its predecessor.
              outline_added_to_start = true;
            }
            blob_it.add_before_stay_put(target_blob);
            break;
          }
        }
        // Ran off the end of the list: the new blob goes after the last
        // blob, and the following word becomes a fuzzy-join candidate.
        if (blob_it.cycled_list()) {
          blob_it.add_to_end(target_blob);
          if (make_next_word_fuzzy != nullptr) {
            *make_next_word_fuzzy = true;
          }
        }
        // Add all consecutive wanted, but null-blob outlines to same blob.
        C_OUTLINE_IT ol_it(target_blob->out_list());
        while (i + 1 < outlines.size() && wanted[i + 1] && target_blobs[i + 1] == nullptr) {
          ++i;
          ol_it.add_to_end(outlines[i]);
        }
      } else {
        // Insert outline into this blob.
        C_OUTLINE_IT ol_it(target_blob->out_list());
        ol_it.add_to_end(outline);
      }
    } else {
      // Put back on noise list.
      rej_it.add_to_end(new C_BLOB(outline));
    }
  }
  return outline_added_to_start;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccstruct/werd.cpp
|
C++
|
apache-2.0
| 19,638
|
/**********************************************************************
* File: werd.h
* Description: Code for the WERD class.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef WERD_H
#define WERD_H
#include "elst2.h"
#include "params.h"
#include "stepblob.h"
#include <bitset>
namespace tesseract {
// Per-word status/attribute flags: bit indices into WERD::flags.
enum WERD_FLAGS {
  W_SEGMENTED,          ///< correctly segmented
  W_ITALIC,             ///< italic text
  W_BOLD,               ///< bold text
  W_BOL,                ///< start of line
  W_EOL,                ///< end of line
  W_NORMALIZED,         ///< flags — presumably "word has been normalized"; confirm
  W_SCRIPT_HAS_XHEIGHT, ///< x-height concept makes sense.
  W_SCRIPT_IS_LATIN,    ///< Special case latin for y. splitting.
  W_DONT_CHOP,          ///< fixed pitch chopped
  W_REP_CHAR,           ///< repeated character
  W_FUZZY_SP,           ///< fuzzy space
  W_FUZZY_NON,          ///< fuzzy nonspace
  W_INVERSE             ///< white on black
};
// Bit numbers used in WERD::disp_flags to select what gets displayed.
enum DISPLAY_FLAGS {
  /* Display flags bit number allocations */
  DF_BOX,          ///< Bounding box
  DF_TEXT,         ///< Correct ascii
  DF_POLYGONAL,    ///< Polygonal approximation
  DF_EDGE_STEP,    ///< Edge steps
  DF_BN_POLYGONAL, ///< Baseline-normalised polygonal approximation
  DF_BLAMER        ///< Blamer information
};
class ROW; // forward decl
// A word: an ordered list of good C_BLOBs plus a parallel list of rejected
// (noise/diacritic) blobs, with layout flags, a script id, blank count and
// the correct text.
class TESS_API WERD : public ELIST2_LINK {
public:
  WERD() = default;
  // WERD constructed with:
  //   blob_list - blobs of the word (we take this list's contents)
  //   blanks - number of blanks before the word
  //   text - correct text (outlives WERD)
  WERD(C_BLOB_LIST *blob_list, uint8_t blanks, const char *text);
  // WERD constructed from:
  //   blob_list - blobs in the word
  //   clone - werd to clone flags, etc from.
  WERD(C_BLOB_LIST *blob_list, WERD *clone);
  // Construct a WERD from a single_blob and clone the flags from this.
  // W_BOL and W_EOL flags are set according to the given values.
  WERD *ConstructFromSingleBlob(bool bol, bool eol, C_BLOB *blob);
  ~WERD() = default;

  // assignment — deep-copies both blob lists
  WERD &operator=(const WERD &source);

  // This method returns a new werd constructed using the blobs in the input
  // all_blobs list, which correspond to the blobs in this werd object. The
  // blobs used to construct the new word are consumed and removed from the
  // input all_blobs list.
  // Returns nullptr if the word couldn't be constructed.
  // Returns original blobs for which no matches were found in the output list
  // orphan_blobs (appends).
  WERD *ConstructWerdWithNewBlobs(C_BLOB_LIST *all_blobs, C_BLOB_LIST *orphan_blobs);

  // Accessors for reject / DUFF blobs in various formats
  C_BLOB_LIST *rej_cblob_list() { // compact format
    return &rej_cblobs;
  }
  // Accessors for good blobs in various formats.
  C_BLOB_LIST *cblob_list() { // get compact blobs
    return &cblobs;
  }

  // Number of blanks preceding the word.
  uint8_t space() const { // access function
    return blanks;
  }
  void set_blanks(uint8_t new_blanks) {
    blanks = new_blanks;
  }
  // Script id — From unicharset.
  int script_id() const {
    return script_id_;
  }
  void set_script_id(int id) {
    script_id_ = id;
  }

  // Returns the (default) bounding box including all the dots.
  TBOX bounding_box() const; // compute bounding box
  // Returns the bounding box including the desired combination of upper and
  // lower noise/diacritic elements.
  TBOX restricted_bounding_box(bool upper_dots, bool lower_dots) const;
  // Returns the bounding box of only the good blobs.
  TBOX true_bounding_box() const;

  // Correct text for the word (owned here as a std::string).
  const char *text() const {
    return correct.c_str();
  }
  void set_text(const char *new_text) {
    correct = new_text;
  }

  // Read/write a single WERD_FLAGS bit.
  bool flag(WERD_FLAGS mask) const {
    return flags[mask];
  }
  void set_flag(WERD_FLAGS mask, bool value) {
    flags.set(mask, value);
  }

  // Read/write a single DISPLAY_FLAGS bit.
  bool display_flag(uint8_t flag) const {
    return disp_flags[flag];
  }
  void set_display_flag(uint8_t flag, bool value) {
    disp_flags.set(flag, value);
  }

  WERD *shallow_copy(); // shallow copy word — scalar metadata only, no blobs

  // reposition word by vector
  void move(const ICOORD vec);
  // join other's blobs onto this werd, emptying out other.
  void join_on(WERD *other);
  // copy other's blobs onto this word, leaving other intact.
  void copy_on(WERD *other);
  // tprintf word metadata (but not blob innards)
  void print() const;

#ifndef GRAPHICS_DISABLED
  // plot word on window in a uniform colour
  void plot(ScrollView *window, ScrollView::Color colour);
  // Get the next color in the (looping) rainbow.
  static ScrollView::Color NextColor(ScrollView::Color colour);
  // plot word on window in a rainbow of colours
  void plot(ScrollView *window);
  // plot rejected blobs — always grey (see werd.cpp)
  void plot_rej_blobs(ScrollView *window);
#endif // !GRAPHICS_DISABLED

  // Removes noise from the word by moving small outlines to the rej_cblobs
  // list, based on the size_threshold.
  void CleanNoise(float size_threshold);

  // Extracts all the noise outlines and stuffs the pointers into the given
  // vector of outlines. Afterwards, the outlines vector owns the pointers.
  void GetNoiseOutlines(std::vector<C_OUTLINE *> *outlines);
  // Adds the selected outlines to the indicated real blobs, and puts the rest
  // back in rej_cblobs where they came from. Where the target_blobs entry is
  // nullptr, a run of wanted outlines is put into a single new blob.
  // Ownership of the outlines is transferred back to the word. (Hence
  // vector and not PointerVector.)
  // Returns true if any new blob was added to the start of the word, which
  // suggests that it might need joining to the word before it, and likewise
  // sets make_next_word_fuzzy true if any new blob was added to the end.
  bool AddSelectedOutlines(const std::vector<bool> &wanted,
                           const std::vector<C_BLOB *> &target_blobs,
                           const std::vector<C_OUTLINE *> &outlines, bool *make_next_word_fuzzy);

private:
  uint8_t blanks = 0;         // no of blanks
  std::bitset<16> flags;      // flags about word
  std::bitset<16> disp_flags; // display flags
  int16_t script_id_ = 0;     // From unicharset.
  std::string correct;        // correct text
  C_BLOB_LIST cblobs;         // compacted blobs
  C_BLOB_LIST rej_cblobs;     // DUFF blobs
};
ELIST2IZEH(WERD)
} // namespace tesseract
#include "ocrrow.h" // placed here due to
namespace tesseract {
// compare words by increasing order of left edge, suitable for qsort(3)
int word_comparator(const void *word1p, const void *word2p);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccstruct/werd.h
|
C++
|
apache-2.0
| 7,123
|
///////////////////////////////////////////////////////////////////////
// File: ambigs.cpp
// Description: Functions for dealing with ambiguities
// (training and recognition).
// Author: Daria Antonova
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "ambigs.h"
#include "helpers.h"
#include "universalambigs.h"
#include <cstdio>
#if defined(_WIN32) && !defined(__GNUC__)
# define strtok_r(str, delim, saveptr) strtok_s(str, delim, saveptr)
#endif /* _WIN32 && !__GNUC__ */
namespace tesseract {
// Token delimiters accepted within an ambigs-file line.
static const char kAmbigDelimiters[] = "\t ";
// Error message formats for malformed ambigs input.
static const char kIllegalMsg[] = "Illegal ambiguity specification on line %d\n";
static const char kIllegalUnicharMsg[] = "Illegal unichar %s in ambiguity specification\n";
// Maximum line size:
// 10 for sizes of ambigs, tabs, ambig type and newline
// UNICHAR_LEN * (MAX_AMBIG_SIZE + 1) for each part of the ambig
const int kMaxAmbigStringSize = UNICHAR_LEN * (MAX_AMBIG_SIZE + 1);
// Default-construct an inert spec: NOT_AMBIG with empty,
// INVALID_UNICHAR_ID-terminated ngram arrays.
AmbigSpec::AmbigSpec() {
  type = NOT_AMBIG;
  wrong_ngram_size = 0;
  correct_ngram_id = INVALID_UNICHAR_ID;
  wrong_ngram[0] = INVALID_UNICHAR_ID;
  correct_fragments[0] = INVALID_UNICHAR_ID;
}
// Initializes the ambig tables with one nullptr entry per unichar, plus
// the adaption tables when use_ambigs_for_adaption is set.
void UnicharAmbigs::InitUnicharAmbigs(const UNICHARSET &unicharset, bool use_ambigs_for_adaption) {
  const unsigned num_unichars = unicharset.size();
  for (unsigned uid = 0; uid < num_unichars; ++uid) {
    replace_ambigs_.push_back(nullptr);
    dang_ambigs_.push_back(nullptr);
    one_to_one_definite_ambigs_.push_back(nullptr);
    if (use_ambigs_for_adaption) {
      ambigs_for_adaption_.push_back(nullptr);
      reverse_ambigs_for_adaption_.push_back(nullptr);
    }
  }
}
// Loads the built-in universal ambigs that are useful for any language.
// Silently does nothing if the embedded data cannot be opened.
void UnicharAmbigs::LoadUniversal(const UNICHARSET &encoder_set, UNICHARSET *unicharset) {
  TFile file;
  if (file.Open(kUniversalAmbigsFile, ksizeofUniversalAmbigsFile)) {
    LoadUnicharAmbigs(encoder_set, &file, 0, false, unicharset);
  }
}
// Parses the ambiguity file held by ambig_file line by line and fills the
// replace/dang ambig tables, one_to_one_definite_ambigs_, and (when
// use_ambigs_for_adaption is set) the forward and reverse adaption tables.
// debug_level > 0 traces progress; > 1 dumps the resulting tables.
void UnicharAmbigs::LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambig_file,
                                      int debug_level, bool use_ambigs_for_adaption,
                                      UNICHARSET *unicharset) {
  UnicharIdVector *adaption_ambigs_entry;
  if (debug_level) {
    tprintf("Reading ambiguities\n");
  }
  int test_ambig_part_size;
  int replacement_ambig_part_size;
  // The space for buffer is allocated on the heap to avoid
  // GCC frame size warning.
  const int kBufferSize = 10 + 2 * kMaxAmbigStringSize;
  char *buffer = new char[kBufferSize];
  char replacement_string[kMaxAmbigStringSize];
  UNICHAR_ID test_unichar_ids[MAX_AMBIG_SIZE + 1];
  int line_num = 0;
  int type = NOT_AMBIG;

  // Determine the version of the ambigs file.
  int version = 0;
  ASSERT_HOST(ambig_file->FGets(buffer, kBufferSize) != nullptr && buffer[0] != '\0');
  if (*buffer == 'v') {
    version = static_cast<int>(strtol(buffer + 1, nullptr, 10));
    ++line_num;
  } else {
    // No version header: rewind so the first line is parsed as an ambig.
    ambig_file->Rewind();
  }
  while (ambig_file->FGets(buffer, kBufferSize) != nullptr) {
    chomp_string(buffer);
    if (debug_level > 2) {
      tprintf("read line %s\n", buffer);
    }
    ++line_num;
    if (!ParseAmbiguityLine(line_num, version, debug_level, encoder_set, buffer,
                            &test_ambig_part_size, test_unichar_ids, &replacement_ambig_part_size,
                            replacement_string, &type)) {
      continue; // malformed line: skip it
    }
    // Construct AmbigSpec and add it to the appropriate AmbigSpec_LIST.
    // NOTE(review): assumes InsertIntoTable disposes of ambig_spec when it
    // returns false — confirm, otherwise this leaks on the continue path.
    auto *ambig_spec = new AmbigSpec();
    if (!InsertIntoTable((type == REPLACE_AMBIG) ? replace_ambigs_ : dang_ambigs_,
                         test_ambig_part_size, test_unichar_ids, replacement_ambig_part_size,
                         replacement_string, type, ambig_spec, unicharset)) {
      continue;
    }
    // Update one_to_one_definite_ambigs_.
    if (test_ambig_part_size == 1 && replacement_ambig_part_size == 1 && type == DEFINITE_AMBIG) {
      if (one_to_one_definite_ambigs_[test_unichar_ids[0]] == nullptr) {
        one_to_one_definite_ambigs_[test_unichar_ids[0]] = new UnicharIdVector();
      }
      one_to_one_definite_ambigs_[test_unichar_ids[0]]->push_back(ambig_spec->correct_ngram_id);
    }
    // Update ambigs_for_adaption_.
    if (use_ambigs_for_adaption) {
      std::vector<UNICHAR_ID> encoding;
      // Silently ignore invalid strings, as before, so it is safe to use a
      // universal ambigs file.
      if (unicharset->encode_string(replacement_string, true, &encoding, nullptr, nullptr)) {
        for (int i = 0; i < test_ambig_part_size; ++i) {
          if (ambigs_for_adaption_[test_unichar_ids[i]] == nullptr) {
            ambigs_for_adaption_[test_unichar_ids[i]] = new UnicharIdVector();
          }
          adaption_ambigs_entry = ambigs_for_adaption_[test_unichar_ids[i]];
          for (int id_to_insert : encoding) {
            ASSERT_HOST(id_to_insert != INVALID_UNICHAR_ID);
            // Add the new unichar id to adaption_ambigs_entry (only if the
            // vector does not already contain it) keeping it in sorted order.
            // NOTE(review): the '>' comparison keeps the vector in
            // DESCENDING order — confirm that is the intended order.
            size_t j;
            for (j = 0;
                 j < adaption_ambigs_entry->size() && (*adaption_ambigs_entry)[j] > id_to_insert;
                 ++j) {
            }
            if (j < adaption_ambigs_entry->size()) {
              if ((*adaption_ambigs_entry)[j] != id_to_insert) {
                adaption_ambigs_entry->insert(adaption_ambigs_entry->begin() + j, id_to_insert);
              }
            } else {
              adaption_ambigs_entry->push_back(id_to_insert);
            }
          }
        }
      }
    }
  }
  delete[] buffer;

  // Fill in reverse_ambigs_for_adaption from ambigs_for_adaption vector.
  if (use_ambigs_for_adaption) {
    for (size_t i = 0; i < ambigs_for_adaption_.size(); ++i) {
      adaption_ambigs_entry = ambigs_for_adaption_[i];
      if (adaption_ambigs_entry == nullptr) {
        continue;
      }
      for (size_t j = 0; j < adaption_ambigs_entry->size(); ++j) {
        UNICHAR_ID ambig_id = (*adaption_ambigs_entry)[j];
        if (reverse_ambigs_for_adaption_[ambig_id] == nullptr) {
          reverse_ambigs_for_adaption_[ambig_id] = new UnicharIdVector();
        }
        reverse_ambigs_for_adaption_[ambig_id]->push_back(i);
      }
    }
  }

  // Print what was read from the input file.
  if (debug_level > 1) {
    for (int tbl = 0; tbl < 2; ++tbl) {
      const UnicharAmbigsVector &print_table = (tbl == 0) ? replace_ambigs_ : dang_ambigs_;
      for (size_t i = 0; i < print_table.size(); ++i) {
        AmbigSpec_LIST *lst = print_table[i];
        if (lst == nullptr) {
          continue;
        }
        if (!lst->empty()) {
          tprintf("%s Ambiguities for %s:\n", (tbl == 0) ? "Replaceable" : "Dangerous",
                  unicharset->debug_str(i).c_str());
        }
        AmbigSpec_IT lst_it(lst);
        for (lst_it.mark_cycle_pt(); !lst_it.cycled_list(); lst_it.forward()) {
          AmbigSpec *ambig_spec = lst_it.data();
          tprintf("wrong_ngram:");
          UnicharIdArrayUtils::print(ambig_spec->wrong_ngram, *unicharset);
          tprintf("correct_fragments:");
          UnicharIdArrayUtils::print(ambig_spec->correct_fragments, *unicharset);
        }
      }
    }
    if (use_ambigs_for_adaption) {
      for (int vec_id = 0; vec_id < 2; ++vec_id) {
        const std::vector<UnicharIdVector *> &vec =
            (vec_id == 0) ? ambigs_for_adaption_ : reverse_ambigs_for_adaption_;
        for (size_t i = 0; i < vec.size(); ++i) {
          adaption_ambigs_entry = vec[i];
          if (adaption_ambigs_entry != nullptr) {
            tprintf("%sAmbigs for adaption for %s:\n", (vec_id == 0) ? "" : "Reverse ",
                    unicharset->debug_str(i).c_str());
            for (size_t j = 0; j < adaption_ambigs_entry->size(); ++j) {
              tprintf("%s ", unicharset->debug_str((*adaption_ambigs_entry)[j]).c_str());
            }
            tprintf("\n");
          }
        }
      }
    }
  }
}
// Parses one line of an ambiguity file into its components.
// Two on-disk formats are supported:
//  - version > 1: three space-separated fields "wrong-string correct-string
//    type"; both strings are encoded through the given unicharset.
//  - version <= 1: delimiter-separated counted lists
//    "n u1 ... un m v1 ... vm [type]", where the trailing type field is
//    present only when version > 0.
// Outputs on success (returns true):
//  - test_unichar_ids: wrong-ngram ids, terminated by INVALID_UNICHAR_ID,
//    with *test_ambig_part_size entries.
//  - replacement_string / *replacement_ambig_part_size: the correct string
//    and its length in unichars.
//  - *type: the AmbigType value read from the line (old format without the
//    field leaves *type untouched, i.e. at the caller's default).
// Returns false on any malformed or oversized line, logging via tprintf
// only when debug_level is non-zero.
bool UnicharAmbigs::ParseAmbiguityLine(int line_num, int version, int debug_level,
                                       const UNICHARSET &unicharset, char *buffer,
                                       int *test_ambig_part_size, UNICHAR_ID *test_unichar_ids,
                                       int *replacement_ambig_part_size, char *replacement_string,
                                       int *type) {
  if (version > 1) {
    // Simpler format is just wrong-string correct-string type\n.
    std::string input(buffer);
    std::vector<std::string> fields = split(input, ' ');
    if (fields.size() != 3) {
      if (debug_level) {
        tprintf(kIllegalMsg, line_num);
      }
      return false;
    }
    // Encode wrong-string.
    std::vector<UNICHAR_ID> unichars;
    if (!unicharset.encode_string(fields[0].c_str(), true, &unichars, nullptr, nullptr)) {
      return false;
    }
    *test_ambig_part_size = unichars.size();
    if (*test_ambig_part_size > MAX_AMBIG_SIZE) {
      if (debug_level) {
        tprintf("Too many unichars in ambiguity on line %d\n", line_num);
      }
      return false;
    }
    // Copy encoded string to output.
    for (size_t i = 0; i < unichars.size(); ++i) {
      test_unichar_ids[i] = unichars[i];
    }
    // Callers rely on the INVALID_UNICHAR_ID terminator.
    test_unichar_ids[unichars.size()] = INVALID_UNICHAR_ID;
    // Encode replacement-string to check validity.
    if (!unicharset.encode_string(fields[1].c_str(), true, &unichars, nullptr, nullptr)) {
      return false;
    }
    *replacement_ambig_part_size = unichars.size();
    if (*replacement_ambig_part_size > MAX_AMBIG_SIZE) {
      if (debug_level) {
        tprintf("Too many unichars in ambiguity on line %d\n", line_num);
      }
      return false;
    }
    if (sscanf(fields[2].c_str(), "%d", type) != 1) {
      if (debug_level) {
        tprintf(kIllegalMsg, line_num);
      }
      return false;
    }
    // snprintf silently truncates overlong replacements to
    // kMaxAmbigStringSize - 1 characters.
    snprintf(replacement_string, kMaxAmbigStringSize, "%s", fields[1].c_str());
    return true;
  }
  // Old (version <= 1) counted-list format below. strtok_r keeps its
  // scanning state in next_token across calls.
  int i;
  char *next_token;
  char *token = strtok_r(buffer, kAmbigDelimiters, &next_token);
  if (!token || sscanf(token, "%d", test_ambig_part_size) != 1 ||
      *test_ambig_part_size <= 0) {
    if (debug_level) {
      tprintf(kIllegalMsg, line_num);
    }
    return false;
  }
  if (*test_ambig_part_size > MAX_AMBIG_SIZE) {
    if (debug_level) {
      tprintf("Too many unichars in ambiguity on line %d\n", line_num);
    }
    return false;
  }
  // Read exactly *test_ambig_part_size wrong-ngram unichars; an early break
  // (missing or unknown token) is detected after the loop via i.
  for (i = 0; i < *test_ambig_part_size; ++i) {
    if (!(token = strtok_r(nullptr, kAmbigDelimiters, &next_token))) {
      break;
    }
    if (!unicharset.contains_unichar(token)) {
      if (debug_level) {
        tprintf(kIllegalUnicharMsg, token);
      }
      break;
    }
    test_unichar_ids[i] = unicharset.unichar_to_id(token);
  }
  test_unichar_ids[i] = INVALID_UNICHAR_ID;
  if (i != *test_ambig_part_size || !(token = strtok_r(nullptr, kAmbigDelimiters, &next_token)) ||
      sscanf(token, "%d", replacement_ambig_part_size) != 1 ||
      *replacement_ambig_part_size <= 0) {
    if (debug_level) {
      tprintf(kIllegalMsg, line_num);
    }
    return false;
  }
  if (*replacement_ambig_part_size > MAX_AMBIG_SIZE) {
    if (debug_level) {
      tprintf("Too many unichars in ambiguity on line %d\n", line_num);
    }
    return false;
  }
  // Accumulate the replacement unichars into one string.
  // NOTE(review): strcat has no bounds check here; this assumes
  // MAX_AMBIG_SIZE tokens always fit in kMaxAmbigStringSize — verify
  // against the buffer size declared at the call site.
  replacement_string[0] = '\0';
  for (i = 0; i < *replacement_ambig_part_size; ++i) {
    if (!(token = strtok_r(nullptr, kAmbigDelimiters, &next_token))) {
      break;
    }
    strcat(replacement_string, token);
    if (!unicharset.contains_unichar(token)) {
      if (debug_level) {
        tprintf(kIllegalUnicharMsg, token);
      }
      break;
    }
  }
  if (i != *replacement_ambig_part_size) {
    if (debug_level) {
      tprintf(kIllegalMsg, line_num);
    }
    return false;
  }
  if (version > 0) {
    // The next field being true indicates that the ambiguity should
    // always be substituted (e.g. '' should always be changed to ").
    // For such "certain" n -> m ambigs tesseract will insert character
    // fragments for the n pieces in the unicharset. AmbigsFound()
    // will then replace the incorrect ngram with the character
    // fragments of the correct character (or ngram if m > 1).
    // Note that if m > 1, an ngram will be inserted into the
    // modified word, not the individual unigrams. Tesseract
    // has limited support for ngram unichar (e.g. dawg permuter).
    token = strtok_r(nullptr, kAmbigDelimiters, &next_token);
    if (!token || sscanf(token, "%d", type) != 1) {
      if (debug_level) {
        tprintf(kIllegalMsg, line_num);
      }
      return false;
    }
  }
  return true;
}
// Builds an AmbigSpec from a parsed ambiguity line and inserts it into the
// given table (replace_ambigs_ or dang_ambigs_), keyed by the first unichar
// of the wrong ngram. As a side effect, inserts the correct ngram and its
// character fragments into the unicharset (ngram insertion must precede
// fragment insertion — the unicharset code relies on that order).
// Takes ownership of ambig_spec: deletes it if the sorted insert rejects a
// duplicate. Returns true iff the spec was added to the table.
bool UnicharAmbigs::InsertIntoTable(UnicharAmbigsVector &table, int test_ambig_part_size,
                                    UNICHAR_ID *test_unichar_ids, int replacement_ambig_part_size,
                                    const char *replacement_string, int type, AmbigSpec *ambig_spec,
                                    UNICHARSET *unicharset) {
  ambig_spec->type = static_cast<AmbigType>(type);
  // A 1-1 ambiguity whose two sides differ only in case is reclassified
  // as CASE_AMBIG regardless of the type read from the file.
  if (test_ambig_part_size == 1 && replacement_ambig_part_size == 1 &&
      unicharset->to_lower(test_unichar_ids[0]) ==
          unicharset->to_lower(unicharset->unichar_to_id(replacement_string))) {
    ambig_spec->type = CASE_AMBIG;
  }
  ambig_spec->wrong_ngram_size =
      UnicharIdArrayUtils::copy(test_unichar_ids, ambig_spec->wrong_ngram);
  // Since we need to maintain a constant number of unichar positions in
  // order to construct ambig_blob_choices vector in NoDangerousAmbig(), for
  // each n->m ambiguity we will have to place n character fragments of the
  // correct ngram into the corresponding positions in the vector (e.g. given
  // "vvvvw" and vvvv->ww we will place v and |ww|0|4 into position 0, v and
  // |ww|1|4 into position 1 and so on. The correct ngram is reconstructed
  // from fragments by dawg_permute_and_select().
  // Insert the corresponding correct ngram into the unicharset.
  // Unicharset code assumes that the "base" ngram is inserted into
  // the unicharset before fragments of this ngram are inserted.
  unicharset->unichar_insert(replacement_string, OldUncleanUnichars::kTrue);
  ambig_spec->correct_ngram_id = unicharset->unichar_to_id(replacement_string);
  if (replacement_ambig_part_size > 1) {
    unicharset->set_isngram(ambig_spec->correct_ngram_id, true);
  }
  // Add the corresponding fragments of the wrong ngram to unicharset.
  int i;
  for (i = 0; i < test_ambig_part_size; ++i) {
    UNICHAR_ID unichar_id;
    if (test_ambig_part_size == 1) {
      // 1->m case: the single position holds the whole correct ngram.
      unichar_id = ambig_spec->correct_ngram_id;
    } else {
      // n->m case: position i holds fragment |replacement|i|n.
      std::string frag_str =
          CHAR_FRAGMENT::to_string(replacement_string, i, test_ambig_part_size, false);
      unicharset->unichar_insert(frag_str.c_str(), OldUncleanUnichars::kTrue);
      unichar_id = unicharset->unichar_to_id(frag_str.c_str());
    }
    ambig_spec->correct_fragments[i] = unichar_id;
  }
  // Terminate for UnicharIdArrayUtils::compare/print.
  ambig_spec->correct_fragments[i] = INVALID_UNICHAR_ID;
  // Add AmbigSpec for this ambiguity to the corresponding AmbigSpec_LIST.
  // Keep AmbigSpec_LISTs sorted by AmbigSpec.wrong_ngram.
  if (table[test_unichar_ids[0]] == nullptr) {
    table[test_unichar_ids[0]] = new AmbigSpec_LIST();
  }
  if (table[test_unichar_ids[0]]->add_sorted(AmbigSpec::compare_ambig_specs, true, ambig_spec)) {
    return true;
  }
  // Duplicate entry: add_sorted rejected it, so free the spec.
  delete ambig_spec;
  return false;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/ambigs.cpp
|
C++
|
apache-2.0
| 16,121
|
///////////////////////////////////////////////////////////////////////
// File: ambigs.h
// Description: Constants, flags, functions for dealing with
// ambiguities (training and recognition).
// Author: Daria Antonova
//
// (C) Copyright 2008, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_AMBIGS_H_
#define TESSERACT_CCUTIL_AMBIGS_H_
#ifdef HAVE_CONFIG_H
# include "config_auto.h" // DISABLED_LEGACY_ENGINE
#endif
#if !defined(DISABLED_LEGACY_ENGINE)
# include <tesseract/unichar.h>
# include "elst.h"
# include "tprintf.h"
# include "unicharset.h"
# define MAX_AMBIG_SIZE 10
namespace tesseract {
using UnicharIdVector = std::vector<UNICHAR_ID>;
enum AmbigType {
NOT_AMBIG, // the ngram pair is not ambiguous
REPLACE_AMBIG, // ocred ngram should always be substituted with correct
DEFINITE_AMBIG, // add correct ngram to the classifier results (1-1)
SIMILAR_AMBIG, // use pairwise classifier for ocred/correct pair (1-1)
CASE_AMBIG, // this is a case ambiguity (1-1)
AMBIG_TYPE_COUNT // number of enum entries
};
// A collection of utility functions for arrays of UNICHAR_IDs that are
// terminated by INVALID_UNICHAR_ID.
class UnicharIdArrayUtils {
public:
// Compares two arrays of unichar ids. Returns -1 if the length of array1 is
// less than length of array2, if any array1[i] is less than array2[i].
// Returns 0 if the arrays are equal, 1 otherwise.
// The function assumes that the arrays are terminated by INVALID_UNICHAR_ID.
static inline int compare(const UNICHAR_ID *ptr1, const UNICHAR_ID *ptr2) {
for (;;) {
const UNICHAR_ID val1 = *ptr1++;
const UNICHAR_ID val2 = *ptr2++;
if (val1 != val2) {
if (val1 == INVALID_UNICHAR_ID) {
return -1;
}
if (val2 == INVALID_UNICHAR_ID) {
return 1;
}
if (val1 < val2) {
return -1;
}
return 1;
}
if (val1 == INVALID_UNICHAR_ID) {
return 0;
}
}
}
// Copies UNICHAR_IDs from dst to src. Returns the number of ids copied.
// The function assumes that the arrays are terminated by INVALID_UNICHAR_ID
// and that dst has enough space for all the elements from src.
static inline int copy(const UNICHAR_ID src[], UNICHAR_ID dst[]) {
int i = 0;
do {
dst[i] = src[i];
} while (dst[i++] != INVALID_UNICHAR_ID);
return i - 1;
}
// Prints unichars corresponding to the unichar_ids in the given array.
// The function assumes that array is terminated by INVALID_UNICHAR_ID.
static inline void print(const UNICHAR_ID array[], const UNICHARSET &unicharset) {
const UNICHAR_ID *ptr = array;
if (*ptr == INVALID_UNICHAR_ID) {
tprintf("[Empty]");
}
while (*ptr != INVALID_UNICHAR_ID) {
tprintf("%s ", unicharset.id_to_unichar(*ptr++));
}
tprintf("( ");
ptr = array;
while (*ptr != INVALID_UNICHAR_ID) {
tprintf("%d ", *ptr++);
}
tprintf(")\n");
}
};
// AMBIG_SPEC_LIST stores a list of dangerous ambigs that
// start with the same unichar (e.g. r->t rn->m rr1->m).
class AmbigSpec : public ELIST_LINK {
public:
  AmbigSpec();
  ~AmbigSpec() = default;

  // Comparator function for sorting AmbigSpec_LISTs. The lists will
  // be sorted by their wrong_ngram arrays. Example of wrong_ngram vectors
  // in a sorted AmbigSpec_LIST: [9 1 3], [9 3 4], [9 8], [9, 8 1].
  // Signature matches the void*-based comparator expected by
  // ELIST::add_sorted; arguments point at AmbigSpec* elements.
  static int compare_ambig_specs(const void *spec1, const void *spec2) {
    const AmbigSpec *s1 = *static_cast<const AmbigSpec *const *>(spec1);
    const AmbigSpec *s2 = *static_cast<const AmbigSpec *const *>(spec2);
    int result = UnicharIdArrayUtils::compare(s1->wrong_ngram, s2->wrong_ngram);
    if (result != 0) {
      return result;
    }
    // Ties on wrong_ngram are broken by the correct fragments.
    return UnicharIdArrayUtils::compare(s1->correct_fragments, s2->correct_fragments);
  }

  // Incorrect ngram, terminated by INVALID_UNICHAR_ID.
  UNICHAR_ID wrong_ngram[MAX_AMBIG_SIZE + 1];
  // Per-position fragments of the correct ngram, terminated by
  // INVALID_UNICHAR_ID (see UnicharAmbigs::InsertIntoTable).
  UNICHAR_ID correct_fragments[MAX_AMBIG_SIZE + 1];
  // Unichar id of the full correct ngram.
  UNICHAR_ID correct_ngram_id;
  // Kind of ambiguity (see AmbigType).
  AmbigType type;
  // Number of ids in wrong_ngram, excluding the terminator.
  int wrong_ngram_size;
};
ELISTIZEH(AmbigSpec)
// AMBIG_TABLE[i] stores a set of ambiguities whose
// wrong ngram starts with unichar id i.
using UnicharAmbigsVector = std::vector<AmbigSpec_LIST *>;

// Owner of the ambiguity tables loaded from an ambigs file. All table
// entries are heap-allocated lazily during loading and owned by this class.
class UnicharAmbigs {
public:
  UnicharAmbigs() = default;

  // Frees every heap-allocated table entry.
  // Bug fix: ambigs_for_adaption_ and reverse_ambigs_for_adaption_ are
  // populated with new'd UnicharIdVectors by LoadUnicharAmbigs (when
  // use_ambigs_for_adaption is true) but were previously never deleted
  // here, leaking their contents on destruction.
  ~UnicharAmbigs() {
    for (auto data : replace_ambigs_) {
      delete data;
    }
    for (auto data : dang_ambigs_) {
      delete data;
    }
    for (auto data : one_to_one_definite_ambigs_) {
      delete data;
    }
    for (auto data : ambigs_for_adaption_) {
      delete data;
    }
    for (auto data : reverse_ambigs_for_adaption_) {
      delete data;
    }
  }

  // Accessor for the dangerous-ambiguities table.
  const UnicharAmbigsVector &dang_ambigs() const {
    return dang_ambigs_;
  }
  // Accessor for the replaceable-ambiguities table.
  const UnicharAmbigsVector &replace_ambigs() const {
    return replace_ambigs_;
  }

  // Initializes the ambigs by adding a nullptr pointer to each table.
  void InitUnicharAmbigs(const UNICHARSET &unicharset, bool use_ambigs_for_adaption);

  // Loads the universal ambigs that are useful for any language.
  void LoadUniversal(const UNICHARSET &encoder_set, UNICHARSET *unicharset);

  // Fills in two ambiguity tables (replaceable and dangerous) with information
  // read from the ambigs file. An ambiguity table is an array of lists.
  // The array is indexed by a class id. Each entry in the table provides
  // a list of potential ambiguities which can start with the corresponding
  // character. For example the ambiguity "rn -> m", would be located in the
  // table at index of unicharset.unichar_to_id('r').
  // In 1-1 ambiguities (e.g. s -> S, 1 -> I) are recorded in
  // one_to_one_definite_ambigs_. This vector is also indexed by the class id
  // of the wrong part of the ambiguity and each entry contains a vector of
  // unichar ids that are ambiguous to it.
  // encoder_set is used to encode the ambiguity strings, undisturbed by new
  // unichar_ids that may be created by adding the ambigs.
  void LoadUnicharAmbigs(const UNICHARSET &encoder_set, TFile *ambigs_file, int debug_level,
                         bool use_ambigs_for_adaption, UNICHARSET *unicharset);

  // Returns definite 1-1 ambigs for the given unichar id.
  inline const UnicharIdVector *OneToOneDefiniteAmbigs(UNICHAR_ID unichar_id) const {
    if (one_to_one_definite_ambigs_.empty()) {
      return nullptr;
    }
    return one_to_one_definite_ambigs_[unichar_id];
  }

  // Returns a pointer to the vector with all unichar ids that appear in the
  // 'correct' part of the ambiguity pair when the given unichar id appears
  // in the 'wrong' part of the ambiguity. E.g. if DangAmbigs file consist of
  // m->rn,rn->m,m->iii, UnicharAmbigsForAdaption() called with unichar id of
  // m will return a pointer to a vector with unichar ids of r,n,i.
  inline const UnicharIdVector *AmbigsForAdaption(UNICHAR_ID unichar_id) const {
    if (ambigs_for_adaption_.empty()) {
      return nullptr;
    }
    return ambigs_for_adaption_[unichar_id];
  }

  // Similar to the above, but return the vector of unichar ids for which
  // the given unichar_id is an ambiguity (appears in the 'wrong' part of
  // some ambiguity pair).
  inline const UnicharIdVector *ReverseAmbigsForAdaption(UNICHAR_ID unichar_id) const {
    if (reverse_ambigs_for_adaption_.empty()) {
      return nullptr;
    }
    return reverse_ambigs_for_adaption_[unichar_id];
  }

private:
  // Parses one line of an ambigs file; see ambigs.cpp for the two formats.
  bool ParseAmbiguityLine(int line_num, int version, int debug_level, const UNICHARSET &unicharset,
                          char *buffer, int *test_ambig_part_size, UNICHAR_ID *test_unichar_ids,
                          int *replacement_ambig_part_size, char *replacement_string, int *type);
  // Inserts a parsed ambiguity into the given table. Takes ownership of
  // ambig_spec (deletes it when the sorted insert rejects a duplicate).
  bool InsertIntoTable(UnicharAmbigsVector &table, int test_ambig_part_size,
                       UNICHAR_ID *test_unichar_ids, int replacement_ambig_part_size,
                       const char *replacement_string, int type, AmbigSpec *ambig_spec,
                       UNICHARSET *unicharset);

  UnicharAmbigsVector dang_ambigs_;
  UnicharAmbigsVector replace_ambigs_;
  std::vector<UnicharIdVector *> one_to_one_definite_ambigs_;
  std::vector<UnicharIdVector *> ambigs_for_adaption_;
  std::vector<UnicharIdVector *> reverse_ambigs_for_adaption_;
};
} // namespace tesseract
#endif // !defined(DISABLED_LEGACY_ENGINE)
#endif // TESSERACT_CCUTIL_AMBIGS_H_
|
2301_81045437/tesseract
|
src/ccutil/ambigs.h
|
C++
|
apache-2.0
| 8,800
|
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: bitvector.cpp
// Description: Class replacement for BITVECTOR.
// Author: Ray Smith
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "bitvector.h"
#include <algorithm>
#include <cstring>
#include "helpers.h"
#include "serialis.h" // for tesseract::Serialize
namespace tesseract {
// Fast lookup table to get the first least significant set bit in a byte.
// For zero, the table has 255, but since it is a special case, most code
// that uses this table will check for zero before looking up lsb_index_.
const uint8_t BitVector::lsb_index_[256] = {
    255, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2,
    0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0,
    1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1,
    0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0,
    2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
    0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0,
    1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1,
    0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0,
    3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
// Fast lookup table to get the residual bits after zeroing the first (lowest)
// set bit in a byte, i.e. lsb_eroded_[b] == b & (b - 1).
const uint8_t BitVector::lsb_eroded_[256] = {
    0, 0, 0, 0x2, 0, 0x4, 0x4, 0x6, 0, 0x8, 0x8, 0x0a, 0x08, 0x0c, 0x0c, 0x0e,
    0, 0x10, 0x10, 0x12, 0x10, 0x14, 0x14, 0x16, 0x10, 0x18, 0x18, 0x1a, 0x18, 0x1c, 0x1c, 0x1e,
    0, 0x20, 0x20, 0x22, 0x20, 0x24, 0x24, 0x26, 0x20, 0x28, 0x28, 0x2a, 0x28, 0x2c, 0x2c, 0x2e,
    0x20, 0x30, 0x30, 0x32, 0x30, 0x34, 0x34, 0x36, 0x30, 0x38, 0x38, 0x3a, 0x38, 0x3c, 0x3c, 0x3e,
    0, 0x40, 0x40, 0x42, 0x40, 0x44, 0x44, 0x46, 0x40, 0x48, 0x48, 0x4a, 0x48, 0x4c, 0x4c, 0x4e,
    0x40, 0x50, 0x50, 0x52, 0x50, 0x54, 0x54, 0x56, 0x50, 0x58, 0x58, 0x5a, 0x58, 0x5c, 0x5c, 0x5e,
    0x40, 0x60, 0x60, 0x62, 0x60, 0x64, 0x64, 0x66, 0x60, 0x68, 0x68, 0x6a, 0x68, 0x6c, 0x6c, 0x6e,
    0x60, 0x70, 0x70, 0x72, 0x70, 0x74, 0x74, 0x76, 0x70, 0x78, 0x78, 0x7a, 0x78, 0x7c, 0x7c, 0x7e,
    0, 0x80, 0x80, 0x82, 0x80, 0x84, 0x84, 0x86, 0x80, 0x88, 0x88, 0x8a, 0x88, 0x8c, 0x8c, 0x8e,
    0x80, 0x90, 0x90, 0x92, 0x90, 0x94, 0x94, 0x96, 0x90, 0x98, 0x98, 0x9a, 0x98, 0x9c, 0x9c, 0x9e,
    0x80, 0xa0, 0xa0, 0xa2, 0xa0, 0xa4, 0xa4, 0xa6, 0xa0, 0xa8, 0xa8, 0xaa, 0xa8, 0xac, 0xac, 0xae,
    0xa0, 0xb0, 0xb0, 0xb2, 0xb0, 0xb4, 0xb4, 0xb6, 0xb0, 0xb8, 0xb8, 0xba, 0xb8, 0xbc, 0xbc, 0xbe,
    0x80, 0xc0, 0xc0, 0xc2, 0xc0, 0xc4, 0xc4, 0xc6, 0xc0, 0xc8, 0xc8, 0xca, 0xc8, 0xcc, 0xcc, 0xce,
    0xc0, 0xd0, 0xd0, 0xd2, 0xd0, 0xd4, 0xd4, 0xd6, 0xd0, 0xd8, 0xd8, 0xda, 0xd8, 0xdc, 0xdc, 0xde,
    0xc0, 0xe0, 0xe0, 0xe2, 0xe0, 0xe4, 0xe4, 0xe6, 0xe0, 0xe8, 0xe8, 0xea, 0xe8, 0xec, 0xec, 0xee,
    0xe0, 0xf0, 0xf0, 0xf2, 0xf0, 0xf4, 0xf4, 0xf6, 0xf0, 0xf8, 0xf8, 0xfa, 0xf8, 0xfc, 0xfc, 0xfe};
// Fast lookup table to give the number of set bits in a byte (popcount).
const int BitVector::hamming_table_[256] = {
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
// Copy-assignment: duplicates both the logical size and the bit storage of
// src. Self-assignment is harmless (vector assignment handles it).
BitVector &BitVector::operator=(const BitVector &src) {
  bit_size_ = src.bit_size_;
  array_ = src.array_;
  return *this;
}
// Initializes the array to length * false.
// Resizes the backing storage if needed, then clears every bit.
void BitVector::Init(int length) {
  Alloc(length);
  SetAllFalse();
}
// Writes to the given file. Returns false in case of error.
bool BitVector::Serialize(FILE *fp) const {
if (!tesseract::Serialize(fp, &bit_size_)) {
return false;
}
int wordlen = WordLength();
return tesseract::Serialize(fp, &array_[0], wordlen);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed:
// the bit count is byte-reversed before allocation and every word is
// byte-reversed after reading.
bool BitVector::DeSerialize(bool swap, FILE *fp) {
  uint32_t new_bit_size;
  if (!tesseract::DeSerialize(fp, &new_bit_size)) {
    return false;
  }
  if (swap) {
    ReverseN(&new_bit_size, sizeof(new_bit_size));
  }
  Alloc(new_bit_size);
  int wordlen = WordLength();
  if (!tesseract::DeSerialize(fp, &array_[0], wordlen)) {
    return false;
  }
  if (swap) {
    for (int i = 0; i < wordlen; ++i) {
      ReverseN(&array_[i], sizeof(array_[i]));
    }
  }
  return true;
}
// Clears every bit by zeroing each packed word.
void BitVector::SetAllFalse() {
  for (auto &word : array_) {
    word = 0;
  }
}
// Sets every bit by writing all-ones into each packed word.
// (Bits past bit_size_ in the last word are set too, matching the
// previous memset-based behavior.)
void BitVector::SetAllTrue() {
  for (auto &word : array_) {
    word = ~static_cast<uint32_t>(0);
  }
}
// Returns the index of the next set bit after the given index, or -1 when
// no set bit remains. Pass -1 to find the first set bit.
// Useful for quickly iterating through the set bits in a sparse vector.
// Scans byte-at-a-time using lsb_index_ (position of lowest set bit in a
// byte) and lsb_eroded_ (byte with its lowest set bit cleared).
int BitVector::NextSetBit(int prev_bit) const {
  // Move on to the next bit.
  int next_bit = prev_bit + 1;
  if (next_bit >= bit_size_) {
    return -1;
  }
  // Check the remains of the word containing the next_bit first.
  int next_word = WordIndex(next_bit);
  int bit_index = next_word * kBitFactor;
  int word_end = bit_index + kBitFactor;
  uint32_t word = array_[next_word];
  uint8_t byte = word & 0xff;
  while (bit_index < word_end) {
    // Only bytes that overlap [next_bit, ...) can contain the answer.
    if (bit_index + 8 > next_bit && byte != 0) {
      // Erode set bits below next_bit until the lowest remaining set bit
      // is at or after next_bit (or the byte runs out of set bits).
      while (bit_index + lsb_index_[byte] < next_bit && byte != 0) {
        byte = lsb_eroded_[byte];
      }
      if (byte != 0) {
        return bit_index + lsb_index_[byte];
      }
    }
    word >>= 8;
    bit_index += 8;
    byte = word & 0xff;
  }
  // next_word didn't contain a 1, so find the next word with set bit.
  ++next_word;
  int wordlen = WordLength();
  while (next_word < wordlen && (word = array_[next_word]) == 0) {
    ++next_word;
    bit_index += kBitFactor;
  }
  if (bit_index >= bit_size_) {
    return -1;
  }
  // Find the first non-zero byte within the word.
  while ((word & 0xff) == 0) {
    word >>= 8;
    bit_index += 8;
  }
  return bit_index + lsb_index_[word & 0xff];
}
// Returns the number of set bits in the vector.
int BitVector::NumSetBits() const {
int wordlen = WordLength();
int total_bits = 0;
for (int w = 0; w < wordlen; ++w) {
uint32_t word = array_[w];
for (int i = 0; i < 4; ++i) {
total_bits += hamming_table_[word & 0xff];
word >>= 8;
}
}
return total_bits;
}
// Logical in-place operations on whole bit vectors. Tries to do something
// sensible if they aren't the same size, but they should be really.
// OR: words beyond the shorter vector are left untouched.
void BitVector::operator|=(const BitVector &other) {
  const int shared_words = std::min(WordLength(), other.WordLength());
  for (int i = 0; i < shared_words; ++i) {
    array_[i] |= other.array_[i];
  }
}
// AND: words beyond other's length behave as if other held zeros there,
// so any tail words of *this are cleared.
void BitVector::operator&=(const BitVector &other) {
  const int shared_words = std::min(WordLength(), other.WordLength());
  int i = 0;
  for (; i < shared_words; ++i) {
    array_[i] &= other.array_[i];
  }
  for (; i < WordLength(); ++i) {
    array_[i] = 0;
  }
}
// XOR: words beyond the shorter vector are left untouched.
void BitVector::operator^=(const BitVector &other) {
  const int shared_words = std::min(WordLength(), other.WordLength());
  for (int i = 0; i < shared_words; ++i) {
    array_[i] = array_[i] ^ other.array_[i];
  }
}
// Set subtraction *this = v1 - v2.
// Resizes *this to v1's size; each shared word becomes v1 AND NOT v2
// (expressed as v1 ^ (v1 & v2)); words where v2 has no data are copied
// from v1 unchanged (v2 treated as all-zero there).
void BitVector::SetSubtract(const BitVector &v1, const BitVector &v2) {
  Alloc(v1.size());
  int length = std::min(v1.WordLength(), v2.WordLength());
  for (int w = 0; w < length; ++w) {
    array_[w] = v1.array_[w] ^ (v1.array_[w] & v2.array_[w]);
  }
  // After Alloc(v1.size()), WordLength() == v1.WordLength(), so indexing
  // v1.array_ over the tail is in range.
  for (int w = WordLength() - 1; w >= length; --w) {
    array_[w] = v1.array_[w];
  }
}
// Allocates memory for a vector of the given length.
// Reallocates if the array is a different size, larger or smaller.
// Existing word contents are preserved where the word count is unchanged;
// callers needing cleared bits should use Init/SetAllFalse.
void BitVector::Alloc(int length) {
  int initial_wordlength = WordLength();
  bit_size_ = length;
  int new_wordlength = WordLength();
  if (new_wordlength != initial_wordlength) {
    array_.resize(new_wordlength);
  }
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/ccutil/bitvector.cpp
|
C++
|
apache-2.0
| 9,320
|
///////////////////////////////////////////////////////////////////////
// File: bitvector.h
// Description: Class replacement for BITVECTOR.
// Author: Ray Smith
//
// (C) Copyright 2011, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_BITVECTOR_H_
#define TESSERACT_CCUTIL_BITVECTOR_H_
#include <tesseract/export.h>
#include <cassert>
#include <cstdint> // for uint8_t
#include <cstdio>
#include <vector> // for std::vector
namespace tesseract {
// Trivial class to encapsulate a fixed-length array of bits, with
// Serialize/DeSerialize. Replaces the old macros.
// Bits are packed little-endian into uint32_t words (see array_ below).
class TESS_API BitVector {
public:
  // Fast lookup table to get the first least significant set bit in a byte.
  // For zero, the table has 255, but since it is a special case, most code
  // that uses this table will check for zero before looking up lsb_index_.
  static const uint8_t lsb_index_[256];
  // Fast lookup table to get the residual bits after zeroing the least
  // significant set bit in a byte.
  static const uint8_t lsb_eroded_[256];
  // Fast lookup table to give the number of set bits in a byte.
  static const int hamming_table_[256];

  BitVector() = default;
  // Initializes the array to length * false.
  // (vector value-initialization zeroes the words.)
  explicit BitVector(int length) : bit_size_(length), array_(WordLength()) {
  }
  BitVector(const BitVector &src) : bit_size_(src.bit_size_), array_(src.array_) {
  }
  BitVector &operator=(const BitVector &src);
  ~BitVector() = default;

  // Initializes the array to length * false.
  void Init(int length);

  // Returns true when the vector holds no bits at all.
  int empty() const {
    return bit_size_ == 0;
  }
  // Returns the number of bits that are accessible in the vector.
  int size() const {
    return bit_size_;
  }

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE *fp);

  void SetAllFalse();
  void SetAllTrue();

  // Accessors to set/reset/get bits.
  // The range of index is [0, size()-1].
  // There is debug-only bounds checking.
  void SetBit(int index) {
    array_[WordIndex(index)] |= BitMask(index);
  }
  void ResetBit(int index) {
    array_[WordIndex(index)] &= ~BitMask(index);
  }
  void SetValue(int index, bool value) {
    if (value) {
      SetBit(index);
    } else {
      ResetBit(index);
    }
  }
  bool At(int index) const {
    return (array_[WordIndex(index)] & BitMask(index)) != 0;
  }
  bool operator[](int index) const {
    return (array_[WordIndex(index)] & BitMask(index)) != 0;
  }

  // Returns the index of the next set bit after the given index.
  // Useful for quickly iterating through the set bits in a sparse vector.
  int NextSetBit(int prev_bit) const;

  // Returns the number of set bits in the vector.
  int NumSetBits() const;

  // Logical in-place operations on whole bit vectors. Tries to do something
  // sensible if they aren't the same size, but they should be really.
  void operator|=(const BitVector &other);
  void operator&=(const BitVector &other);
  void operator^=(const BitVector &other);
  // Set subtraction *this = v1 - v2.
  void SetSubtract(const BitVector &v1, const BitVector &v2);

private:
  // Allocates memory for a vector of the given length.
  void Alloc(int length);

  // Computes the index to array_ for the given index, with debug range
  // checking.
  int WordIndex(int index) const {
    assert(0 <= index && index < bit_size_);
    return index / kBitFactor;
  }
  // Returns a mask to select the appropriate bit for the given index.
  uint32_t BitMask(int index) const {
    return 1 << (index & (kBitFactor - 1));
  }
  // Returns the number of array elements needed to represent the current
  // bit_size_.
  int WordLength() const {
    return (bit_size_ + kBitFactor - 1) / kBitFactor;
  }
  // Returns the number of bytes consumed by the array_.
  int ByteLength() const {
    return WordLength() * sizeof(array_[0]);
  }

  // Number of bits in this BitVector.
  int32_t bit_size_ = 0;
  // Array of words used to pack the bits.
  // Bits are stored little-endian by uint32_t word, ie by word first and then
  // starting with the least significant bit in each word.
  std::vector<uint32_t> array_;
  // Number of bits in an array_ element.
  static const int kBitFactor = sizeof(array_[0]) * 8;
};
} // namespace tesseract.
#endif // TESSERACT_CCUTIL_BITVECTOR_H_
|
2301_81045437/tesseract
|
src/ccutil/bitvector.h
|
C++
|
apache-2.0
| 5,064
|
// Copyright 2008 Google Inc. All Rights Reserved.
// Author: scharron@google.com (Samuel Charron)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if defined(_WIN32)
# include <io.h> // for _access
#endif
#include "ccutil.h"
#include <cstdlib>
#include <cstring> // for std::strrchr
#include <filesystem> // for std::filesystem
namespace tesseract {
// Constructor: registers the ambiguity-related parameters with params_ so
// they can be set through tesseract's normal parameter mechanism.
// Fix: "&params_" had been corrupted to the mojibake "¶ms_" (the "&para"
// prefix rendered as the HTML entity ¶), which does not compile; the
// address-of expression is restored here.
CCUtil::CCUtil()
    : params_()
    , INT_INIT_MEMBER(ambigs_debug_level, 0, "Debug level for unichar ambiguities", &params_)
    , BOOL_MEMBER(use_ambigs_for_adaption, false,
                  "Use ambigs for deciding"
                  " whether to adapt to a character",
                  &params_) {}
// Destructor.
// It is defined here (out of line), so the compiler can create a single
// vtable instead of weak vtables in every compilation unit.
CCUtil::~CCUtil() = default;
/**
* @brief CCUtil::main_setup - set location of tessdata and name of image
*
* @param argv0 - paths to the directory with language files and config files.
* An actual value of argv0 is used if not nullptr, otherwise TESSDATA_PREFIX is
* used if not nullptr, next try to use compiled in -DTESSDATA_PREFIX. If
* previous is not successful - use current directory.
* @param basename - name of image
*/
void CCUtil::main_setup(const std::string &argv0, const std::string &basename) {
  // Precedence for locating tessdata: explicit argv0 > TESSDATA_PREFIX env
  // var > (Windows only) directory of the executable > compiled-in
  // TESSDATA_PREFIX > current directory.
  imagebasename = basename; /**< name of image */
  const char *tessdata_prefix = getenv("TESSDATA_PREFIX");
  // Ignore TESSDATA_PREFIX if there is no matching filesystem entry.
  if (tessdata_prefix != nullptr && !std::filesystem::exists(tessdata_prefix)) {
    tprintf("Warning: TESSDATA_PREFIX %s does not exist, ignore it\n", tessdata_prefix);
    tessdata_prefix = nullptr;
  }
  if (!argv0.empty()) {
    /* Use tessdata prefix from the command line. */
    datadir = argv0;
  } else if (tessdata_prefix) {
    /* Use tessdata prefix from the environment. */
    datadir = tessdata_prefix;
#if defined(_WIN32)
  } else if (datadir.empty() || _access(datadir.c_str(), 0) != 0) {
    /* Look for tessdata in directory of executable. */
    char path[_MAX_PATH];
    DWORD length = GetModuleFileName(nullptr, path, sizeof(path));
    if (length > 0 && length < sizeof(path)) {
      // Strip the executable name, keeping only its directory.
      char *separator = std::strrchr(path, '\\');
      if (separator != nullptr) {
        *separator = '\0';
        std::string subdir = path;
        subdir += "/tessdata";
        // Only use <exe dir>/tessdata if it actually exists.
        if (_access(subdir.c_str(), 0) == 0) {
          datadir = subdir;
        }
      }
    }
#endif /* _WIN32 */
  }
  // datadir may still be empty:
  if (datadir.empty()) {
#if defined(TESSDATA_PREFIX)
    // Use tessdata prefix which was compiled in.
    datadir = TESSDATA_PREFIX "/tessdata/";
    // Note that some software (for example conda) patches TESSDATA_PREFIX
    // in the binary, so it might be shorter. Recalculate its length.
    datadir.resize(std::strlen(datadir.c_str()));
#else
    datadir = "./";
#endif /* TESSDATA_PREFIX */
  }
  // check for missing directory separator
  const char lastchar = datadir.back();
  if (lastchar != '/' && lastchar != '\\') {
    datadir += '/';
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/ccutil.cpp
|
C++
|
apache-2.0
| 3,625
|
///////////////////////////////////////////////////////////////////////
// File: ccutil.h
// Description: ccutil class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_CCUTIL_H_
#define TESSERACT_CCUTIL_CCUTIL_H_
#ifndef _WIN32
# include <pthread.h>
# include <semaphore.h>
#endif
#ifdef HAVE_CONFIG_H
# include "config_auto.h" // DISABLED_LEGACY_ENGINE
#endif
#ifndef DISABLED_LEGACY_ENGINE
# include "ambigs.h"
#endif
#include "errcode.h"
#ifdef _WIN32
# include "host.h" // windows.h for HANDLE, ...
#endif
#include "params.h"
#include "unicharset.h"
namespace tesseract {
// Common utility base carrying per-instance data paths, language data and
// runtime parameters shared by higher-level tesseract classes.
class TESS_API CCUtil {
public:
  CCUtil();
  virtual ~CCUtil();
public:
  // Read the arguments and set up the data path.
  void main_setup(const std::string &argv0, // program name
                  const std::string &basename // name of image
  );
  // Accessor for the parameter vectors owned by this object.
  // (Fixes mojibake: "&params_" had been corrupted to "¶ms_".)
  ParamsVectors *params() {
    return &params_;
  }
  std::string datadir;       // dir for data files
  std::string imagebasename; // name of image
  std::string lang;
  std::string language_data_path_prefix;
  UNICHARSET unicharset;
#ifndef DISABLED_LEGACY_ENGINE
  UnicharAmbigs unichar_ambigs;
#endif
  std::string imagefile; // image file name
  std::string directory; // main directory
private:
  ParamsVectors params_;
public:
  // Member parameters.
  // These have to be declared and initialized after params_ member, since
  // params_ should be initialized before parameters are added to it.
  INT_VAR_H(ambigs_debug_level);
  BOOL_VAR_H(use_ambigs_for_adaption);
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_CCUTIL_H_
|
2301_81045437/tesseract
|
src/ccutil/ccutil.h
|
C++
|
apache-2.0
| 2,260
|
/**********************************************************************
* File: clst.cpp (Formerly clist.c)
* Description: CONS cell list handling code which is not in the include file.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "clst.h"
#include <algorithm>
#include <cstdlib>
namespace tesseract {
/***********************************************************************
* CLIST::internal_deep_clear
*
* Used by the "deep_clear" member function of derived list
* classes to destroy all the elements on the list.
* The calling function passes a "zapper" function which can be called to
* delete each data element of the list, regardless of its class. This
* technique permits a generic clear function to destroy elements of
* different derived types correctly, without requiring virtual functions and
* the consequential memory overhead.
**********************************************************************/
void CLIST::internal_deep_clear( // destroy all links
void (*zapper)(void *)) { // ptr to zapper functn
if (!empty()) {
auto ptr = last->next; // set to first
last->next = nullptr; // break circle
last = nullptr; // set list empty
while (ptr) {
auto next = ptr->next;
zapper(ptr->data);
delete (ptr);
ptr = next;
}
}
}
/***********************************************************************
* CLIST::shallow_clear
*
* Used by the destructor and the "shallow_clear" member function of derived
* list classes to destroy the list.
* The data elements are NOT destroyed.
*
**********************************************************************/
void CLIST::shallow_clear() { // destroy all links
if (!empty()) {
auto ptr = last->next; // set to first
last->next = nullptr; // break circle
last = nullptr; // set list empty
while (ptr) {
auto next = ptr->next;
delete (ptr);
ptr = next;
}
}
}
/***********************************************************************
* CLIST::assign_to_sublist
*
* The list is set to a sublist of another list. "This" list must be empty
* before this function is invoked. The two iterators passed must refer to
* the same list, different from "this" one. The sublist removed is the
* inclusive list from start_it's current position to end_it's current
* position. If this range passes over the end of the source list then the
* source list has its end set to the previous element of start_it. The
* extracted sublist is unaffected by the end point of the source list, its
* end point is always the end_it position.
**********************************************************************/
void CLIST::assign_to_sublist( // to this list
    CLIST_ITERATOR *start_it, // from list start
    CLIST_ITERATOR *end_it) { // from list end
  constexpr ERRCODE LIST_NOT_EMPTY("Destination list must be empty before extracting a sublist");
  // The destination must be empty: we take ownership of the extracted links
  // without freeing anything previously held.
  if (!empty()) {
    LIST_NOT_EMPTY.error("CLIST.assign_to_sublist", ABORT);
  }
  // extract_sublist() unlinks the inclusive range from the source list,
  // circularises it, and returns a pointer to its last link.
  last = start_it->extract_sublist(end_it);
}
/***********************************************************************
* CLIST::sort
*
* Sort elements on list
**********************************************************************/
/**
 * Sort the elements of this list in place.
 *
 * @param comparator qsort-style routine; it receives pointers to the data
 *                   pointers (double indirection) and returns <0/0/>0.
 */
void CLIST::sort( // sort elements
    int comparator( // comparison routine
        const void *, const void *)) {
  // Allocate an array of pointers, one per list element.
  auto count = length();
  if (count > 0) {
    // ptr array to sort
    std::vector<void *> base;
    base.reserve(count);
    CLIST_ITERATOR it(this);
    // Extract all elements, putting the pointers in the array.
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      base.push_back(it.extract());
    }
    // Sort the pointer array. std::sort is type-safe and avoids the C
    // qsort call; the comparator still receives pointers to the data
    // pointers, preserving the original double-indirection contract.
    std::sort(base.begin(), base.end(), [comparator](void *a, void *b) {
      return comparator(&a, &b) < 0;
    });
    // Rebuild the list from the sorted pointers.
    for (auto current : base) {
      it.add_to_end(current);
    }
  }
}
// Assuming list has been sorted already, insert new_data to
// keep the list sorted according to the same comparison function.
// Comparison function is the same as used by sort, i.e. uses double
// indirection. Time is O(1) to add to beginning or end.
// Time is linear to add pre-sorted items to an empty list.
// If unique, then don't add duplicate entries.
// Returns true if the element was added to the list.
// Insert new_data into an already-sorted list, keeping it sorted.
// The comparator uses double indirection (pointers to the data pointers),
// matching CLIST::sort. O(1) when appending at the end; otherwise a linear
// scan finds the insertion point. Returns true iff the element was added
// (false only when unique is set and an equal pointer is already present).
bool CLIST::add_sorted(int comparator(const void *, const void *), bool unique, void *new_data) {
  // Check for adding at the end.
  if (last == nullptr || comparator(&last->data, &new_data) < 0) {
    // Fast path: empty list, or new_data sorts after the current last.
    auto *new_element = new CLIST_LINK;
    new_element->data = new_data;
    if (last == nullptr) {
      // First element of the circular list points to itself.
      new_element->next = new_element;
    } else {
      new_element->next = last->next;
      last->next = new_element;
    }
    last = new_element;
    return true;
  } else if (!unique || last->data != new_data) {
    // Need to use an iterator.
    CLIST_ITERATOR it(this);
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      void *data = it.data();
      // Duplicate check compares the pointers themselves, not the data.
      if (data == new_data && unique) {
        return false;
      }
      // Stop at the first element that sorts after new_data.
      if (comparator(&data, &new_data) > 0) {
        break;
      }
    }
    if (it.cycled_list()) {
      it.add_to_end(new_data);
    } else {
      it.add_before_then_move(new_data);
    }
    return true;
  }
  return false;
}
// Assuming that the minuend and subtrahend are already sorted with
// the same comparison function, shallow clears this and then copies
// the set difference minuend - subtrahend to this, being the elements
// of minuend that do not compare equal to anything in subtrahend.
// If unique is true, any duplicates in minuend are also eliminated.
// Computes the set difference minuend - subtrahend into this list.
// Both inputs must already be sorted with the same comparator. This list is
// shallow-cleared first; if unique is true, duplicates in minuend are also
// eliminated. Data pointers are shared, not copied.
void CLIST::set_subtract(int comparator(const void *, const void *), bool unique, CLIST *minuend,
                         CLIST *subtrahend) {
  shallow_clear();
  CLIST_ITERATOR m_it(minuend);
  CLIST_ITERATOR s_it(subtrahend);
  // Since both lists are sorted, finding the minuend elements that are not
  // in the subtrahend is a case of a parallel iteration.
  for (m_it.mark_cycle_pt(); !m_it.cycled_list(); m_it.forward()) {
    void *minu = m_it.data();
    void *subtra = nullptr;
    if (!s_it.empty()) {
      subtra = s_it.data();
      // Advance the subtrahend cursor until it catches up with minu.
      while (!s_it.at_last() && comparator(&subtra, &minu) < 0) {
        s_it.forward();
        subtra = s_it.data();
      }
    }
    // Keep minu only if no comparator-equal element exists in subtrahend.
    if (subtra == nullptr || comparator(&subtra, &minu) != 0) {
      add_sorted(comparator, unique, minu);
    }
  }
}
/***********************************************************************
* MEMBER FUNCTIONS OF CLASS: CLIST_ITERATOR
* =========================================
**********************************************************************/
/***********************************************************************
* CLIST_ITERATOR::forward
*
* Move the iterator to the next element of the list.
* REMEMBER: ALL LISTS ARE CIRCULAR.
**********************************************************************/
// Move the iterator to the next element of the (circular) list and return
// its data, or nullptr if the list is empty.
void *CLIST_ITERATOR::forward() {
  if (list->empty()) {
    return nullptr;
  }
  if (current) { // not removed so
    // set previous
    prev = current;
    started_cycling = true;
    // In case next is deleted by another iterator, get next from current.
    current = current->next;
  } else {
    // Current was extracted: resume from the pre-recorded next element.
    if (ex_current_was_cycle_pt) {
      // The cycle point was extracted too; its successor becomes the
      // new cycle point so cycled_list() still terminates loops.
      cycle_pt = next;
    }
    current = next;
  }
  next = current->next;
  return current->data;
}
/***********************************************************************
* CLIST_ITERATOR::data_relative
*
* Return the data pointer to the element "offset" elements from current.
* "offset" must not be less than -1.
* (This function can't be INLINEd because it contains a loop)
**********************************************************************/
void *CLIST_ITERATOR::data_relative( // get data + or - ...
int8_t offset) { // offset from current
CLIST_LINK *ptr;
#ifndef NDEBUG
if (!list)
NO_LIST.error("CLIST_ITERATOR::data_relative", ABORT);
if (list->empty())
EMPTY_LIST.error("CLIST_ITERATOR::data_relative", ABORT);
if (offset < -1)
BAD_PARAMETER.error("CLIST_ITERATOR::data_relative", ABORT, "offset < -l");
#endif
if (offset == -1) {
ptr = prev;
} else {
for (ptr = current ? current : prev; offset-- > 0; ptr = ptr->next) {
;
}
}
return ptr->data;
}
/***********************************************************************
* CLIST_ITERATOR::move_to_last()
*
* Move current so that it is set to the end of the list.
* Return data just in case anyone wants it.
* (This function can't be INLINEd because it contains a loop)
**********************************************************************/
// Advance the iterator until current rests on the list's last element and
// return that element's data (nullptr for an empty list).
void *CLIST_ITERATOR::move_to_last() {
  while (current != list->last) {
    forward();
  }
  return current != nullptr ? current->data : nullptr;
}
/***********************************************************************
* CLIST_ITERATOR::exchange()
*
* Given another iterator, whose current element is a different element on
* the same list list OR an element of another list, exchange the two current
* elements. On return, each iterator points to the element which was the
* other iterators current on entry.
* (This function hasn't been in-lined because its a bit big!)
**********************************************************************/
// Swap the positions of this iterator's current link and other_it's current
// link. The links may be on the same list or on different lists; afterwards
// each iterator points at the link that was the other's current on entry.
void CLIST_ITERATOR::exchange( // positions of 2 links
    CLIST_ITERATOR *other_it) { // other iterator
  constexpr ERRCODE DONT_EXCHANGE_DELETED("Can't exchange deleted elements of lists");
  /* Do nothing if either list is empty or if both iterators reference the same
link */
  if ((list->empty()) || (other_it->list->empty()) || (current == other_it->current)) {
    return;
  }
  /* Error if either current element is deleted */
  if (!current || !other_it->current) {
    DONT_EXCHANGE_DELETED.error("CLIST_ITERATOR.exchange", ABORT);
  }
  /* Now handle the 4 cases: doubleton list; non-doubleton adjacent elements
(other before this); non-doubleton adjacent elements (this before other);
non-adjacent elements. */
  // adjacent links
  if ((next == other_it->current) || (other_it->next == current)) {
    // doubleton list
    if ((next == other_it->current) && (other_it->next == current)) {
      // Two-element circular list: only the iterators' cached prev/next
      // need updating; the links already point at each other.
      prev = next = current;
      other_it->prev = other_it->next = other_it->current;
    } else { // non-doubleton with
             // adjacent links
      // other before this
      if (other_it->next == current) {
        other_it->prev->next = current;
        other_it->current->next = next;
        current->next = other_it->current;
        other_it->next = other_it->current;
        prev = current;
      } else { // this before other
        prev->next = other_it->current;
        current->next = other_it->next;
        other_it->current->next = current;
        next = current;
        other_it->prev = other_it->current;
      }
    }
  } else { // no overlap
    // General case: splice each link into the other's old position.
    prev->next = other_it->current;
    current->next = other_it->next;
    other_it->prev->next = current;
    other_it->current->next = next;
  }
  /* update end of list pointer when necessary (remember that the 2 iterators
may iterate over different lists!) */
  if (list->last == current) {
    list->last = other_it->current;
  }
  if (other_it->list->last == other_it->current) {
    other_it->list->last = current;
  }
  if (current == cycle_pt) {
    cycle_pt = other_it->cycle_pt;
  }
  if (other_it->current == other_it->cycle_pt) {
    other_it->cycle_pt = cycle_pt;
  }
  /* The actual exchange - in all cases*/
  auto old_current = current;
  current = other_it->current;
  other_it->current = old_current;
}
/***********************************************************************
* CLIST_ITERATOR::extract_sublist()
*
* This is a private member, used only by CLIST::assign_to_sublist.
* Given another iterator for the same list, extract the links from THIS to
* OTHER inclusive, link them into a new circular list, and return a
* pointer to the last element.
* (Can't inline this function because it contains a loop)
**********************************************************************/
// Private helper used only by CLIST::assign_to_sublist.
// Unlinks the inclusive range [this->current .. other_it->current] from the
// shared list, circularises the removed links, and returns a pointer to the
// last link of the new sublist. Both iterators end up with current == null.
CLIST_LINK *CLIST_ITERATOR::extract_sublist( // from this current
    CLIST_ITERATOR *other_it) { // to other current
  CLIST_ITERATOR temp_it = *this;
  constexpr ERRCODE BAD_SUBLIST("Can't find sublist end point in original list");
#ifndef NDEBUG
  constexpr ERRCODE BAD_EXTRACTION_PTS("Can't extract sublist from points on different lists");
  constexpr ERRCODE DONT_EXTRACT_DELETED("Can't extract a sublist marked by deleted points");
  if (list != other_it->list)
    BAD_EXTRACTION_PTS.error("CLIST_ITERATOR.extract_sublist", ABORT);
  if (list->empty())
    EMPTY_LIST.error("CLIST_ITERATOR::extract_sublist", ABORT);
  if (!current || !other_it->current)
    DONT_EXTRACT_DELETED.error("CLIST_ITERATOR.extract_sublist", ABORT);
#endif
  ex_current_was_last = other_it->ex_current_was_last = false;
  ex_current_was_cycle_pt = false;
  other_it->ex_current_was_cycle_pt = false;
  // Walk from this->current to other_it->current, noting on the way whether
  // the range passes the end of the list or either iterator's cycle point.
  temp_it.mark_cycle_pt();
  do { // walk sublist
    if (temp_it.cycled_list()) { // can't find end pt
      BAD_SUBLIST.error("CLIST_ITERATOR.extract_sublist", ABORT);
    }
    if (temp_it.at_last()) {
      // Range runs past the source list's end: end the source at prev.
      list->last = prev;
      ex_current_was_last = other_it->ex_current_was_last = true;
    }
    if (temp_it.current == cycle_pt) {
      ex_current_was_cycle_pt = true;
    }
    if (temp_it.current == other_it->cycle_pt) {
      other_it->ex_current_was_cycle_pt = true;
    }
    temp_it.forward();
  } while (temp_it.prev != other_it->current);
  // circularise sublist
  other_it->current->next = current;
  auto end_of_new_list = other_it->current;
  // sublist = whole list
  if (prev == other_it->current) {
    // The extracted range was the entire list: the source becomes empty.
    list->last = nullptr;
    prev = current = next = nullptr;
    other_it->prev = other_it->current = other_it->next = nullptr;
  } else {
    // Close the gap in the source list and mark both currents extracted.
    prev->next = other_it->next;
    current = other_it->current = nullptr;
    next = other_it->next;
    other_it->prev = prev;
  }
  return end_of_new_list;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/clst.cpp
|
C++
|
apache-2.0
| 15,041
|
/**********************************************************************
* File: clst.h (Formerly clist.h)
* Description: CONS cell list module include file.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef CLST_H
#define CLST_H
#include "list.h"
#include "lsterr.h"
#include "serialis.h"
#include <cstdio>
namespace tesseract {
class CLIST_ITERATOR;
/**********************************************************************
* CLASS - CLIST_LINK
*
* Generic link class for singly linked CONS cell lists
*
* Note: No destructor - elements are assumed to be destroyed EITHER after
* they have been extracted from a list OR by the CLIST destructor which
* walks the list.
**********************************************************************/
// Generic link (cons cell) for singly linked CONS cell lists.
// A link owns no data; CLIST and CLIST_ITERATOR wire up next/data directly.
// Note: no destructor - elements are assumed to be destroyed EITHER after
// extraction from a list OR by the list-walking clear routines.
class CLIST_LINK {
  friend class CLIST_ITERATOR;
  friend class CLIST;
  CLIST_LINK *next = nullptr;
  void *data = nullptr;
public:
  // Links start empty; default member initializers do the work.
  CLIST_LINK() = default;
  // Copying a link would corrupt list structure, so it is forbidden.
  CLIST_LINK(const CLIST_LINK &) = delete;
  void operator=(const CLIST_LINK &) = delete;
};
/**********************************************************************
* CLASS - CLIST
*
* Generic list class for singly linked CONS cell lists
**********************************************************************/
// Generic list class for singly linked CONS cell lists.
// Representation: a single pointer to the LAST link of a circular list, so
// both the head (last->next) and the tail are reachable in O(1).
// nullptr means the list is empty.
class TESS_API CLIST {
  friend class CLIST_ITERATOR;
  CLIST_LINK *last = nullptr; // End of list
  //(Points to head)
  CLIST_LINK *First() { // return first
    return last != nullptr ? last->next : nullptr;
  }
  const CLIST_LINK *First() const { // return first
    return last != nullptr ? last->next : nullptr;
  }
public:
  ~CLIST() { // destructor
    shallow_clear();
  }
  void internal_deep_clear( // destroy all links
      void (*zapper)(void *)); // ptr to zapper functn
  void shallow_clear(); // clear list but don't
  // delete data elements
  bool empty() const { // is list empty?
    return !last;
  }
  bool singleton() const {
    // Exactly one element iff the last link points to itself.
    return last != nullptr ? (last == last->next) : false;
  }
  void shallow_copy( // dangerous!!
      CLIST *from_list) { // beware destructors!!
    last = from_list->last;
  }
  void assign_to_sublist( // to this list
      CLIST_ITERATOR *start_it, // from list start
      CLIST_ITERATOR *end_it); // from list end
  int32_t length() const { //# elements in list
    // O(n): walks the circular list once.
    int32_t count = 0;
    if (last != nullptr) {
      count = 1;
      for (auto it = last->next; it != last; it = it->next) {
        count++;
      }
    }
    return count;
  }
  void sort( // sort elements
      int comparator( // comparison routine
          const void *, const void *));
  // Assuming list has been sorted already, insert new_data to
  // keep the list sorted according to the same comparison function.
  // Comparison function is the same as used by sort, i.e. uses double
  // indirection. Time is O(1) to add to beginning or end.
  // Time is linear to add pre-sorted items to an empty list.
  // If unique, then don't add duplicate entries.
  // Returns true if the element was added to the list.
  bool add_sorted(int comparator(const void *, const void *), bool unique, void *new_data);
  // Assuming that the minuend and subtrahend are already sorted with
  // the same comparison function, shallow clears this and then copies
  // the set difference minuend - subtrahend to this, being the elements
  // of minuend that do not compare equal to anything in subtrahend.
  // If unique is true, any duplicates in minuend are also eliminated.
  void set_subtract(int comparator(const void *, const void *), bool unique, CLIST *minuend,
                    CLIST *subtrahend);
};
/***********************************************************************
* CLASS - CLIST_ITERATOR
*
* Generic iterator class for singly linked lists with embedded
*links
**********************************************************************/
// Generic iterator for CLIST. Caches prev/current/next links so the list can
// be mutated (elements added/extracted) while iterating. cycle_pt plus
// mark_cycle_pt()/cycled_list() support full single-pass loops over the
// circular list.
class TESS_API CLIST_ITERATOR {
  friend void CLIST::assign_to_sublist(CLIST_ITERATOR *, CLIST_ITERATOR *);
  CLIST *list; // List being iterated
  CLIST_LINK *prev; // prev element
  CLIST_LINK *current; // current element
  CLIST_LINK *next; // next element
  CLIST_LINK *cycle_pt; // point we are cycling the list to.
  bool ex_current_was_last; // current extracted was end of list
  bool ex_current_was_cycle_pt; // current extracted was cycle point
  bool started_cycling; // Have we moved off the start?
  CLIST_LINK *extract_sublist( // from this current...
      CLIST_ITERATOR *other_it); // to other current
public:
  CLIST_ITERATOR() { // constructor
    list = nullptr;
  } // unassigned list
  CLIST_ITERATOR( // constructor
      CLIST *list_to_iterate);
  void set_to_list( // change list
      CLIST *list_to_iterate);
  void add_after_then_move( // add after current &
      void *new_data); // move to new
  void add_after_stay_put( // add after current &
      void *new_data); // stay at current
  void add_before_then_move( // add before current &
      void *new_data); // move to new
  void add_before_stay_put( // add before current &
      void *new_data); // stay at current
  void add_list_after( // add a list &
      CLIST *list_to_add); // stay at current
  void add_list_before( // add a list &
      CLIST *list_to_add); // move to it 1st item
  void *data() { // get current data
#ifndef NDEBUG
    if (!list) {
      NO_LIST.error("CLIST_ITERATOR::data", ABORT);
    }
#endif
    return current->data;
  }
  void *data_relative( // get data + or - ...
      int8_t offset); // offset from current
  void *forward(); // move to next element
  void *extract(); // remove from list
  void *move_to_first(); // go to start of list
  void *move_to_last(); // go to end of list
  void mark_cycle_pt(); // remember current
  bool empty() const { // is list empty?
    return list->empty();
  }
  bool current_extracted() const { // current extracted?
    return !current;
  }
  bool at_first() const; // Current is first?
  bool at_last() const; // Current is last?
  bool cycled_list() const; // Completed a cycle?
  void add_to_end( // add at end &
      void *new_data); // don't move
  void exchange( // positions of 2 links
      CLIST_ITERATOR *other_it); // other iterator
  int32_t length() const; //# elements in list
  void sort( // sort elements
      int comparator( // comparison routine
          const void *, const void *));
};
/***********************************************************************
* CLIST_ITERATOR::set_to_list
*
* (Re-)initialise the iterator to point to the start of the list_to_iterate
* over.
**********************************************************************/
// (Re-)initialise the iterator to the start of list_to_iterate.
inline void CLIST_ITERATOR::set_to_list( // change list
    CLIST *list_to_iterate) {
  // Attach to the new list and position on its first element.
  list = list_to_iterate;
  current = list->First();
  prev = list->last;
  next = (current == nullptr) ? nullptr : current->next;
  // No cycle point until mark_cycle_pt() is called explicitly.
  cycle_pt = nullptr;
  started_cycling = false;
  ex_current_was_last = false;
  ex_current_was_cycle_pt = false;
}
/***********************************************************************
* CLIST_ITERATOR::CLIST_ITERATOR
*
* CONSTRUCTOR - set iterator to specified list;
**********************************************************************/
inline CLIST_ITERATOR::CLIST_ITERATOR(CLIST *list_to_iterate) {
  // All initialisation is delegated to set_to_list().
  set_to_list(list_to_iterate);
}
/***********************************************************************
* CLIST_ITERATOR::add_after_then_move
*
* Add a new element to the list after the current element and move the
* iterator to the new element.
**********************************************************************/
// Add new_data after the current element and move the iterator to it.
inline void CLIST_ITERATOR::add_after_then_move( // element to add
    void *new_data) {
#ifndef NDEBUG
  if (!new_data) {
    BAD_PARAMETER.error("CLIST_ITERATOR::add_after_then_move", ABORT, "new_data is nullptr");
  }
#endif
  auto new_element = new CLIST_LINK;
  new_element->data = new_data;
  if (list->empty()) {
    // Sole element of a circular list points to itself.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
  } else {
    new_element->next = next;
    if (current) { // not extracted
      current->next = new_element;
      prev = current;
      if (current == list->last) {
        // Appended past the tail: the new element becomes the tail.
        list->last = new_element;
      }
    } else { // current extracted
      prev->next = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
      }
      if (ex_current_was_cycle_pt) {
        // Extracted cycle point is replaced by the new element.
        cycle_pt = new_element;
      }
    }
  }
  current = new_element;
}
/***********************************************************************
* CLIST_ITERATOR::add_after_stay_put
*
* Add a new element to the list after the current element but do not move
* the iterator to the new element.
**********************************************************************/
// Add new_data after the current element WITHOUT moving the iterator.
inline void CLIST_ITERATOR::add_after_stay_put( // element to add
    void *new_data) {
#ifndef NDEBUG
  if (!new_data) {
    BAD_PARAMETER.error("CLIST_ITERATOR::add_after_stay_put", ABORT, "new_data is nullptr");
  }
#endif
  auto new_element = new CLIST_LINK;
  new_element->data = new_data;
  if (list->empty()) {
    // Sole element of a circular list points to itself; current stays
    // null so a subsequent forward() lands on the new element.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
    ex_current_was_last = false;
    current = nullptr;
  } else {
    new_element->next = next;
    if (current) { // not extracted
      current->next = new_element;
      if (prev == current) {
        // Singleton list: prev must now be the new element.
        prev = new_element;
      }
      if (current == list->last) {
        list->last = new_element;
      }
    } else { // current extracted
      prev->next = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
        ex_current_was_last = false;
      }
    }
    next = new_element;
  }
}
/***********************************************************************
* CLIST_ITERATOR::add_before_then_move
*
* Add a new element to the list before the current element and move the
* iterator to the new element.
**********************************************************************/
// Add new_data before the current element and move the iterator to it.
inline void CLIST_ITERATOR::add_before_then_move( // element to add
    void *new_data) {
#ifndef NDEBUG
  if (!new_data) {
    BAD_PARAMETER.error("CLIST_ITERATOR::add_before_then_move", ABORT, "new_data is nullptr");
  }
#endif
  auto new_element = new CLIST_LINK;
  new_element->data = new_data;
  if (list->empty()) {
    // Sole element of a circular list points to itself.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
  } else {
    prev->next = new_element;
    if (current) { // not extracted
      new_element->next = current;
      next = current;
    } else { // current extracted
      new_element->next = next;
      if (ex_current_was_last) {
        list->last = new_element;
      }
      if (ex_current_was_cycle_pt) {
        cycle_pt = new_element;
      }
    }
  }
  current = new_element;
}
/***********************************************************************
* CLIST_ITERATOR::add_before_stay_put
*
* Add a new element to the list before the current element but don't move the
* iterator to the new element.
**********************************************************************/
// Add new_data before the current element WITHOUT moving the iterator.
inline void CLIST_ITERATOR::add_before_stay_put( // element to add
    void *new_data) {
#ifndef NDEBUG
  if (!new_data) {
    BAD_PARAMETER.error("CLIST_ITERATOR::add_before_stay_put", ABORT, "new_data is nullptr");
  }
#endif
  auto new_element = new CLIST_LINK;
  new_element->data = new_data;
  if (list->empty()) {
    // Sole element; current stays null and is treated as extracted-at-end
    // so "before" semantics hold when the iterator next moves.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
    ex_current_was_last = true;
    current = nullptr;
  } else {
    prev->next = new_element;
    if (current) { // not extracted
      new_element->next = current;
      if (next == current) {
        // Singleton list: next must now be the new element.
        next = new_element;
      }
    } else { // current extracted
      new_element->next = next;
      if (ex_current_was_last) {
        list->last = new_element;
      }
    }
    prev = new_element;
  }
}
/***********************************************************************
* CLIST_ITERATOR::add_list_after
*
* Insert another list to this list after the current element but don't move
*the
* iterator.
**********************************************************************/
// Splice all of list_to_add into this list after the current element,
// without moving the iterator. list_to_add is left empty (its links are
// taken over, not copied).
inline void CLIST_ITERATOR::add_list_after(CLIST *list_to_add) {
  if (!list_to_add->empty()) {
    if (list->empty()) {
      // Destination empty: simply adopt the other list wholesale.
      list->last = list_to_add->last;
      prev = list->last;
      next = list->First();
      ex_current_was_last = true;
      current = nullptr;
    } else {
      if (current) { // not extracted
        current->next = list_to_add->First();
        if (current == list->last) {
          list->last = list_to_add->last;
        }
        list_to_add->last->next = next;
        next = current->next;
      } else { // current extracted
        prev->next = list_to_add->First();
        if (ex_current_was_last) {
          list->last = list_to_add->last;
          ex_current_was_last = false;
        }
        list_to_add->last->next = next;
        next = prev->next;
      }
    }
    // The source list no longer owns any links.
    list_to_add->last = nullptr;
  }
}
/***********************************************************************
* CLIST_ITERATOR::add_list_before
*
* Insert another list to this list before the current element. Move the
* iterator to the start of the inserted elements
* iterator.
**********************************************************************/
// Splice all of list_to_add into this list before the current element and
// move the iterator to the first inserted element. list_to_add is left
// empty (its links are taken over, not copied).
inline void CLIST_ITERATOR::add_list_before(CLIST *list_to_add) {
  if (!list_to_add->empty()) {
    if (list->empty()) {
      // Destination empty: adopt the other list and stand on its head.
      list->last = list_to_add->last;
      prev = list->last;
      current = list->First();
      next = current->next;
      ex_current_was_last = false;
    } else {
      prev->next = list_to_add->First();
      if (current) { // not extracted
        list_to_add->last->next = current;
      } else { // current extracted
        list_to_add->last->next = next;
        if (ex_current_was_last) {
          list->last = list_to_add->last;
        }
        if (ex_current_was_cycle_pt) {
          cycle_pt = prev->next;
        }
      }
      // Move onto the first of the inserted elements.
      current = prev->next;
      next = current->next;
    }
    // The source list no longer owns any links.
    list_to_add->last = nullptr;
  }
}
/***********************************************************************
* CLIST_ITERATOR::extract
*
* Do extraction by removing current from the list, deleting the cons cell
* and returning the data to the caller, but NOT updating the iterator. (So
* that any calling loop can do this.) The iterator's current points to
* nullptr. If the data is to be deleted, this is the callers responsibility.
**********************************************************************/
// Remove the current element from the list, delete its cons cell, and
// return the data pointer. The iterator is NOT advanced (current becomes
// null); deleting the data itself is the caller's responsibility.
inline void *CLIST_ITERATOR::extract() {
#ifndef NDEBUG
  if (!current) { // list empty or
                  // element extracted
    NULL_CURRENT.error("CLIST_ITERATOR::extract", ABORT);
  }
#endif
  if (list->singleton()) {
    // Special case where we do need to change the iterator.
    prev = next = list->last = nullptr;
  } else {
    prev->next = next; // remove from list
    if (current == list->last) {
      // Removing the tail: the previous element becomes the new tail.
      list->last = prev;
      ex_current_was_last = true;
    } else {
      ex_current_was_last = false;
    }
  }
  // Always set ex_current_was_cycle_pt so an add/forward will work in a loop.
  ex_current_was_cycle_pt = (current == cycle_pt);
  auto extracted_data = current->data;
  delete (current); // destroy CONS cell
  current = nullptr;
  return extracted_data;
}
/***********************************************************************
* CLIST_ITERATOR::move_to_first()
*
* Move current so that it is set to the start of the list.
* Return data just in case anyone wants it.
**********************************************************************/
// Reposition the iterator at the head of the list and return the head
// element's data (nullptr if the list is empty).
inline void *CLIST_ITERATOR::move_to_first() {
  prev = list->last;
  current = list->First();
  if (current == nullptr) {
    next = nullptr;
    return nullptr;
  }
  next = current->next;
  return current->data;
}
/***********************************************************************
* CLIST_ITERATOR::mark_cycle_pt()
*
* Remember the current location so that we can tell whether we've returned
* to this point later.
*
* If the current point is deleted either now, or in the future, the cycle
* point will be set to the next item which is set to current. This could be
 * by a forward, add_after_then_move or add_before_then_move.
**********************************************************************/
// Record the current position as the point cycling started from, so that
// cycled_list() can detect a completed pass. If current has been extracted,
// flag the element that next becomes current as the future cycle point.
inline void CLIST_ITERATOR::mark_cycle_pt() {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("CLIST_ITERATOR::mark_cycle_pt", ABORT);
  }
#endif
  if (current) {
    cycle_pt = current;
  } else {
    ex_current_was_cycle_pt = true;
  }
  started_cycling = false;
}
/***********************************************************************
* CLIST_ITERATOR::at_first()
*
* Are we at the start of the list?
*
**********************************************************************/
// True if the iterator is positioned at the head of the list. An extracted
// (deleted) current between the first and last element also counts, provided
// the removed element was not the list end.
inline bool CLIST_ITERATOR::at_first() const {
  if (list->empty() || current == list->First()) {
    return true;
  }
  return current == nullptr && prev == list->last && !ex_current_was_last;
}
/***********************************************************************
* CLIST_ITERATOR::at_last()
*
* Are we at the end of the list?
*
**********************************************************************/
// True if the iterator is positioned at the end of the list. An extracted
// (deleted) current also counts when the removed element WAS the list end.
inline bool CLIST_ITERATOR::at_last() const {
  if (list->empty() || current == list->last) {
    return true;
  }
  return current == nullptr && prev == list->last && ex_current_was_last;
}
/***********************************************************************
* CLIST_ITERATOR::cycled_list()
*
* Have we returned to the cycle_pt since it was set?
*
**********************************************************************/
// Have we been all the way round the list since mark_cycle_pt() was called?
// An empty list is trivially "cycled".
inline bool CLIST_ITERATOR::cycled_list() const {
  if (list->empty()) {
    return true;
  }
  return started_cycling && current == cycle_pt;
}
/***********************************************************************
* CLIST_ITERATOR::length()
*
* Return the length of the list
*
**********************************************************************/
// Number of elements on the iterated list (delegates to the list itself).
inline int32_t CLIST_ITERATOR::length() const {
  return list->length();
}
/***********************************************************************
* CLIST_ITERATOR::sort()
*
* Sort the elements of the list, then reposition at the start.
*
**********************************************************************/
// Sort the iterated list with qsort() semantics (the comparator receives
// pointers to the element pointers), then reposition at the start.
inline void CLIST_ITERATOR::sort( // sort elements
    int comparator( // comparison routine
        const void *, const void *)) {
  list->sort(comparator);
  move_to_first();
}
/***********************************************************************
* CLIST_ITERATOR::add_to_end
*
* Add a new element to the end of the list without moving the iterator.
* This is provided because a single linked list cannot move to the last as
* the iterator couldn't set its prev pointer. Adding to the end is
 * essential for implementing queues.
**********************************************************************/
// Append new_data at the end of the list WITHOUT moving the iterator.
// Needed because a singly linked list cannot move_to_last() and keep a
// valid prev pointer; essential for implementing queues.
inline void CLIST_ITERATOR::add_to_end( // element to add
    void *new_data) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("CLIST_ITERATOR::add_to_end", ABORT);
  }
  if (!new_data) {
    BAD_PARAMETER.error("CLIST_ITERATOR::add_to_end", ABORT, "new_data is nullptr");
  }
#endif
  if (this->at_last()) {
    this->add_after_stay_put(new_data);
  } else {
    if (this->at_first()) {
      this->add_before_stay_put(new_data);
      // The new head is also the new end only from the list's perspective:
      // adjust last to the element just inserted before the old head.
      list->last = prev;
    } else { // Iterator is elsewhere
      auto new_element = new CLIST_LINK;
      new_element->data = new_data;
      new_element->next = list->last->next;
      list->last->next = new_element;
      list->last = new_element;
    }
  }
}
// Typed convenience wrapper over CLIST: knows the element type so that the
// stored void pointers can be deleted with the correct destructor.
template <typename CLASSNAME>
class X_CLIST : public CLIST {
public:
  X_CLIST() = default;
  X_CLIST(const X_CLIST &) = delete;
  X_CLIST &operator=(const X_CLIST &) = delete;

  // Delete every object held by the list, then empty the list itself.
  void deep_clear() {
    internal_deep_clear([](void *link) {
      delete static_cast<CLASSNAME *>(link);
    });
  }
};
// Generate the concrete list and iterator types for CLASSNAME:
// CLASSNAME_CLIST is a cons list holding CLASSNAME* elements, and
// CLASSNAME_C_IT is the matching iterator whose accessors deal in
// CLASSNAME* directly (no casting at the call sites).
#define CLISTIZEH(CLASSNAME) \
  class CLASSNAME##_CLIST : public X_CLIST<CLASSNAME> { \
    using X_CLIST<CLASSNAME>::X_CLIST; \
  }; \
  struct CLASSNAME##_C_IT : X_ITER<CLIST_ITERATOR, CLASSNAME> { \
    using X_ITER<CLIST_ITERATOR, CLASSNAME>::X_ITER; \
  };
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccutil/clst.h
|
C++
|
apache-2.0
| 21,843
|
/**********************************************************************
* File: elst.cpp (Formerly elist.c)
* Description: Embedded list handling code which is not in the include file.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "elst.h"
#include <cstdlib>
namespace tesseract {
/***********************************************************************
* ELIST::internal_clear
*
* Used by the destructor and the "clear" member function of derived list
* classes to destroy all the elements on the list.
* The calling function passes a "zapper" function which can be called to
* delete each element of the list, regardless of its derived type. This
* technique permits a generic clear function to destroy elements of
* different derived types correctly, without requiring virtual functions and
* the consequential memory overhead.
**********************************************************************/
void ELIST::internal_clear( // destroy all links
void (*zapper)(void *)) {
// ptr to zapper functn
ELIST_LINK *ptr;
ELIST_LINK *next;
if (!empty()) {
ptr = last->next; // set to first
last->next = nullptr; // break circle
last = nullptr; // set list empty
while (ptr) {
next = ptr->next;
zapper(ptr);
ptr = next;
}
}
}
/***********************************************************************
* ELIST::assign_to_sublist
*
* The list is set to a sublist of another list. "This" list must be empty
* before this function is invoked. The two iterators passed must refer to
* the same list, different from "this" one. The sublist removed is the
* inclusive list from start_it's current position to end_it's current
* position. If this range passes over the end of the source list then the
* source list has its end set to the previous element of start_it. The
* extracted sublist is unaffected by the end point of the source list, its
* end point is always the end_it position.
**********************************************************************/
// Make this (empty) list the inclusive sublist from start_it's position to
// end_it's position. Both iterators must refer to the same, different list.
void ELIST::assign_to_sublist( // to this list
    ELIST_ITERATOR *start_it, // from list start
    ELIST_ITERATOR *end_it) { // from list end
  constexpr ERRCODE LIST_NOT_EMPTY("Destination list must be empty before extracting a sublist");
  if (!empty()) {
    LIST_NOT_EMPTY.error("ELIST.assign_to_sublist", ABORT);
  }
  // extract_sublist returns the last link of the new circular chain.
  last = start_it->extract_sublist(end_it);
}
/***********************************************************************
* ELIST::sort
*
* Sort elements on list
* NB If you don't like the const declarations in the comparator, coerce yours:
* ( int (*)(const void *, const void *)
**********************************************************************/
// Sort the elements of the list. The comparator uses qsort() double
// indirection: it receives pointers to ELIST_LINK* values.
void ELIST::sort( // sort elements
    int comparator( // comparison routine
        const void *, const void *)) {
  // Allocate an array of pointers, one per list element.
  auto count = length();
  if (count > 0) {
    // ptr array to sort
    std::vector<ELIST_LINK *> base;
    base.reserve(count);
    ELIST_ITERATOR it(this);
    // Extract all elements, putting the pointers in the array.
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      base.push_back(it.extract());
    }
    // Sort the pointer array.
    qsort(&base[0], count, sizeof(base[0]), comparator);
    // Rebuild the list from the sorted pointers.
    for (auto current : base) {
      it.add_to_end(current);
    }
  }
}
// Assuming list has been sorted already, insert new_link to
// keep the list sorted according to the same comparison function.
// Comparison function is the same as used by sort, i.e. uses double
// indirection. Time is O(1) to add to beginning or end.
// Time is linear to add pre-sorted items to an empty list.
// If unique is set to true and comparator() returns 0 (an entry with the
// same information as the one contained in new_link is already in the
// list) - new_link is not added to the list and the function returns the
// pointer to the identical entry that already exists in the list
// (otherwise the function returns new_link).
// Insert new_link into an already-sorted list, keeping it sorted under the
// same comparator used by sort() (double indirection: the comparator gets
// pointers to the ELIST_LINK* values, hence the &link / &new_link below).
// O(1) when appending at the end; otherwise a linear scan. If unique is
// true and an equal entry exists, new_link is NOT inserted and the existing
// entry is returned; in every other case new_link is returned.
ELIST_LINK *ELIST::add_sorted_and_find(int comparator(const void *, const void *), bool unique,
                                       ELIST_LINK *new_link) {
  // Check for adding at the end.
  if (last == nullptr || comparator(&last, &new_link) < 0) {
    if (last == nullptr) {
      // Empty list: new_link becomes a singleton circle.
      new_link->next = new_link;
    } else {
      new_link->next = last->next;
      last->next = new_link;
    }
    last = new_link;
  } else {
    // Need to use an iterator.
    ELIST_ITERATOR it(this);
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      ELIST_LINK *link = it.data();
      int compare = comparator(&link, &new_link);
      if (compare > 0) {
        break;
      } else if (unique && compare == 0) {
        // Identical entry already present: do not insert.
        return link;
      }
    }
    if (it.cycled_list()) {
      it.add_to_end(new_link);
    } else {
      it.add_before_then_move(new_link);
    }
  }
  return new_link;
}
/***********************************************************************
* MEMBER FUNCTIONS OF CLASS: ELIST_ITERATOR
* =========================================
**********************************************************************/
/***********************************************************************
* ELIST_ITERATOR::forward
*
* Move the iterator to the next element of the list.
* REMEMBER: ALL LISTS ARE CIRCULAR.
**********************************************************************/
// Advance the iterator to the next element of the (circular) list and return
// it. If current was extracted, step onto the recorded next element instead,
// restoring the cycle point there when the extracted element carried it.
ELIST_LINK *ELIST_ITERATOR::forward() {
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST_ITERATOR::forward", ABORT);
#endif
  if (list->empty()) {
    return nullptr;
  }
  if (current) { // not removed so
    // set previous
    prev = current;
    started_cycling = true;
    // In case next is deleted by another iterator, get next from current.
    current = current->next;
  } else {
    if (ex_current_was_cycle_pt) {
      cycle_pt = next;
    }
    current = next;
  }
#ifndef NDEBUG
  if (!current)
    NULL_DATA.error("ELIST_ITERATOR::forward", ABORT);
#endif
  next = current->next;
#ifndef NDEBUG
  if (!next) {
    NULL_NEXT.error("ELIST_ITERATOR::forward", ABORT,
                    "This is: %p Current is: %p",
                    static_cast<void *>(this),
                    static_cast<void *>(current));
  }
#endif
  return current;
}
/***********************************************************************
* ELIST_ITERATOR::data_relative
*
* Return the data pointer to the element "offset" elements from current.
* "offset" must not be less than -1.
* (This function can't be INLINEd because it contains a loop)
**********************************************************************/
// Return the element "offset" links away from current. offset must be >= -1:
// -1 yields prev (the only backward step a singly linked list supports);
// 0 yields current (or prev when current has been extracted); positive
// values walk forward around the circular list.
ELIST_LINK *ELIST_ITERATOR::data_relative( // get data + or - ...
    int8_t offset) { // offset from current
  ELIST_LINK *ptr;
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST_ITERATOR::data_relative", ABORT);
  if (list->empty())
    EMPTY_LIST.error("ELIST_ITERATOR::data_relative", ABORT);
  if (offset < -1)
    // Fixed message: previously read "offset < -l" (letter l, not digit 1).
    BAD_PARAMETER.error("ELIST_ITERATOR::data_relative", ABORT, "offset < -1");
#endif
  if (offset == -1) {
    ptr = prev;
  } else {
    // Walk forward `offset` links; start from prev when current was
    // extracted so that offset 0 refers to the element before the gap.
    for (ptr = current ? current : prev; offset-- > 0; ptr = ptr->next) {
      ;
    }
  }
#ifndef NDEBUG
  if (!ptr)
    NULL_DATA.error("ELIST_ITERATOR::data_relative", ABORT);
#endif
  return ptr;
}
/***********************************************************************
* ELIST_ITERATOR::move_to_last()
*
* Move current so that it is set to the end of the list.
* Return data just in case anyone wants it.
* (This function can't be INLINEd because it contains a loop)
**********************************************************************/
// Advance the iterator until current is the last element, and return it.
// Walks forward because a singly linked list offers no direct jump that
// would also leave prev correct.
ELIST_LINK *ELIST_ITERATOR::move_to_last() {
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST_ITERATOR::move_to_last", ABORT);
#endif
  while (current != list->last) {
    forward();
  }
  return current;
}
/***********************************************************************
* ELIST_ITERATOR::exchange()
*
* Given another iterator, whose current element is a different element on
* the same list list OR an element of another list, exchange the two current
* elements. On return, each iterator points to the element which was the
* other iterators current on entry.
* (This function hasn't been in-lined because its a bit big!)
**********************************************************************/
// Swap the two iterators' current elements in place on their lists (which
// may be the same list or two different lists). On return each iterator
// points at the element that was the other's current on entry.
void ELIST_ITERATOR::exchange( // positions of 2 links
    ELIST_ITERATOR *other_it) { // other iterator
  constexpr ERRCODE DONT_EXCHANGE_DELETED("Can't exchange deleted elements of lists");
  ELIST_LINK *old_current;
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST_ITERATOR::exchange", ABORT);
  if (!other_it)
    BAD_PARAMETER.error("ELIST_ITERATOR::exchange", ABORT, "other_it nullptr");
  if (!(other_it->list))
    NO_LIST.error("ELIST_ITERATOR::exchange", ABORT, "other_it");
#endif
  /* Do nothing if either list is empty or if both iterators reference the same
     link */
  if ((list->empty()) || (other_it->list->empty()) || (current == other_it->current)) {
    return;
  }
  /* Error if either current element is deleted */
  if (!current || !other_it->current) {
    DONT_EXCHANGE_DELETED.error("ELIST_ITERATOR.exchange", ABORT);
  }
  /* Now handle the 4 cases: doubleton list; non-doubleton adjacent elements
     (other before this); non-doubleton adjacent elements (this before other);
     non-adjacent elements. */
  // adjacent links
  if ((next == other_it->current) || (other_it->next == current)) {
    // doubleton list
    if ((next == other_it->current) && (other_it->next == current)) {
      // Two-element list: the elements just trade roles.
      prev = next = current;
      other_it->prev = other_it->next = other_it->current;
    } else { // non-doubleton with
      // adjacent links
      // other before this
      if (other_it->next == current) {
        other_it->prev->next = current;
        other_it->current->next = next;
        current->next = other_it->current;
        other_it->next = other_it->current;
        prev = current;
      } else { // this before other
        prev->next = other_it->current;
        current->next = other_it->next;
        other_it->current->next = current;
        next = current;
        other_it->prev = other_it->current;
      }
    }
  } else { // no overlap
    prev->next = other_it->current;
    current->next = other_it->next;
    other_it->prev->next = current;
    other_it->current->next = next;
  }
  /* update end of list pointer when necessary (remember that the 2 iterators
     may iterate over different lists!) */
  if (list->last == current) {
    list->last = other_it->current;
  }
  if (other_it->list->last == other_it->current) {
    other_it->list->last = current;
  }
  // NOTE(review): if BOTH iterators sat on their cycle points, cycle_pt has
  // already been overwritten by the first assignment below, so both end up
  // with the same value — confirm this is the intended semantics.
  if (current == cycle_pt) {
    cycle_pt = other_it->cycle_pt;
  }
  if (other_it->current == other_it->cycle_pt) {
    other_it->cycle_pt = cycle_pt;
  }
  /* The actual exchange - in all cases*/
  old_current = current;
  current = other_it->current;
  other_it->current = old_current;
}
/***********************************************************************
* ELIST_ITERATOR::extract_sublist()
*
* This is a private member, used only by ELIST::assign_to_sublist.
* Given another iterator for the same list, extract the links from THIS to
* OTHER inclusive, link them into a new circular list, and return a
* pointer to the last element.
* (Can't inline this function because it contains a loop)
**********************************************************************/
// Private helper for ELIST::assign_to_sublist: unlink the inclusive run of
// elements from this iterator's current to other_it's current, close them
// into their own circular list, and return the last element of that list.
// Both iterators must refer to the same source list.
ELIST_LINK *ELIST_ITERATOR::extract_sublist( // from this current
    ELIST_ITERATOR *other_it) { // to other current
#ifndef NDEBUG
  constexpr ERRCODE BAD_EXTRACTION_PTS("Can't extract sublist from points on different lists");
  constexpr ERRCODE DONT_EXTRACT_DELETED("Can't extract a sublist marked by deleted points");
#endif
  constexpr ERRCODE BAD_SUBLIST("Can't find sublist end point in original list");
  ELIST_ITERATOR temp_it = *this;
  ELIST_LINK *end_of_new_list;
#ifndef NDEBUG
  if (!other_it)
    BAD_PARAMETER.error("ELIST_ITERATOR::extract_sublist", ABORT, "other_it nullptr");
  if (!list)
    NO_LIST.error("ELIST_ITERATOR::extract_sublist", ABORT);
  if (list != other_it->list)
    BAD_EXTRACTION_PTS.error("ELIST_ITERATOR.extract_sublist", ABORT);
  if (list->empty())
    EMPTY_LIST.error("ELIST_ITERATOR::extract_sublist", ABORT);
  if (!current || !other_it->current)
    DONT_EXTRACT_DELETED.error("ELIST_ITERATOR.extract_sublist", ABORT);
#endif
  ex_current_was_last = other_it->ex_current_was_last = false;
  ex_current_was_cycle_pt = false;
  other_it->ex_current_was_cycle_pt = false;
  temp_it.mark_cycle_pt();
  do { // walk sublist
    if (temp_it.cycled_list()) { // can't find end pt
      BAD_SUBLIST.error("ELIST_ITERATOR.extract_sublist", ABORT);
    }
    if (temp_it.at_last()) {
      // Sublist wraps past the source list end: truncate the source there.
      list->last = prev;
      ex_current_was_last = other_it->ex_current_was_last = true;
    }
    if (temp_it.current == cycle_pt) {
      ex_current_was_cycle_pt = true;
    }
    if (temp_it.current == other_it->cycle_pt) {
      other_it->ex_current_was_cycle_pt = true;
    }
    temp_it.forward();
  } while (temp_it.prev != other_it->current);
  // circularise sublist
  other_it->current->next = current;
  end_of_new_list = other_it->current;
  // sublist = whole list
  if (prev == other_it->current) {
    list->last = nullptr;
    prev = current = next = nullptr;
    other_it->prev = other_it->current = other_it->next = nullptr;
  } else {
    prev->next = other_it->next;
    // Both iterators are left with an extracted (null) current.
    current = other_it->current = nullptr;
    next = other_it->next;
    other_it->prev = prev;
  }
  return end_of_new_list;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/elst.cpp
|
C++
|
apache-2.0
| 14,649
|
/**********************************************************************
* File: elst.h (Formerly elist.h)
* Description: Embedded list module include file.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef ELST_H
#define ELST_H
#include "list.h"
#include "lsterr.h"
#include "serialis.h"
#include <cstdio>
namespace tesseract {
class ELIST_ITERATOR;
/**********************************************************************
This module implements list classes and iterators.
The following list types and iterators are provided:
List type List Class Iterator Class Element Class
--------- ---------- -------------- -------------
Embedded list ELIST
ELIST_ITERATOR
ELIST_LINK
(Single linked)
Embedded list ELIST2
ELIST2_ITERATOR
ELIST2_LINK
(Double linked)
Cons List CLIST
CLIST_ITERATOR
CLIST_LINK
(Single linked)
An embedded list is where the list pointers are provided by a generic class.
Data types to be listed inherit from the generic class. Data is thus linked
in only ONE list at any one time.
A cons list has a separate structure for a "cons cell". This contains the
list pointer(s) AND a pointer to the data structure held on the list. A
structure can be on many cons lists at the same time, and the structure does
not need to inherit from any generic class in order to be on the list.
The implementation of lists is very careful about space and speed overheads.
This is why many embedded lists are provided. The same concerns mean that
in-line type coercion is done, rather than use virtual functions. This is
cumbersome in that each data type to be listed requires its own iterator and
list class - though macros can generate these. It also prevents heterogeneous
lists.
**********************************************************************/
/**********************************************************************
* CLASS - ELIST_LINK
*
* Generic link class for singly linked lists with
*embedded links
*
* Note: No destructor - elements are assumed to be destroyed EITHER after
* they have been extracted from a list OR by the ELIST destructor which
* walks the list.
**********************************************************************/
// Base class for elements of a singly linked embedded list. Carries only the
// link pointer; ELIST and ELIST_ITERATOR are friends so list plumbing stays
// out of the public interface.
class ELIST_LINK {
  friend class ELIST_ITERATOR;
  friend class ELIST;

  ELIST_LINK *next = nullptr; // successor on whichever list holds this element

public:
  ELIST_LINK() = default;

  // Copying an element must NOT copy its list linkage: the copy starts out
  // unlinked, exactly like a freshly constructed element. Lots of classes
  // rely on this special copy behavior.
  ELIST_LINK(const ELIST_LINK &) {}

  // Assignment likewise resets (rather than copies) the link pointer.
  void operator=(const ELIST_LINK &) {
    next = nullptr;
  }
};
/**********************************************************************
* CLASS - ELIST
*
* Generic list class for singly linked lists with embedded links
**********************************************************************/
class TESS_API ELIST {
friend class ELIST_ITERATOR;
ELIST_LINK *last = nullptr; // End of list
//(Points to head)
ELIST_LINK *First() { // return first
return last ? last->next : nullptr;
}
public:
// destroy all links
void internal_clear(void (*zapper)(void *));
bool empty() const {
return !last;
}
bool singleton() const {
return last ? (last == last->next) : false;
}
void shallow_copy( // dangerous!!
ELIST *from_list) { // beware destructors!!
last = from_list->last;
}
// ptr to copier functn
void internal_deep_copy(ELIST_LINK *(*copier)(ELIST_LINK *),
const ELIST *list); // list being copied
void assign_to_sublist( // to this list
ELIST_ITERATOR *start_it, // from list start
ELIST_ITERATOR *end_it); // from list end
// # elements in list
int32_t length() const {
int32_t count = 0;
if (last != nullptr) {
count = 1;
for (auto it = last->next; it != last; it = it->next) {
count++;
}
}
return count;
}
void sort( // sort elements
int comparator( // comparison routine
const void *, const void *));
// Assuming list has been sorted already, insert new_link to
// keep the list sorted according to the same comparison function.
// Comparison function is the same as used by sort, i.e. uses double
// indirection. Time is O(1) to add to beginning or end.
// Time is linear to add pre-sorted items to an empty list.
// If unique is set to true and comparator() returns 0 (an entry with the
// same information as the one contained in new_link is already in the
// list) - new_link is not added to the list and the function returns the
// pointer to the identical entry that already exists in the list
// (otherwise the function returns new_link).
ELIST_LINK *add_sorted_and_find(int comparator(const void *, const void *), bool unique,
ELIST_LINK *new_link);
// Same as above, but returns true if the new entry was inserted, false
// if the identical entry already existed in the list.
bool add_sorted(int comparator(const void *, const void *), bool unique, ELIST_LINK *new_link) {
return (add_sorted_and_find(comparator, unique, new_link) == new_link);
}
};
/***********************************************************************
* CLASS - ELIST_ITERATOR
*
* Generic iterator class for singly linked lists with
*embedded links
**********************************************************************/
// Generic iterator for singly linked lists with embedded links. Tracks
// prev/current/next so an element can be extracted mid-iteration, and a
// cycle point so one full pass over the circular list can be detected.
class TESS_API ELIST_ITERATOR {
  friend void ELIST::assign_to_sublist(ELIST_ITERATOR *, ELIST_ITERATOR *);

  ELIST *list;                  // List being iterated
  ELIST_LINK *prev;             // prev element
  ELIST_LINK *current;          // current element (nullptr when extracted)
  ELIST_LINK *next;             // next element
  ELIST_LINK *cycle_pt;         // point we are cycling the list to.
  bool ex_current_was_last;     // current extracted was end of list
  bool ex_current_was_cycle_pt; // current extracted was cycle point
  bool started_cycling;         // Have we moved off the start?

  // Unlink the run from this current to other_it's current (inclusive) and
  // return it as a new circular chain (pointer to its last element).
  ELIST_LINK *extract_sublist( // from this current...
      ELIST_ITERATOR *other_it); // to other current

public:
  ELIST_ITERATOR() { // constructor
    list = nullptr;
  } // unassigned list

  explicit ELIST_ITERATOR(ELIST *list_to_iterate);

  void set_to_list( // change list
      ELIST *list_to_iterate);
  void add_after_then_move( // add after current &
      ELIST_LINK *new_link); // move to new
  void add_after_stay_put( // add after current &
      ELIST_LINK *new_link); // stay at current
  void add_before_then_move( // add before current &
      ELIST_LINK *new_link); // move to new
  void add_before_stay_put( // add before current &
      ELIST_LINK *new_link); // stay at current
  void add_list_after( // add a list &
      ELIST *list_to_add); // stay at current
  void add_list_before( // add a list &
      ELIST *list_to_add); // move to it 1st item

  ELIST_LINK *data() { // get current data
#ifndef NDEBUG
    if (!list) {
      NO_LIST.error("ELIST_ITERATOR::data", ABORT);
    }
    if (!current) {
      NULL_DATA.error("ELIST_ITERATOR::data", ABORT);
    }
#endif
    return current;
  }

  ELIST_LINK *data_relative( // get data + or - ...
      int8_t offset); // offset from current
  ELIST_LINK *forward(); // move to next element
  ELIST_LINK *extract(); // remove from list
  ELIST_LINK *move_to_first(); // go to start of list
  ELIST_LINK *move_to_last(); // go to end of list
  void mark_cycle_pt(); // remember current

  bool empty() const { // is list empty?
#ifndef NDEBUG
    if (!list) {
      NO_LIST.error("ELIST_ITERATOR::empty", ABORT);
    }
#endif
    return list->empty();
  }

  bool current_extracted() const { // current extracted?
    return !current;
  }

  bool at_first() const; // Current is first?
  bool at_last() const; // Current is last?
  bool cycled_list() const; // Completed a cycle?

  void add_to_end( // add at end &
      ELIST_LINK *new_link); // don't move
  void exchange( // positions of 2 links
      ELIST_ITERATOR *other_it); // other iterator

  //# elements in list
  int32_t length() const {
    return list->length();
  }

  void sort( // sort elements
      int comparator( // comparison routine
          const void *, const void *));
};
/***********************************************************************
* ELIST_ITERATOR::set_to_list
*
* (Re-)initialise the iterator to point to the start of the list_to_iterate
* over.
**********************************************************************/
// (Re-)initialise the iterator to the head of list_to_iterate, clearing all
// cycling and extraction state.
inline void ELIST_ITERATOR::set_to_list( // change list
    ELIST *list_to_iterate) {
#ifndef NDEBUG
  if (!list_to_iterate) {
    BAD_PARAMETER.error("ELIST_ITERATOR::set_to_list", ABORT, "list_to_iterate is nullptr");
  }
#endif
  list = list_to_iterate;
  prev = list->last;
  current = list->First();
  if (current != nullptr) {
    next = current->next;
  } else {
    next = nullptr;
  }
  // No cycle point until mark_cycle_pt() is called explicitly.
  cycle_pt = nullptr;
  started_cycling = false;
  ex_current_was_last = false;
  ex_current_was_cycle_pt = false;
}
/***********************************************************************
* ELIST_ITERATOR::ELIST_ITERATOR
*
* CONSTRUCTOR - set iterator to specified list;
**********************************************************************/
// Construct an iterator positioned at the start of list_to_iterate.
inline ELIST_ITERATOR::ELIST_ITERATOR(ELIST *list_to_iterate) {
  set_to_list(list_to_iterate);
}
/***********************************************************************
* ELIST_ITERATOR::add_after_then_move
*
* Add a new element to the list after the current element and move the
* iterator to the new element.
**********************************************************************/
// Insert new_element after current and move the iterator onto it.
inline void ELIST_ITERATOR::add_after_then_move( // element to add
    ELIST_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::add_after_then_move", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST_ITERATOR::add_after_then_move", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST_ITERATOR::add_after_then_move", ABORT);
  }
#endif
  if (list->empty()) {
    // First element: a singleton circle.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
  } else {
    new_element->next = next;
    if (current) { // not extracted
      current->next = new_element;
      prev = current;
      if (current == list->last) {
        list->last = new_element;
      }
    } else { // current extracted
      prev->next = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
      }
      if (ex_current_was_cycle_pt) {
        // The new element inherits the extracted element's cycle-point role.
        cycle_pt = new_element;
      }
    }
  }
  current = new_element;
}
/***********************************************************************
* ELIST_ITERATOR::add_after_stay_put
*
* Add a new element to the list after the current element but do not move
* the iterator to the new element.
**********************************************************************/
// Insert new_element after current WITHOUT moving the iterator onto it.
inline void ELIST_ITERATOR::add_after_stay_put( // element to add
    ELIST_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::add_after_stay_put", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST_ITERATOR::add_after_stay_put", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST_ITERATOR::add_after_stay_put", ABORT);
  }
#endif
  if (list->empty()) {
    // First element; iterator stays "before" it with current extracted.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
    ex_current_was_last = false;
    current = nullptr;
  } else {
    new_element->next = next;
    if (current) { // not extracted
      current->next = new_element;
      if (prev == current) {
        // Singleton list: the new element is also the predecessor.
        prev = new_element;
      }
      if (current == list->last) {
        list->last = new_element;
      }
    } else { // current extracted
      prev->next = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
        ex_current_was_last = false;
      }
    }
    next = new_element;
  }
}
/***********************************************************************
* ELIST_ITERATOR::add_before_then_move
*
* Add a new element to the list before the current element and move the
* iterator to the new element.
**********************************************************************/
// Insert new_element before current and move the iterator onto it.
inline void ELIST_ITERATOR::add_before_then_move( // element to add
    ELIST_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::add_before_then_move", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST_ITERATOR::add_before_then_move", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST_ITERATOR::add_before_then_move", ABORT);
  }
#endif
  if (list->empty()) {
    // First element: a singleton circle.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
  } else {
    prev->next = new_element;
    if (current) { // not extracted
      new_element->next = current;
      next = current;
    } else { // current extracted
      new_element->next = next;
      if (ex_current_was_last) {
        list->last = new_element;
      }
      if (ex_current_was_cycle_pt) {
        // The new element inherits the extracted element's cycle-point role.
        cycle_pt = new_element;
      }
    }
  }
  current = new_element;
}
/***********************************************************************
* ELIST_ITERATOR::add_before_stay_put
*
* Add a new element to the list before the current element but don't move the
* iterator to the new element.
**********************************************************************/
// Insert new_element before current WITHOUT moving the iterator onto it.
inline void ELIST_ITERATOR::add_before_stay_put( // element to add
    ELIST_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::add_before_stay_put", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST_ITERATOR::add_before_stay_put", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST_ITERATOR::add_before_stay_put", ABORT);
  }
#endif
  if (list->empty()) {
    // First element; iterator stays "after" it with current extracted.
    new_element->next = new_element;
    list->last = new_element;
    prev = next = new_element;
    ex_current_was_last = true;
    current = nullptr;
  } else {
    prev->next = new_element;
    if (current) { // not extracted
      new_element->next = current;
      if (next == current) {
        // Singleton list: the new element is also the successor.
        next = new_element;
      }
    } else { // current extracted
      new_element->next = next;
      if (ex_current_was_last) {
        list->last = new_element;
      }
    }
    prev = new_element;
  }
}
/***********************************************************************
 * ELIST_ITERATOR::add_list_after
 *
 * Splice the whole of another list into this list after the current
 * element, without moving the iterator. The donated list is emptied.
 **********************************************************************/
inline void ELIST_ITERATOR::add_list_after(ELIST *list_to_add) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::add_list_after", ABORT);
  }
  if (!list_to_add) {
    BAD_PARAMETER.error("ELIST_ITERATOR::add_list_after", ABORT, "list_to_add is nullptr");
  }
#endif
  if (!list_to_add->empty()) {
    if (list->empty()) {
      // This list simply adopts the donor's elements wholesale.
      list->last = list_to_add->last;
      prev = list->last;
      next = list->First();
      ex_current_was_last = true;
      current = nullptr;
    } else {
      if (current) { // not extracted
        // Link donor's chain in between current and next.
        current->next = list_to_add->First();
        if (current == list->last) {
          list->last = list_to_add->last;
        }
        list_to_add->last->next = next;
        next = current->next;
      } else { // current extracted
        // Insert into the hole left by the extracted element.
        prev->next = list_to_add->First();
        if (ex_current_was_last) {
          list->last = list_to_add->last;
          ex_current_was_last = false;
        }
        list_to_add->last->next = next;
        next = prev->next;
      }
    }
    // Donor list is now empty; its elements belong to this list.
    list_to_add->last = nullptr;
  }
}
/***********************************************************************
 * ELIST_ITERATOR::add_list_before
 *
 * Splice the whole of another list into this list before the current
 * element, and move the iterator to the first of the inserted elements.
 * The donated list is emptied.
 **********************************************************************/
inline void ELIST_ITERATOR::add_list_before(ELIST *list_to_add) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::add_list_before", ABORT);
  }
  if (!list_to_add) {
    BAD_PARAMETER.error("ELIST_ITERATOR::add_list_before", ABORT, "list_to_add is nullptr");
  }
#endif
  if (!list_to_add->empty()) {
    if (list->empty()) {
      // This list adopts the donor's elements; position at its head.
      list->last = list_to_add->last;
      prev = list->last;
      current = list->First();
      next = current->next;
      ex_current_was_last = false;
    } else {
      // Link donor's chain in after prev.
      prev->next = list_to_add->First();
      if (current) { // not extracted
        list_to_add->last->next = current;
      } else { // current extracted
        list_to_add->last->next = next;
        if (ex_current_was_last) {
          // Extracted element was the tail; donor's tail takes over.
          list->last = list_to_add->last;
        }
        if (ex_current_was_cycle_pt) {
          // Cycle mark transfers to the first inserted element.
          cycle_pt = prev->next;
        }
      }
      current = prev->next;
      next = current->next;
    }
    // Donor list is now empty; its elements belong to this list.
    list_to_add->last = nullptr;
  }
}
/***********************************************************************
 * ELIST_ITERATOR::extract
 *
 * Do extraction by removing current from the list, returning it to the
 * caller, but NOT updating the iterator. (So that any calling loop can do
 * this.) The iterator's current points to nullptr. If the extracted element
 * is to be deleted, this is the caller's responsibility.
 **********************************************************************/
inline ELIST_LINK *ELIST_ITERATOR::extract() {
  ELIST_LINK *extracted_link;
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::extract", ABORT);
  }
  if (!current) { // list empty or
    // element extracted
    NULL_CURRENT.error("ELIST_ITERATOR::extract", ABORT);
  }
#endif
  if (list->singleton()) {
    // Special case where we do need to change the iterator: the list
    // becomes empty, so there is no prev/next to stand on.
    prev = next = list->last = nullptr;
  } else {
    prev->next = next; // remove from list
    // Remember whether the tail was removed so later adds can restore it.
    ex_current_was_last = (current == list->last);
    if (ex_current_was_last) {
      list->last = prev;
    }
  }
  // Always set ex_current_was_cycle_pt so an add/forward will work in a loop.
  ex_current_was_cycle_pt = (current == cycle_pt);
  extracted_link = current;
  extracted_link->next = nullptr; // for safety
  current = nullptr;
  return extracted_link;
}
/***********************************************************************
 * ELIST_ITERATOR::move_to_first()
 *
 * Reposition the iterator at the head of the list and return the first
 * element (nullptr when the list is empty).
 **********************************************************************/
inline ELIST_LINK *ELIST_ITERATOR::move_to_first() {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::move_to_first", ABORT);
  }
#endif
  prev = list->last; // the tail precedes the head in a circular list
  current = list->First();
  next = (current != nullptr) ? current->next : nullptr;
  return current;
}
/***********************************************************************
 * ELIST_ITERATOR::mark_cycle_pt()
 *
 * Remember the current location so that cycled_list() can tell whether
 * we've returned to this point later.
 *
 * If the current element is deleted, either now or in the future, the cycle
 * point transfers to the next element that becomes current - e.g. via a
 * forward, add_after_then_move or add_before_then_move.
 **********************************************************************/
inline void ELIST_ITERATOR::mark_cycle_pt() {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::mark_cycle_pt", ABORT);
  }
#endif
  started_cycling = false;
  if (current != nullptr) {
    cycle_pt = current;
  } else {
    // Current has been extracted: defer the mark to whatever element
    // becomes current next.
    ex_current_was_cycle_pt = true;
  }
}
/***********************************************************************
 * ELIST_ITERATOR::at_first()
 *
 * Are we at the start of the list?
 *
 **********************************************************************/
inline bool ELIST_ITERATOR::at_first() const {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::at_first", ABORT);
  }
#endif
  if (list->empty() || current == list->First()) {
    return true;
  }
  // Also true when sitting in the hole left by extracting the first
  // element: current is gone, prev has wrapped to the tail, and the
  // extracted element was NOT the last one.
  return current == nullptr && prev == list->last && !ex_current_was_last;
}
/***********************************************************************
 * ELIST_ITERATOR::at_last()
 *
 * Are we at the end of the list?
 *
 **********************************************************************/
inline bool ELIST_ITERATOR::at_last() const {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::at_last", ABORT);
  }
#endif
  if (list->empty() || current == list->last) {
    return true;
  }
  // Also true when sitting in the hole left by extracting the last
  // element: current is gone and prev has become the new tail.
  return current == nullptr && prev == list->last && ex_current_was_last;
}
/***********************************************************************
 * ELIST_ITERATOR::cycled_list()
 *
 * Have we returned to the cycle_pt since it was set?
 *
 **********************************************************************/
inline bool ELIST_ITERATOR::cycled_list() const {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::cycled_list", ABORT);
  }
#endif
  if (list->empty()) {
    return true; // an empty list is trivially "done"
  }
  return started_cycling && current == cycle_pt;
}
/***********************************************************************
 * ELIST_ITERATOR::sort()
 *
 * Sort the elements of the underlying list with the given qsort-style
 * comparator, then reposition this iterator at the start.
 *
 **********************************************************************/
inline void ELIST_ITERATOR::sort( // sort elements
    int comparator( // comparison routine
        const void *, const void *)) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::sort", ABORT);
  }
#endif
  list->sort(comparator);
  // Sorting relinks every element, so any saved position is stale.
  move_to_first();
}
/***********************************************************************
 * ELIST_ITERATOR::add_to_end
 *
 * Add a new element to the end of the list without moving the iterator.
 * This is provided because a singly linked list cannot move to the last
 * element, as the iterator couldn't set its prev pointer. Adding to the
 * end is essential for implementing queues.
 **********************************************************************/
inline void ELIST_ITERATOR::add_to_end( // element to add
    ELIST_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST_ITERATOR::add_to_end", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST_ITERATOR::add_to_end", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST_ITERATOR::add_to_end", ABORT);
  }
#endif
  if (this->at_last()) {
    // prev is already the tail, so the normal insert-after works.
    this->add_after_stay_put(new_element);
  } else {
    if (this->at_first()) {
      // Inserting "before first" lands at the tail of the circle;
      // just record the new element as the tail.
      this->add_before_stay_put(new_element);
      list->last = new_element;
    } else { // Iterator is elsewhere
      // Splice directly after the current tail; iterator state is
      // unaffected because it references neither tail nor head.
      new_element->next = list->last->next;
      list->last->next = new_element;
      list->last = new_element;
    }
  }
}
// Expands to the type-safe wrapper classes for a CLASSNAME derived from
// ELIST_LINK: CLASSNAME_LIST (the list) and CLASSNAME_IT (its iterator),
// both thin instantiations of the X_LIST / X_ITER templates over the
// generic singly-linked ELIST machinery.
#define ELISTIZEH(CLASSNAME) \
  class CLASSNAME##_LIST : public X_LIST<ELIST, ELIST_ITERATOR, CLASSNAME> { \
    using X_LIST<ELIST, ELIST_ITERATOR, CLASSNAME>::X_LIST; \
  }; \
  class CLASSNAME##_IT : public X_ITER<ELIST_ITERATOR, CLASSNAME> { \
    using X_ITER<ELIST_ITERATOR, CLASSNAME>::X_ITER; \
  };
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccutil/elst.h
|
C++
|
apache-2.0
| 25,159
|
/**********************************************************************
* File: elst2.cpp (Formerly elist2.c)
* Description: Doubly linked embedded list code not in the include file.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "elst2.h"
#include <cstdlib>
namespace tesseract {
/***********************************************************************
* ELIST2::internal_clear
*
* Used by the destructor and the "clear" member function of derived list
* classes to destroy all the elements on the list.
* The calling function passes a "zapper" function which can be called to
* delete each element of the list, regardless of its derived type. This
* technique permits a generic clear function to destroy elements of
* different derived types correctly, without requiring virtual functions and
* the consequential memory overhead.
**********************************************************************/
void ELIST2::internal_clear( // destroy all links
void (*zapper)(void *)) {
// ptr to zapper functn
ELIST2_LINK *ptr;
ELIST2_LINK *next;
if (!empty()) {
ptr = last->next; // set to first
last->next = nullptr; // break circle
last = nullptr; // set list empty
while (ptr) {
next = ptr->next;
zapper(ptr);
ptr = next;
}
}
}
/***********************************************************************
 * ELIST2::assign_to_sublist
 *
 * The list is set to a sublist of another list. "This" list must be empty
 * before this function is invoked. The two iterators passed must refer to
 * the same list, different from "this" one. The sublist removed is the
 * inclusive list from start_it's current position to end_it's current
 * position. If this range passes over the end of the source list then the
 * source list has its end set to the previous element of start_it. The
 * extracted sublist is unaffected by the end point of the source list, its
 * end point is always the end_it position.
 **********************************************************************/
void ELIST2::assign_to_sublist( // to this list
    ELIST2_ITERATOR *start_it, // from list start
    ELIST2_ITERATOR *end_it) { // from list end
  constexpr ERRCODE LIST_NOT_EMPTY("Destination list must be empty before extracting a sublist");
  if (!empty()) {
    LIST_NOT_EMPTY.error("ELIST2.assign_to_sublist", ABORT);
  }
  // extract_sublist unlinks the range from the source list and returns
  // its tail, which is all a circular list needs to be complete.
  last = start_it->extract_sublist(end_it);
}
/***********************************************************************
* ELIST2::sort
*
* Sort elements on list
* NB If you don't like the const declarations in the comparator, coerce yours:
* (int (*)(const void *, const void *)
**********************************************************************/
void ELIST2::sort( // sort elements
int comparator( // comparison routine
const void *, const void *)) {
// Allocate an array of pointers, one per list element.
auto count = length();
if (count > 0) {
// ptr array to sort
std::vector<ELIST2_LINK *> base;
base.reserve(count);
ELIST2_ITERATOR it(this);
// Extract all elements, putting the pointers in the array.
for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
base.push_back(it.extract());
}
// Sort the pointer array.
qsort(&base[0], count, sizeof(base[0]), comparator);
// Rebuild the list from the sorted pointers.
for (auto current : base) {
it.add_to_end(current);
}
}
}
// Assuming list has been sorted already, insert new_link to
// keep the list sorted according to the same comparison function.
// Comparison function is the same as used by sort, i.e. uses double
// indirection. Time is O(1) to add to beginning or end.
// Time is linear to add pre-sorted items to an empty list.
void ELIST2::add_sorted(int comparator(const void *, const void *), ELIST2_LINK *new_link) {
  // Check for adding at the end.
  if (last == nullptr || comparator(&last, &new_link) < 0) {
    if (last == nullptr) {
      // Empty list: the new link becomes a self-referencing singleton.
      new_link->next = new_link;
      new_link->prev = new_link;
    } else {
      // Splice between the current tail and the head, then advance last.
      new_link->next = last->next;
      new_link->prev = last;
      last->next = new_link;
      new_link->next->prev = new_link;
    }
    last = new_link;
  } else {
    // Need to use an iterator.
    ELIST2_ITERATOR it(this);
    for (it.mark_cycle_pt(); !it.cycled_list(); it.forward()) {
      ELIST2_LINK *link = it.data();
      // Stop at the first element that sorts after new_link.
      if (comparator(&link, &new_link) > 0) {
        break;
      }
    }
    if (it.cycled_list()) {
      it.add_to_end(new_link);
    } else {
      it.add_before_then_move(new_link);
    }
  }
}
/***********************************************************************
* MEMBER FUNCTIONS OF CLASS: ELIST2_ITERATOR
* ==========================================
**********************************************************************/
/***********************************************************************
 * ELIST2_ITERATOR::forward
 *
 * Move the iterator to the next element of the list and return it.
 * REMEMBER: ALL LISTS ARE CIRCULAR - the walk wraps from last to first.
 **********************************************************************/
ELIST2_LINK *ELIST2_ITERATOR::forward() {
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST2_ITERATOR::forward", ABORT);
#endif
  if (list->empty()) {
    return nullptr;
  }
  if (current) { // not removed so
    // set previous
    prev = current;
    started_cycling = true;
    // In case next is deleted by another iterator, get it from the current.
    current = current->next;
  } else {
    // Current was extracted: resume at the saved successor; if the
    // extracted element carried the cycle mark, transfer it.
    if (ex_current_was_cycle_pt) {
      cycle_pt = next;
    }
    current = next;
  }
#ifndef NDEBUG
  if (!current)
    NULL_DATA.error("ELIST2_ITERATOR::forward", ABORT);
#endif
  next = current->next;
#ifndef NDEBUG
  if (!next) {
    NULL_NEXT.error("ELIST2_ITERATOR::forward", ABORT,
                    "This is: %p Current is: %p",
                    static_cast<void *>(this),
                    static_cast<void *>(current));
  }
#endif
  return current;
}
/***********************************************************************
 * ELIST2_ITERATOR::backward
 *
 * Move the iterator to the previous element of the list and return it.
 * REMEMBER: ALL LISTS ARE CIRCULAR - the walk wraps from first to last.
 **********************************************************************/
ELIST2_LINK *ELIST2_ITERATOR::backward() {
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST2_ITERATOR::backward", ABORT);
#endif
  if (list->empty()) {
    return nullptr;
  }
  if (current) { // not removed so
    // set previous
    next = current;
    started_cycling = true;
    // In case prev is deleted by another iterator, get it from current.
    current = current->prev;
  } else {
    // Current was extracted: resume at the saved predecessor; if the
    // extracted element carried the cycle mark, transfer it.
    if (ex_current_was_cycle_pt) {
      cycle_pt = prev;
    }
    current = prev;
  }
#ifndef NDEBUG
  if (!current)
    NULL_DATA.error("ELIST2_ITERATOR::backward", ABORT);
  if (!prev) {
    NULL_PREV.error("ELIST2_ITERATOR::backward", ABORT,
                    "This is: %p Current is: %p",
                    static_cast<void *>(this),
                    static_cast<void *>(current));
  }
#endif
  prev = current->prev;
  return current;
}
/***********************************************************************
 * ELIST2_ITERATOR::data_relative
 *
 * Return the data pointer to the element "offset" elements from current.
 * Negative offsets walk backwards, positive ones forwards.
 * (This function can't be INLINEd because it contains a loop)
 **********************************************************************/
ELIST2_LINK *ELIST2_ITERATOR::data_relative( // get data + or - ..
    int8_t offset) { // offset from current
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST2_ITERATOR::data_relative", ABORT);
  if (list->empty())
    EMPTY_LIST.error("ELIST2_ITERATOR::data_relative", ABORT);
#endif
  ELIST2_LINK *ptr;
  if (offset < 0) {
    // Walk backwards; when current was extracted, start from next so
    // offset -1 lands on the element before the hole.
    ptr = current ? current : next;
    while (offset < 0) {
      ptr = ptr->prev;
      ++offset;
    }
  } else {
    // Walk forwards; when current was extracted, start from prev.
    ptr = current ? current : prev;
    while (offset > 0) {
      ptr = ptr->next;
      --offset;
    }
  }
#ifndef NDEBUG
  if (!ptr)
    NULL_DATA.error("ELIST2_ITERATOR::data_relative", ABORT);
#endif
  return ptr;
}
/***********************************************************************
 * ELIST2_ITERATOR::exchange()
 *
 * Given another iterator, whose current element is a different element on
 * the same list OR an element of another list, exchange the two current
 * elements. On return, each iterator points to the element which was the
 * other iterator's current on entry.
 * (This function hasn't been in-lined because its a bit big!)
 **********************************************************************/
void ELIST2_ITERATOR::exchange( // positions of 2 links
    ELIST2_ITERATOR *other_it) { // other iterator
  constexpr ERRCODE DONT_EXCHANGE_DELETED("Can't exchange deleted elements of lists");
  ELIST2_LINK *old_current;
#ifndef NDEBUG
  if (!list)
    NO_LIST.error("ELIST2_ITERATOR::exchange", ABORT);
  if (!other_it)
    BAD_PARAMETER.error("ELIST2_ITERATOR::exchange", ABORT, "other_it nullptr");
  if (!(other_it->list))
    NO_LIST.error("ELIST2_ITERATOR::exchange", ABORT, "other_it");
#endif
  /* Do nothing if either list is empty or if both iterators reference the same
link */
  if ((list->empty()) || (other_it->list->empty()) || (current == other_it->current)) {
    return;
  }
  /* Error if either current element is deleted */
  if (!current || !other_it->current) {
    DONT_EXCHANGE_DELETED.error("ELIST2_ITERATOR.exchange", ABORT);
  }
  /* Now handle the 4 cases: doubleton list; non-doubleton adjacent elements
(other before this); non-doubleton adjacent elements (this before other);
non-adjacent elements. */
  // adjacent links
  if ((next == other_it->current) || (other_it->next == current)) {
    // doubleton list
    if ((next == other_it->current) && (other_it->next == current)) {
      // Two-element list: swapping neighbours just flips each iterator's
      // view; the links themselves already point at each other.
      prev = next = current;
      other_it->prev = other_it->next = other_it->current;
    } else { // non-doubleton with
      // adjacent links
      // other before this
      if (other_it->next == current) {
        other_it->prev->next = current;
        other_it->current->next = next;
        other_it->current->prev = current;
        current->next = other_it->current;
        current->prev = other_it->prev;
        next->prev = other_it->current;
        other_it->next = other_it->current;
        prev = current;
      } else { // this before other
        prev->next = other_it->current;
        current->next = other_it->next;
        current->prev = other_it->current;
        other_it->current->next = current;
        other_it->current->prev = prev;
        other_it->next->prev = current;
        next = current;
        other_it->prev = other_it->current;
      }
    }
  } else { // no overlap
    // General case: rewire all eight neighbour pointers.
    prev->next = other_it->current;
    current->next = other_it->next;
    current->prev = other_it->prev;
    next->prev = other_it->current;
    other_it->prev->next = current;
    other_it->current->next = next;
    other_it->current->prev = prev;
    other_it->next->prev = current;
  }
  /* update end of list pointer when necessary (remember that the 2 iterators
may iterate over different lists!) */
  if (list->last == current) {
    list->last = other_it->current;
  }
  if (other_it->list->last == other_it->current) {
    other_it->list->last = current;
  }
  if (current == cycle_pt) {
    cycle_pt = other_it->cycle_pt;
  }
  if (other_it->current == other_it->cycle_pt) {
    other_it->cycle_pt = cycle_pt;
  }
  /* The actual exchange - in all cases*/
  old_current = current;
  current = other_it->current;
  other_it->current = old_current;
}
/***********************************************************************
 * ELIST2_ITERATOR::extract_sublist()
 *
 * This is a private member, used only by ELIST2::assign_to_sublist.
 * Given another iterator for the same list, extract the links from THIS to
 * OTHER inclusive, link them into a new circular list, and return a
 * pointer to the last element.
 * (Can't inline this function because it contains a loop)
 **********************************************************************/
ELIST2_LINK *ELIST2_ITERATOR::extract_sublist( // from this current
    ELIST2_ITERATOR *other_it) { // to other current
#ifndef NDEBUG
  constexpr ERRCODE BAD_EXTRACTION_PTS("Can't extract sublist from points on different lists");
  constexpr ERRCODE DONT_EXTRACT_DELETED("Can't extract a sublist marked by deleted points");
#endif
  constexpr ERRCODE BAD_SUBLIST("Can't find sublist end point in original list");
  ELIST2_ITERATOR temp_it = *this;
  ELIST2_LINK *end_of_new_list;
#ifndef NDEBUG
  if (!other_it)
    BAD_PARAMETER.error("ELIST2_ITERATOR::extract_sublist", ABORT, "other_it nullptr");
  if (!list)
    NO_LIST.error("ELIST2_ITERATOR::extract_sublist", ABORT);
  if (list != other_it->list)
    BAD_EXTRACTION_PTS.error("ELIST2_ITERATOR.extract_sublist", ABORT);
  if (list->empty())
    EMPTY_LIST.error("ELIST2_ITERATOR::extract_sublist", ABORT);
  if (!current || !other_it->current)
    DONT_EXTRACT_DELETED.error("ELIST2_ITERATOR.extract_sublist", ABORT);
#endif
  // Both iterators will be left "off-list"; reset their extraction state
  // before the walk records what the sublist removes.
  ex_current_was_last = other_it->ex_current_was_last = false;
  ex_current_was_cycle_pt = false;
  other_it->ex_current_was_cycle_pt = false;
  temp_it.mark_cycle_pt();
  do { // walk sublist
    if (temp_it.cycled_list()) { // can't find end pt
      BAD_SUBLIST.error("ELIST2_ITERATOR.extract_sublist", ABORT);
    }
    if (temp_it.at_last()) {
      // The range wraps over the source list's end: truncate the source
      // at the element before the sublist's start.
      list->last = prev;
      ex_current_was_last = other_it->ex_current_was_last = true;
    }
    if (temp_it.current == cycle_pt) {
      ex_current_was_cycle_pt = true;
    }
    if (temp_it.current == other_it->cycle_pt) {
      other_it->ex_current_was_cycle_pt = true;
    }
    temp_it.forward();
  }
  // do INCLUSIVE list
  while (temp_it.prev != other_it->current);
  // circularise sublist
  other_it->current->next = current;
  // circularise sublist
  current->prev = other_it->current;
  end_of_new_list = other_it->current;
  // sublist = whole list
  if (prev == other_it->current) {
    list->last = nullptr;
    prev = current = next = nullptr;
    other_it->prev = other_it->current = other_it->next = nullptr;
  } else {
    // Close the gap in the source list and detach both iterators from
    // the extracted elements.
    prev->next = other_it->next;
    other_it->next->prev = prev;
    current = other_it->current = nullptr;
    next = other_it->next;
    other_it->prev = prev;
  }
  return end_of_new_list;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/elst2.cpp
|
C++
|
apache-2.0
| 15,314
|
/**********************************************************************
* File: elst2.h (Formerly elist2.h)
* Description: Double linked embedded list module include file.
* Author: Phil Cheatle
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef ELST2_H
#define ELST2_H
#include "list.h"
#include "lsterr.h"
#include "serialis.h"
#include <cstdio>
namespace tesseract {
class ELIST2_ITERATOR;
/**********************************************************************
DESIGN NOTE
===========
It would probably be possible to implement the ELIST2 classes as derived
classes from ELIST. I haven't done this because:
a) I think it would be harder to understand the code
(Though the problem with not inheriting is that changes to ELIST must be
reflected in ELIST2 and vice versa)
b) Most of the code is inline so:
i) The duplication in source does not affect the run time code size - the
code is copied inline anyway!
ii) The compiler should have a bit less work to do!
**********************************************************************/
/**********************************************************************
* CLASS - ELIST2_LINK
*
* Generic link class for doubly linked lists with embedded links
*
* Note: No destructor - elements are assumed to be destroyed EITHER after
* they have been extracted from a list OR by the ELIST2 destructor which
* walks the list.
**********************************************************************/
class ELIST2_LINK {
  friend class ELIST2_ITERATOR;
  friend class ELIST2;

  // Neighbouring links; an element belongs to at most one list at a time.
  ELIST2_LINK *prev = nullptr;
  ELIST2_LINK *next = nullptr;

public:
  ELIST2_LINK() = default;
  ELIST2_LINK(const ELIST2_LINK &) = delete;
  // "Assignment" deliberately resets the link pointers rather than copying
  // them: a copied element must not appear to be on the source's list.
  // The assignment operator is required for WERD.
  void operator=(const ELIST2_LINK &) {
    prev = next = nullptr;
  }
};
/**********************************************************************
 * CLASS - ELIST2
 *
 * Generic list class for doubly linked lists with embedded links.
 * The list is circular and keeps only a pointer to its last element;
 * last->next is therefore the first element.
 **********************************************************************/
class TESS_API ELIST2 {
  friend class ELIST2_ITERATOR;
  ELIST2_LINK *last = nullptr; // End of list
  //(Points to head)
  ELIST2_LINK *First() { // return first
    return last ? last->next : nullptr;
  }
public:
  // destroy all links, deleting each element via the supplied zapper
  void internal_clear(void (*zapper)(void *));
  bool empty() const { // is list empty?
    return !last;
  }
  // true when the list holds exactly one element
  bool singleton() const {
    return last ? (last == last->next) : false;
  }
  void shallow_copy( // dangerous!!
      ELIST2 *from_list) { // beware destructors!!
    last = from_list->last;
  }
  // ptr to copier functn
  void internal_deep_copy(ELIST2_LINK *(*copier)(ELIST2_LINK *),
                          const ELIST2 *list); // list being copied
  void assign_to_sublist( // to this list
      ELIST2_ITERATOR *start_it, // from list start
      ELIST2_ITERATOR *end_it); // from list end
  // # elements in list (walks the whole circle: O(n))
  int32_t length() const {
    int32_t count = 0;
    if (last != nullptr) {
      count = 1;
      for (auto it = last->next; it != last; it = it->next) {
        count++;
      }
    }
    return count;
  }
  void sort( // sort elements
      int comparator( // comparison routine
          const void *, const void *));
  // Assuming list has been sorted already, insert new_link to
  // keep the list sorted according to the same comparison function.
  // Comparison function is the same as used by sort, i.e. uses double
  // indirection. Time is O(1) to add to beginning or end.
  // Time is linear to add pre-sorted items to an empty list.
  void add_sorted(int comparator(const void *, const void *), ELIST2_LINK *new_link);
};
/***********************************************************************
 * CLASS - ELIST2_ITERATOR
 *
 * Generic iterator class for doubly linked lists with embedded links.
 * Supports insertion, extraction and bidirectional traversal while
 * tracking a "cycle point" so loops can detect a complete pass.
 **********************************************************************/
class TESS_API ELIST2_ITERATOR {
  friend void ELIST2::assign_to_sublist(ELIST2_ITERATOR *, ELIST2_ITERATOR *);
  ELIST2 *list; // List being iterated
  ELIST2_LINK *prev; // prev element
  ELIST2_LINK *current; // current element (nullptr after extract())
  ELIST2_LINK *next; // next element
  ELIST2_LINK *cycle_pt; // point we are cycling the list to.
  bool ex_current_was_last; // current extracted was end of list
  bool ex_current_was_cycle_pt; // current extracted was cycle point
  bool started_cycling; // Have we moved off the start?
  ELIST2_LINK *extract_sublist( // from this current...
      ELIST2_ITERATOR *other_it); // to other current
public:
  ELIST2_ITERATOR( // constructor
      ELIST2 *list_to_iterate);
  void set_to_list( // change list
      ELIST2 *list_to_iterate);
  void add_after_then_move( // add after current &
      ELIST2_LINK *new_link); // move to new
  void add_after_stay_put( // add after current &
      ELIST2_LINK *new_link); // stay at current
  void add_before_then_move( // add before current &
      ELIST2_LINK *new_link); // move to new
  void add_before_stay_put( // add before current &
      ELIST2_LINK *new_link); // stay at current
  void add_list_after( // add a list &
      ELIST2 *list_to_add); // stay at current
  void add_list_before( // add a list &
      ELIST2 *list_to_add); // move to it 1st item
  ELIST2_LINK *data() { // get current data
#ifndef NDEBUG
    if (!current) {
      NULL_DATA.error("ELIST2_ITERATOR::data", ABORT);
    }
    if (!list) {
      NO_LIST.error("ELIST2_ITERATOR::data", ABORT);
    }
#endif
    return current;
  }
  ELIST2_LINK *data_relative( // get data + or - ...
      int8_t offset); // offset from current
  ELIST2_LINK *forward(); // move to next element
  ELIST2_LINK *backward(); // move to prev element
  ELIST2_LINK *extract(); // remove from list
  // go to start of list
  ELIST2_LINK *move_to_first();
  ELIST2_LINK *move_to_last(); // go to end of list
  void mark_cycle_pt(); // remember current
  bool empty() const { // is list empty?
#ifndef NDEBUG
    if (!list) {
      NO_LIST.error("ELIST2_ITERATOR::empty", ABORT);
    }
#endif
    return list->empty();
  }
  bool current_extracted() const { // current extracted?
    return !current;
  }
  bool at_first() const; // Current is first?
  bool at_last() const; // Current is last?
  bool cycled_list() const; // Completed a cycle?
  void add_to_end( // add at end &
      ELIST2_LINK *new_link); // don't move
  void exchange( // positions of 2 links
      ELIST2_ITERATOR *other_it); // other iterator
  //# elements in list
  int32_t length() const {
    return list->length();
  }
  void sort( // sort elements
      int comparator( // comparison routine
          const void *, const void *));
private:
  // Don't use the following constructor.
  ELIST2_ITERATOR() = delete;
};
/***********************************************************************
 * ELIST2_ITERATOR::set_to_list
 *
 * (Re-)initialise the iterator to point to the start of the list_to_iterate
 * over.
 **********************************************************************/
inline void ELIST2_ITERATOR::set_to_list( // change list
    ELIST2 *list_to_iterate) {
#ifndef NDEBUG
  if (!list_to_iterate) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::set_to_list", ABORT, "list_to_iterate is nullptr");
  }
#endif
  // Reset all cycling/extraction state; the cycle point waits for an
  // explicit mark_cycle_pt().
  started_cycling = false;
  ex_current_was_last = false;
  ex_current_was_cycle_pt = false;
  cycle_pt = nullptr; // await explicit set

  // Position at the head of the new list (prev is the circular tail).
  list = list_to_iterate;
  prev = list->last;
  current = list->First();
  next = (current != nullptr) ? current->next : nullptr;
}
/***********************************************************************
 * ELIST2_ITERATOR::ELIST2_ITERATOR
 *
 * CONSTRUCTOR - set iterator to specified list; all state initialisation
 * is delegated to set_to_list().
 **********************************************************************/
inline ELIST2_ITERATOR::ELIST2_ITERATOR(ELIST2 *list_to_iterate) {
  set_to_list(list_to_iterate);
}
/***********************************************************************
 * ELIST2_ITERATOR::add_after_then_move
 *
 * Add a new element to the list after the current element and move the
 * iterator to the new element (it becomes current).
 **********************************************************************/
inline void ELIST2_ITERATOR::add_after_then_move( // element to add
    ELIST2_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::add_after_then_move", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::add_after_then_move", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST2_ITERATOR::add_after_then_move", ABORT);
  }
#endif
  if (list->empty()) {
    // Singleton circular list: both links point back to the element.
    new_element->next = new_element;
    new_element->prev = new_element;
    list->last = new_element;
    prev = next = new_element;
  } else {
    new_element->next = next;
    next->prev = new_element;
    if (current) { // not extracted
      new_element->prev = current;
      current->next = new_element;
      prev = current;
      if (current == list->last) {
        // Appending after the tail makes the new element the tail.
        list->last = new_element;
      }
    } else { // current extracted
      // Fill the hole left by the extracted element.
      new_element->prev = prev;
      prev->next = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
      }
      if (ex_current_was_cycle_pt) {
        // The extracted element carried the cycle mark; transfer it.
        cycle_pt = new_element;
      }
    }
  }
  current = new_element;
}
/***********************************************************************
 * ELIST2_ITERATOR::add_after_stay_put
 *
 * Add a new element to the list after the current element but do not move
 * the iterator to the new element (current is unchanged).
 **********************************************************************/
inline void ELIST2_ITERATOR::add_after_stay_put( // element to add
    ELIST2_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::add_after_stay_put", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::add_after_stay_put", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST2_ITERATOR::add_after_stay_put", ABORT);
  }
#endif
  if (list->empty()) {
    // Singleton circular list; the iterator stays "off-list" (current is
    // nullptr) positioned just before the new element.
    new_element->next = new_element;
    new_element->prev = new_element;
    list->last = new_element;
    prev = next = new_element;
    ex_current_was_last = false;
    current = nullptr;
  } else {
    new_element->next = next;
    next->prev = new_element;
    if (current) { // not extracted
      new_element->prev = current;
      current->next = new_element;
      if (prev == current) {
        // Singleton list case: prev must now reference the new element.
        prev = new_element;
      }
      if (current == list->last) {
        // Appending after the tail makes the new element the tail.
        list->last = new_element;
      }
    } else { // current extracted
      // Fill the hole left by the extracted element.
      new_element->prev = prev;
      prev->next = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
        ex_current_was_last = false;
      }
    }
    next = new_element;
  }
}
/***********************************************************************
 * ELIST2_ITERATOR::add_before_then_move
 *
 * Add a new element to the list before the current element and move the
 * iterator to the new element. new_element must not already be linked
 * into a list (checked in debug builds).
 **********************************************************************/
inline void ELIST2_ITERATOR::add_before_then_move( // element to add
    ELIST2_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::add_before_then_move", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::add_before_then_move", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST2_ITERATOR::add_before_then_move", ABORT);
  }
#endif
  if (list->empty()) {
    // Sole element: self-linked circular node, which is also the tail.
    new_element->next = new_element;
    new_element->prev = new_element;
    list->last = new_element;
    prev = next = new_element;
  } else {
    prev->next = new_element;
    new_element->prev = prev;
    if (current) { // not extracted
      new_element->next = current;
      current->prev = new_element;
      next = current;
    } else { // current extracted
      // Splice into the gap between prev and next left by the extraction.
      new_element->next = next;
      next->prev = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
      }
      if (ex_current_was_cycle_pt) {
        cycle_pt = new_element;
      }
    }
  }
  current = new_element;
}
/***********************************************************************
 * ELIST2_ITERATOR::add_before_stay_put
 *
 * Add a new element to the list before the current element but don't move
 * the iterator to the new element. new_element must not already be linked
 * into a list (checked in debug builds).
 **********************************************************************/
inline void ELIST2_ITERATOR::add_before_stay_put( // element to add
    ELIST2_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::add_before_stay_put", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::add_before_stay_put", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST2_ITERATOR::add_before_stay_put", ABORT);
  }
#endif
  if (list->empty()) {
    // Sole element: self-linked circular node. The iterator has no current
    // element and is left positioned "after" the new element, hence the
    // ex_current_was_last flag (contrast with add_after_stay_put).
    new_element->next = new_element;
    new_element->prev = new_element;
    list->last = new_element;
    prev = next = new_element;
    ex_current_was_last = true;
    current = nullptr;
  } else {
    prev->next = new_element;
    new_element->prev = prev;
    if (current) { // not extracted
      new_element->next = current;
      current->prev = new_element;
      if (next == current) {
        // Singleton list: the new element is also current's successor.
        next = new_element;
      }
    } else { // current extracted
      new_element->next = next;
      next->prev = new_element;
      if (ex_current_was_last) {
        list->last = new_element;
      }
    }
    prev = new_element;
  }
}
/***********************************************************************
 * ELIST2_ITERATOR::add_list_after
 *
 * Insert another list into this list after the current element but don't
 * move the iterator. The donor list is left empty (its elements are
 * adopted, not copied).
 **********************************************************************/
inline void ELIST2_ITERATOR::add_list_after(ELIST2 *list_to_add) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::add_list_after", ABORT);
  }
  if (!list_to_add) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::add_list_after", ABORT, "list_to_add is nullptr");
  }
#endif
  if (!list_to_add->empty()) {
    if (list->empty()) {
      // Adopt the donor's circular chain wholesale; iterator is left with
      // no current element, positioned before the first adopted element.
      list->last = list_to_add->last;
      prev = list->last;
      next = list->First();
      ex_current_was_last = true;
      current = nullptr;
    } else {
      if (current) { // not extracted
        // Chain the donor list between current and next.
        current->next = list_to_add->First();
        current->next->prev = current;
        if (current == list->last) {
          list->last = list_to_add->last;
        }
        list_to_add->last->next = next;
        next->prev = list_to_add->last;
        next = current->next;
      } else { // current extracted
        // Chain the donor list into the gap between prev and next.
        prev->next = list_to_add->First();
        prev->next->prev = prev;
        if (ex_current_was_last) {
          list->last = list_to_add->last;
          ex_current_was_last = false;
        }
        list_to_add->last->next = next;
        next->prev = list_to_add->last;
        next = prev->next;
      }
    }
    // Empty the donor list: its elements now belong to this list.
    list_to_add->last = nullptr;
  }
}
/***********************************************************************
 * ELIST2_ITERATOR::add_list_before
 *
 * Insert another list into this list before the current element. Move the
 * iterator to the start of the inserted elements. The donor list is left
 * empty (its elements are adopted, not copied).
 **********************************************************************/
inline void ELIST2_ITERATOR::add_list_before(ELIST2 *list_to_add) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::add_list_before", ABORT);
  }
  if (!list_to_add) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::add_list_before", ABORT, "list_to_add is nullptr");
  }
#endif
  if (!list_to_add->empty()) {
    if (list->empty()) {
      // Adopt the donor's circular chain wholesale and position the
      // iterator on the first adopted element.
      list->last = list_to_add->last;
      prev = list->last;
      current = list->First();
      next = current->next;
      ex_current_was_last = false;
    } else {
      // Chain the donor list in after prev.
      prev->next = list_to_add->First();
      prev->next->prev = prev;
      if (current) { // not extracted
        list_to_add->last->next = current;
        current->prev = list_to_add->last;
      } else { // current extracted
        list_to_add->last->next = next;
        next->prev = list_to_add->last;
        if (ex_current_was_last) {
          list->last = list_to_add->last;
        }
        if (ex_current_was_cycle_pt) {
          cycle_pt = prev->next;
        }
      }
      // Move to the first inserted element.
      current = prev->next;
      next = current->next;
    }
    // Empty the donor list: its elements now belong to this list.
    list_to_add->last = nullptr;
  }
}
/***********************************************************************
 * ELIST2_ITERATOR::extract
 *
 * Do extraction by removing current from the list, returning it to the
 * caller, but NOT updating the iterator. (So that any calling loop can do
 * this.) The iterator's current points to nullptr. If the extracted element
 * is to be deleted, this is the callers responsibility.
 **********************************************************************/
inline ELIST2_LINK *ELIST2_ITERATOR::extract() {
  ELIST2_LINK *extracted_link;
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::extract", ABORT);
  }
  if (!current) { // list empty or
    // element extracted
    NULL_CURRENT.error("ELIST2_ITERATOR::extract", ABORT);
  }
#endif
  if (list->singleton()) {
    // Special case where we do need to change the iterator: the list
    // becomes empty, so prev/next cannot point anywhere.
    prev = next = list->last = nullptr;
  } else {
    prev->next = next; // remove from list
    next->prev = prev;
    // Record whether the tail was removed so later adds can restore it.
    if (current == list->last) {
      list->last = prev;
      ex_current_was_last = true;
    } else {
      ex_current_was_last = false;
    }
  }
  // Always set ex_current_was_cycle_pt so an add/forward will work in a loop.
  ex_current_was_cycle_pt = (current == cycle_pt);
  extracted_link = current;
  extracted_link->next = nullptr; // for safety
  extracted_link->prev = nullptr; // for safety
  current = nullptr;
  return extracted_link;
}
/***********************************************************************
 * ELIST2_ITERATOR::move_to_first()
 *
 * Reposition the iterator on the first element of the list and return
 * that element (nullptr when the list is empty).
 **********************************************************************/
inline ELIST2_LINK *ELIST2_ITERATOR::move_to_first() {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::move_to_first", ABORT);
  }
#endif
  // In a circular list the first element's predecessor is the tail.
  prev = list->last;
  current = list->First();
  if (current != nullptr) {
    next = current->next;
  } else {
    next = nullptr; // empty list
  }
  return current;
}
/***********************************************************************
 * ELIST2_ITERATOR::move_to_last()
 *
 * Reposition the iterator on the last element of the list and return
 * that element (nullptr when the list is empty).
 **********************************************************************/
inline ELIST2_LINK *ELIST2_ITERATOR::move_to_last() {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::move_to_last", ABORT);
  }
#endif
  current = list->last;
  if (current == nullptr) {
    // Empty list: no neighbours either.
    prev = nullptr;
    next = nullptr;
  } else {
    prev = current->prev;
    next = current->next;
  }
  return current;
}
/***********************************************************************
 * ELIST2_ITERATOR::mark_cycle_pt()
 *
 * Remember the current location so that we can tell whether we've returned
 * to this point later.
 *
 * If the current point is deleted either now, or in the future, the cycle
 * point will be set to the next item which is set to current. This could be
 * by a forward or an add_after_then_move.
 **********************************************************************/
inline void ELIST2_ITERATOR::mark_cycle_pt() {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::mark_cycle_pt", ABORT);
  }
#endif
  started_cycling = false;
  if (current != nullptr) {
    cycle_pt = current;
  } else {
    // current has been extracted: the cycle point becomes whatever
    // replaces it (recorded via ex_current_was_cycle_pt).
    ex_current_was_cycle_pt = true;
  }
}
/***********************************************************************
 * ELIST2_ITERATOR::at_first()
 *
 * Are we at the start of the list? True for an empty list, when current
 * is the first element, or when a NON-last element between last and first
 * has been extracted.
 **********************************************************************/
inline bool ELIST2_ITERATOR::at_first() const {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::at_first", ABORT);
  }
#endif
  if (list->empty() || current == list->First()) {
    return true;
  }
  // Deleted/extracted element that sat between last and first.
  return current == nullptr && prev == list->last && !ex_current_was_last;
}
/***********************************************************************
 * ELIST2_ITERATOR::at_last()
 *
 * Are we at the end of the list? True for an empty list, when current
 * is the last element, or when the last element itself was extracted.
 **********************************************************************/
inline bool ELIST2_ITERATOR::at_last() const {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::at_last", ABORT);
  }
#endif
  if (list->empty() || current == list->last) {
    return true;
  }
  // Deleted/extracted element that was the last one.
  return current == nullptr && prev == list->last && ex_current_was_last;
}
/***********************************************************************
 * ELIST2_ITERATOR::cycled_list()
 *
 * Have we returned to the cycle_pt since it was set? An empty list counts
 * as already cycled.
 **********************************************************************/
inline bool ELIST2_ITERATOR::cycled_list() const {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::cycled_list", ABORT);
  }
#endif
  if (list->empty()) {
    return true;
  }
  return started_cycling && current == cycle_pt;
}
/***********************************************************************
 * ELIST2_ITERATOR::sort()
 *
 * Sort the elements of the list, then reposition the iterator at the
 * start. The comparator follows qsort semantics: it receives pointers to
 * two elements and returns negative/zero/positive ordering.
 **********************************************************************/
inline void ELIST2_ITERATOR::sort( // sort elements
    int comparator( // comparison routine
        const void *, const void *)) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::sort", ABORT);
  }
#endif
  list->sort(comparator);
  move_to_first();
}
/***********************************************************************
 * ELIST2_ITERATOR::add_to_end
 *
 * Add a new element to the end of the list without moving the iterator.
 * This is provided because a singly linked list cannot move to the last as
 * the iterator couldn't set its prev pointer. Adding to the end is
 * essential for implementing queues.
 **********************************************************************/
inline void ELIST2_ITERATOR::add_to_end( // element to add
    ELIST2_LINK *new_element) {
#ifndef NDEBUG
  if (!list) {
    NO_LIST.error("ELIST2_ITERATOR::add_to_end", ABORT);
  }
  if (!new_element) {
    BAD_PARAMETER.error("ELIST2_ITERATOR::add_to_end", ABORT, "new_element is nullptr");
  }
  if (new_element->next) {
    STILL_LINKED.error("ELIST2_ITERATOR::add_to_end", ABORT);
  }
#endif
  if (this->at_last()) {
    // Iterator sits on the tail: adding after it appends and updates last.
    this->add_after_stay_put(new_element);
  } else {
    if (this->at_first()) {
      // Adding before the first element places it between last and first;
      // promote it to the tail explicitly.
      this->add_before_stay_put(new_element);
      list->last = new_element;
    } else { // iterator is elsewhere: splice directly after the tail
      new_element->next = list->last->next;
      new_element->prev = list->last;
      list->last->next->prev = new_element;
      list->last->next = new_element;
      list->last = new_element;
    }
  }
}
// Generate the typed list wrapper (CLASSNAME_LIST) and typed iterator
// (CLASSNAME_IT) for a class stored in a doubly-linked ELIST2.
#define ELIST2IZEH(CLASSNAME) \
  class CLASSNAME##_LIST : public X_LIST<ELIST2, ELIST2_ITERATOR, CLASSNAME> { \
    using X_LIST<ELIST2, ELIST2_ITERATOR, CLASSNAME>::X_LIST; \
  }; \
  struct CLASSNAME##_IT : X_ITER<ELIST2_ITERATOR, CLASSNAME> { \
    using X_ITER<ELIST2_ITERATOR, CLASSNAME>::X_ITER; \
    CLASSNAME *backward() { \
      return reinterpret_cast<CLASSNAME *>(ELIST2_ITERATOR::backward()); \
    } \
  };
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccutil/elst2.h
|
C++
|
apache-2.0
| 25,296
|
/**********************************************************************
* File: errcode.cpp (Formerly error.c)
* Description: Generic error handler function
* Author: Ray Smith
*
* (C) Copyright 1989, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "errcode.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream> // for std::cerr
#include <sstream> // for std::stringstream
namespace tesseract {
// Error raised when error() itself receives an invalid action value.
constexpr ERRCODE BADERRACTION("Illegal error action");
// Maximum size of the formatted detail portion of an error message.
#define MAX_MSG 1024
/**********************************************************************
 * error
 *
 * Print an error message ("caller:Error:message:details") to stderr and
 * continue, exit or abort according to action.
 * Makes use of error messages and numbers in a common place.
 *
 **********************************************************************/
void ERRCODE::error( // handle error
    const char *caller, // name of caller
    TessErrorLogCode action, // action to take
    const char *format, ... // special message
) const {
  va_list args; // variable args
  std::stringstream msg;
  if (caller != nullptr) {
    // name of caller
    msg << caller << ':';
  }
  // actual message
  msg << "Error:" << message;
  if (format != nullptr) {
    char str[MAX_MSG];
    va_start(args, format); // variable list
    // print remainder
    std::vsnprintf(str, sizeof(str), format, args);
    // ensure termination (vsnprintf terminates on all conforming libcs;
    // kept as belt-and-braces)
    str[sizeof(str) - 1] = '\0';
    va_end(args);
    msg << ':' << str;
  }
  std::cerr << msg.str() << '\n';
  switch (action) {
    case DBG:
    case TESSLOG:
      return; // report only
    case TESSEXIT:
      // NOTE: TESSEXIT currently falls through to the ABORT handling.
    case ABORT:
#if !defined(NDEBUG)
      // Create a deliberate abnormal exit as the stack trace is more useful
      // that way. This is done only in debug builds, because the
      // error message "segmentation fault" confuses most normal users.
# if defined(__GNUC__)
      __builtin_trap();
# else
      *reinterpret_cast<int *>(0) = 0;
# endif
#endif
      abort();
    default:
      // Unknown action value: report and abort via BADERRACTION.
      BADERRACTION.error("error", ABORT);
  }
}
// Convenience overload: report the error with no extra formatted detail.
void ERRCODE::error(const char *caller, TessErrorLogCode action) const {
  error(caller, action, nullptr);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/errcode.cpp
|
C++
|
apache-2.0
| 2,823
|
/**********************************************************************
* File: errcode.h (Formerly error.h)
* Description: Header file for generic error handler class
* Author: Ray Smith
*
* (C) Copyright 1990, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef ERRCODE_H
#define ERRCODE_H
#include <tesseract/export.h> // for TESS_API
namespace tesseract {
// Control parameter for ERRCODE::error(): what to do after reporting.
enum TessErrorLogCode {
  DBG = -1, /* log without alert */
  TESSLOG = 0, /* alert user */
  TESSEXIT = 1, /* exit after error */
  ABORT = 2 /* abort after error */
};
#if !defined(__GNUC__) && !defined(__attribute__)
# define __attribute__(attr) // compiler without support for __attribute__
#endif
// Lightweight error reporter: wraps a constant message string and reports
// it via error(). Instances are typically file-scope constexpr constants.
class TESS_API ERRCODE { // error handler class
  const char *message; // error message
public:
  // Report the error, appending an optional printf-style formatted detail,
  // then continue/exit/abort according to action.
  void error( // error print function
      const char *caller, // function location
      TessErrorLogCode action, // action to take
      const char *format, ... // fprintf format
  ) const __attribute__((format(printf, 4, 5)));
  // Report with no extra formatted detail.
  void error(const char *caller, TessErrorLogCode action) const;
  constexpr ERRCODE(const char *string) : message(string) {} // initialize with string
};
// Error reported by ASSERT_HOST / ASSERT_HOST_MSG when a condition fails.
constexpr ERRCODE ASSERT_FAILED("Assert failed");
// No-op expression for the non-failing branch of ASSERT_HOST's ternary.
#define DO_NOTHING static_cast<void>(0)
// Abort with file/line context if x is false. Usable as an expression.
#define ASSERT_HOST(x) \
  (x) ? DO_NOTHING : ASSERT_FAILED.error(#x, ABORT, "in file %s, line %d", __FILE__, __LINE__)
// Like ASSERT_HOST, but first prints a custom tprintf-formatted message.
#define ASSERT_HOST_MSG(x, ...) \
  if (!(x)) { \
    tprintf(__VA_ARGS__); \
    ASSERT_FAILED.error(#x, ABORT, "in file %s, line %d", __FILE__, __LINE__); \
  }
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccutil/errcode.h
|
C++
|
apache-2.0
| 2,416
|
/**********************************************************************
* File: fileerr.h (Formerly filerr.h)
* Description: Errors for file utilities.
* Author: Ray Smith
*
* (C) Copyright 1990, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef FILEERR_H
#define FILEERR_H
#include "errcode.h"
namespace tesseract {
// Error reported by file utilities when a file cannot be opened.
constexpr ERRCODE CANTOPENFILE("Can't open file");
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccutil/fileerr.h
|
C++
|
apache-2.0
| 1,033
|
// Copyright 2012 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: genericheap.h
// Description: Template heap class.
// Author: Ray Smith, based on Dan Johnson's original code.
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_GENERICHEAP_H_
#define TESSERACT_CCUTIL_GENERICHEAP_H_
#include "errcode.h"
#include <vector>
namespace tesseract {
// GenericHeap requires 1 template argument:
// Pair will normally be either KDPairInc<Key, Data> or KDPairDec<Key, Data>
// for some arbitrary Key and scalar, smart pointer, or non-ownership pointer
// Data type, according to whether a MIN heap or a MAX heap is desired,
// respectively. Using KDPtrPairInc<Key, Data> or KDPtrPairDec<Key, Data>,
// GenericHeap can also handle simple Data pointers and own them.
// If no additional data is required, Pair can also be a scalar, since
// GenericHeap doesn't look inside it except for operator<.
//
// The heap is stored as a packed binary tree in an array hosted by a
// vector<Pair>, with the invariant that the children of each node are
// both NOT Pair::operator< the parent node. KDPairInc defines Pair::operator<
// to use Key::operator< to generate a MIN heap and KDPairDec defines
// Pair::operator< to use Key::operator> to generate a MAX heap by reversing
// all the comparisons.
// See http://en.wikipedia.org/wiki/Heap_(data_structure) for more detail on
// the basic heap implementation.
//
// Insertion and removal are both O(log n) and, unlike the STL heap, an
// explicit Reshuffle function allows a node to be repositioned in time O(log n)
// after changing its value.
//
// Accessing the element for revaluation is a more complex matter, since the
// index and pointer can be changed arbitrarily by heap operations.
// Revaluation can be done by making the Data type in the Pair derived from or
// contain a DoublePtr as its first data element, making it possible to convert
// the pointer to a Pair using KDPairInc::RecastDataPointer.
template <typename Pair>
class GenericHeap {
public:
  GenericHeap() = default;
  // The initial size is only a vector::reserve. It is not enforced as
  // the size limit of the heap. Caller must implement their own enforcement.
  explicit GenericHeap(int initial_size) {
    heap_.reserve(initial_size);
  }
  // Simple accessors.
  bool empty() const {
    return heap_.empty();
  }
  // Number of elements currently in the heap.
  int size() const {
    return static_cast<int>(heap_.size());
  }
  // Number of elements the heap can hold without reallocating.
  int size_reserved() const {
    // BUG FIX: std::vector calls its reserved size "capacity"; the previous
    // heap_.size_reserved() call was a GenericVector leftover and fails to
    // compile when this method is instantiated.
    return static_cast<int>(heap_.capacity());
  }
  void clear() {
    // Clear truncates to 0 to keep the number reserved intact.
    heap_.clear();
  }
  // Provides access to the underlying vector.
  // Caution! any changes that modify the keys will invalidate the heap!
  std::vector<Pair> &heap() {
    return heap_;
  }
  // Provides read-only access to an element of the underlying vector.
  const Pair &get(int index) const {
    return heap_[index];
  }
  // Add entry to the heap, keeping the smallest item at the top, by operator<.
  // Note that *entry is used as the source of operator=, but it is non-const
  // to allow for a smart pointer to be contained within.
  // Time = O(log n).
  void Push(Pair *entry) {
    int hole_index = size();
    // Make a hole in the end of heap_ and sift it up to be the correct
    // location for the new *entry. To avoid needing a default constructor
    // for primitive types, and to allow for use of DoublePtr in the Pair
    // somewhere, we have to incur a double copy here.
    heap_.push_back(*entry);
    *entry = heap_.back();
    hole_index = SiftUp(hole_index, *entry);
    heap_[hole_index] = *entry;
  }
  // Get the value of the top (smallest, defined by operator< ) element.
  const Pair &PeekTop() const {
    return heap_[0];
  }
  // Get the value of the worst (largest, defined by operator< ) element.
  const Pair &PeekWorst() const {
    return heap_[IndexOfWorst()];
  }
  // Removes the top element of the heap. If entry is not nullptr, the element
  // is copied into *entry, otherwise it is discarded.
  // Returns false if the heap was already empty.
  // Time = O(log n).
  bool Pop(Pair *entry) {
    // Explicit cast avoids unsigned wrap-around when the heap is empty.
    int new_size = size() - 1;
    if (new_size < 0) {
      return false; // Already empty.
    }
    if (entry != nullptr) {
      *entry = heap_[0];
    }
    if (new_size > 0) {
      // Sift the hole at the start of the heap_ downwards to match the last
      // element.
      Pair hole_pair = heap_[new_size];
      heap_.resize(new_size);
      int hole_index = SiftDown(0, hole_pair);
      heap_[hole_index] = std::move(hole_pair);
    } else {
      heap_.resize(new_size);
    }
    return true;
  }
  // Removes the MAXIMUM element of the heap. (MIN from a MAX heap.) If entry is
  // not nullptr, the element is copied into *entry, otherwise it is discarded.
  // Time = O(n). Returns false if the heap was already empty.
  bool PopWorst(Pair *entry) {
    int worst_index = IndexOfWorst();
    if (worst_index < 0) {
      return false; // Heap was empty.
    }
    // Extract the worst element from the heap, leaving a hole at worst_index.
    if (entry != nullptr) {
      *entry = heap_[worst_index];
    }
    int heap_size = size() - 1;
    if (heap_size > 0) {
      // Sift the hole upwards to match the last element of the heap_.
      Pair hole_pair = heap_[heap_size];
      int hole_index = SiftUp(worst_index, hole_pair);
      heap_[hole_index] = hole_pair;
    }
    heap_.resize(heap_size);
    return true;
  }
  // Returns the index of the worst (largest) element, or -1 when empty.
  // Time = O(n/2).
  int IndexOfWorst() const {
    int heap_size = size();
    if (heap_size == 0) {
      return -1; // Empty heap: no worst element.
    }
    // Find the maximum element. Its index is guaranteed to be greater than
    // the index of the parent of the last element, since by the heap invariant
    // the parent must be less than or equal to the children.
    int worst_index = heap_size - 1;
    int end_parent = ParentNode(worst_index);
    for (int i = worst_index - 1; i > end_parent; --i) {
      if (heap_[worst_index] < heap_[i]) {
        worst_index = i;
      }
    }
    return worst_index;
  }
  // The pointed-to Pair has changed its key value, so the location of pair
  // is reshuffled to maintain the heap invariant.
  // Must be a valid pointer to an element of the heap_!
  // Caution! Since GenericHeap is based on vector, reallocs may occur
  // whenever the vector is extended and elements may get shuffled by any
  // Push or Pop operation. Therefore use this function only if Data in Pair is
  // of type DoublePtr, derived (first) from DoublePtr, or has a DoublePtr as
  // its first element. Reshuffles the heap to maintain the invariant.
  // Time = O(log n).
  void Reshuffle(Pair *pair) {
    int index = static_cast<int>(pair - &heap_[0]);
    Pair hole_pair = heap_[index];
    // The entry may need to move either way; try both directions.
    index = SiftDown(index, hole_pair);
    index = SiftUp(index, hole_pair);
    heap_[index] = std::move(hole_pair);
  }

private:
  // A hole in the heap exists at hole_index, and we want to fill it with the
  // given pair. SiftUp sifts the hole upward to the correct position and
  // returns the destination index without actually putting pair there.
  int SiftUp(int hole_index, const Pair &pair) {
    int parent;
    while (hole_index > 0 && pair < heap_[parent = ParentNode(hole_index)]) {
      heap_[hole_index] = heap_[parent];
      hole_index = parent;
    }
    return hole_index;
  }
  // A hole in the heap exists at hole_index, and we want to fill it with the
  // given pair. SiftDown sifts the hole downward to the correct position and
  // returns the destination index without actually putting pair there.
  int SiftDown(int hole_index, const Pair &pair) {
    int heap_size = size();
    int child;
    while ((child = LeftChild(hole_index)) < heap_size) {
      // Choose the smaller of the two children, if the right one exists.
      if (child + 1 < heap_size && heap_[child + 1] < heap_[child]) {
        ++child;
      }
      if (heap_[child] < pair) {
        heap_[hole_index] = heap_[child];
        hole_index = child;
      } else {
        break;
      }
    }
    return hole_index;
  }
  // Functions to navigate the tree. Unlike the original implementation, we
  // store the root at index 0.
  int ParentNode(int index) const {
    return (index + 1) / 2 - 1;
  }
  int LeftChild(int index) const {
    return index * 2 + 1;
  }

private:
  std::vector<Pair> heap_;
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_GENERICHEAP_H_
|
2301_81045437/tesseract
|
src/ccutil/genericheap.h
|
C++
|
apache-2.0
| 9,158
|
///////////////////////////////////////////////////////////////////////
// File: genericvector.h
// Description: Generic vector class
// Author: Daria Antonova
//
// (C) Copyright 2007, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
//
#ifndef TESSERACT_CCUTIL_GENERICVECTOR_H_
#define TESSERACT_CCUTIL_GENERICVECTOR_H_
#include "helpers.h"
#include "serialis.h"
#include <algorithm>
#include <cassert>
#include <climits> // for LONG_MAX
#include <cstdint> // for uint32_t
#include <cstdio>
#include <cstdlib>
#include <functional> // for std::function
namespace tesseract {
// Use PointerVector<T> below in preference to GenericVector<T*>, as that
// provides automatic deletion of pointers, [De]Serialize that works, and
// sort that works.
template <typename T>
class GenericVector {
public:
GenericVector() {
init(kDefaultVectorSize);
}
// Copy
GenericVector(const GenericVector &other) {
this->init(other.size());
this->operator+=(other);
}
GenericVector<T> &operator+=(const GenericVector &other);
GenericVector<T> &operator=(const GenericVector &other);
~GenericVector();
// Reserve some memory.
void reserve(int size);
// Double the size of the internal array.
void double_the_size();
// Resizes to size and sets all values to t.
void init_to_size(int size, const T &t);
void resize(int size, const T &t);
// Resizes to size without any initialization.
void resize_no_init(int size) {
reserve(size);
size_used_ = size;
}
// Return the size used.
unsigned size() const {
return size_used_;
}
// Workaround to avoid g++ -Wsign-compare warnings.
size_t unsigned_size() const {
static_assert(sizeof(size_used_) <= sizeof(size_t), "Wow! sizeof(size_t) < sizeof(int32_t)!!");
assert(0 <= size_used_);
return static_cast<size_t>(size_used_);
}
int size_reserved() const {
return size_reserved_;
}
// Return true if empty.
bool empty() const {
return size_used_ == 0;
}
// Return the object from an index.
T &at(int index) const {
assert(index >= 0 && index < size_used_);
return data_[index];
}
T &back() const;
T &operator[](int index) const;
// Returns the last object and removes it.
T pop_back();
// Return the index of the T object.
int get_index(const T &object) const;
// Push an element in the end of the array
int push_back(T object);
void operator+=(const T &t);
// Set the value at the given index
void set(const T &t, int index);
// Insert t at the given index, push other elements to the right.
void insert(const T &t, int index);
// Removes an element at the given index and
// shifts the remaining elements to the left.
void remove(int index);
// Truncates the array to the given size by removing the end.
// If the current size is less, the array is not expanded.
void truncate(int size) {
if (size < size_used_) {
size_used_ = size;
}
}
// Add a callback to be called to delete the elements when the array took
// their ownership.
void set_clear_callback(const std::function<void(T)> &cb) {
clear_cb_ = cb;
}
// Clear the array, calling the clear callback function if any.
// All the owned callbacks are also deleted.
// If you don't want the callbacks to be deleted, before calling clear, set
// the callback to nullptr.
void clear();
// Delete objects pointed to by data_[i]
void delete_data_pointers();
// This method clears the current object, then, does a shallow copy of
// its argument, and finally invalidates its argument.
// Callbacks are moved to the current object;
void move(GenericVector<T> *from);
// Read/Write the array to a file. This does _NOT_ read/write the callbacks.
// The callback given must be permanent since they will be called more than
// once. The given callback will be deleted at the end.
// If the callbacks are nullptr, then the data is simply read/written using
// fread (and swapping)/fwrite.
// Returns false on error or if the callback returns false.
// DEPRECATED. Use [De]Serialize[Classes] instead.
bool write(FILE *f, const std::function<bool(FILE *, const T &)> &cb) const;
bool read(TFile *f, const std::function<bool(TFile *, T *)> &cb);
// Writes a vector of simple types to the given file. Assumes that bitwise
// read/write of T will work. Returns false in case of error.
// TODO(rays) Change all callers to use TFile and remove deprecated methods.
bool Serialize(FILE *fp) const;
bool Serialize(TFile *fp) const;
// Reads a vector of simple types from the given file. Assumes that bitwise
// read/write will work with ReverseN according to sizeof(T).
// Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
// TFile is assumed to know about swapping.
bool DeSerialize(bool swap, FILE *fp);
bool DeSerialize(TFile *fp);
// Writes a vector of classes to the given file. Assumes the existence of
// bool T::Serialize(FILE* fp) const that returns false in case of error.
// Returns false in case of error.
bool SerializeClasses(FILE *fp) const;
// Reads a vector of classes from the given file. Assumes the existence of
// bool T::Deserialize(bool swap, FILE* fp) that returns false in case of
// error. Also needs T::T() and T::T(constT&), as init_to_size is used in
// this function. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool DeSerializeClasses(TFile *fp);
// Reverses the elements of the vector.
void reverse() {
for (int i = 0; i < size_used_ / 2; ++i) {
std::swap(data_[i], data_[size_used_ - 1 - i]);
}
}
// Sorts the members of this vector using the less than comparator (cmp_lt),
// which compares the values. Useful for GenericVectors to primitive types.
// Will not work so great for pointers (unless you just want to sort some
// pointers). You need to provide a specialization to sort_cmp to use
// your type.
void sort();
  // Sort the array into the order defined by the qsort function comparator.
  // The comparator function is as defined by qsort, ie. it receives pointers
  // to two Ts and returns negative if the first element is to appear earlier
  // in the result and positive if it is to appear later, with 0 for equal.
  // NOTE(review): qsort moves elements bytewise, so T should be trivially
  // copyable for well-defined behavior — confirm for non-POD uses.
  void sort(int (*comparator)(const void *, const void *)) {
    qsort(data_, size_used_, sizeof(*data_), comparator);
  }
// Swaps the elements with the given indices.
void swap(int index1, int index2) {
if (index1 != index2) {
T tmp = data_[index1];
data_[index1] = data_[index2];
data_[index2] = tmp;
}
}
// Returns true if all elements of *this are within the given range.
// Only uses operator<
/*bool WithinBounds(const T& rangemin, const T& rangemax) const {
for (int i = 0; i < size_used_; ++i) {
if (data_[i] < rangemin || rangemax < data_[i]) {
return false;
}
}
return true;
}*/
protected:
// Init the object, allocating size memory.
void init(int size);
// We are assuming that the object generally placed in the
// vector are small enough that for efficiency it makes sense
// to start with a larger initial size.
static const int kDefaultVectorSize = 4;
int32_t size_used_{};
int32_t size_reserved_{};
T *data_;
std::function<void(T)> clear_cb_;
};
// The default FileReader loads the whole file into the vector of char,
// returning false on error.
inline bool LoadDataFromFile(const char *filename, GenericVector<char> *data) {
  FILE *fp = fopen(filename, "rb");
  if (fp == nullptr) {
    return false;
  }
  fseek(fp, 0, SEEK_END);
  auto size = std::ftell(fp);
  fseek(fp, 0, SEEK_SET);
  bool result = false;
  // Trying to open a directory on Linux sets size to LONG_MAX; a negative
  // size means ftell failed. Both are rejected here.
  if (size > 0 && size < LONG_MAX) {
    // Reserve an extra byte in case the caller wants to append a '\0'.
    data->reserve(size + 1);
    data->resize_no_init(size);
    result = static_cast<long>(fread(&(*data)[0], 1, size, fp)) == size;
  }
  fclose(fp);
  return result;
}
// The default FileWriter writes the vector of char to the filename file,
// returning false on error.
inline bool SaveDataToFile(const GenericVector<char> &data, const char *filename) {
  FILE *fp = fopen(filename, "wb");
  if (fp == nullptr) {
    return false;
  }
  // Guard the empty case: GenericVector::operator[] asserts index < size,
  // so &data[0] on an empty vector is invalid. Writing an empty file is
  // still a success.
  bool result = true;
  if (data.size() > 0) {
    result = fwrite(&data[0], 1, data.size(), fp) == data.size();
  }
  fclose(fp);
  return result;
}
// Used by sort(): a qsort-style three-way comparator built on operator<.
// return < 0 if t1 < t2
// return 0 if t1 == t2
// return > 0 if t1 > t2
template <typename T>
int sort_cmp(const void *t1, const void *t2) {
  const T &lhs = *static_cast<const T *>(t1);
  const T &rhs = *static_cast<const T *>(t2);
  if (lhs < rhs) {
    return -1;
  }
  return rhs < lhs ? 1 : 0;
}
// Used by PointerVector::sort(): compares the pointed-to objects, not the
// pointers themselves, using operator<.
// return < 0 if *t1 < *t2
// return 0 if *t1 == *t2
// return > 0 if *t1 > *t2
template <typename T>
int sort_ptr_cmp(const void *t1, const void *t2) {
  const T &lhs = **static_cast<T *const *>(t1);
  const T &rhs = **static_cast<T *const *>(t2);
  if (lhs < rhs) {
    return -1;
  }
  return rhs < lhs ? 1 : 0;
}
// Subclass for a vector of pointers. Use in preference to GenericVector<T*>
// as it provides automatic deletion and correct serialization, with the
// corollary that all copy operations are deep copies of the pointed-to objects.
template <typename T>
class PointerVector : public GenericVector<T *> {
public:
  PointerVector() : GenericVector<T *>() {}
  explicit PointerVector(int size) : GenericVector<T *>(size) {}
  ~PointerVector() {
    // Clear must be called here, even though it is called again by the base,
    // as the base will call the wrong clear.
    clear();
  }
  // Copy must be deep, as the pointers will be automatically deleted on
  // destruction.
  // NOTE(review): operator+= dereferences every source pointer, so this
  // assumes the source contains no nullptr elements — confirm at call sites.
  PointerVector(const PointerVector &other) : GenericVector<T *>(other) {
    this->init(other.size());
    this->operator+=(other);
  }
  // Deep-appends other: each appended element is a freshly allocated copy.
  PointerVector<T> &operator+=(const PointerVector &other) {
    this->reserve(this->size_used_ + other.size_used_);
    for (unsigned i = 0; i < other.size(); ++i) {
      this->push_back(new T(*other.data_[i]));
    }
    return *this;
  }
  // Deep copy assignment; self-assignment is a no-op.
  PointerVector<T> &operator=(const PointerVector &other) {
    if (&other != this) {
      this->truncate(0);
      this->operator+=(other);
    }
    return *this;
  }
  // Removes an element at the given index and
  // shifts the remaining elements to the left.
  // The removed object is deleted (this vector owns its elements).
  void remove(int index) {
    delete GenericVector<T *>::data_[index];
    GenericVector<T *>::remove(index);
  }
  // Truncates the array to the given size by removing the end.
  // If the current size is less, the array is not expanded.
  // The removed objects are deleted.
  void truncate(int size) {
    for (int i = size; i < GenericVector<T *>::size_used_; ++i) {
      delete GenericVector<T *>::data_[i];
    }
    GenericVector<T *>::truncate(size);
  }
  // Clear the array, calling the clear callback function if any.
  // All the owned callbacks are also deleted.
  // If you don't want the callbacks to be deleted, before calling clear, set
  // the callback to nullptr.
  void clear() {
    GenericVector<T *>::delete_data_pointers();
    GenericVector<T *>::clear();
  }
  // Writes a vector of (pointers to) classes to the given file. Assumes the
  // existence of bool T::Serialize(FILE*) const that returns false in case of
  // error. There is no Serialize for simple types, as you would have a
  // normal GenericVector of those.
  // Returns false in case of error.
  // Each element is preceded by a non-null flag byte so that nullptr
  // entries round-trip through DeSerialize.
  bool Serialize(FILE *fp) const {
    int32_t used = GenericVector<T *>::size_used_;
    if (fwrite(&used, sizeof(used), 1, fp) != 1) {
      return false;
    }
    for (int i = 0; i < used; ++i) {
      int8_t non_null = GenericVector<T *>::data_[i] != nullptr;
      if (fwrite(&non_null, sizeof(non_null), 1, fp) != 1) {
        return false;
      }
      if (non_null && !GenericVector<T *>::data_[i]->Serialize(fp)) {
        return false;
      }
    }
    return true;
  }
  // As above, but via the TFile wrapper.
  bool Serialize(TFile *fp) const {
    int32_t used = GenericVector<T *>::size_used_;
    if (fp->FWrite(&used, sizeof(used), 1) != 1) {
      return false;
    }
    for (int i = 0; i < used; ++i) {
      int8_t non_null = GenericVector<T *>::data_[i] != nullptr;
      if (fp->FWrite(&non_null, sizeof(non_null), 1) != 1) {
        return false;
      }
      if (non_null && !GenericVector<T *>::data_[i]->Serialize(fp)) {
        return false;
      }
    }
    return true;
  }
  // Reads a vector of (pointers to) classes to the given file. Assumes the
  // existence of bool T::DeSerialize(bool, Tfile*) const that returns false in
  // case of error. There is no Serialize for simple types, as you would have a
  // normal GenericVector of those.
  // If swap is true, assumes a big/little-endian swap is needed.
  // Also needs T::T(), as new T is used in this function.
  // Returns false in case of error.
  bool DeSerialize(bool swap, FILE *fp) {
    uint32_t reserved;
    if (fread(&reserved, sizeof(reserved), 1, fp) != 1) {
      return false;
    }
    if (swap) {
      Reverse32(&reserved);
    }
    // Arbitrarily limit the number of elements to protect against bad data.
    assert(reserved <= UINT16_MAX);
    if (reserved > UINT16_MAX) {
      return false;
    }
    GenericVector<T *>::reserve(reserved);
    // truncate(0) deletes any existing owned elements before refilling.
    truncate(0);
    for (uint32_t i = 0; i < reserved; ++i) {
      int8_t non_null;
      if (fread(&non_null, sizeof(non_null), 1, fp) != 1) {
        return false;
      }
      T *item = nullptr;
      if (non_null != 0) {
        item = new T;
        if (!item->DeSerialize(swap, fp)) {
          delete item;
          return false;
        }
        this->push_back(item);
      } else {
        // Null elements should keep their place in the vector.
        this->push_back(nullptr);
      }
    }
    return true;
  }
  // Sorts the items pointed to by the members of this vector using
  // t::operator<().
  void sort() {
    this->GenericVector<T *>::sort(&sort_ptr_cmp<T>);
  }
};
// Init the object, allocating size memory, rounded up to at least
// kDefaultVectorSize. Does not free any existing data_ allocation, so it
// must only be used on a fresh or already-cleared vector.
template <typename T>
void GenericVector<T>::init(int size) {
  size_used_ = 0;
  if (size <= 0) {
    data_ = nullptr;
    size_reserved_ = 0;
  } else {
    if (size < kDefaultVectorSize) {
      size = kDefaultVectorSize;
    }
    data_ = new T[size];
    size_reserved_ = size;
  }
  clear_cb_ = nullptr;
}
// Destructor: releases the storage via clear(), which also invokes the
// clear callback on each element if one was installed.
template <typename T>
GenericVector<T>::~GenericVector() {
  clear();
}
// Reserve some memory. If the internal array contains elements, they are
// copied. Never shrinks; non-positive requests are ignored.
template <typename T>
void GenericVector<T>::reserve(int size) {
  if (size <= 0 || size_reserved_ >= size) {
    return;
  }
  if (size < kDefaultVectorSize) {
    size = kDefaultVectorSize;
  }
  T *grown = new T[size];
  for (int i = 0; i < size_used_; ++i) {
    grown[i] = data_[i];
  }
  delete[] data_;
  data_ = grown;
  size_reserved_ = size;
}
// Doubles the reserved capacity, or establishes the default capacity when
// the vector has no allocation yet.
template <typename T>
void GenericVector<T>::double_the_size() {
  reserve(size_reserved_ == 0 ? kDefaultVectorSize : 2 * size_reserved_);
}
// Resizes to size and sets all values to t.
template <typename T>
void GenericVector<T>::init_to_size(int size, const T &t) {
  reserve(size);
  size_used_ = size;
  for (int i = size - 1; i >= 0; --i) {
    data_[i] = t;
  }
}
// Resizes to size, setting every slot (old and new alike) to t.
template <typename T>
void GenericVector<T>::resize(int size, const T &t) {
  init_to_size(size, t);
}
// Element access; asserts the index is in range (unchecked in release
// builds). Returns a mutable reference even from a const vector.
template <typename T>
T &GenericVector<T>::operator[](int index) const {
  assert(index >= 0 && index < size_used_);
  return data_[index];
}
// Returns a reference to the last element; asserts the vector is non-empty.
template <typename T>
T &GenericVector<T>::back() const {
  assert(size_used_ > 0);
  return data_[size_used_ - 1];
}
// Returns the last object and removes it.
// Only size_used_ shrinks; the slot is not destroyed until overwritten or
// the storage is freed.
template <typename T>
T GenericVector<T>::pop_back() {
  assert(size_used_ > 0);
  return data_[--size_used_];
}
// Set the value at the given index. (The previous comment, "Return the
// object from an index", described get(), not this setter.)
template <typename T>
void GenericVector<T>::set(const T &t, int index) {
  assert(index >= 0 && index < size_used_);
  data_[index] = t;
}
// Shifts the rest of the elements to the right to make
// space for the new elements and inserts the given element
// at the specified index. index == size_used_ appends.
template <typename T>
void GenericVector<T>::insert(const T &t, int index) {
  assert(index >= 0 && index <= size_used_);
  if (size_reserved_ == size_used_) {
    double_the_size();
  }
  int i = size_used_;
  while (i > index) {
    data_[i] = data_[i - 1];
    --i;
  }
  data_[i] = t;
  ++size_used_;
}
// Removes an element at the given index and
// shifts the remaining elements to the left.
template <typename T>
void GenericVector<T>::remove(int index) {
  assert(index >= 0 && index < size_used_);
  --size_used_;
  for (int i = index; i < size_used_; ++i) {
    data_[i] = data_[i + 1];
  }
}
// Return the index of the first element equal to object, or -1 if absent.
template <typename T>
int GenericVector<T>::get_index(const T &object) const {
  int found = -1;
  for (int i = 0; i < size_used_ && found < 0; ++i) {
    if (object == data_[i]) {
      found = i;
    }
  }
  return found;
}
// Appends object to the end of the array, growing the storage if needed,
// and returns the index it was stored at.
template <typename T>
int GenericVector<T>::push_back(T object) {
  if (size_used_ == size_reserved_) {
    double_the_size();
  }
  const int index = size_used_++;
  data_[index] = std::move(object);
  return index;
}
// Appends t to the end of the vector (alias for push_back).
template <typename T>
void GenericVector<T>::operator+=(const T &t) {
  push_back(t);
}
// Appends a copy of every element of other to this vector.
// Reserves the combined size up front to avoid repeated reallocation.
template <typename T>
GenericVector<T> &GenericVector<T>::operator+=(const GenericVector &other) {
  this->reserve(size_used_ + other.size_used_);
  for (unsigned i = 0; i < other.size(); ++i) {
    this->operator+=(other.data_[i]);
  }
  return *this;
}
// Copy assignment: empties this vector (keeping its capacity) and copies
// in the elements of other. Self-assignment is a no-op.
template <typename T>
GenericVector<T> &GenericVector<T>::operator=(const GenericVector &other) {
  if (&other != this) {
    this->truncate(0);
    this->operator+=(other);
  }
  return *this;
}
// Clear the array, calling the callback function if any.
// Frees the storage and resets the vector to its freshly-constructed
// state, including dropping the clear callback itself.
template <typename T>
void GenericVector<T>::clear() {
  if (size_reserved_ > 0 && clear_cb_ != nullptr) {
    for (int i = 0; i < size_used_; ++i) {
      clear_cb_(data_[i]);
    }
  }
  delete[] data_;
  data_ = nullptr;
  size_used_ = 0;
  size_reserved_ = 0;
  clear_cb_ = nullptr;
}
// Delete objects pointed to by data_[i]. Only meaningful when T is a
// pointer type. The slots are left dangling, so callers must clear or
// overwrite the vector afterwards (see PointerVector::clear).
template <typename T>
void GenericVector<T>::delete_data_pointers() {
  for (int i = 0; i < size_used_; ++i) {
    delete data_[i];
  }
}
// Writes size_reserved_, size_used_, then the elements to f, using cb per
// element if supplied, otherwise a bitwise fwrite of the whole array.
// Returns false on I/O error or if cb returns false.
template <typename T>
bool GenericVector<T>::write(FILE *f, const std::function<bool(FILE *, const T &)> &cb) const {
  if (fwrite(&size_reserved_, sizeof(size_reserved_), 1, f) != 1) {
    return false;
  }
  if (fwrite(&size_used_, sizeof(size_used_), 1, f) != 1) {
    return false;
  }
  if (cb != nullptr) {
    for (int i = 0; i < size_used_; ++i) {
      if (!cb(f, data_[i])) {
        return false;
      }
    }
  } else {
    if (fwrite(data_, sizeof(T), size_used_, f) != unsigned_size()) {
      return false;
    }
  }
  return true;
}
// Reads the vector from f (counterpart of write()): capacity, then element
// count, then the elements, using cb per element if supplied, otherwise a
// bitwise read with endian handling done by TFile.
// Returns false on I/O error or corrupt data.
template <typename T>
bool GenericVector<T>::read(TFile *f, const std::function<bool(TFile *, T *)> &cb) {
  int32_t reserved;
  if (f->FReadEndian(&reserved, sizeof(reserved), 1) != 1) {
    return false;
  }
  reserve(reserved);
  if (f->FReadEndian(&size_used_, sizeof(size_used_), 1) != 1) {
    return false;
  }
  // Guard against corrupt data: the stored element count must be
  // non-negative and fit in the stored capacity, or the reads below would
  // overrun data_. Files produced by write() always satisfy this.
  if (size_used_ < 0 || size_used_ > reserved) {
    return false;
  }
  if (cb != nullptr) {
    for (int i = 0; i < size_used_; ++i) {
      if (!cb(f, data_ + i)) {
        return false;
      }
    }
  } else {
    if (f->FReadEndian(data_, sizeof(T), size_used_) != static_cast<unsigned>(size_used_)) {
      return false;
    }
  }
  return true;
}
// Writes a vector of simple types to the given file. Assumes that bitwise
// read/write of T will work. Returns false in case of error.
// Format: int32 element count followed by the raw elements.
template <typename T>
bool GenericVector<T>::Serialize(FILE *fp) const {
  if (fwrite(&size_used_, sizeof(size_used_), 1, fp) != 1) {
    return false;
  }
  if (fwrite(data_, sizeof(*data_), size_used_, fp) != unsigned_size()) {
    return false;
  }
  return true;
}
// As Serialize(FILE*), but via the TFile wrapper: int32 element count
// followed by the raw elements.
template <typename T>
bool GenericVector<T>::Serialize(TFile *fp) const {
  if (fp->FWrite(&size_used_, sizeof(size_used_), 1) != 1) {
    return false;
  }
  if (fp->FWrite(data_, sizeof(*data_), size_used_) != size_used_) {
    return false;
  }
  return true;
}
// Reads a vector of simple types from the given file. Assumes that bitwise
// read/write will work with ReverseN according to sizeof(T).
// Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
template <typename T>
bool GenericVector<T>::DeSerialize(bool swap, FILE *fp) {
  uint32_t reserved;
  if (fread(&reserved, sizeof(reserved), 1, fp) != 1) {
    return false;
  }
  if (swap) {
    Reverse32(&reserved);
  }
  // Arbitrarily limit the number of elements to protect against bad data.
  assert(reserved <= UINT16_MAX);
  if (reserved > UINT16_MAX) {
    return false;
  }
  reserve(reserved);
  size_used_ = reserved;
  if (fread(data_, sizeof(T), size_used_, fp) != unsigned_size()) {
    return false;
  }
  // Byte-swap each element in place after the bulk read.
  if (swap) {
    for (int i = 0; i < size_used_; ++i) {
      ReverseN(&data_[i], sizeof(data_[i]));
    }
  }
  return true;
}
// As DeSerialize(bool, FILE*), but TFile handles endian swapping itself.
// Note: uses a much larger element limit (50M) than the FILE* overload's
// UINT16_MAX.
template <typename T>
bool GenericVector<T>::DeSerialize(TFile *fp) {
  uint32_t reserved;
  if (fp->FReadEndian(&reserved, sizeof(reserved), 1) != 1) {
    return false;
  }
  // Arbitrarily limit the number of elements to protect against bad data.
  const uint32_t limit = 50000000;
  assert(reserved <= limit);
  if (reserved > limit) {
    return false;
  }
  reserve(reserved);
  size_used_ = reserved;
  return fp->FReadEndian(data_, sizeof(T), size_used_) == size_used_;
}
// Writes a vector of classes to the given file. Assumes the existence of
// bool T::Serialize(FILE* fp) const that returns false in case of error.
// Returns false in case of error.
template <typename T>
bool GenericVector<T>::SerializeClasses(FILE *fp) const {
  if (fwrite(&size_used_, sizeof(size_used_), 1, fp) != 1) {
    return false;
  }
  for (int i = 0; i < size_used_; ++i) {
    if (!data_[i].Serialize(fp)) {
      return false;
    }
  }
  return true;
}
// Reads a vector of classes from the given file. Assumes the existence of
// bool T::Deserialize(bool swap, FILE* fp) that returns false in case of
// error. Also needs T::T() and T::T(constT&), as init_to_size is used in
// this function. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
template <typename T>
bool GenericVector<T>::DeSerializeClasses(TFile *fp) {
  int32_t reserved;
  if (fp->FReadEndian(&reserved, sizeof(reserved), 1) != 1) {
    return false;
  }
  // Default-fill first, then deserialize each element in place.
  T empty;
  init_to_size(reserved, empty);
  for (int i = 0; i < reserved; ++i) {
    if (!data_[i].DeSerialize(fp)) {
      return false;
    }
  }
  return true;
}
// This method clears the current object, then does a shallow copy of
// its argument, and finally invalidates its argument.
// The clear callback is moved to the current object, and the source is
// left empty (but valid).
template <typename T>
void GenericVector<T>::move(GenericVector<T> *from) {
  this->clear();
  this->data_ = from->data_;
  this->size_reserved_ = from->size_reserved_;
  this->size_used_ = from->size_used_;
  this->clear_cb_ = from->clear_cb_;
  from->data_ = nullptr;
  from->clear_cb_ = nullptr;
  from->size_used_ = 0;
  from->size_reserved_ = 0;
}
// Sorts the elements in ascending order using operator< via sort_cmp.
template <typename T>
void GenericVector<T>::sort() {
  sort(&sort_cmp<T>);
}
} // namespace tesseract
#endif // TESSERACT_CCUTIL_GENERICVECTOR_H_
|
2301_81045437/tesseract
|
src/ccutil/genericvector.h
|
C++
|
apache-2.0
| 23,840
|
/******************************************************************************
*
* File: helpers.h
* Description: General utility functions
* Author: Daria Antonova
*
* (c) Copyright 2009, Google Inc.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
*****************************************************************************/
#ifndef TESSERACT_CCUTIL_HELPERS_H_
#define TESSERACT_CCUTIL_HELPERS_H_
#include <cassert>
#include <climits> // for INT_MIN, INT_MAX
#include <cmath> // std::isfinite
#include <cstdio>
#include <cstring>
#include <algorithm> // for std::find
#include <functional>
#include <random>
#include <string>
#include <vector>
#include "serialis.h"
namespace tesseract {
// Copy a std::string to a newly allocated, NUL-terminated char *.
// The caller owns the returned buffer and must delete[] it.
// TODO: Remove this function once the related code has been converted
// to use std::string.
inline char *copy_string(const std::string &from) {
  const auto length = from.length();
  char *copied = new char[length + 1];
  memcpy(copied, from.data(), length);
  copied[length] = '\0';
  return copied;
}
// Returns true if value occurs anywhere in data (linear scan, operator==).
template <class T>
inline bool contains(const std::vector<T> &data, const T &value) {
  for (const auto &item : data) {
    if (item == value) {
      return true;
    }
  }
  return false;
}
// Splits s on separator c, dropping empty tokens (consecutive separators
// and leading/trailing separators produce no entries).
inline const std::vector<std::string> split(const std::string &s, char c) {
  std::vector<std::string> parts;
  std::string token;
  for (char ch : s) {
    if (ch == c) {
      if (!token.empty()) {
        parts.push_back(token);
        token.clear();
      }
    } else {
      token += ch;
    }
  }
  if (!token.empty()) {
    parts.push_back(token);
  }
  return parts;
}
// A simple linear congruential random number generator, backed by
// std::minstd_rand, so sequences are reproducible for a given integer seed.
class TRand {
public:
  // Sets the seed to the given value.
  void set_seed(uint64_t seed) {
    e.seed(seed);
  }
  // Sets the seed using a hash of a string.
  // NOTE(review): std::hash is implementation-defined, so string-based
  // seeding is not reproducible across standard libraries — confirm callers
  // don't rely on cross-platform reproducibility.
  void set_seed(const std::string &str) {
    std::hash<std::string> hasher;
    set_seed(static_cast<uint64_t>(hasher(str)));
  }
  // Returns an integer in the range 0 to INT32_MAX.
  // (minstd_rand's raw output range is [1, 2^31 - 2].)
  int32_t IntRand() {
    return e();
  }
  // Returns a floating point value in the range [-range, range].
  double SignedRand(double range) {
    return range * 2.0 * IntRand() / INT32_MAX - range;
  }
  // Returns a floating point value in the range [0, range].
  double UnsignedRand(double range) {
    return range * IntRand() / INT32_MAX;
  }

private:
  std::minstd_rand e;
};
// Remove any trailing newline and/or carriage-return characters from the
// end of the NUL-terminated string, in place.
inline void chomp_string(char *str) {
  auto len = strlen(str);
  while (len > 0 && (str[len - 1] == '\n' || str[len - 1] == '\r')) {
    str[--len] = '\0';
  }
}
// Return the smallest multiple of block_size greater than or equal to n.
inline int RoundUp(int n, int block_size) {
  const int blocks = (n + block_size - 1) / block_size;
  return blocks * block_size;
}
// Clip a numeric value to the interval [lower_bound, upper_bound].
// The lower bound wins if the bounds are inverted (matching the original
// check order).
template <typename T>
inline T ClipToRange(const T &x, const T &lower_bound, const T &upper_bound) {
  if (x < lower_bound) {
    return lower_bound;
  }
  return x > upper_bound ? upper_bound : x;
}
// Extend the range [lower_bound, upper_bound] to include x.
// Both bounds are checked independently (no else), as in the original.
template <typename T1, typename T2>
inline void UpdateRange(const T1 &x, T2 *lower_bound, T2 *upper_bound) {
  T2 &lo = *lower_bound;
  T2 &hi = *upper_bound;
  if (x < lo) {
    lo = x;
  }
  if (x > hi) {
    hi = x;
  }
}
// Decrease lower_bound to be <= x_lo AND increase upper_bound to be >= x_hi.
template <typename T1, typename T2>
inline void UpdateRange(const T1 &x_lo, const T1 &x_hi, T2 *lower_bound, T2 *upper_bound) {
  T2 &lo = *lower_bound;
  T2 &hi = *upper_bound;
  if (x_lo < lo) {
    lo = x_lo;
  }
  if (x_hi > hi) {
    hi = x_hi;
  }
}
// Intersect the range [*lower2, *upper2] with the range [lower1, upper1],
// putting the result back in [*lower2, *upper2].
// If non-intersecting ranges are given, we end up with *lower2 > *upper2.
template <typename T>
inline void IntersectRange(const T &lower1, const T &upper1, T *lower2, T *upper2) {
  if (*lower2 < lower1) {
    *lower2 = lower1;
  }
  if (*upper2 > upper1) {
    *upper2 = upper1;
  }
}
// Proper modulo arithmetic operator. Returns a mod b that works for -ve a.
// For any integer a and positive b, returns r : 0<=r<b and a=n*b + r for
// some integer n.
inline int Modulo(int a, int b) {
  const int r = a % b;  // C++ remainder carries the sign of a.
  return r < 0 ? r + b : r;
}
// Integer division operator with rounding that works for negative input.
// Returns a divided by b, rounded to the nearest integer, without double
// counting at 0: 1/3 = 0, 0/3 = 0, -1/3 = 0, -2/3 = -1, -3/3 = -1, -4/3 = -1.
inline int DivRounded(int a, int b) {
  if (b < 0) {
    return -DivRounded(a, -b);
  }
  const int half = b / 2;
  return a >= 0 ? (a + half) / b : (a - half) / b;
}
// Return a double cast to int with rounding half away from zero.
// Asserts the value is finite and representable as an int.
inline int IntCastRounded(double x) {
  assert(std::isfinite(x));
  assert(x < INT_MAX);
  assert(x > INT_MIN);
  if (x >= 0.0) {
    return static_cast<int>(x + 0.5);
  }
  return -static_cast<int>(0.5 - x);
}
// Return a float cast to int with rounding half away from zero.
inline int IntCastRounded(float x) {
  assert(std::isfinite(x));
  if (x >= 0.0F) {
    return static_cast<int>(x + 0.5F);
  }
  return -static_cast<int>(0.5F - x);
}
// Reverse the order of bytes in a n byte quantity for big/little-endian
// switch. Only power-of-two sizes up to 8 are supported.
inline void ReverseN(void *ptr, int num_bytes) {
  assert(num_bytes == 1 || num_bytes == 2 || num_bytes == 4 || num_bytes == 8);
  char *bytes = static_cast<char *>(ptr);
  for (int lo = 0, hi = num_bytes - 1; lo < hi; ++lo, --hi) {
    char tmp = bytes[lo];
    bytes[lo] = bytes[hi];
    bytes[hi] = tmp;
  }
}
// Reverse the order of bytes in a 32 bit quantity for big/little-endian
// switch. Thin wrapper over ReverseN.
inline void Reverse32(void *ptr) {
  ReverseN(ptr, 4);
}
// Reads a vector of simple types from the given file. Assumes that bitwise
// read/write will work with ReverseN according to sizeof(T).
// Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
template <typename T>
bool DeSerialize(bool swap, FILE *fp, std::vector<T> &data) {
  uint32_t size;
  if (fread(&size, sizeof(size), 1, fp) != 1) {
    return false;
  }
  if (swap) {
    Reverse32(&size);
  }
  // Arbitrarily limit the number of elements to protect against bad data.
  assert(size <= UINT16_MAX);
  if (size > UINT16_MAX) {
    return false;
  }
  // TODO: optimize.
  data.resize(size);
  if (size > 0) {
    if (fread(&data[0], sizeof(T), size, fp) != size) {
      return false;
    }
    // Byte-swap each element in place after the bulk read.
    if (swap) {
      for (uint32_t i = 0; i < size; ++i) {
        ReverseN(&data[i], sizeof(T));
      }
    }
  }
  return true;
}
// Writes a vector of simple types to the given file. Assumes that bitwise
// read/write of T will work. Returns false in case of error.
// Class types are written via T::Serialize(FILE*); pointer types are
// written as a non-null flag byte followed by item->Serialize(fp).
template <typename T>
bool Serialize(FILE *fp, const std::vector<T> &data) {
  uint32_t size = data.size();
  if (fwrite(&size, sizeof(size), 1, fp) != 1) {
    return false;
  } else if constexpr (std::is_class<T>::value) {
    // Serialize a tesseract class.
    for (auto &item : data) {
      if (!item.Serialize(fp)) {
        return false;
      }
    }
  } else if constexpr (std::is_pointer<T>::value) {
    // Serialize pointers.
    // NOTE(review): relies on a scalar Serialize(FILE*, const uint8_t*)
    // overload (presumably from serialis.h) — confirm it is in scope.
    for (auto &item : data) {
      uint8_t non_null = (item != nullptr);
      if (!Serialize(fp, &non_null)) {
        return false;
      }
      if (non_null) {
        if (!item->Serialize(fp)) {
          return false;
        }
      }
    }
  } else if (size > 0) {
    if (fwrite(&data[0], sizeof(T), size, fp) != size) {
      return false;
    }
  }
  return true;
}
} // namespace tesseract
#endif // TESSERACT_CCUTIL_HELPERS_H_
|
2301_81045437/tesseract
|
src/ccutil/helpers.h
|
C++
|
apache-2.0
| 8,234
|
/******************************************************************************
** Filename: host.h
** Purpose: This is the system independent typedefs and defines
** Author: MN, JG, MD
**
** (c) Copyright Hewlett-Packard Company, 1988-1996.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#ifndef TESSERACT_CCUTIL_HOST_H_
#define TESSERACT_CCUTIL_HOST_H_
#include <tesseract/export.h>
#include <climits>
#include <limits>
/* _WIN32 */
#ifdef _WIN32
# ifndef NOMINMAX
# define NOMINMAX
# endif /* NOMINMAX */
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# undef min
# undef max
#endif // _WIN32
#ifndef _WIN32
# ifndef PATH_MAX
# define MAX_PATH 4096
# else
# define MAX_PATH PATH_MAX
# endif
#endif
namespace tesseract {
// Return true if x is within tolerance of y, i.e. |x - y| <= tolerance.
template <class T>
bool NearlyEqual(T x, T y, T tolerance) {
  const T diff = x - y;
  if (diff > tolerance) {
    return false;
  }
  return -diff <= tolerance;
}
} // namespace tesseract
#endif // TESSERACT_CCUTIL_HOST_H_
|
2301_81045437/tesseract
|
src/ccutil/host.h
|
C++
|
apache-2.0
| 1,572
|
///////////////////////////////////////////////////////////////////////
// File: indexmapbidi.cpp
// Description: Bi-directional mapping between a sparse and compact space.
// Author: rays@google.com (Ray Smith)
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "helpers.h"
#include "indexmapbidi.h"
#include "serialis.h"
namespace tesseract {
// Destructor.
// It is defined here, so the compiler can create a single vtable
// instead of weak vtables in every compilation unit.
IndexMap::~IndexMap() = default;
// SparseToCompact takes a sparse index to an index in the compact space.
// Uses a binary search to find the result. For faster speed use
// IndexMapBiDi, but that takes more memory.
// Returns -1 if sparse_index is not mapped.
// NOTE(review): indexes compact_map_[0] unconditionally, so this assumes a
// non-empty compact_map_ — confirm callers guarantee that.
int IndexMap::SparseToCompact(int sparse_index) const {
  // upper_bound finds the first entry > sparse_index; stepping back one
  // yields the only candidate that could equal sparse_index.
  auto pos = std::upper_bound(compact_map_.begin(), compact_map_.end(), sparse_index);
  if (pos > compact_map_.begin()) {
    --pos;
  }
  auto result = pos - compact_map_.begin();
  return compact_map_[result] == sparse_index ? result : -1;
}
// Copy from the input: duplicates both the sparse size and the
// compact-to-sparse table.
void IndexMap::CopyFrom(const IndexMap &src) {
  sparse_size_ = src.sparse_size_;
  compact_map_ = src.compact_map_;
}
// Copy from an IndexMapBiDi, keeping only the forward (compact_map_) side;
// the bidirectional sparse_map_ is intentionally dropped.
void IndexMap::CopyFrom(const IndexMapBiDi &src) {
  sparse_size_ = src.SparseSize();
  compact_map_ = src.compact_map_;
}
// Writes to the given file. Returns false in case of error.
// Format: sparse_size_ followed by the compact_map_ vector.
bool IndexMap::Serialize(FILE *fp) const {
  return tesseract::Serialize(fp, &sparse_size_) && tesseract::Serialize(fp, compact_map_);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool IndexMap::DeSerialize(bool swap, FILE *fp) {
  uint32_t sparse_size;
  if (!tesseract::DeSerialize(fp, &sparse_size)) {
    return false;
  }
  if (swap) {
    ReverseN(&sparse_size, sizeof(sparse_size));
  }
  // Arbitrarily limit the number of elements to protect against bad data.
  if (sparse_size > UINT16_MAX) {
    return false;
  }
  sparse_size_ = sparse_size;
  return tesseract::DeSerialize(swap, fp, compact_map_);
}
// Destructor.
// It is defined here, so the compiler can create a single vtable
// instead of weak vtables in every compilation unit.
IndexMapBiDi::~IndexMapBiDi() = default;
// Top-level init function in a single call to initialize a map to select
// a single contiguous subrange [start, end) of the sparse space to be mapped
// 1 to 1 to the compact space, with all other elements of the sparse space
// left unmapped.
// No need to call Setup after this.
void IndexMapBiDi::InitAndSetupRange(int sparse_size, int start, int end) {
  Init(sparse_size, false);
  for (int i = start; i < end; ++i) {
    SetMap(i, true);
  }
  Setup();
}
// Initializes just the sparse_map_ to the given size with either all
// forward indices mapped (all_mapped = true) or none (all_mapped = false).
// Call Setup immediately after, or make calls to SetMap first to adjust the
// mapping and then call Setup before using the map.
void IndexMapBiDi::Init(int size, bool all_mapped) {
  // Clear first in the unmapped case so resize fills every slot with -1
  // (resize alone would keep any existing prefix values).
  if (!all_mapped) {
    sparse_map_.clear();
  }
  sparse_map_.resize(size, -1);
  if (all_mapped) {
    for (int i = 0; i < size; ++i) {
      sparse_map_[i] = i;
    }
  }
}
// Sets a given index in the sparse_map_ to be mapped (0 is a placeholder;
// Setup() assigns the real compact indices) or not (-1).
void IndexMapBiDi::SetMap(int sparse_index, bool mapped) {
  sparse_map_[sparse_index] = mapped ? 0 : -1;
}
// Sets up the sparse_map_ and compact_map_ properly after Init and
// some calls to SetMap. Assumes an ordered 1-1 map from set indices
// in the forward map to the compact space.
void IndexMapBiDi::Setup() {
  // First pass: number the mapped sparse entries consecutively.
  int compact_size = 0;
  for (int &i : sparse_map_) {
    if (i >= 0) {
      i = compact_size++;
    }
  }
  // Second pass: build the inverse (compact -> sparse) table.
  compact_map_.clear();
  compact_map_.resize(compact_size, -1);
  for (size_t i = 0; i < sparse_map_.size(); ++i) {
    if (sparse_map_[i] >= 0) {
      compact_map_[sparse_map_[i]] = i;
    }
  }
  sparse_size_ = sparse_map_.size();
}
// Copy from the input, duplicating both direction tables.
void IndexMapBiDi::CopyFrom(const IndexMapBiDi &src) {
  sparse_map_ = src.sparse_map_;
  compact_map_ = src.compact_map_;
  sparse_size_ = sparse_map_.size();
}
// Merges the two compact space indices. May be called many times, but
// the merges must be concluded by a call to CompleteMerges.
// Returns true if a merge was actually performed.
bool IndexMapBiDi::Merge(int compact_index1, int compact_index2) {
  // Find the current master index for index1 and index2.
  compact_index1 = MasterCompactIndex(compact_index1);
  compact_index2 = MasterCompactIndex(compact_index2);
  // Be sure that index1 < index2, so the lower index always survives.
  if (compact_index1 > compact_index2) {
    int tmp = compact_index1;
    compact_index1 = compact_index2;
    compact_index2 = tmp;
  } else if (compact_index1 == compact_index2) {
    // Already merged: nothing to do.
    return false;
  }
  // To save iterating over all sparse_map_ entries, simply make the master
  // entry for index2 point to index1.
  // This leaves behind a potential chain of parents that needs to be chased,
  // as above.
  sparse_map_[compact_map_[compact_index2]] = compact_index1;
  if (compact_index1 >= 0) {
    compact_map_[compact_index2] = compact_map_[compact_index1];
  }
  return true;
}
// Completes one or more Merge operations by further compacting the
// compact space. Unused compact space indices are removed, and the used
// ones above shuffled down to fill the gaps.
// Example:
// Input sparse_map_: (x indicates -1)
// x x 0 x 2 x x 4 x 0 x 2 x
// Output sparse_map_:
// x x 0 x 1 x x 2 x 0 x 1 x
// Output compact_map_:
// 2 4 7.
void IndexMapBiDi::CompleteMerges() {
  // Ensure each sparse_map_entry contains a master compact_map_ index.
  // Also tracks the upper bound of the used compact indices.
  int compact_size = 0;
  for (int &i : sparse_map_) {
    int compact_index = MasterCompactIndex(i);
    i = compact_index;
    if (compact_index >= compact_size) {
      compact_size = compact_index + 1;
    }
  }
  // Re-generate the compact_map leaving holes for unused indices.
  // The lowest sparse index mapped to each compact index becomes canonical.
  compact_map_.clear();
  compact_map_.resize(compact_size, -1);
  for (size_t i = 0; i < sparse_map_.size(); ++i) {
    if (sparse_map_[i] >= 0) {
      if (compact_map_[sparse_map_[i]] == -1) {
        compact_map_[sparse_map_[i]] = i;
      }
    }
  }
  // Compact the compact_map, leaving tmp_compact_map saying where each
  // index went to in the compacted map.
  std::vector<int32_t> tmp_compact_map(compact_size, -1);
  compact_size = 0;
  for (size_t i = 0; i < compact_map_.size(); ++i) {
    if (compact_map_[i] >= 0) {
      tmp_compact_map[i] = compact_size;
      compact_map_[compact_size++] = compact_map_[i];
    }
  }
  compact_map_.resize(compact_size);
  // Now modify the entries in the sparse map to point to the new locations.
  for (int &i : sparse_map_) {
    if (i >= 0) {
      i = tmp_compact_map[i];
    }
  }
}
// Writes to the given file. Returns false in case of error.
bool IndexMapBiDi::Serialize(FILE *fp) const {
  // The base class writes sparse_size_ and compact_map_.
  if (!IndexMap::Serialize(fp)) {
    return false;
  }
  // Make a vector containing the rest of the map. If the map is many-to-one
  // then each additional sparse entry needs to be stored.
  // Normally we store only the compact map to save space.
  std::vector<int32_t> remaining_pairs;
  for (unsigned i = 0; i < sparse_map_.size(); ++i) {
    // Store only the entries that cannot be reconstructed by inverting
    // compact_map_: mapped sparse indices that are not the canonical
    // sparse index for their compact index.
    if (sparse_map_[i] >= 0 && static_cast<unsigned>(compact_map_[sparse_map_[i]]) != i) {
      remaining_pairs.push_back(i);
      remaining_pairs.push_back(sparse_map_[i]);
    }
  }
  return tesseract::Serialize(fp, remaining_pairs);
}
// Reads from the given file. Returns false in case of error.
// If swap is true, assumes a big/little-endian swap is needed.
bool IndexMapBiDi::DeSerialize(bool swap, FILE *fp) {
  // The base class restores sparse_size_ and compact_map_.
  if (!IndexMap::DeSerialize(swap, fp)) {
    return false;
  }
  std::vector<int32_t> remaining_pairs;
  if (!tesseract::DeSerialize(swap, fp, remaining_pairs)) {
    return false;
  }
  // Rebuild the canonical part of sparse_map_ by inverting compact_map_.
  sparse_map_.clear();
  sparse_map_.resize(sparse_size_, -1);
  for (unsigned i = 0; i < compact_map_.size(); ++i) {
    sparse_map_[compact_map_[i]] = i;
  }
  // Apply the extra many-to-one entries, stored as a flat sequence of
  // [sparse_index, compact_index] pairs. Note the double increment of i:
  // once here and once in the loop header, consuming two elements per pass.
  for (size_t i = 0; i < remaining_pairs.size(); ++i) {
    int sparse_index = remaining_pairs[i++];
    sparse_map_[sparse_index] = remaining_pairs[i];
  }
  return true;
}
// Bulk calls to SparseToCompact.
// Maps the given array of sparse indices to an array of compact indices.
// Assumes the input is sorted. The output indices are sorted and uniqued.
// Return value is the number of "missed" features, being features that
// don't map to the compact feature space.
int IndexMapBiDi::MapFeatures(const std::vector<int> &sparse, std::vector<int> *compact) const {
  compact->clear();
  int missed_features = 0;
  // Because the input is sorted, duplicate sparse indices are adjacent, so
  // remembering only the previously-emitted compact index suffices to keep
  // the output unique.
  int last_emitted = -1;
  for (int sparse_index : sparse) {
    int compact_index = sparse_map_[sparse_index];
    if (compact_index < 0) {
      ++missed_features; // Unmapped feature.
    } else if (compact_index != last_emitted) {
      compact->push_back(compact_index);
      last_emitted = compact_index;
    }
  }
  return missed_features;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/ccutil/indexmapbidi.cpp
|
C++
|
apache-2.0
| 9,593
|
///////////////////////////////////////////////////////////////////////
// File: indexmapbidi.h
// Description: Bi-directional mapping between a sparse and compact space.
// Author: rays@google.com (Ray Smith)
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_INDEXMAPBIDI_H_
#define TESSERACT_CCUTIL_INDEXMAPBIDI_H_
#include <tesseract/export.h> // for TESS_API
#include <cstdint> // for int32_t
#include <cstdio>
#include <vector>
namespace tesseract {
class IndexMapBiDi;
// Bidirectional one-to-one mapping between a sparse and a compact discrete
// space. Many entries in the sparse space are unmapped, but those that are
// mapped have a 1-1 mapping to (and from) the compact space, where all
// values are used. This is useful for forming subsets of larger collections,
// such as subsets of character sets, or subsets of binary feature spaces.
//
// This base class provides basic functionality with binary search for the
// SparseToCompact mapping to save memory.
// For a faster inverse mapping, or to allow a many-to-one mapping, use
// IndexMapBiDi below.
// NOTE: there are currently no methods to setup an IndexMap on its own!
// It must be initialized by copying from an IndexMapBiDi or by DeSerialize.
class TESS_API IndexMap {
public:
  virtual ~IndexMap();

  // SparseToCompact takes a sparse index to an index in the compact space.
  // Uses a binary search to find the result. For faster speed use
  // IndexMapBiDi, but that takes more memory.
  virtual int SparseToCompact(int sparse_index) const;
  // CompactToSparse takes a compact index to the corresponding index in the
  // sparse space. No bounds check: compact_index must be in
  // [0, CompactSize()).
  int CompactToSparse(int compact_index) const {
    return compact_map_[compact_index];
  }
  // The size of the sparse space.
  virtual int SparseSize() const {
    return sparse_size_;
  }
  // The size of the compact space.
  int CompactSize() const {
    return compact_map_.size();
  }

  // Copy from the input.
  void CopyFrom(const IndexMap &src);
  void CopyFrom(const IndexMapBiDi &src);

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE *fp);

protected:
  // The sparse space covers integers in the range [0, sparse_size_-1].
  int32_t sparse_size_;
  // The compact space covers integers in the range [0, compact_map_.size()-1].
  // Each element contains the corresponding sparse index.
  std::vector<int32_t> compact_map_;
};
// Bidirectional many-to-one mapping between a sparse and a compact discrete
// space. As with IndexMap, many entries may be unmapped, but unlike IndexMap,
// of those that are, many may be mapped to the same compact index.
// If the map is many-to-one, it is not possible to directly obtain all the
// sparse indices that map to a single compact index.
// This map is time- rather than space-efficient. It stores the entire sparse
// space.
// IndexMapBiDi may be initialized in one of 3 ways:
// 1. Init(size, true);
// Setup();
// Sets a complete 1:1 mapping with no unmapped elements.
// 2. Init(size, false);
// for ... SetMap(index, true);
// Setup();
// Specifies precisely which sparse indices are mapped. The mapping is 1:1.
// 3. Either of the above, followed by:
// for ... Merge(index1, index2);
// CompleteMerges();
// Allows a many-to-one mapping by merging compact space indices.
class TESS_API IndexMapBiDi : public IndexMap {
public:
  ~IndexMapBiDi() override;

  // Top-level init function in a single call to initialize a map to select
  // a single contiguous subrange [start, end) of the sparse space to be mapped
  // 1 to 1 to the compact space, with all other elements of the sparse space
  // left unmapped.
  // No need to call Setup after this.
  void InitAndSetupRange(int sparse_size, int start, int end);

  // Initializes just the sparse_map_ to the given size with either all
  // forward indices mapped (all_mapped = true) or none (all_mapped = false).
  // Call Setup immediately after, or make calls to SetMap first to adjust the
  // mapping and then call Setup before using the map.
  void Init(int size, bool all_mapped);
  // Sets a given index in the sparse_map_ to be mapped or not.
  void SetMap(int sparse_index, bool mapped);
  // Sets up the sparse_map_ and compact_map_ properly after Init and
  // some calls to SetMap. Assumes an ordered 1-1 map from set indices
  // in the sparse space to the compact space.
  void Setup();

  // Merges the two compact space indices. May be called many times, but
  // the merges must be concluded by a call to CompleteMerges.
  // Returns true if a merge was actually performed.
  bool Merge(int compact_index1, int compact_index2);
  // Returns true if the given compact index has been deleted.
  bool IsCompactDeleted(int index) const {
    return MasterCompactIndex(index) < 0;
  }
  // Completes one or more Merge operations by further compacting the
  // compact space.
  void CompleteMerges();

  // SparseToCompact takes a sparse index to an index in the compact space.
  // Direct O(1) array lookup, unlike the base class's binary search.
  int SparseToCompact(int sparse_index) const override {
    return sparse_map_[sparse_index];
  }
  // The size of the sparse space.
  int SparseSize() const override {
    return sparse_map_.size();
  }

  // Copy from the input.
  void CopyFrom(const IndexMapBiDi &src);

  // Writes to the given file. Returns false in case of error.
  bool Serialize(FILE *fp) const;
  // Reads from the given file. Returns false in case of error.
  // If swap is true, assumes a big/little-endian swap is needed.
  bool DeSerialize(bool swap, FILE *fp);

  // Bulk calls to SparseToCompact.
  // Maps the given array of sparse indices to an array of compact indices.
  // Assumes the input is sorted. The output indices are sorted and uniqued.
  // Return value is the number of "missed" features, being features that
  // don't map to the compact feature space.
  int MapFeatures(const std::vector<int> &sparse, std::vector<int> *compact) const;

private:
  // Returns the master compact index for a given compact index.
  // During a multiple merge operation, several compact indices may be
  // combined, so we need to be able to find the master of all.
  // Chases the chain of redirections left behind by Merge().
  int MasterCompactIndex(int compact_index) const {
    while (compact_index >= 0 && sparse_map_[compact_map_[compact_index]] != compact_index) {
      compact_index = sparse_map_[compact_map_[compact_index]];
    }
    return compact_index;
  }

  // Direct look-up of the compact index for each element in sparse space.
  std::vector<int32_t> sparse_map_;
};
} // namespace tesseract.
#endif // TESSERACT_CCUTIL_INDEXMAPBIDI_H_
|
2301_81045437/tesseract
|
src/ccutil/indexmapbidi.h
|
C++
|
apache-2.0
| 7,391
|
// Copyright 2012 Google Inc. All Rights Reserved.
// Author: rays@google.com (Ray Smith)
///////////////////////////////////////////////////////////////////////
// File: kdpair.h
// Description: Template pair class like STL pair but geared towards
// the Key+Data design pattern in which some data needs
// to be sorted or kept in a heap sorted on some separate key.
// Author: Ray Smith.
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_KDPAIR_H_
#define TESSERACT_CCUTIL_KDPAIR_H_
#include <vector>
namespace tesseract {
// A useful base struct to facilitate the common operation of sorting a vector
// of simple or smart-pointer data using a separate key. Similar to STL pair.
template <typename Key, typename Data>
struct KDPair {
  KDPair() = default;
  KDPair(Key k, Data d) : data_(d), key_(k) {}
  // Equality considers only the key; the data payload is ignored.
  // (Returns bool rather than the legacy int: idiomatic, and implicitly
  // convertible to int for any caller that relied on the old signature.)
  bool operator==(const KDPair<Key, Data> &other) const {
    return key_ == other.key_;
  }
  // Accessors for the data payload.
  Data &data() {
    return data_;
  }
  const Data &data() const {
    return data_;
  }
  // Accessors for the sort key.
  Key &key() {
    return key_;
  }
  const Key &key() const {
    return key_;
  }
  // WARNING! Keep data as the first element! KDPairInc and KDPairDec depend
  // on the order of these elements so they can downcast pointers appropriately
  // for use by GenericHeap::Reshuffle.
  Data data_;
  Key key_;
};
// Specialization of KDPair to provide operator< for sorting in increasing order
// and recasting of data pointers for use with DoublePtr.
template <typename Key, typename Data>
struct KDPairInc : public KDPair<Key, Data> {
  KDPairInc() = default;
  KDPairInc(Key k, Data d) : KDPair<Key, Data>(k, d) {}
  // Operator< facilitates sorting in increasing order.
  // (Returns bool rather than the legacy int: idiomatic, and implicitly
  // convertible to int for any caller that relied on the old signature.)
  bool operator<(const KDPairInc<Key, Data> &other) const {
    return this->key() < other.key();
  }
  // Returns the input Data pointer recast to a KDPairInc pointer.
  // Just casts a pointer to the first element to a pointer to the whole struct.
  // Relies on data_ being the first member of KDPair (see the warning there).
  static KDPairInc *RecastDataPointer(Data *data_ptr) {
    return reinterpret_cast<KDPairInc *>(data_ptr);
  }
};
// Specialization of KDPair to provide operator< for sorting in decreasing order
// and recasting of data pointers for use with DoublePtr.
template <typename Key, typename Data>
struct KDPairDec : public KDPair<Key, Data> {
  KDPairDec() = default;
  KDPairDec(Key k, Data d) : KDPair<Key, Data>(k, d) {}
  // Operator< facilitates sorting in decreasing order by using operator> on
  // the key values.
  // (Returns bool rather than the legacy int: idiomatic, and implicitly
  // convertible to int for any caller that relied on the old signature.)
  bool operator<(const KDPairDec<Key, Data> &other) const {
    return this->key() > other.key();
  }
  // Returns the input Data pointer recast to a KDPairDec pointer.
  // Just casts a pointer to the first element to a pointer to the whole struct.
  // Relies on data_ being the first member of KDPair (see the warning there).
  static KDPairDec *RecastDataPointer(Data *data_ptr) {
    return reinterpret_cast<KDPairDec *>(data_ptr);
  }
};
// A useful base class to facilitate the common operation of sorting a vector
// of owned pointer data using a separate key. This class owns its data pointer,
// deleting it when it has finished with it, and providing copy constructor and
// operator= that have move semantics so that the data does not get copied and
// only a single instance of KDPtrPair holds a specific data pointer.
template <typename Key, typename Data>
class KDPtrPair {
public:
  KDPtrPair() : data_(nullptr) {}
  // Takes ownership of d.
  KDPtrPair(Key k, Data *d) : data_(d), key_(k) {}
  // Copy constructor steals the pointer from src and nulls it in src, thereby
  // moving the (single) ownership of the data.
  // NOTE: the const_cast-style mutation of src is a pre-C++11 move emulation;
  // after a "copy", src no longer holds its data.
  KDPtrPair(const KDPtrPair &src) : data_(src.data_), key_(src.key_) {
    ((KDPtrPair &)src).data_ = nullptr;
  }
  // Destructor deletes data, assuming it is the sole owner.
  ~KDPtrPair() {
    delete this->data_;
    this->data_ = nullptr;
  }
  // Operator= steals the pointer from src and nulls it in src, thereby
  // moving the (single) ownership of the data.
  // The self-assignment guard is required: without it, assigning an object
  // to itself would delete the data and keep a dangling/null pointer.
  void operator=(const KDPtrPair &src) {
    if (this != &src) {
      delete this->data_;
      this->data_ = src.data_;
      ((KDPtrPair &)src).data_ = nullptr;
      this->key_ = src.key_;
    }
  }
  // Equality considers only the key, not the data.
  // (Returns bool rather than the legacy int: idiomatic, and implicitly
  // convertible to int for any caller that relied on the old signature.)
  bool operator==(const KDPtrPair<Key, Data> &other) const {
    return key_ == other.key_;
  }
  // Accessors.
  const Key &key() const {
    return key_;
  }
  void set_key(const Key &new_key) {
    key_ = new_key;
  }
  const Data *data() const {
    return data_;
  }
  // Sets the data pointer, taking ownership of the data.
  void set_data(Data *new_data) {
    delete data_;
    data_ = new_data;
  }
  // Relinquishes ownership of the data pointer (setting it to nullptr).
  Data *extract_data() {
    Data *result = data_;
    data_ = nullptr;
    return result;
  }

private:
  // Data members are private to keep deletion of data_ encapsulated.
  Data *data_;
  Key key_;
};
// Specialization of KDPtrPair to provide operator< for sorting in increasing
// order.
template <typename Key, typename Data>
struct KDPtrPairInc : public KDPtrPair<Key, Data> {
  // Since we are doing non-standard stuff we have to duplicate *all* the
  // constructors and operator=.
  KDPtrPairInc() : KDPtrPair<Key, Data>() {}
  KDPtrPairInc(Key k, Data *d) : KDPtrPair<Key, Data>(k, d) {}
  KDPtrPairInc(const KDPtrPairInc &src) : KDPtrPair<Key, Data>(src) {}
  void operator=(const KDPtrPairInc &src) {
    KDPtrPair<Key, Data>::operator=(src);
  }
  // Operator< facilitates sorting in increasing order.
  // (Returns bool rather than the legacy int: idiomatic, and implicitly
  // convertible to int for any caller that relied on the old signature.)
  bool operator<(const KDPtrPairInc<Key, Data> &other) const {
    return this->key() < other.key();
  }
};
// Specialization of KDPtrPair to provide operator< for sorting in decreasing
// order.
template <typename Key, typename Data>
struct KDPtrPairDec : public KDPtrPair<Key, Data> {
  // Since we are doing non-standard stuff we have to duplicate *all* the
  // constructors and operator=.
  KDPtrPairDec() : KDPtrPair<Key, Data>() {}
  KDPtrPairDec(Key k, Data *d) : KDPtrPair<Key, Data>(k, d) {}
  KDPtrPairDec(const KDPtrPairDec &src) : KDPtrPair<Key, Data>(src) {}
  void operator=(const KDPtrPairDec &src) {
    KDPtrPair<Key, Data>::operator=(src);
  }
  // Operator< facilitates sorting in decreasing order by using operator> on
  // the key values.
  // (Returns bool rather than the legacy int: idiomatic, and implicitly
  // convertible to int for any caller that relied on the old signature.)
  bool operator<(const KDPtrPairDec<Key, Data> &other) const {
    return this->key() > other.key();
  }
};
// Specialization for a pair of ints in increasing order.
using IntKDPair = KDPairInc<int, int>;

// Vector of IntKDPair.
class KDVector : public std::vector<IntKDPair> {
  // TODO(rays) Add some code to manipulate a KDVector. For now there
  // is nothing and this class is effectively a specialization typedef.
  // NOTE(review): public inheritance from std::vector is safe only while
  // instances are never deleted through a std::vector* (no virtual dtor).
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_KDPAIR_H_
|
2301_81045437/tesseract
|
src/ccutil/kdpair.h
|
C++
|
apache-2.0
| 7,213
|
/**********************************************************************
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef LIST_ITERATOR_H
#define LIST_ITERATOR_H
#include <stdint.h>
namespace tesseract {
// Type-safe iterator adapter: wraps a base list ITERATOR so that its
// element-returning methods yield CLASSNAME* instead of the base link type.
template <typename ITERATOR, typename CLASSNAME>
class X_ITER : public ITERATOR {
public:
  X_ITER() = default;
  // Attaches the iterator to the given list (any list type the base
  // ITERATOR constructor accepts).
  template <typename U>
  X_ITER(U *list) : ITERATOR(list) {}
  // Current element, downcast to the concrete type.
  CLASSNAME *data() {
    return static_cast<CLASSNAME *>(ITERATOR::data());
  }
  // Element at the given signed offset from the current position
  // (semantics of the offset are defined by the base ITERATOR).
  CLASSNAME *data_relative(int8_t offset) {
    return static_cast<CLASSNAME *>(ITERATOR::data_relative(offset));
  }
  // Delegates to the base iterator's forward() — presumably advances the
  // iterator and returns an element; see the base class for exact semantics.
  CLASSNAME *forward() {
    return static_cast<CLASSNAME *>(ITERATOR::forward());
  }
  // Delegates to the base iterator's extract() — removes the current element
  // from the list; the returned pointer is presumably owned by the caller.
  CLASSNAME *extract() {
    return static_cast<CLASSNAME *>(ITERATOR::extract());
  }
};
// Type-safe owning list: a CONTAINER of CLASSNAME elements that deletes its
// elements on clear()/destruction. ITERATOR is the matching base iterator.
template <typename CONTAINER, typename ITERATOR, typename CLASSNAME>
class X_LIST : public CONTAINER {
public:
  X_LIST() = default;
  // Copying is forbidden because the list owns its elements; use deep_copy
  // with an element-cloning function instead.
  X_LIST(const X_LIST &) = delete;
  X_LIST &operator=(const X_LIST &) = delete;
  ~X_LIST() {
    clear();
  }

  /* delete elements */
  // Empties the list, deleting every element (links are CLASSNAME objects).
  void clear() {
    CONTAINER::internal_clear([](void *link) {delete reinterpret_cast<CLASSNAME *>(link);});
  }

  /* Become a deep copy of src_list */
  // copier clones a single element; it is applied to each element of
  // src_list in order and the clones are appended to this list.
  template <typename U>
  void deep_copy(const U *src_list, CLASSNAME *(*copier)(const CLASSNAME *)) {
    X_ITER<ITERATOR, CLASSNAME> from_it(const_cast<U *>(src_list));
    X_ITER<ITERATOR, CLASSNAME> to_it(this);

    for (from_it.mark_cycle_pt(); !from_it.cycled_list(); from_it.forward())
      to_it.add_after_then_move((*copier)(from_it.data()));
  }
};
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccutil/list.h
|
C++
|
apache-2.0
| 2,167
|
/**********************************************************************
* File: lsterr.h (Formerly listerr.h)
* Description: Errors shared by list modules
* Author: Phil Cheatle
*
* (C) Copyright 1990, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCUTIL_LSTERR_H_
#define TESSERACT_CCUTIL_LSTERR_H_
#include "errcode.h" //must be last include
namespace tesseract {
#ifndef NDEBUG
// Error codes shared by the list modules and their iterators. They back the
// debug-build sanity checks only, so they are compiled out under NDEBUG.
constexpr ERRCODE NO_LIST("Iterator not set to a list");
constexpr ERRCODE NULL_DATA("List would have returned a nullptr data pointer");
constexpr ERRCODE NULL_CURRENT("List current position is nullptr");
constexpr ERRCODE NULL_NEXT("Next element on the list is nullptr");
constexpr ERRCODE NULL_PREV("Previous element on the list is nullptr");
constexpr ERRCODE EMPTY_LIST("List is empty");
constexpr ERRCODE BAD_PARAMETER("List parameter error");
constexpr ERRCODE STILL_LINKED("Attempting to add an element with non nullptr links, to a list");
#endif // !NDEBUG
} // namespace tesseract
#endif // TESSERACT_CCUTIL_LSTERR_H_
|
2301_81045437/tesseract
|
src/ccutil/lsterr.h
|
C++
|
apache-2.0
| 1,657
|
///////////////////////////////////////////////////////////////////////
// File: object_cache.h
// Description: A string indexed object cache.
// Author: David Eger
//
// (C) Copyright 2012, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_OBJECT_CACHE_H_
#define TESSERACT_CCUTIL_OBJECT_CACHE_H_
#include <algorithm>  // for std::remove_if
#include <functional> // for std::function
#include <mutex>      // for std::mutex
#include <string>
#include <vector> // for std::vector
#include "ccutil.h"
#include "errcode.h"
namespace tesseract {
// A simple object cache which maps a string to an object of type T.
// Usually, these are expensive objects that are loaded from disk.
// Reference counting is performed, so every Get() needs to be followed later
// by a Free(). Actual deletion is accomplished by DeleteUnusedObjects().
// All public methods are internally synchronized on a single mutex.
template <typename T>
class ObjectCache {
public:
  ObjectCache() = default;
  // Deletes every cached object whose reference count has dropped to zero.
  // Objects still in use are leaked deliberately, with a warning, because
  // deleting them here would invalidate live pointers held by callers.
  ~ObjectCache() {
    std::lock_guard<std::mutex> guard(mu_);
    for (auto &it : cache_) {
      if (it.count > 0) {
        tprintf(
            "ObjectCache(%p)::~ObjectCache(): WARNING! LEAK! object %p "
            "still has count %d (id %s)\n",
            static_cast<void *>(this), static_cast<void *>(it.object),
            it.count, it.id.c_str());
      } else {
        delete it.object;
        it.object = nullptr;
      }
    }
  }

  // Return a pointer to the object identified by id.
  // If we haven't yet loaded the object, use loader to load it.
  // If loader fails to load it, record a nullptr entry in the cache
  // and return nullptr -- further attempts to load will fail (even
  // with a different loader) until DeleteUnusedObjects() is called.
  // The loader is invoked only on a cache miss; a hit increments the
  // reference count instead.
  T *Get(const std::string &id, std::function<T *()> loader) {
    T *retval = nullptr;
    std::lock_guard<std::mutex> guard(mu_);
    for (auto &it : cache_) {
      if (id == it.id) {
        retval = it.object;
        if (it.object != nullptr) {
          it.count++;
        }
        return retval;
      }
    }
    // Cache miss: create the entry first, then load into it.
    cache_.push_back(ReferenceCount());
    ReferenceCount &rc = cache_.back();
    rc.id = id;
    retval = rc.object = loader();
    rc.count = (retval != nullptr) ? 1 : 0;
    return retval;
  }

  // Decrement the count for t.
  // Return whether we knew about the given pointer.
  bool Free(T *t) {
    if (t == nullptr) {
      return false;
    }
    std::lock_guard<std::mutex> guard(mu_);
    for (auto &it : cache_) {
      if (it.object == t) {
        --it.count;
        return true;
      }
    }
    return false;
  }

  // Deletes all objects (and failed nullptr entries) whose reference count
  // is zero or below, using the erase-remove idiom. Entries still in use
  // are kept. Requires <algorithm> for std::remove_if.
  void DeleteUnusedObjects() {
    std::lock_guard<std::mutex> guard(mu_);
    cache_.erase(std::remove_if(cache_.begin(), cache_.end(),
                                [](const ReferenceCount &it) {
                                  if (it.count <= 0) {
                                    delete it.object;
                                    return true;
                                  } else {
                                    return false;
                                  }
                                }),
                 cache_.end());
  }

private:
  struct ReferenceCount {
    std::string id; // A unique ID to identify the object (think path on disk)
    T *object;      // A copy of the object in memory. Can be delete'd.
    int count;      // A count of the number of active users of this object.
  };

  std::mutex mu_;
  std::vector<ReferenceCount> cache_;
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_OBJECT_CACHE_H_
|
2301_81045437/tesseract
|
src/ccutil/object_cache.h
|
C++
|
apache-2.0
| 4,114
|
/**********************************************************************
* File: params.cpp
* Description: Initialization and setting of Tesseract parameters.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "params.h"
#include "helpers.h" // for chomp_string
#include "host.h" // tesseract/export.h, windows.h for MAX_PATH
#include "serialis.h" // for TFile
#include "tprintf.h"
#include <climits> // for INT_MIN, INT_MAX
#include <cmath> // for NAN, std::isnan
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <locale> // for std::locale::classic
#include <sstream> // for std::stringstream
namespace tesseract {
// Returns the process-wide parameter vectors shared by all Tesseract
// instances. Meyers singleton: constructed on first use.
tesseract::ParamsVectors *GlobalParams() {
  static tesseract::ParamsVectors global_params;
  return &global_params;
}
// Reads parameter settings from the given file into member_params, subject
// to the given constraint. Returns true if there was ANY error, including
// failure to open the file (note: true == error, not success).
bool ParamUtils::ReadParamsFile(const char *file, SetParamConstraint constraint,
                                ParamsVectors *member_params) {
  TFile fp;
  if (!fp.Open(file, nullptr)) {
    tprintf("read_params_file: Can't open %s\n", file);
    return true; // "any error" convention: true signals failure.
  }
  return ReadParamsFromFp(constraint, &fp, member_params);
}
// Parses lines of the form "name<whitespace>value" from fp and applies each
// via SetParam. Lines starting with '#' and empty lines are skipped.
// Returns true if ANY line named an unknown parameter.
bool ParamUtils::ReadParamsFromFp(SetParamConstraint constraint, TFile *fp,
                                  ParamsVectors *member_params) {
  char line[MAX_PATH]; // input line
  bool anyerr = false; // true if any error
  bool foundit;        // found parameter
  char *valptr;        // value field

  while (fp->FGets(line, MAX_PATH) != nullptr) {
    if (line[0] != '\r' && line[0] != '\n' && line[0] != '#') {
      chomp_string(line); // remove newline
      // Scan to the first space/tab: that terminates the parameter name.
      for (valptr = line; *valptr && *valptr != ' ' && *valptr != '\t'; valptr++) {
        ;
      }
      if (*valptr) {    // found blank
        *valptr = '\0'; // make name a string
        do {
          valptr++; // find end of blanks
        } while (*valptr == ' ' || *valptr == '\t');
      }
      // If no separator was found, valptr now points at the terminating NUL,
      // i.e. the value is the empty string.
      foundit = SetParam(line, valptr, constraint, member_params);

      if (!foundit) {
        anyerr = true; // had an error
        tprintf("Warning: Parameter not found: %s\n", line);
      }
    }
  }
  return anyerr;
}
// Sets the parameter of the given name to the given value, trying each
// parameter type in turn. Returns true if a parameter of any type matched
// the name (even if the value failed to parse for that type).
bool ParamUtils::SetParam(const char *name, const char *value, SetParamConstraint constraint,
                          ParamsVectors *member_params) {
  // Look for the parameter among string parameters.
  auto *sp =
      FindParam<StringParam>(name, GlobalParams()->string_params, member_params->string_params);
  if (sp != nullptr && sp->constraint_ok(constraint)) {
    sp->set_value(value);
  }
  // An empty value is only acceptable for a string parameter.
  if (*value == '\0') {
    return (sp != nullptr);
  }
  // Look for the parameter among int parameters.
  auto *ip = FindParam<IntParam>(name, GlobalParams()->int_params, member_params->int_params);
  if (ip && ip->constraint_ok(constraint)) {
    // INT_MIN doubles as the parse-failure sentinel, so the literal value
    // INT_MIN itself cannot be set through this path.
    int intval = INT_MIN;
    std::stringstream stream(value);
    stream.imbue(std::locale::classic());
    stream >> intval;
    if (intval != INT_MIN) {
      ip->set_value(intval);
    }
  }
  // Look for the parameter among bool parameters.
  auto *bp = FindParam<BoolParam>(name, GlobalParams()->bool_params, member_params->bool_params);
  if (bp != nullptr && bp->constraint_ok(constraint)) {
    // Only the first character matters: T/t/Y/y/1 = true, F/f/N/n/0 = false.
    // Any other value leaves the parameter unchanged.
    if (*value == 'T' || *value == 't' || *value == 'Y' || *value == 'y' || *value == '1') {
      bp->set_value(true);
    } else if (*value == 'F' || *value == 'f' || *value == 'N' || *value == 'n' || *value == '0') {
      bp->set_value(false);
    }
  }
  // Look for the parameter among double parameters.
  auto *dp =
      FindParam<DoubleParam>(name, GlobalParams()->double_params, member_params->double_params);
  if (dp != nullptr && dp->constraint_ok(constraint)) {
    // NAN is the parse-failure sentinel for doubles.
    double doubleval = NAN;
    std::stringstream stream(value);
    stream.imbue(std::locale::classic());
    stream >> doubleval;
    if (!std::isnan(doubleval)) {
      dp->set_value(doubleval);
    }
  }
  return (sp || ip || bp || dp);
}
// Fetches the value of the named parameter as a string, trying each
// parameter type in turn. Returns false if no parameter of the name exists.
// Bools are rendered as "1"/"0"; doubles use classic-locale formatting.
bool ParamUtils::GetParamAsString(const char *name, const ParamsVectors *member_params,
                                  std::string *value) {
  // Look for the parameter among string parameters.
  auto *sp =
      FindParam<StringParam>(name, GlobalParams()->string_params, member_params->string_params);
  if (sp) {
    *value = sp->c_str();
    return true;
  }
  // Look for the parameter among int parameters.
  auto *ip = FindParam<IntParam>(name, GlobalParams()->int_params, member_params->int_params);
  if (ip) {
    *value = std::to_string(int32_t(*ip));
    return true;
  }
  // Look for the parameter among bool parameters.
  auto *bp = FindParam<BoolParam>(name, GlobalParams()->bool_params, member_params->bool_params);
  if (bp != nullptr) {
    *value = bool(*bp) ? "1" : "0";
    return true;
  }
  // Look for the parameter among double parameters.
  auto *dp =
      FindParam<DoubleParam>(name, GlobalParams()->double_params, member_params->double_params);
  if (dp != nullptr) {
    std::ostringstream stream;
    stream.imbue(std::locale::classic());
    stream << double(*dp);
    *value = stream.str();
    return true;
  }
  return false;
}
// Prints all parameters (globals first, then member_params if non-null) to
// fp, one per line as: name<TAB>value<TAB>description.
// Output is buffered into a single string and written with one fprintf.
void ParamUtils::PrintParams(FILE *fp, const ParamsVectors *member_params) {
  int num_iterations = (member_params == nullptr) ? 1 : 2;
  // When printing to stdout info text is included.
  std::ostringstream stream;
  stream.imbue(std::locale::classic());
  for (int v = 0; v < num_iterations; ++v) {
    const ParamsVectors *vec = (v == 0) ? GlobalParams() : member_params;
    for (auto int_param : vec->int_params) {
      stream << int_param->name_str() << '\t' << (int32_t)(*int_param) << '\t'
             << int_param->info_str() << '\n';
    }
    for (auto bool_param : vec->bool_params) {
      stream << bool_param->name_str() << '\t' << bool(*bool_param) << '\t'
             << bool_param->info_str() << '\n';
    }
    for (auto string_param : vec->string_params) {
      stream << string_param->name_str() << '\t' << string_param->c_str() << '\t'
             << string_param->info_str() << '\n';
    }
    for (auto double_param : vec->double_params) {
      stream << double_param->name_str() << '\t' << (double)(*double_param) << '\t'
             << double_param->info_str() << '\n';
    }
  }
  fprintf(fp, "%s", stream.str().c_str());
}
// Resets all parameters back to default values;
// resets the globals first, then (when non-null) the member parameters.
void ParamUtils::ResetToDefaults(ParamsVectors *member_params) {
  // Generic helper: resets every parameter in one typed vector.
  auto reset_vector = [](auto &params) {
    for (auto *param : params) {
      param->ResetToDefault();
    }
  };
  const int num_iterations = (member_params == nullptr) ? 1 : 2;
  for (int v = 0; v < num_iterations; ++v) {
    ParamsVectors *vec = (v == 0) ? GlobalParams() : member_params;
    reset_vector(vec->int_params);
    reset_vector(vec->bool_params);
    reset_vector(vec->string_params);
    reset_vector(vec->double_params);
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/params.cpp
|
C++
|
apache-2.0
| 7,474
|
/**********************************************************************
* File: params.h
* Description: Class definitions of the *_VAR classes for tunable constants.
* Author: Ray Smith
*
* (C) Copyright 1991, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef PARAMS_H
#define PARAMS_H
#include <tesseract/export.h> // for TESS_API
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>
namespace tesseract {
class IntParam;
class BoolParam;
class StringParam;
class DoubleParam;
class TFile;
// Enum for constraints on what kind of params should be set by SetParam().
// Checked by Param::constraint_ok().
enum SetParamConstraint {
  SET_PARAM_CONSTRAINT_NONE,           // Any parameter may be set.
  SET_PARAM_CONSTRAINT_DEBUG_ONLY,     // Only debug parameters.
  SET_PARAM_CONSTRAINT_NON_DEBUG_ONLY, // Only non-debug parameters.
  SET_PARAM_CONSTRAINT_NON_INIT_ONLY,  // Only params not needed before init.
};
// Registries of the four parameter types. Each *Param object registers
// itself with one of these vectors on construction and unregisters in its
// destructor; the pointers are not owned by this struct.
struct ParamsVectors {
  std::vector<IntParam *> int_params;
  std::vector<BoolParam *> bool_params;
  std::vector<StringParam *> string_params;
  std::vector<DoubleParam *> double_params;
};
// Utility functions for working with Tesseract parameters.
class TESS_API ParamUtils {
public:
  // Reads a file of parameter definitions and set/modify the values therein.
  // If the filename begins with a + or -, the BoolVariables will be
  // ORed or ANDed with any current values.
  // Blank lines and lines beginning # are ignored.
  // Values may have any whitespace after the name and are the rest of line.
  static bool ReadParamsFile(const char *file, // filename to read
                             SetParamConstraint constraint, ParamsVectors *member_params);
  // Read parameters from the given file pointer.
  static bool ReadParamsFromFp(SetParamConstraint constraint, TFile *fp,
                               ParamsVectors *member_params);
  // Set a parameter to have the given value.
  static bool SetParam(const char *name, const char *value, SetParamConstraint constraint,
                       ParamsVectors *member_params);
  // Returns the pointer to the parameter with the given name (of the
  // appropriate type) if it was found in the vector obtained from
  // GlobalParams() or in the given member_params.
  template <class T>
  static T *FindParam(const char *name, const std::vector<T *> &global_vec,
                      const std::vector<T *> &member_vec) {
    // Search the global registry first, then the member registry.
    for (const std::vector<T *> *vec : {&global_vec, &member_vec}) {
      for (T *candidate : *vec) {
        if (strcmp(candidate->name_str(), name) == 0) {
          return candidate;
        }
      }
    }
    return nullptr;
  }
  // Removes the given pointer to the param from the given vector
  // (first occurrence only).
  template <class T>
  static void RemoveParam(T *param_ptr, std::vector<T *> *vec) {
    for (size_t i = 0; i < vec->size(); ++i) {
      if ((*vec)[i] == param_ptr) {
        vec->erase(vec->begin() + i);
        return;
      }
    }
  }
  // Fetches the value of the named param as a string. Returns false if not
  // found.
  static bool GetParamAsString(const char *name, const ParamsVectors *member_params,
                               std::string *value);
  // Print parameters to the given file.
  static void PrintParams(FILE *fp, const ParamsVectors *member_params);
  // Resets all parameters back to default values;
  static void ResetToDefaults(ParamsVectors *member_params);
};
// Definition of various parameter types.
// Base class of all parameter types: stores the name, the help text and
// the init/debug classification flags shared by the concrete params.
class Param {
public:
  ~Param() = default;
  const char *name_str() const {
    return name_;
  }
  const char *info_str() const {
    return info_;
  }
  bool is_init() const {
    return init_;
  }
  bool is_debug() const {
    return debug_;
  }
  // Returns true if this parameter may be modified under 'constraint'.
  bool constraint_ok(SetParamConstraint constraint) const {
    switch (constraint) {
      case SET_PARAM_CONSTRAINT_NONE:
        return true;
      case SET_PARAM_CONSTRAINT_DEBUG_ONLY:
        return is_debug();
      case SET_PARAM_CONSTRAINT_NON_DEBUG_ONLY:
        return !is_debug();
      case SET_PARAM_CONSTRAINT_NON_INIT_ONLY:
        return !is_init();
    }
    return false;
  }

protected:
  Param(const char *name, const char *comment, bool init)
      : name_(name), info_(comment), init_(init) {
    // A parameter whose name mentions "debug" or "display" is a debug param.
    debug_ = (strstr(name, "debug") != nullptr) || (strstr(name, "display") != nullptr);
  }
  const char *name_; // name of this parameter
  const char *info_; // for menus
  bool init_;        // needs to be set before init
  bool debug_;
};
// 32-bit integer parameter. Registers itself with the owning ParamsVectors
// on construction and unregisters on destruction.
class IntParam : public Param {
public:
  IntParam(int32_t value, const char *name, const char *comment, bool init, ParamsVectors *vec)
      : Param(name, comment, init),
        value_(value),
        default_(value),
        params_vec_(&(vec->int_params)) {
    vec->int_params.push_back(this);
  }
  ~IntParam() {
    ParamUtils::RemoveParam<IntParam>(this, params_vec_);
  }
  operator int32_t() const {
    return value_;
  }
  void operator=(int32_t value) {
    value_ = value;
  }
  void set_value(int32_t value) {
    value_ = value;
  }
  // Restores the construction-time value.
  void ResetToDefault() {
    value_ = default_;
  }
  // Copies the value from the same-named parameter in 'vec', if present.
  void ResetFrom(const ParamsVectors *vec) {
    for (auto *other : vec->int_params) {
      if (strcmp(other->name_str(), name_) == 0) {
        value_ = *other;
        break;
      }
    }
  }

private:
  int32_t value_;
  int32_t default_;
  // Pointer to the vector that contains this param (not owned by this class).
  std::vector<IntParam *> *params_vec_;
};
// Boolean parameter. Registers itself with the owning ParamsVectors on
// construction and unregisters on destruction.
class BoolParam : public Param {
public:
  BoolParam(bool value, const char *name, const char *comment, bool init, ParamsVectors *vec)
      : Param(name, comment, init),
        value_(value),
        default_(value),
        params_vec_(&(vec->bool_params)) {
    vec->bool_params.push_back(this);
  }
  ~BoolParam() {
    ParamUtils::RemoveParam<BoolParam>(this, params_vec_);
  }
  operator bool() const {
    return value_;
  }
  void operator=(bool value) {
    value_ = value;
  }
  void set_value(bool value) {
    value_ = value;
  }
  // Restores the construction-time value.
  void ResetToDefault() {
    value_ = default_;
  }
  // Copies the value from the same-named parameter in 'vec', if present.
  void ResetFrom(const ParamsVectors *vec) {
    for (auto *other : vec->bool_params) {
      if (strcmp(other->name_str(), name_) == 0) {
        value_ = *other;
        break;
      }
    }
  }

private:
  bool value_;
  bool default_;
  // Pointer to the vector that contains this param (not owned by this class).
  std::vector<BoolParam *> *params_vec_;
};
// String parameter with a few std::string conveniences. Registers itself
// with the owning ParamsVectors on construction, unregisters on destruction.
class StringParam : public Param {
public:
  StringParam(const char *value, const char *name, const char *comment, bool init,
              ParamsVectors *vec)
      : Param(name, comment, init),
        value_(value),
        default_(value),
        params_vec_(&(vec->string_params)) {
    vec->string_params.push_back(this);
  }
  ~StringParam() {
    ParamUtils::RemoveParam<StringParam>(this, params_vec_);
  }
  operator std::string &() {
    return value_;
  }
  const char *c_str() const {
    return value_.c_str();
  }
  bool contains(char c) const {
    return value_.find(c) != std::string::npos;
  }
  bool empty() const {
    return value_.empty();
  }
  bool operator==(const std::string &other) const {
    return value_ == other;
  }
  void operator=(const std::string &value) {
    value_ = value;
  }
  void set_value(const std::string &value) {
    value_ = value;
  }
  // Restores the construction-time value.
  void ResetToDefault() {
    value_ = default_;
  }
  // Copies the value from the same-named parameter in 'vec', if present.
  void ResetFrom(const ParamsVectors *vec) {
    for (auto *other : vec->string_params) {
      if (strcmp(other->name_str(), name_) == 0) {
        value_ = *other;
        break;
      }
    }
  }

private:
  std::string value_;
  std::string default_;
  // Pointer to the vector that contains this param (not owned by this class).
  std::vector<StringParam *> *params_vec_;
};
// Double parameter. Registers itself with the owning ParamsVectors on
// construction and unregisters on destruction.
class DoubleParam : public Param {
public:
  DoubleParam(double value, const char *name, const char *comment, bool init, ParamsVectors *vec)
      : Param(name, comment, init),
        value_(value),
        default_(value),
        params_vec_(&(vec->double_params)) {
    vec->double_params.push_back(this);
  }
  ~DoubleParam() {
    ParamUtils::RemoveParam<DoubleParam>(this, params_vec_);
  }
  operator double() const {
    return value_;
  }
  void operator=(double value) {
    value_ = value;
  }
  void set_value(double value) {
    value_ = value;
  }
  // Restores the construction-time value.
  void ResetToDefault() {
    value_ = default_;
  }
  // Copies the value from the same-named parameter in 'vec', if present.
  void ResetFrom(const ParamsVectors *vec) {
    for (auto *other : vec->double_params) {
      if (strcmp(other->name_str(), name_) == 0) {
        value_ = *other;
        break;
      }
    }
  }

private:
  double value_;
  double default_;
  // Pointer to the vector that contains this param (not owned by this class).
  std::vector<DoubleParam *> *params_vec_;
};
// Global parameter lists.
//
// To avoid the problem of undetermined order of static initialization
// global_params are accessed through the GlobalParams function that
// initializes the static pointer to global_params only on the first time
// GlobalParams() is called.
//
// TODO(daria): remove GlobalParams() when all global Tesseract
// parameters are converted to members.
TESS_API
ParamsVectors *GlobalParams();
/*************************************************************************
* Note on defining parameters.
*
* The values of the parameters defined with *_INIT_* macros are guaranteed
* to be loaded from config files before Tesseract initialization is done
* (there is no such guarantee for parameters defined with the other macros).
*************************************************************************/
// Declaration macros for class headers: declare the member/global without
// initializing it.
#define INT_VAR_H(name) ::tesseract::IntParam name
#define BOOL_VAR_H(name) ::tesseract::BoolParam name
#define STRING_VAR_H(name) ::tesseract::StringParam name
#define double_VAR_H(name) ::tesseract::DoubleParam name
// Definition macros for global parameters, registered with GlobalParams().
#define INT_VAR(name, val, comment) \
  ::tesseract::IntParam name(val, #name, comment, false, ::tesseract::GlobalParams())
#define BOOL_VAR(name, val, comment) \
  ::tesseract::BoolParam name(val, #name, comment, false, ::tesseract::GlobalParams())
#define STRING_VAR(name, val, comment) \
  ::tesseract::StringParam name(val, #name, comment, false, ::tesseract::GlobalParams())
#define double_VAR(name, val, comment) \
  ::tesseract::DoubleParam name(val, #name, comment, false, ::tesseract::GlobalParams())
// Constructor-initializer macros for member parameters registered with a
// member ParamsVectors 'vec'.
#define INT_MEMBER(name, val, comment, vec) name(val, #name, comment, false, vec)
#define BOOL_MEMBER(name, val, comment, vec) name(val, #name, comment, false, vec)
#define STRING_MEMBER(name, val, comment, vec) name(val, #name, comment, false, vec)
#define double_MEMBER(name, val, comment, vec) name(val, #name, comment, false, vec)
// Same, but marked init: guaranteed to be loaded from config files before
// Tesseract initialization is done (see the note above).
#define INT_INIT_MEMBER(name, val, comment, vec) name(val, #name, comment, true, vec)
#define BOOL_INIT_MEMBER(name, val, comment, vec) name(val, #name, comment, true, vec)
#define STRING_INIT_MEMBER(name, val, comment, vec) name(val, #name, comment, true, vec)
#define double_INIT_MEMBER(name, val, comment, vec) name(val, #name, comment, true, vec)
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/ccutil/params.h
|
C++
|
apache-2.0
| 11,850
|
// Copyright 2006 Google Inc.
// All Rights Reserved.
// Author: renn
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include <cctype>
#include <climits> // for CHAR_BIT
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits> // for std::numeric_limits
#include "scanutils.h"
// Per-conversion option flags for a single % directive.
enum Flags {
  FL_SPLAT = 0x01, // Drop the value, do not assign
  FL_INV = 0x02,   // Character-set with inverse
  FL_WIDTH = 0x04, // Field width specified
  FL_MINUS = 0x08, // Negative number
};
// Integer size ranks relative to int; the h/l length modifiers move the
// rank down/up from RANK_INT.
enum Ranks {
  RANK_CHAR = -2,
  RANK_SHORT = -1,
  RANK_INT = 0,
  RANK_LONG = 1,
  RANK_LONGLONG = 2,
  RANK_PTR = std::numeric_limits<int>::max() // Special value used for pointers
};
// Clamping bounds for repeated h/l modifiers, and the ranks that the
// j/z/t modifiers map to on this platform.
const enum Ranks kMinRank = RANK_CHAR;
const enum Ranks kMaxRank = RANK_LONGLONG;
const enum Ranks kIntMaxRank = RANK_LONGLONG;
const enum Ranks kSizeTRank = RANK_LONG;
const enum Ranks kPtrDiffRank = RANK_LONG;
// Reasons for aborting the scan loop.
enum Bail {
  BAIL_NONE = 0, // No error condition
  BAIL_EOF,      // Hit EOF
  BAIL_ERR       // Conversion mismatch
};
// Helper functions ------------------------------------------------------------
// Number of bits in an unsigned long: the word size of the %[ match bitmap.
inline size_t LongBit() {
  return sizeof(long) * CHAR_BIT;
}
// Skips ASCII whitespace on the stream. Returns the first non-space
// character, which is pushed back so the caller can read it again.
static inline int SkipSpace(FILE *s) {
  int c;
  do {
    c = fgetc(s);
  } while (isascii(c) && isspace(c));
  ungetc(c, s); // Make sure next char is available for reading
  return c;
}
// Sets bit 'bit' in a bitmap stored as an array of unsigned longs.
static inline void SetBit(unsigned long *bitmap, unsigned int bit) {
  const unsigned int word = bit / LongBit();
  const unsigned int shift = bit % LongBit();
  bitmap[word] |= 1UL << shift;
}
// Returns 1 if bit 'bit' is set in the bitmap, otherwise 0.
static inline int TestBit(unsigned long *bitmap, unsigned int bit) {
  const unsigned int word = bit / LongBit();
  const unsigned int shift = bit % LongBit();
  return static_cast<int>(bitmap[word] >> shift) & 1;
}
// Returns the numeric value of digit character 'ch' in the given base
// (8, 10 or 16), or -1 if 'ch' is not a valid digit in that base.
// Fix: the letter branches previously accepted 'A'..'Z'/'a'..'z' for hex,
// so e.g. 'g' parsed as digit 16; hex digits are only a-f/A-F.
static inline int DigitValue(int ch, int base) {
  if (ch >= '0' && ch <= '9') {
    // '8' and '9' are not octal digits.
    if (base >= 10 || ch <= '7') {
      return ch - '0';
    }
  } else if (ch >= 'A' && ch <= 'F' && base == 16) {
    return ch - 'A' + 10;
  } else if (ch >= 'a' && ch <= 'f' && base == 16) {
    return ch - 'a' + 10;
  }
  return -1;
}
// IO (re-)implementations -----------------------------------------------------
// Parses an optionally-signed integer from the stream in the given base.
// base == 0 selects C-style auto-detection: a leading "0x"/"0X" means hex,
// a leading '0' means octal, anything else decimal.
// Fix: when base was 0 and the input had no leading '0', base stayed 0, so
// the accumulation v = v * base + d zeroed out all but the last digit
// ("%i" on "123" returned 3). Default to decimal in that case.
static uintmax_t streamtoumax(FILE *s, int base) {
  int minus = 0;
  uintmax_t v = 0;
  int d, c = 0;
  // Skip leading whitespace.
  for (c = fgetc(s); isascii(c) && isspace(c); c = fgetc(s)) {
    ;
  }
  // Single optional + or -
  if (c == '-' || c == '+') {
    minus = (c == '-');
    c = fgetc(s);
  }
  // Assign correct base
  if (base == 0) {
    if (c == '0') {
      c = fgetc(s);
      if (c == 'x' || c == 'X') {
        base = 16;
        c = fgetc(s);
      } else {
        base = 8;
      }
    } else {
      base = 10; // No '0' prefix: plain decimal.
    }
  } else if (base == 16) {
    // Explicit hex: allow an optional 0x/0X prefix.
    if (c == '0') {
      c = fgetc(s);
      if (c == 'x' || c == 'X') {
        c = fgetc(s);
      }
    }
  }
  // Actual number parsing
  for (; (c != EOF) && (d = DigitValue(c, base)) >= 0; c = fgetc(s)) {
    v = v * base + d;
  }
  ungetc(c, s); // Push back the first non-digit for the next conversion.
  // Unsigned negation gives the two's-complement value that callers
  // truncate to the destination width.
  return minus ? -v : v;
}
// Parses a decimal floating-point number of the form
// [+-]digits[.digits][eE[+-]digits] from the stream, locale-independently.
// The first unconsumed character is pushed back with ungetc.
static double streamtofloat(FILE *s) {
  bool minus = false;
  uint64_t v = 0; // Integer-part digits.
  int d, c;
  uint64_t k = 1; // Scale of the fraction (10^number_of_fraction_digits).
  uint64_t w = 0; // Fraction-part digits.
  // Skip leading whitespace.
  for (c = fgetc(s); isascii(c) && isspace(c); c = fgetc(s)) {
    ;
  }
  // Single optional + or -
  if (c == '-' || c == '+') {
    minus = (c == '-');
    c = fgetc(s);
  }
  // Actual number parsing
  for (; c != EOF && (d = DigitValue(c, 10)) >= 0; c = fgetc(s)) {
    v = v * 10 + d;
  }
  // Optional fraction part.
  if (c == '.') {
    for (c = fgetc(s); c != EOF && (d = DigitValue(c, 10)) >= 0; c = fgetc(s)) {
      w = w * 10 + d;
      k *= 10;
    }
  }
  double f = v + static_cast<double>(w) / k;
  // Optional decimal exponent (scientific notation IS handled here).
  if (c == 'e' || c == 'E') {
    c = fgetc(s);
    int expsign = 1;
    if (c == '-' || c == '+') {
      expsign = (c == '-') ? -1 : 1;
      c = fgetc(s);
    }
    int exponent = 0;
    for (; (c != EOF) && (d = DigitValue(c, 10)) >= 0; c = fgetc(s)) {
      exponent = exponent * 10 + d;
    }
    exponent *= expsign;
    f *= pow(10.0, static_cast<double>(exponent));
  }
  ungetc(c, s);
  return minus ? -f : f;
}
static int tvfscanf(FILE *stream, const char *format, va_list ap);
// fscanf work-alike that parses 'stream' with the locale-independent
// tvfscanf engine. Returns the number of successful conversions
// (or EOF when input ends before the first conversion).
int tfscanf(FILE *stream, const char *format, ...) {
  va_list args;
  va_start(args, format);
  const int converted = tvfscanf(stream, format, args);
  va_end(args);
  return converted;
}
// Core scanf engine: parses 'stream' according to 'format', storing each
// converted value through the va_list. Locale-independent by construction.
// Returns the number of successful conversions, or -1 (EOF) if input ended
// before anything was converted.
//
// Fixes to the %[ scanset handling:
//  * The range start for "a-z" is now the character preceding '-'
//    (previously range_start was set to '-' itself, so %[a-c] marked
//    '-'..'b' instead of 'a'..'c').
//  * The range end character is now included (loop used '<', not '<=').
//  * The match loop now honors an explicit field width (%5[a-z] previously
//    read an unbounded number of characters).
//  * state is reset to ST_NORMAL when a scanset finishes, so format
//    characters following %[...] are parsed normally again.
static int tvfscanf(FILE *stream, const char *format, va_list ap) {
  const char *p = format;
  char ch;
  int q = 0;
  uintmax_t val = 0;
  int rank = RANK_INT; // Default rank
  unsigned int width = UINT_MAX;
  int base;
  int flags = 0;
  enum {
    ST_NORMAL,      // Ground state
    ST_FLAGS,       // Special flags
    ST_WIDTH,       // Field width
    ST_MODIFIERS,   // Length or conversion modifiers
    ST_MATCH_INIT,  // Initial state of %[ sequence
    ST_MATCH,       // Main state of %[ sequence
    ST_MATCH_RANGE, // After - in a %[ sequence
  } state = ST_NORMAL;
  char *sarg = nullptr; // %s %c or %[ string argument
  enum Bail bail = BAIL_NONE;
  int converted = 0; // Successful conversions
  // One bit per possible unsigned char value, packed into unsigned longs.
  unsigned long
      matchmap[((1 << CHAR_BIT) + (CHAR_BIT * sizeof(long) - 1)) / (CHAR_BIT * sizeof(long))];
  int matchinv = 0; // Is match map inverted?
  unsigned char range_start = 0;
  auto start_off = std::ftell(stream); // For %n: characters consumed so far.
  // Skip leading spaces
  SkipSpace(stream);
  while ((ch = *p++) && !bail) {
    switch (state) {
      case ST_NORMAL:
        if (ch == '%') {
          state = ST_FLAGS;
          flags = 0;
          rank = RANK_INT;
          width = UINT_MAX;
        } else if (isascii(ch) && isspace(ch)) {
          SkipSpace(stream);
        } else {
          if (fgetc(stream) != ch) {
            bail = BAIL_ERR; // Match failure
          }
        }
        break;
      case ST_FLAGS:
        if (ch == '*') {
          flags |= FL_SPLAT;
        } else if ('0' <= ch && ch <= '9') {
          width = (ch - '0');
          state = ST_WIDTH;
          flags |= FL_WIDTH;
        } else {
          state = ST_MODIFIERS;
          p--; // Process this character again
        }
        break;
      case ST_WIDTH:
        if (ch >= '0' && ch <= '9') {
          width = width * 10 + (ch - '0');
        } else {
          state = ST_MODIFIERS;
          p--; // Process this character again
        }
        break;
      case ST_MODIFIERS:
        switch (ch) {
          // Length modifiers - nonterminal sequences
          case 'h':
            rank--; // Shorter rank
            break;
          case 'l':
            rank++; // Longer rank
            break;
          case 'j':
            rank = kIntMaxRank;
            break;
          case 'z':
            rank = kSizeTRank;
            break;
          case 't':
            rank = kPtrDiffRank;
            break;
          case 'L':
          case 'q':
            rank = RANK_LONGLONG; // long double/long long
            break;
          default:
            // Output modifiers - terminal sequences
            state = ST_NORMAL;     // Next state will be normal
            if (rank < kMinRank) { // Canonicalize rank
              rank = kMinRank;
            } else if (rank > kMaxRank) {
              rank = kMaxRank;
            }
            switch (ch) {
              case 'P': // Upper case pointer
              case 'p': // Pointer
                rank = RANK_PTR;
                base = 0;
                goto scan_int;
              case 'i': // Base-independent integer
                base = 0;
                goto scan_int;
              case 'd': // Decimal integer
                base = 10;
                goto scan_int;
              case 'o': // Octal integer
                base = 8;
                goto scan_int;
              case 'u': // Unsigned decimal integer
                base = 10;
                goto scan_int;
              case 'x': // Hexadecimal integer
              case 'X':
                base = 16;
                goto scan_int;
              case 'n': // Number of characters consumed
                val = std::ftell(stream) - start_off;
                goto set_integer;
              scan_int:
                q = SkipSpace(stream);
                if (q <= 0) {
                  bail = BAIL_EOF;
                  break;
                }
                val = streamtoumax(stream, base);
                // fall through
              set_integer:
                if (!(flags & FL_SPLAT)) {
                  converted++;
                  // Store through a pointer of the width selected by rank.
                  switch (rank) {
                    case RANK_CHAR:
                      *va_arg(ap, unsigned char *) = static_cast<unsigned char>(val);
                      break;
                    case RANK_SHORT:
                      *va_arg(ap, unsigned short *) = static_cast<unsigned short>(val);
                      break;
                    case RANK_INT:
                      *va_arg(ap, unsigned int *) = static_cast<unsigned int>(val);
                      break;
                    case RANK_LONG:
                      *va_arg(ap, unsigned long *) = static_cast<unsigned long>(val);
                      break;
                    case RANK_LONGLONG:
                      *va_arg(ap, unsigned long long *) = static_cast<unsigned long long>(val);
                      break;
                    case RANK_PTR:
                      *va_arg(ap, void **) = reinterpret_cast<void *>(static_cast<uintptr_t>(val));
                      break;
                  }
                }
                break;
              case 'f': // Preliminary float value parsing
              case 'g':
              case 'G':
              case 'e':
              case 'E':
                q = SkipSpace(stream);
                if (q <= 0) {
                  bail = BAIL_EOF;
                  break;
                }
                {
                  double fval = streamtofloat(stream);
                  if (!(flags & FL_SPLAT)) {
                    if (rank == RANK_INT) { // %f: plain float
                      *va_arg(ap, float *) = static_cast<float>(fval);
                    } else if (rank == RANK_LONG) { // %lf: double
                      *va_arg(ap, double *) = static_cast<double>(fval);
                    }
                    converted++;
                  }
                }
                break;
              case 'c':                                   // Character
                width = (flags & FL_WIDTH) ? width : 1;   // Default width == 1
                sarg = va_arg(ap, char *);
                while (width--) {
                  if ((q = fgetc(stream)) <= 0) {
                    bail = BAIL_EOF;
                    break;
                  }
                  if (!(flags & FL_SPLAT)) {
                    *sarg++ = q;
                    converted++;
                  }
                }
                break;
              case 's': // String
              {
                if (!(flags & FL_SPLAT)) {
                  sarg = va_arg(ap, char *);
                }
                unsigned length = 0;
                while (width--) {
                  q = fgetc(stream);
                  if ((isascii(q) && isspace(q)) || (q <= 0)) {
                    ungetc(q, stream);
                    break;
                  }
                  if (!(flags & FL_SPLAT)) {
                    sarg[length] = q;
                  }
                  length++;
                }
                if (length == 0) {
                  bail = BAIL_EOF;
                } else if (!(flags & FL_SPLAT)) {
                  sarg[length] = '\0'; // Terminate output
                  converted++;
                }
              } break;
              case '[': // Character range
                sarg = va_arg(ap, char *);
                state = ST_MATCH_INIT;
                matchinv = 0;
                memset(matchmap, 0, sizeof matchmap);
                break;
              case '%': // %% sequence
                if (fgetc(stream) != '%') {
                  bail = BAIL_ERR;
                }
                break;
              default:            // Anything else
                bail = BAIL_ERR;  // Unknown sequence
                break;
            }
        }
        break;
      case ST_MATCH_INIT: // Initial state for %[ match
        if (ch == '^' && !(flags & FL_INV)) {
          matchinv = 1;
        } else {
          SetBit(matchmap, static_cast<unsigned char>(ch));
          range_start = static_cast<unsigned char>(ch); // Possible range start.
          state = ST_MATCH;
        }
        break;
      case ST_MATCH: // Main state for %[ match
        if (ch == ']') {
          goto match_run;
        } else if (ch == '-') {
          // Range: the start is the previously-seen character (range_start).
          state = ST_MATCH_RANGE;
        } else {
          SetBit(matchmap, static_cast<unsigned char>(ch));
          range_start = static_cast<unsigned char>(ch); // Possible range start.
        }
        break;
      case ST_MATCH_RANGE: // %[ match after -
        if (ch == ']') {
          // A trailing '-' is a literal member of the set.
          SetBit(matchmap, static_cast<unsigned char>('-'));
          goto match_run;
        } else {
          // Mark every character from the range start to the end, inclusive.
          for (int i = range_start; i <= static_cast<unsigned char>(ch); i++) {
            SetBit(matchmap, i);
          }
          state = ST_MATCH;
        }
        break;
      match_run: // Match expression finished
        state = ST_NORMAL; // Resume normal format parsing afterwards.
        char *oarg = sarg;
        while (width--) {
          q = fgetc(stream);
          auto qc = static_cast<unsigned char>(q);
          if (q <= 0 || !(TestBit(matchmap, qc) ^ matchinv)) {
            ungetc(q, stream);
            break;
          }
          if (!(flags & FL_SPLAT)) {
            *sarg = q;
          }
          sarg++;
        }
        if (oarg == sarg) {
          bail = (q <= 0) ? BAIL_EOF : BAIL_ERR;
        } else if (!(flags & FL_SPLAT)) {
          *sarg = '\0';
          converted++;
        }
        break;
    }
  }
  if (bail == BAIL_EOF && !converted) {
    converted = -1; // Return EOF (-1)
  }
  return converted;
}
|
2301_81045437/tesseract
|
src/ccutil/scanutils.cpp
|
C++
|
apache-2.0
| 14,201
|
// Copyright 2006 Google Inc.
// All Rights Reserved.
// Author: renn
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TESSERACT_CCUTIL_SCANUTILS_H_
#define TESSERACT_CCUTIL_SCANUTILS_H_
#include <tesseract/export.h>
#include <cstdio> // for FILE
/**
* fscanf variant to ensure correct reading regardless of locale.
*
* tfscanf parse a file stream according to the given format. See the fscanf
* manpage for more information, as this function attempts to mimic its
* behavior.
*
 * @note The float conversions accept a decimal exponent (e/E); hexadecimal
 * floating-point notation is not supported.
*
*/
TESS_API
int tfscanf(FILE *stream, const char *format, ...);
#endif // TESSERACT_CCUTIL_SCANUTILS_H_
|
2301_81045437/tesseract
|
src/ccutil/scanutils.h
|
C
|
apache-2.0
| 1,177
|
/**********************************************************************
* File: serialis.cpp (Formerly serialmac.h)
* Description: Inline routines and macros for serialisation functions
* Author: Phil Cheatle
*
* (C) Copyright 1990, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#include "serialis.h"
#include "errcode.h"
#include "helpers.h" // for ReverseN
#include <climits> // for INT_MAX
#include <cstdio>
namespace tesseract {
// The default FileReader loads the whole file into the vector of char,
// returning false on error.
// The default FileReader: loads the whole file into the vector of char.
// Returns false if the file cannot be opened or fully read; on failure the
// vector's previous contents are left untouched.
bool LoadDataFromFile(const char *filename, std::vector<char> *data) {
  FILE *fp = fopen(filename, "rb");
  if (fp == nullptr) {
    return false;
  }
  fseek(fp, 0, SEEK_END);
  const auto size = std::ftell(fp);
  fseek(fp, 0, SEEK_SET);
  bool result = false;
  // Trying to open a directory on Linux sets size to LONG_MAX. Catch it here.
  if (size > 0 && size < LONG_MAX) {
    // Reserve an extra byte in case the caller wants to append a '\0'.
    data->reserve(size + 1);
    data->resize(size); // TODO: optimize no init
    result = static_cast<long>(fread(&(*data)[0], 1, size, fp)) == size;
  }
  fclose(fp);
  return result;
}
// The default FileWriter writes the vector of char to the filename file,
// returning false on error.
// The default FileWriter: writes the vector of char to the named file,
// returning false on error.
// Fix: used &data[0], which is undefined behavior for an empty vector;
// data.data() is valid (possibly null) even when data is empty, and
// fwrite of 0 bytes then still succeeds.
bool SaveDataToFile(const std::vector<char> &data, const char *filename) {
  FILE *fp = fopen(filename, "wb");
  if (fp == nullptr) {
    return false;
  }
  bool result = fwrite(data.data(), 1, data.size(), fp) == data.size();
  fclose(fp);
  return result;
}
// Default constructor: all members use their in-class initializers.
TFile::TFile() = default;
TFile::~TFile() {
  // data_ is only owned when this TFile allocated it itself
  // (see Open/OpenWrite); caller-supplied buffers are not deleted.
  if (data_is_owned_) {
    delete data_;
  }
}
// Reads a 32-bit size prefix, auto-detecting endianness: if the raw value
// is implausibly large, the bytes are assumed to be in the opposite order,
// so swap_ is toggled and the value byte-reversed.
bool TFile::DeSerializeSize(int32_t *pSize) {
  uint32_t size;
  if (FReadEndian(&size, sizeof(size), 1) != 1) {
    return false;
  }
  // Plausibility heuristic: a genuine element count should not exceed a
  // quarter of the total buffer size. NOTE(review): this assumes each
  // counted element occupies at least 4 bytes in the stream — confirm
  // against the serialized formats before reusing elsewhere.
  if (size > data_->size() / 4) {
    // Reverse endianness.
    swap_ = !swap_;
    ReverseN(&size, 4);
  }
  *pSize = size;
  return true;
}
// Skips a serialized vector: reads its 32-bit element count, then advances
// the read offset past count * size bytes.
bool TFile::DeSerializeSkip(size_t size) {
  uint32_t count = 0;
  return DeSerialize(&count) && Skip(count * size);
}
// Reads a string serialized as a 32-bit length followed by that many bytes
// (no terminator). An empty string is just a zero length.
bool TFile::DeSerialize(std::string &data) {
  uint32_t size = 0;
  if (!DeSerialize(&size)) {
    return false;
  }
  if (size == 0) {
    data.clear();
    return true;
  }
  // TODO: optimize.
  data.resize(size);
  return DeSerialize(&data[0], size);
}
bool TFile::Serialize(const std::string &data) {
uint32_t size = data.size();
return Serialize(&size) && Serialize(data.c_str(), size);
}
// Reads a char vector serialized as a 32-bit length followed by that many
// raw bytes.
bool TFile::DeSerialize(std::vector<char> &data) {
  uint32_t size = 0;
  if (!DeSerialize(&size)) {
    return false;
  }
  if (size == 0) {
    data.clear();
    return true;
  }
  // TODO: optimize.
  data.resize(size);
  return DeSerialize(&data[0], data.size());
}
bool TFile::Serialize(const std::vector<char> &data) {
uint32_t size = data.size();
if (!Serialize(&size)) {
return false;
} else if (size > 0) {
return Serialize(&data[0], size);
}
return true;
}
// Advances the read offset by count bytes without reading anything.
// NOTE(review): no bounds check is performed here — callers are expected
// to keep offset_ within data_->size(); confirm before skipping untrusted
// counts, since FRead's clamping assumes offset_ <= data_->size().
bool TFile::Skip(size_t count) {
  offset_ += count;
  return true;
}
bool TFile::Open(const char *filename, FileReader reader) {
if (!data_is_owned_) {
data_ = new std::vector<char>;
data_is_owned_ = true;
}
offset_ = 0;
is_writing_ = false;
swap_ = false;
if (reader == nullptr) {
return LoadDataFromFile(filename, data_);
} else {
return (*reader)(filename, data_);
}
}
// Wraps an in-memory buffer for reading by copying the given bytes into
// the internal vector.
// Fix: memcpy(&(*data_)[0], ...) indexed an empty vector when size == 0,
// which is undefined behavior; data_->data() is always valid and the copy
// is skipped entirely for an empty buffer.
bool TFile::Open(const char *data, size_t size) {
  offset_ = 0;
  if (!data_is_owned_) {
    data_ = new std::vector<char>;
    data_is_owned_ = true;
  }
  is_writing_ = false;
  swap_ = false;
  data_->resize(size); // TODO: optimize no init
  if (size > 0) {
    memcpy(data_->data(), data, size);
  }
  return true;
}
// Reads from the current position of 'fp' up to end_offset (or to the end
// of the file when end_offset < 0) into the internal buffer.
// Fix: fread into &(*data_)[0] is undefined behavior when the computed
// size is 0 (empty vector); data_->data() is valid in that case too.
bool TFile::Open(FILE *fp, int64_t end_offset) {
  offset_ = 0;
  auto current_pos = std::ftell(fp);
  if (current_pos < 0) {
    // ftell failed.
    return false;
  }
  if (end_offset < 0) {
    // Measure to the end of the file, then restore the position.
    if (fseek(fp, 0, SEEK_END)) {
      return false;
    }
    end_offset = ftell(fp);
    if (fseek(fp, current_pos, SEEK_SET)) {
      return false;
    }
  }
  size_t size = end_offset - current_pos;
  is_writing_ = false;
  swap_ = false;
  if (!data_is_owned_) {
    data_ = new std::vector<char>;
    data_is_owned_ = true;
  }
  data_->resize(size); // TODO: optimize no init
  return fread(data_->data(), 1, size, fp) == size;
}
// fgets work-alike over the in-memory buffer: copies characters up to and
// including the next '\n', or until buffer_size - 1 characters or the end
// of data. Returns nullptr if nothing was read.
char *TFile::FGets(char *buffer, int buffer_size) {
  ASSERT_HOST(!is_writing_);
  int len = 0;
  while (len + 1 < buffer_size && offset_ < data_->size()) {
    const char c = (*data_)[offset_++];
    buffer[len++] = c;
    if (c == '\n') {
      break;
    }
  }
  if (len < buffer_size) {
    buffer[len] = '\0'; // NUL-terminate when there is room.
  }
  return len > 0 ? buffer : nullptr;
}
// Reads count items of 'size' bytes each, byte-reversing every item that
// was actually read when swap_ is set (single bytes never need swapping).
size_t TFile::FReadEndian(void *buffer, size_t size, size_t count) {
  const size_t num_read = FRead(buffer, size, count);
  if (swap_ && size != 1) {
    auto *item = static_cast<char *>(buffer);
    for (size_t i = 0; i < num_read; ++i) {
      ReverseN(item, size);
      item += size;
    }
  }
  return num_read;
}
// fread work-alike over the in-memory buffer: copies up to size * count
// bytes from the current offset into 'buffer' and returns the number of
// complete items transferred (short when less data remains).
size_t TFile::FRead(void *buffer, size_t size, size_t count) {
  ASSERT_HOST(!is_writing_);
  ASSERT_HOST(size > 0);
  size_t required_size;
  if (SIZE_MAX / size <= count) {
    // Avoid integer overflow: clamp to whatever data remains.
    required_size = data_->size() - offset_;
  } else {
    required_size = size * count;
    if (data_->size() - offset_ < required_size) {
      // Short read: only the remaining bytes are available.
      required_size = data_->size() - offset_;
    }
  }
  // buffer may be nullptr to skip data instead of copying it.
  if (required_size > 0 && buffer != nullptr) {
    memcpy(buffer, &(*data_)[offset_], required_size);
  }
  offset_ += required_size;
  return required_size / size;
}
// Moves the read position back to the start of the buffer.
// Only valid in read mode.
void TFile::Rewind() {
  ASSERT_HOST(!is_writing_);
  offset_ = 0;
}
// Switches to write mode. When 'data' is non-null, all output goes to the
// caller-owned vector (any previously owned buffer is released); otherwise
// an internal buffer is (re)used. The destination is cleared first.
void TFile::OpenWrite(std::vector<char> *data) {
  offset_ = 0;
  if (data == nullptr) {
    if (!data_is_owned_) {
      data_ = new std::vector<char>;
      data_is_owned_ = true;
    }
  } else {
    if (data_is_owned_) {
      delete data_;
    }
    data_ = data;
    data_is_owned_ = false;
  }
  is_writing_ = true;
  swap_ = false;
  data_->clear();
}
bool TFile::CloseWrite(const char *filename, FileWriter writer) {
ASSERT_HOST(is_writing_);
if (writer == nullptr) {
return SaveDataToFile(*data_, filename);
} else {
return (*writer)(*data_, filename);
}
}
// fwrite work-alike: appends size * count bytes to the in-memory buffer
// and returns count.
// Improvement: appends the whole range with a single vector::insert
// instead of one push_back per byte, avoiding a capacity check per byte
// while producing identical contents.
size_t TFile::FWrite(const void *buffer, size_t size, size_t count) {
  ASSERT_HOST(is_writing_);
  ASSERT_HOST(size > 0);
  ASSERT_HOST(SIZE_MAX / size > count);
  const size_t total = size * count;
  const char *buf = static_cast<const char *>(buffer);
  if (total > 0) {
    data_->insert(data_->end(), buf, buf + total);
  }
  return count;
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/ccutil/serialis.cpp
|
C++
|
apache-2.0
| 7,297
|
/**********************************************************************
* File: serialis.h (Formerly serialmac.h)
* Description: Inline routines and macros for serialisation functions
* Author: Phil Cheatle
*
* (C) Copyright 1990, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef SERIALIS_H
#define SERIALIS_H
#include <tesseract/baseapi.h> // FileReader
#include <cstdint> // uint8_t
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <type_traits>
#include <vector> // std::vector
namespace tesseract {
// Return number of elements of an array.
// Returns the number of elements of a C array, deduced at compile time.
template <typename T, size_t N>
constexpr size_t countof(T const (&)[N]) noexcept {
  return N;
}
// Function to write a std::vector<char> to a whole file.
// Returns false on failure.
using FileWriter = bool (*)(const std::vector<char> &data, const char *filename);
TESS_API
bool LoadDataFromFile(const char *filename, std::vector<char> *data);
TESS_API
bool SaveDataToFile(const std::vector<char> &data, const char *filename);
// Deserialize data from file.
// Reads n items of trivially-copyable type T from fp into 'data'.
// Returns true iff all n items were read.
template <typename T>
bool DeSerialize(FILE *fp, T *data, size_t n = 1) {
  return fread(data, sizeof(T), n, fp) == n;
}
// Serialize data to file.
// Writes n items of trivially-copyable type T from 'data' to fp.
// Returns true iff all n items were written.
template <typename T>
bool Serialize(FILE *fp, const T *data, size_t n = 1) {
  return fwrite(data, sizeof(T), n, fp) == n;
}
// Simple file class.
// Allows for portable file input from memory and from foreign file systems.
// An instance is either a reader over an in-memory copy of a whole file, or
// a writer appending to a byte vector (caller-supplied or owned); mixed
// read/write on one instance is not supported.
class TESS_API TFile {
public:
  TFile();
  ~TFile();
  // All the Open methods load the whole file into memory for reading.
  // Opens a file with a supplied reader, or nullptr to use the default.
  // Note that mixed read/write is not supported.
  bool Open(const char *filename, FileReader reader);
  // From an existing memory buffer.
  bool Open(const char *data, size_t size);
  // From an open file and an end offset.
  bool Open(FILE *fp, int64_t end_offset);
  // Sets the value of the swap flag, so that FReadEndian does the right thing.
  void set_swap(bool value) {
    swap_ = value;
  }
  // Deserialize data.
  bool DeSerializeSize(int32_t *data);
  bool DeSerializeSkip(size_t size = 1);
  bool DeSerialize(std::string &data);
  bool DeSerialize(std::vector<char> &data);
  //bool DeSerialize(std::vector<std::string> &data);
  // Reads count fixed-size items, byte-swapping each one if set_swap(true).
  template <typename T>
  bool DeSerialize(T *data, size_t count = 1) {
    return FReadEndian(data, sizeof(T), count) == count;
  }
  // Reads a vector written by the matching Serialize overload below:
  // a uint32_t element count, followed by the elements. The element
  // encoding is chosen at compile time from T (string / class / pointer /
  // plain trivially-copyable value).
  template <typename T>
  bool DeSerialize(std::vector<T> &data) {
    uint32_t size;
    if (!DeSerialize(&size)) {
      return false;
    } else if (size == 0) {
      data.clear();
    } else if (size > 50000000) {
      // Arbitrarily limit the number of elements to protect against bad data.
      return false;
    } else if constexpr (std::is_same<T, std::string>::value) {
      // Deserialize a string.
      // TODO: optimize.
      data.resize(size);
      for (auto &item : data) {
        if (!DeSerialize(item)) {
          return false;
        }
      }
    } else if constexpr (std::is_class<T>::value) {
      // Deserialize a tesseract class via its member DeSerialize(TFile*).
      // TODO: optimize.
      data.resize(size);
      for (auto &item : data) {
        if (!item.DeSerialize(this)) {
          return false;
        }
      }
    } else if constexpr (std::is_pointer<T>::value) {
      // Deserialize pointers. Each element is preceded by a uint8_t marker:
      // 0 leaves the slot nullptr, non-zero means a serialized object
      // follows and is heap-allocated here (ownership passes to data).
      // TODO: optimize.
      data.resize(size);
      for (uint32_t i = 0; i < size; i++) {
        uint8_t non_null;
        if (!DeSerialize(&non_null)) {
          return false;
        }
        if (non_null) {
          typedef typename std::remove_pointer<T>::type ST;
          auto item = new ST;
          if (!item->DeSerialize(this)) {
            delete item;
            return false;
          }
          data[i] = item;
        }
      }
    } else {
      // Deserialize a non-class: one bulk read of all elements.
      // TODO: optimize.
      data.resize(size);
      return DeSerialize(&data[0], size);
    }
    return true;
  }
  // Serialize data.
  bool Serialize(const std::string &data);
  bool Serialize(const std::vector<char> &data);
  // Writes count fixed-size items via FWrite.
  template <typename T>
  bool Serialize(const T *data, size_t count = 1) {
    return FWrite(data, sizeof(T), count) == count;
  }
  // Writes a uint32_t element count, then the elements, mirroring the
  // vector DeSerialize overload above.
  template <typename T>
  bool Serialize(const std::vector<T> &data) {
    // Serialize number of elements first.
    uint32_t size = data.size();
    if (!Serialize(&size)) {
      return false;
    } else if constexpr (std::is_same<T, std::string>::value) {
      // Serialize strings.
      for (auto &&string : data) {
        if (!Serialize(string)) {
          return false;
        }
      }
    } else if constexpr (std::is_class<T>::value) {
      // Serialize a tesseract class via its member Serialize(TFile*).
      for (auto &item : data) {
        if (!item.Serialize(this)) {
          return false;
        }
      }
    } else if constexpr (std::is_pointer<T>::value) {
      // Serialize pointers: a uint8_t null/non-null marker, then the
      // pointee (if any), matching the DeSerialize format.
      for (auto &item : data) {
        uint8_t non_null = (item != nullptr);
        if (!Serialize(&non_null)) {
          return false;
        }
        if (non_null) {
          if (!item->Serialize(this)) {
            return false;
          }
        }
      }
    } else if (size > 0) {
      // Serialize a non-class: one bulk write of all elements.
      return Serialize(&data[0], size);
    }
    return true;
  }
  // Skip data.
  bool Skip(size_t count);
  // Reads a line like fgets. Returns nullptr on EOF, otherwise buffer.
  // Reads at most buffer_size bytes, including '\0' terminator, even if
  // the line is longer. Does nothing if buffer_size <= 0.
  char *FGets(char *buffer, int buffer_size);
  // Replicates fread, followed by a swap of the bytes if needed, returning the
  // number of items read. If swap_ is true then the count items will each have
  // size bytes reversed.
  size_t FReadEndian(void *buffer, size_t size, size_t count);
  // Replicates fread, returning the number of items read.
  size_t FRead(void *buffer, size_t size, size_t count);
  // Resets the TFile as if it has been Opened, but nothing read.
  // Only allowed while reading!
  void Rewind();
  // Open for writing. Either supply a non-nullptr data with OpenWrite before
  // calling FWrite, (no close required), or supply a nullptr data to OpenWrite
  // and call CloseWrite to write to a file after the FWrites.
  void OpenWrite(std::vector<char> *data);
  bool CloseWrite(const char *filename, FileWriter writer);
  // Replicates fwrite, returning the number of items written.
  // To use fprintf, use snprintf and FWrite.
  size_t FWrite(const void *buffer, size_t size, size_t count);
private:
  // The buffered data from the file.
  std::vector<char> *data_ = nullptr;
  // The number of bytes used so far.
  unsigned offset_ = 0;
  // True if the data_ pointer is owned by *this.
  bool data_is_owned_ = false;
  // True if the TFile is open for writing.
  bool is_writing_ = false;
  // True if bytes need to be swapped in FReadEndian.
  bool swap_ = false;
};
} // namespace tesseract.
#endif
|
2301_81045437/tesseract
|
src/ccutil/serialis.h
|
C++
|
apache-2.0
| 7,525
|
///////////////////////////////////////////////////////////////////////
// File: sorthelper.h
// Description: Generic sort and maxfinding class.
// Author: Ray Smith
//
// (C) Copyright 2010, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_SORTHELPER_H_
#define TESSERACT_CCUTIL_SORTHELPER_H_
#include <algorithm> // for std::sort
#include <cstdint>   // for INT32_MAX
#include <cstdlib>
#include <vector>
namespace tesseract {
// Generic class to provide functions based on a <value,count> pair.
// T is the value type.
// The class keeps a count of each value and can return the most frequent
// value or a sorted array of the values with counts.
// Note that this class uses linear search for adding. It is better
// to use the STATS class to get the mode of a large number of values
// in a small space. SortHelper is better to get the mode of a small number
// of values from a large space.
// T must have a copy constructor.
template <typename T>
class SortHelper {
public:
  // Simple pair class to hold the values and counts.
  template <typename PairT>
  struct SortPair {
    PairT value;
    int count;
  };
  // qsort-style function to sort by decreasing count.
  // Kept in the public interface for existing callers.
  static int SortPairsByCount(const void *v1, const void *v2) {
    const auto *p1 = static_cast<const SortPair<T> *>(v1);
    const auto *p2 = static_cast<const SortPair<T> *>(v2);
    return p2->count - p1->count;
  }
  // qsort-style function to sort by decreasing value.
  // Uses operator< rather than subtraction so it is also correct for
  // unsigned value types (where p2->value - p1->value never goes negative).
  static int SortPairsByValue(const void *v1, const void *v2) {
    const auto *p1 = static_cast<const SortPair<T> *>(v1);
    const auto *p2 = static_cast<const SortPair<T> *>(v2);
    if (p1->value < p2->value) {
      return 1;
    }
    if (p2->value < p1->value) {
      return -1;
    }
    return 0;
  }
  // Constructor takes a hint of the array size, but it need not be accurate.
  explicit SortHelper(int sizehint) {
    counts_.reserve(sizehint);
  }
  // Add a value that may be a duplicate of an existing value.
  // Uses a linear search.
  void Add(T value, int count) {
    // Linear search for value.
    for (auto &it : counts_) {
      if (it.value == value) {
        it.count += count;
        return;
      }
    }
    counts_.push_back(SortPair<T>{value, count});
  }
  // Returns the frequency of the most frequent value.
  // If max_value is not nullptr, returns the most frequent value.
  // If the array is empty, returns -INT32_MAX and max_value is unchanged.
  int MaxCount(T *max_value) const {
    int best_count = -INT32_MAX;
    for (auto &it : counts_) {
      if (it.count > best_count) {
        best_count = it.count;
        if (max_value != nullptr) {
          *max_value = it.value;
        }
      }
    }
    return best_count;
  }
  // Returns the data array sorted by decreasing frequency.
  // Fix: std::vector has no sort() member function; the previous
  // counts_.sort(&SortPairsByCount) did not compile once instantiated.
  const std::vector<SortPair<T>> &SortByCount() {
    std::sort(counts_.begin(), counts_.end(),
              [](const SortPair<T> &a, const SortPair<T> &b) { return a.count > b.count; });
    return counts_;
  }
  // Returns the data array sorted by decreasing value.
  const std::vector<SortPair<T>> &SortByValue() {
    std::sort(counts_.begin(), counts_.end(),
              [](const SortPair<T> &a, const SortPair<T> &b) { return b.value < a.value; });
    return counts_;
  }
private:
  // The <value,count> pairs seen so far, in insertion order until sorted.
  std::vector<SortPair<T>> counts_;
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_SORTHELPER_H_.
|
2301_81045437/tesseract
|
src/ccutil/sorthelper.h
|
C++
|
apache-2.0
| 3,767
|
///////////////////////////////////////////////////////////////////////
// File: tessdatamanager.cpp
// Description: Functions to handle loading/combining tesseract data files.
// Author: Daria Antonova
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "tessdatamanager.h"
#include <cstdio>
#include <string>
#if defined(HAVE_LIBARCHIVE)
# include <archive.h>
# include <archive_entry.h>
#endif
#include <tesseract/version.h>
#include "errcode.h"
#include "helpers.h"
#include "params.h"
#include "serialis.h"
#include "tprintf.h"
namespace tesseract {
// Default constructor: no custom file reader, so Init() falls back to
// LoadDataFromFile (or libarchive when compiled in).
// The version entry is pre-populated with the current library version so a
// freshly-built manager always carries one.
TessdataManager::TessdataManager() : reader_(nullptr), is_loaded_(false), swap_(false) {
  SetVersionString(TESSERACT_VERSION_STR);
}
// Constructor with a caller-supplied FileReader that Init() uses to load
// the traineddata bytes.
TessdataManager::TessdataManager(FileReader reader)
    : reader_(reader), is_loaded_(false), swap_(false) {
  SetVersionString(TESSERACT_VERSION_STR);
}
// Records the filename for lazy loading: the file is not actually read
// until a component is requested (the non-const GetComponent calls Init
// with this name on demand).
void TessdataManager::LoadFileLater(const char *data_file_name) {
  Clear();
  data_file_name_.assign(data_file_name);
}
#if defined(HAVE_LIBARCHIVE)
// Loads a traineddata file that is actually an archive (any format/filter
// libarchive understands). Each archive member whose name maps to a known
// TessdataType becomes that component. Returns true if at least one
// component was read successfully.
bool TessdataManager::LoadArchiveFile(const char *filename) {
  bool result = false;
  archive *a = archive_read_new();
  if (a != nullptr) {
    archive_read_support_filter_all(a);
    archive_read_support_format_all(a);
    if (archive_read_open_filename(a, filename, 8192) == ARCHIVE_OK) {
      archive_entry *ae;
      while (archive_read_next_header(a, &ae) == ARCHIVE_OK) {
        const char *component = archive_entry_pathname(ae);
        if (component != nullptr) {
          TessdataType type;
          // Members with unknown suffixes are silently skipped.
          if (TessdataTypeFromFileName(component, &type)) {
            int64_t size = archive_entry_size(ae);
            if (size > 0) {
              entries_[type].resize(size);
              // A short read leaves is_loaded_ untouched; the stale entry
              // content is only used if some other member loads fully.
              if (archive_read_data(a, &entries_[type][0], size) == size) {
                is_loaded_ = true;
              }
            }
          }
        }
      }
      result = is_loaded_;
    }
    archive_read_free(a);
  }
  return result;
}
#endif
/**
 * Opens and reads the given data file right now.
 * Tries, in order: the caller-supplied reader_ if one was given; otherwise
 * a libarchive load (when compiled in) and finally the proprietary format
 * via LoadDataFromFile.
 * @return true on success.
 */
bool TessdataManager::Init(const char *data_file_name) {
  std::vector<char> data;
  if (reader_ == nullptr) {
#if defined(HAVE_LIBARCHIVE)
    if (LoadArchiveFile(data_file_name)) {
      return true;
    }
#endif
    if (!LoadDataFromFile(data_file_name, &data)) {
      return false;
    }
  } else {
    if (!(*reader_)(data_file_name, &data)) {
      return false;
    }
  }
  // Use data() rather than &data[0]: indexing an empty vector is undefined
  // behavior, and a zero-length file would otherwise trigger it here.
  return LoadMemBuffer(data_file_name, data.data(), data.size());
}
// Loads from the given memory buffer as if a file, remembering name as an
// arbitrary source id for caching.
// Expects the proprietary format: uint32 entry count, int64 offset table
// (one slot per TessdataType, -1 for absent components), then the raw bytes
// of each present component.
bool TessdataManager::LoadMemBuffer(const char *name, const char *data, int size) {
  // TODO: This method supports only the proprietary file format.
  Clear();
  data_file_name_ = name;
  TFile fp;
  fp.Open(data, size);
  uint32_t num_entries;
  if (!fp.DeSerialize(&num_entries)) {
    return false;
  }
  // Endianness detection: a sane entry count is small, so an implausibly
  // large value means the file was written with the opposite byte order.
  swap_ = num_entries > kMaxNumTessdataEntries;
  fp.set_swap(swap_);
  if (swap_) {
    ReverseN(&num_entries, sizeof(num_entries));
  }
  if (num_entries > kMaxNumTessdataEntries) {
    // Still implausible after swapping: corrupt data.
    return false;
  }
  // TODO: optimize (no init required).
  std::vector<int64_t> offset_table(num_entries);
  if (!fp.DeSerialize(&offset_table[0], num_entries)) {
    return false;
  }
  for (unsigned i = 0; i < num_entries && i < TESSDATA_NUM_ENTRIES; ++i) {
    if (offset_table[i] >= 0) {
      // An entry's size runs to the next *present* entry's offset (absent
      // slots are -1), or to the end of the buffer for the last one.
      int64_t entry_size = size - offset_table[i];
      unsigned j = i + 1;
      while (j < num_entries && offset_table[j] == -1) {
        ++j;
      }
      if (j < num_entries) {
        entry_size = offset_table[j] - offset_table[i];
      }
      entries_[i].resize(entry_size);
      if (!fp.DeSerialize(&entries_[i][0], entry_size)) {
        return false;
      }
    }
  }
  // Old (pre-4.0.0) files carry no version component.
  if (entries_[TESSDATA_VERSION].empty()) {
    SetVersionString("Pre-4.0.0");
  }
  is_loaded_ = true;
  return true;
}
// Overwrites a single entry of the given type with a copy of the given data.
void TessdataManager::OverwriteEntry(TessdataType type, const char *data, int size) {
  is_loaded_ = true;
  // assign() copies [data, data + size) and is well-defined for size == 0,
  // whereas the previous resize() + memcpy(&entries_[type][0], ...) indexed
  // an empty vector when size was 0 (undefined behavior).
  entries_[type].assign(data, data + size);
}
// Saves the loaded entries to the given filename.
// A nullptr writer selects the default SaveDataToFile implementation.
bool TessdataManager::SaveFile(const char *filename, FileWriter writer) const {
  // TODO: This method supports only the proprietary file format.
  ASSERT_HOST(is_loaded_);
  std::vector<char> data;
  Serialize(&data);
  return (writer == nullptr) ? SaveDataToFile(data, filename) : (*writer)(data, filename);
}
// Serializes to the given vector.
// On-disk layout written here: int32 entry count, then TESSDATA_NUM_ENTRIES
// int64 file offsets (-1 for absent components), then the raw bytes of each
// present component in type order.
void TessdataManager::Serialize(std::vector<char> *data) const {
  // TODO: This method supports only the proprietary file format.
  ASSERT_HOST(is_loaded_);
  // Compute the offset_table and total size.
  int64_t offset_table[TESSDATA_NUM_ENTRIES];
  // Data starts after the count and the offset table themselves.
  int64_t offset = sizeof(int32_t) + sizeof(offset_table);
  for (unsigned i = 0; i < TESSDATA_NUM_ENTRIES; ++i) {
    if (entries_[i].empty()) {
      offset_table[i] = -1;
    } else {
      offset_table[i] = offset;
      offset += entries_[i].size();
    }
  }
  // Size the output buffer to the computed total up front.
  data->resize(offset, 0);
  int32_t num_entries = TESSDATA_NUM_ENTRIES;
  TFile fp;
  fp.OpenWrite(data);
  fp.Serialize(&num_entries);
  fp.Serialize(&offset_table[0], countof(offset_table));
  for (const auto &entry : entries_) {
    if (!entry.empty()) {
      fp.Serialize(&entry[0], entry.size());
    }
  }
}
// Resets to the initial (empty, unloaded) state, keeping the reader.
void TessdataManager::Clear() {
  for (std::vector<char> &component : entries_) {
    component.clear();
  }
  is_loaded_ = false;
}
// Prints a directory of contents: version, then index/suffix/size/offset of
// each present component.
void TessdataManager::Directory() const {
  printf("Version:%s\n", VersionString().c_str());
  // Offsets must match the layout written by Serialize(): the file starts
  // with a 4-byte entry count followed by the int64 offset table. The
  // previous value omitted sizeof(int32_t), so every printed offset was
  // 4 bytes too small.
  auto offset = sizeof(int32_t) + TESSDATA_NUM_ENTRIES * sizeof(int64_t);
  for (unsigned i = 0; i < TESSDATA_NUM_ENTRIES; ++i) {
    if (!entries_[i].empty()) {
      printf("%u:%s:size=%zu, offset=%zu\n", i, kTessdataFileSuffixes[i], entries_[i].size(),
             offset);
      offset += entries_[i].size();
    }
  }
}
// Opens the given TFile pointer to the given component type, lazily loading
// the data file first if nothing is loaded yet.
// Returns false in case of failure.
bool TessdataManager::GetComponent(TessdataType type, TFile *fp) {
  if (!is_loaded_ && !Init(data_file_name_.c_str())) {
    return false;
  }
  // Delegate to the const overload, which does the actual lookup.
  return static_cast<const TessdataManager *>(this)->GetComponent(type, fp);
}
// As the non-const version, except it can't load the component if not
// already loaded.
bool TessdataManager::GetComponent(TessdataType type, TFile *fp) const {
  ASSERT_HOST(is_loaded_);
  const auto &entry = entries_[type];
  if (entry.empty()) {
    return false;
  }
  fp->Open(entry.data(), entry.size());
  fp->set_swap(swap_);
  return true;
}
// Returns the current version string.
std::string TessdataManager::VersionString() const {
  // Range construction is well-defined even if the version entry is empty;
  // the previous &entries_[...][0] indexed an empty vector in that case
  // (undefined behavior).
  const auto &v = entries_[TESSDATA_VERSION];
  return std::string(v.begin(), v.end());
}
// Sets the version string to the given v_str.
void TessdataManager::SetVersionString(const std::string &v_str) {
  // assign() copies the string bytes and is well-defined for an empty
  // v_str, where the previous resize() + memcpy(&entries_[...][0], ...)
  // indexed an empty vector (undefined behavior).
  entries_[TESSDATA_VERSION].assign(v_str.begin(), v_str.end());
}
// Reads all the standard tesseract config and data files for a language at
// the given path prefix and bundles them into one traineddata file.
// Returns true if the combined file was successfully written.
bool TessdataManager::CombineDataFiles(const char *language_data_path_prefix,
                                       const char *output_filename) {
  // Gather every component file that exists next to the prefix.
  for (auto component_suffix : kTessdataFileSuffixes) {
    TessdataType type;
    ASSERT_HOST(TessdataTypeFromFileSuffix(component_suffix, &type));
    std::string component_path(language_data_path_prefix);
    component_path += component_suffix;
    // Probe for existence first; a missing component is not an error.
    FILE *probe = fopen(component_path.c_str(), "rb");
    if (probe == nullptr) {
      continue;
    }
    fclose(probe);
    if (!LoadDataFromFile(component_path.c_str(), &entries_[type])) {
      tprintf("Load of file %s failed!\n", component_path.c_str());
      return false;
    }
  }
  is_loaded_ = true;
  // Make sure that the required components are present.
  if (!IsBaseAvailable() && !IsLSTMAvailable()) {
    tprintf(
        "Error: traineddata file must contain at least (a unicharset file"
        " and inttemp) OR an lstm file.\n");
    return false;
  }
  // Write updated data to the output traineddata file.
  return SaveFile(output_filename, nullptr);
}
// Replaces the entries named by component_filenames with the files' contents
// and writes the updated traineddata to new_traineddata_filename.
bool TessdataManager::OverwriteComponents(const char *new_traineddata_filename,
                                          char **component_filenames, int num_new_components) {
  // Open the files with the new components.
  // TODO: This method supports only the proprietary file format.
  for (int i = 0; i < num_new_components; ++i) {
    TessdataType type;
    if (!TessdataTypeFromFileName(component_filenames[i], &type)) {
      // Unrecognized suffix: silently skipped, as before.
      continue;
    }
    if (!LoadDataFromFile(component_filenames[i], &entries_[type])) {
      tprintf("Failed to read component file:%s\n", component_filenames[i]);
      return false;
    }
  }
  // Write updated data to the output traineddata file.
  return SaveFile(new_traineddata_filename, nullptr);
}
// Writes the single component implied by filename's suffix out to filename.
// Returns false if that component is not present in the loaded data.
bool TessdataManager::ExtractToFile(const char *filename) {
  TessdataType type = TESSDATA_NUM_ENTRIES;
  ASSERT_HOST(tesseract::TessdataManager::TessdataTypeFromFileName(filename, &type));
  const auto &component = entries_[type];
  if (component.empty()) {
    return false;
  }
  return SaveDataToFile(component, filename);
}
// Maps a file suffix (e.g. "unicharset") to its TessdataType.
// Returns false (with a debug message in non-NDEBUG builds) when the suffix
// is not a known component name.
bool TessdataManager::TessdataTypeFromFileSuffix(const char *suffix, TessdataType *type) {
  unsigned i = 0;
  while (i < TESSDATA_NUM_ENTRIES && strcmp(kTessdataFileSuffixes[i], suffix) != 0) {
    ++i;
  }
  if (i < TESSDATA_NUM_ENTRIES) {
    *type = static_cast<TessdataType>(i);
    return true;
  }
#if !defined(NDEBUG)
  tprintf(
      "TessdataManager can't determine which tessdata"
      " component is represented by %s\n",
      suffix);
#endif
  return false;
}
// Determines the component type from a filename's extension,
// e.g. tessdata/eng.unicharset -> TESSDATA_UNICHARSET.
bool TessdataManager::TessdataTypeFromFileName(const char *filename, TessdataType *type) {
  // Locate the last '.'; reject names with no extension or an empty one.
  const char *dot = strrchr(filename, '.');
  if (dot == nullptr) {
    return false;
  }
  const char *suffix = dot + 1;
  if (*suffix == '\0') {
    return false;
  }
  return TessdataTypeFromFileSuffix(suffix, type);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/tessdatamanager.cpp
|
C++
|
apache-2.0
| 10,554
|
///////////////////////////////////////////////////////////////////////
// File: tessdatamanager.h
// Description: Functions to handle loading/combining tesseract data files.
// Author: Daria Antonova
//
// (C) Copyright 2009, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_TESSDATAMANAGER_H_
#define TESSERACT_CCUTIL_TESSDATAMANAGER_H_
#include <tesseract/baseapi.h> // FileReader
#include <string> // std::string
#include <vector> // std::vector
#include "serialis.h" // FileWriter
static const char kTrainedDataSuffix[] = "traineddata";
// When adding new tessdata types and file suffixes, please make sure to
// update TessdataType enum, kTessdataFileSuffixes and kTessdataFileIsText.
static const char kLangConfigFileSuffix[] = "config";
static const char kUnicharsetFileSuffix[] = "unicharset";
static const char kAmbigsFileSuffix[] = "unicharambigs";
static const char kBuiltInTemplatesFileSuffix[] = "inttemp";
static const char kBuiltInCutoffsFileSuffix[] = "pffmtable";
static const char kNormProtoFileSuffix[] = "normproto";
static const char kPuncDawgFileSuffix[] = "punc-dawg";
static const char kSystemDawgFileSuffix[] = "word-dawg";
static const char kNumberDawgFileSuffix[] = "number-dawg";
static const char kFreqDawgFileSuffix[] = "freq-dawg";
static const char kFixedLengthDawgsFileSuffix[] = "fixed-length-dawgs";
static const char kCubeUnicharsetFileSuffix[] = "cube-unicharset";
static const char kCubeSystemDawgFileSuffix[] = "cube-word-dawg";
static const char kShapeTableFileSuffix[] = "shapetable";
static const char kBigramDawgFileSuffix[] = "bigram-dawg";
static const char kUnambigDawgFileSuffix[] = "unambig-dawg";
static const char kParamsModelFileSuffix[] = "params-model";
static const char kLSTMModelFileSuffix[] = "lstm";
static const char kLSTMPuncDawgFileSuffix[] = "lstm-punc-dawg";
static const char kLSTMSystemDawgFileSuffix[] = "lstm-word-dawg";
static const char kLSTMNumberDawgFileSuffix[] = "lstm-number-dawg";
static const char kLSTMUnicharsetFileSuffix[] = "lstm-unicharset";
static const char kLSTMRecoderFileSuffix[] = "lstm-recoder";
static const char kVersionFileSuffix[] = "version";
namespace tesseract {
// Types of tessdata components. The enumerator values index the offset
// table in a traineddata file (see TessdataManager::Serialize /
// LoadMemBuffer), so they are part of the on-disk format: never renumber
// existing entries; deprecated slots are kept as placeholders.
enum TessdataType {
  TESSDATA_LANG_CONFIG, // 0
  TESSDATA_UNICHARSET, // 1
  TESSDATA_AMBIGS, // 2
  TESSDATA_INTTEMP, // 3
  TESSDATA_PFFMTABLE, // 4
  TESSDATA_NORMPROTO, // 5
  TESSDATA_PUNC_DAWG, // 6
  TESSDATA_SYSTEM_DAWG, // 7
  TESSDATA_NUMBER_DAWG, // 8
  TESSDATA_FREQ_DAWG, // 9
  TESSDATA_FIXED_LENGTH_DAWGS, // 10 // deprecated
  TESSDATA_CUBE_UNICHARSET, // 11 // deprecated
  TESSDATA_CUBE_SYSTEM_DAWG, // 12 // deprecated
  TESSDATA_SHAPE_TABLE, // 13
  TESSDATA_BIGRAM_DAWG, // 14
  TESSDATA_UNAMBIG_DAWG, // 15
  TESSDATA_PARAMS_MODEL, // 16
  TESSDATA_LSTM, // 17
  TESSDATA_LSTM_PUNC_DAWG, // 18
  TESSDATA_LSTM_SYSTEM_DAWG, // 19
  TESSDATA_LSTM_NUMBER_DAWG, // 20
  TESSDATA_LSTM_UNICHARSET, // 21
  TESSDATA_LSTM_RECODER, // 22
  TESSDATA_VERSION, // 23
  TESSDATA_NUM_ENTRIES
};
/**
 * kTessdataFileSuffixes[i] indicates the file suffix for
 * tessdata of type i (from TessdataType enum).
 * Must be kept in exactly the same order as TessdataType; the index is
 * the enum value.
 */
static const char *const kTessdataFileSuffixes[] = {
    kLangConfigFileSuffix, // 0
    kUnicharsetFileSuffix, // 1
    kAmbigsFileSuffix, // 2
    kBuiltInTemplatesFileSuffix, // 3
    kBuiltInCutoffsFileSuffix, // 4
    kNormProtoFileSuffix, // 5
    kPuncDawgFileSuffix, // 6
    kSystemDawgFileSuffix, // 7
    kNumberDawgFileSuffix, // 8
    kFreqDawgFileSuffix, // 9
    kFixedLengthDawgsFileSuffix, // 10 // deprecated
    kCubeUnicharsetFileSuffix, // 11 // deprecated
    kCubeSystemDawgFileSuffix, // 12 // deprecated
    kShapeTableFileSuffix, // 13
    kBigramDawgFileSuffix, // 14
    kUnambigDawgFileSuffix, // 15
    kParamsModelFileSuffix, // 16
    kLSTMModelFileSuffix, // 17
    kLSTMPuncDawgFileSuffix, // 18
    kLSTMSystemDawgFileSuffix, // 19
    kLSTMNumberDawgFileSuffix, // 20
    kLSTMUnicharsetFileSuffix, // 21
    kLSTMRecoderFileSuffix, // 22
    kVersionFileSuffix, // 23
};
/**
* TessdataType could be updated to contain more entries, however
* we do not expect that number to be astronomically high.
* In order to automatically detect endianness TessdataManager will
* flip the bits if actual_tessdata_num_entries_ is larger than
* kMaxNumTessdataEntries.
*/
static const int kMaxNumTessdataEntries = 1000;
// Loads, combines, splits and serves the components of a traineddata file.
class TESS_API TessdataManager {
public:
  TessdataManager();
  explicit TessdataManager(FileReader reader);
  ~TessdataManager() = default;
  // Returns true if the loaded data had the opposite byte order to this
  // host (detected in LoadMemBuffer).
  bool swap() const {
    return swap_;
  }
  // Returns true if data has been loaded (from file, buffer or archive).
  bool is_loaded() const {
    return is_loaded_;
  }
  // Lazily loads from the given filename. Won't actually read the file
  // until it needs it.
  void LoadFileLater(const char *data_file_name);
  /**
   * Opens and reads the given data file right now.
   * @return true on success.
   */
  bool Init(const char *data_file_name);
  // Loads from the given memory buffer as if a file, remembering name as some
  // arbitrary source id for caching.
  bool LoadMemBuffer(const char *name, const char *data, int size);
  // Overwrites a single entry of the given type.
  void OverwriteEntry(TessdataType type, const char *data, int size);
  // Saves to the given filename.
  bool SaveFile(const char *filename, FileWriter writer) const;
  // Serializes to the given vector.
  void Serialize(std::vector<char> *data) const;
  // Resets to the initial state, keeping the reader.
  void Clear();
  // Prints a directory of contents.
  void Directory() const;
  // Returns true if the component requested is present.
  bool IsComponentAvailable(TessdataType type) const {
    return !entries_[type].empty();
  }
  // Opens the given TFile pointer to the given component type.
  // Returns false in case of failure.
  bool GetComponent(TessdataType type, TFile *fp);
  // As non-const version except it can't load the component if not already
  // loaded.
  bool GetComponent(TessdataType type, TFile *fp) const;
  // Returns the current version string.
  std::string VersionString() const;
  // Sets the version string to the given v_str.
  void SetVersionString(const std::string &v_str);
  // Returns true if the base Tesseract components are present.
  bool IsBaseAvailable() const {
    return !entries_[TESSDATA_UNICHARSET].empty() && !entries_[TESSDATA_INTTEMP].empty();
  }
  // Returns true if the LSTM components are present.
  bool IsLSTMAvailable() const {
    return !entries_[TESSDATA_LSTM].empty();
  }
  // Return the name of the underlying data file.
  const std::string &GetDataFileName() const {
    return data_file_name_;
  }
  /**
   * Reads all the standard tesseract config and data files for a language
   * at the given path and bundles them up into one binary data file.
   * Returns true if the combined traineddata file was successfully written.
   */
  bool CombineDataFiles(const char *language_data_path_prefix, const char *output_filename);
  /**
   * Gets the individual components from the data_file_ with which the class was
   * initialized. Overwrites the components specified by component_filenames.
   * Writes the updated traineddata file to new_traineddata_filename.
   */
  bool OverwriteComponents(const char *new_traineddata_filename, char **component_filenames,
                           int num_new_components);
  /**
   * Extracts tessdata component implied by the name of the input file from
   * the combined traineddata loaded into TessdataManager.
   * Writes the extracted component to the file indicated by the file name.
   * E.g. if the filename given is somepath/somelang.unicharset, unicharset
   * will be extracted from the data loaded into the TessdataManager and will
   * be written to somepath/somelang.unicharset.
   * @return true if the component was successfully extracted, false if the
   * component was not present in the traineddata loaded into TessdataManager.
   */
  bool ExtractToFile(const char *filename);
private:
  // Use libarchive.
  bool LoadArchiveFile(const char *filename);
  /**
   * Fills type with TessdataType of the tessdata component represented by the
   * given file name. E.g. tessdata/eng.unicharset -> TESSDATA_UNICHARSET.
   * @return true if the tessdata component type could be determined
   * from the given file name.
   */
  static bool TessdataTypeFromFileSuffix(const char *suffix, TessdataType *type);
  /**
   * Tries to determine tessdata component file suffix from filename,
   * returns true on success.
   */
  static bool TessdataTypeFromFileName(const char *filename, TessdataType *type);
  // Name of file it came from.
  std::string data_file_name_;
  // Function to load the file when we need it.
  FileReader reader_;
  // True if the file has been loaded.
  bool is_loaded_;
  // True if the bytes need swapping.
  bool swap_;
  // Contents of each element of the traineddata file, indexed by TessdataType.
  std::vector<char> entries_[TESSDATA_NUM_ENTRIES];
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_TESSDATAMANAGER_H_
|
2301_81045437/tesseract
|
src/ccutil/tessdatamanager.h
|
C++
|
apache-2.0
| 9,901
|
///////////////////////////////////////////////////////////////////////
// File: tesstypes.h
// Description: Simple data types used by Tesseract code.
// Author: Stefan Weil
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_TESSTYPES_H
#define TESSERACT_TESSTYPES_H
#ifdef HAVE_CONFIG_H
# include "config_auto.h" // FAST_FLOAT
#endif
#include <cstdint> // for int16_t, int32_t
namespace tesseract {
// Image dimensions (width and height, coordinates).
// 16-bit by default; building with LARGE_IMAGES widens it to 32 bits to
// allow images larger than 32767 pixels in either dimension.
#if defined(LARGE_IMAGES)
using TDimension = int32_t;
#else
using TDimension = int16_t;
#endif
// Floating point data type used for LSTM calculations.
// FAST_FLOAT (from config_auto.h) selects single precision.
#if defined(FAST_FLOAT)
using TFloat = float;
#else
using TFloat = double;
#endif
}
#endif // TESSERACT_TESSTYPES_H
|
2301_81045437/tesseract
|
src/ccutil/tesstypes.h
|
C++
|
apache-2.0
| 1,328
|
/**********************************************************************
* File: tprintf.cpp
* Description: Trace version of printf - portable between UX and NT
* Author: Phil Cheatle
*
* (C) Copyright 1995, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "tprintf.h"
#include "params.h"
#include <climits> // for INT_MAX
#include <cstdarg>
#include <cstdio>
namespace tesseract {
#define MAX_MSG_LEN 2048
INT_VAR(log_level, INT_MAX, "Logging level");
static STRING_VAR(debug_file, "", "File to send tprintf output to");
// Trace printf.
// Writes the formatted message to the file named by the debug_file
// parameter, or to stderr when no debug file is configured. The debug file
// is opened lazily on first use and closed again when debug_file is reset
// to the empty string.
void tprintf(const char *format, ...) {
  const char *debug_file_name = debug_file.c_str();
  static FILE *debugfp = nullptr; // debug file
  // Purely defensive: std::string::c_str() never returns nullptr.
  if (debug_file_name == nullptr) {
    // This should not happen.
    return;
  }
#ifdef _WIN32
  // Replace /dev/null by nul for Windows.
  if (strcmp(debug_file_name, "/dev/null") == 0) {
    debug_file_name = "nul";
    debug_file.set_value(debug_file_name);
  }
#endif
  // NOTE(review): if debug_file is changed from one non-empty name to
  // another, the stream already open on the old name is kept and keeps
  // receiving output — confirm this is intended.
  if (debugfp == nullptr && debug_file_name[0] != '\0') {
    debugfp = fopen(debug_file_name, "wb");
  } else if (debugfp != nullptr && debug_file_name[0] == '\0') {
    fclose(debugfp);
    debugfp = nullptr;
  }
  va_list args; // variable args
  va_start(args, format); // variable list
  if (debugfp != nullptr) {
    vfprintf(debugfp, format, args);
  } else {
    vfprintf(stderr, format, args);
  }
  va_end(args);
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/tprintf.cpp
|
C++
|
apache-2.0
| 2,183
|
/**********************************************************************
* File: tprintf.h
* Description: Trace version of printf - portable between UX and NT
* Author: Phil Cheatle
*
* (C) Copyright 1995, Hewlett-Packard Ltd.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*
**********************************************************************/
#ifndef TESSERACT_CCUTIL_TPRINTF_H
#define TESSERACT_CCUTIL_TPRINTF_H
#include "params.h" // for BOOL_VAR_H
#include <tesseract/export.h> // for TESS_API
namespace tesseract {
#if !defined(__GNUC__) && !defined(__attribute__)
# define __attribute__(attr) // compiler without support for __attribute__
#endif
// Disable some log messages by setting log_level > 0.
extern TESS_API INT_VAR_H(log_level);
// Main logging function.
extern TESS_API void tprintf( // Trace printf
const char *format, ...) // Message
__attribute__((format(printf, 1, 2)));
} // namespace tesseract
#undef __attribute__
#endif // define TESSERACT_CCUTIL_TPRINTF_H
|
2301_81045437/tesseract
|
src/ccutil/tprintf.h
|
C++
|
apache-2.0
| 1,529
|
///////////////////////////////////////////////////////////////////////
// File: unichar.cpp
// Description: Unicode character/ligature class.
// Author: Ray Smith
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include <tesseract/unichar.h>
#include "errcode.h"
#include "tprintf.h"
#define UNI_MAX_LEGAL_UTF32 0x0010FFFF
namespace tesseract {
// Construct from a utf8 string. If len<0 then the string is null terminated.
// If the string is too long to fit in the UNICHAR then it takes only what
// will fit. Checks for illegal input and stops at an illegal sequence.
// The resulting UNICHAR may be empty.
UNICHAR::UNICHAR(const char *utf8_str, int len) {
  int total_len = 0;
  int step = 0;
  if (len < 0) {
    // Unknown length: scan for the terminator, but never look at more than
    // UNICHAR_LEN bytes.
    for (len = 0; len < UNICHAR_LEN && utf8_str[len] != 0; ++len) {
      ;
    }
  }
  // Validate whole UTF-8 sequences; total_len advances one sequence at a time
  // and stops at the first sequence that is illegal or would not fit.
  for (total_len = 0; total_len < len; total_len += step) {
    step = utf8_step(utf8_str + total_len);
    if (total_len + step > UNICHAR_LEN) {
      break; // Too long.
    }
    if (step == 0) {
      break; // Illegal first byte.
    }
    // Every byte after the lead byte must be a continuation byte (10xxxxxx).
    int i;
    for (i = 1; i < step; ++i) {
      if ((utf8_str[total_len + i] & 0xc0) != 0x80) {
        break;
      }
    }
    if (i < step) {
      break; // Illegal surrogate
    }
  }
  memcpy(chars, utf8_str, total_len);
  // Store the byte count in the last slot and zero-pad the unused middle.
  // NOTE(review): when total_len == UNICHAR_LEN no length byte is written and
  // the final data byte doubles as the length — presumably unreachable for
  // real unichars; confirm UNICHAR_LEN exceeds the longest accepted sequence.
  if (total_len < UNICHAR_LEN) {
    chars[UNICHAR_LEN - 1] = total_len;
    while (total_len < UNICHAR_LEN - 1) {
      chars[total_len++] = 0;
    }
  }
}
// Construct from a single UCS4 character. Illegal values are ignored,
// resulting in an empty UNICHAR.
UNICHAR::UNICHAR(int unicode) {
  const int bytemask = 0xBF; // Keeps the low 6 payload bits of a continuation byte.
  const int bytemark = 0x80; // 10xxxxxx continuation-byte marker.
  if (unicode < 0x80) {
    // 1-byte (ASCII) sequence; the length lives in the last slot.
    chars[UNICHAR_LEN - 1] = 1;
    chars[2] = 0;
    chars[1] = 0;
    chars[0] = static_cast<char>(unicode);
  } else if (unicode < 0x800) {
    // 2-byte sequence: 110xxxxx 10xxxxxx, filled low bits first.
    chars[UNICHAR_LEN - 1] = 2;
    chars[2] = 0;
    chars[1] = static_cast<char>((unicode | bytemark) & bytemask);
    unicode >>= 6;
    chars[0] = static_cast<char>(unicode | 0xc0);
  } else if (unicode < 0x10000) {
    // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx.
    chars[UNICHAR_LEN - 1] = 3;
    chars[2] = static_cast<char>((unicode | bytemark) & bytemask);
    unicode >>= 6;
    chars[1] = static_cast<char>((unicode | bytemark) & bytemask);
    unicode >>= 6;
    chars[0] = static_cast<char>(unicode | 0xe0);
  } else if (unicode <= UNI_MAX_LEGAL_UTF32) {
    // 4-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx.
    chars[UNICHAR_LEN - 1] = 4;
    chars[3] = static_cast<char>((unicode | bytemark) & bytemask);
    unicode >>= 6;
    chars[2] = static_cast<char>((unicode | bytemark) & bytemask);
    unicode >>= 6;
    chars[1] = static_cast<char>((unicode | bytemark) & bytemask);
    unicode >>= 6;
    chars[0] = static_cast<char>(unicode | 0xf0);
  } else {
    // Out of Unicode range: leave an all-zero (empty, length 0) UNICHAR.
    memset(chars, 0, UNICHAR_LEN);
  }
}
// Get the first character as UCS-4.
int UNICHAR::first_uni() const {
  // Subtracting these constants strips the UTF-8 marker bits that were
  // accumulated while shifting in each byte; indexed by sequence length.
  static const int utf8_offsets[5] = {0, 0, 0x3080, 0xE2080, 0x3C82080};
  int uni = 0;
  int len = utf8_step(chars);
  const char *src = chars;
  // Deliberate fall-through: a length-N sequence consumes N bytes, shifting
  // 6 bits per byte; len==0 (illegal lead byte) takes the default and
  // returns utf8_offsets[0] subtracted from 0, i.e. 0.
  switch (len) {
    default:
      break;
    case 4:
      uni += static_cast<unsigned char>(*src++);
      uni <<= 6;
      // Fall through.
    case 3:
      uni += static_cast<unsigned char>(*src++);
      uni <<= 6;
      // Fall through.
    case 2:
      uni += static_cast<unsigned char>(*src++);
      uni <<= 6;
      // Fall through.
    case 1:
      uni += static_cast<unsigned char>(*src++);
  }
  uni -= utf8_offsets[len];
  return uni;
}
// Returns a freshly-allocated, NUL-terminated copy of the UTF-8 bytes.
// Ownership passes to the caller: Must delete[] it after use.
char *UNICHAR::utf8_str() const {
  const int nbytes = utf8_len();
  char *result = new char[nbytes + 1];
  memcpy(result, chars, nbytes);
  result[nbytes] = '\0';
  return result;
}
// Get the number of bytes in the first character of the given utf8 string,
// or 0 if the lead byte is not a legal UTF-8 first byte.
int UNICHAR::utf8_step(const char *utf8_str) {
  // Decode the length from the lead byte's bit pattern rather than a
  // 256-entry lookup table. The mapping is identical:
  //   0xxxxxxx -> 1 (ASCII)
  //   10xxxxxx -> 0 (continuation byte, illegal as first byte)
  //   110xxxxx -> 2
  //   1110xxxx -> 3
  //   11110xxx -> 4
  //   11111xxx -> 0 (illegal in UTF-8)
  const auto lead = static_cast<unsigned char>(*utf8_str);
  if (lead < 0x80) {
    return 1;
  }
  if (lead < 0xc0) {
    return 0;
  }
  if (lead < 0xe0) {
    return 2;
  }
  if (lead < 0xf0) {
    return 3;
  }
  if (lead < 0xf8) {
    return 4;
  }
  return 0;
}
// Advances the iterator by one whole UTF-8 sequence. On an illegal lead
// byte the error is logged and the iterator steps over a single byte so
// that iteration still terminates.
UNICHAR::const_iterator &UNICHAR::const_iterator::operator++() {
  ASSERT_HOST(it_ != nullptr);
  int advance = utf8_step(it_);
  if (advance == 0) {
    tprintf("ERROR: Illegal UTF8 encountered.\n");
    // Dump up to 5 bytes of context for diagnosis.
    for (int i = 0; i < 5 && it_[i] != '\0'; ++i) {
      tprintf("Index %d char = 0x%x\n", i, it_[i]);
    }
    advance = 1;
  }
  it_ += advance;
  return *this;
}
// Dereferences to the UCS-4 value of the sequence under the iterator;
// an illegal sequence yields a space after logging a warning.
int UNICHAR::const_iterator::operator*() const {
  ASSERT_HOST(it_ != nullptr);
  const int nbytes = utf8_step(it_);
  if (nbytes == 0) {
    tprintf("WARNING: Illegal UTF8 encountered\n");
    return ' ';
  }
  return UNICHAR(it_, nbytes).first_uni();
}
// Copies the current (unterminated) UTF-8 sequence into utf8_output and
// returns its byte count; an illegal sequence writes a single space.
int UNICHAR::const_iterator::get_utf8(char *utf8_output) const {
  ASSERT_HOST(it_ != nullptr);
  const int nbytes = utf8_step(it_);
  if (nbytes == 0) {
    tprintf("WARNING: Illegal UTF8 encountered\n");
    utf8_output[0] = ' ';
    return 1;
  }
  strncpy(utf8_output, it_, nbytes);
  return nbytes;
}
// Byte length of the sequence under the iterator; reports an illegal
// sequence as length 1 (after a warning) so callers always make progress.
int UNICHAR::const_iterator::utf8_len() const {
  ASSERT_HOST(it_ != nullptr);
  const int nbytes = utf8_step(it_);
  if (nbytes != 0) {
    return nbytes;
  }
  tprintf("WARNING: Illegal UTF8 encountered\n");
  return 1;
}
// True if the iterator currently points at a legal UTF-8 lead byte.
bool UNICHAR::const_iterator::is_legal() const {
  return utf8_step(it_) > 0;
}
// Returns an iterator at the start of the string.
// (len is accepted for symmetry with end() but is unused here.)
UNICHAR::const_iterator UNICHAR::begin(const char *utf8_str, int len) {
  return UNICHAR::const_iterator(utf8_str);
}
// Returns an off-the-end iterator, len bytes past the start.
UNICHAR::const_iterator UNICHAR::end(const char *utf8_str, int len) {
  return UNICHAR::const_iterator(utf8_str + len);
}
// Converts a utf-8 string to a vector of unicodes.
// Returns an empty vector if the input contains invalid UTF-8.
/* static */
std::vector<char32> UNICHAR::UTF8ToUTF32(const char *utf8_str) {
const int utf8_length = strlen(utf8_str);
std::vector<char32> unicodes;
unicodes.reserve(utf8_length);
const_iterator end_it(end(utf8_str, utf8_length));
for (const_iterator it(begin(utf8_str, utf8_length)); it != end_it; ++it) {
if (it.is_legal()) {
unicodes.push_back(*it);
} else {
unicodes.clear();
return unicodes;
}
}
return unicodes;
}
// Returns an empty string if the input contains an invalid unicode.
std::string UNICHAR::UTF32ToUTF8(const std::vector<char32> &str32) {
std::string utf8_str;
for (char32 ch : str32) {
UNICHAR uni_ch(ch);
int step;
if (uni_ch.utf8_len() > 0 && (step = utf8_step(uni_ch.utf8())) > 0) {
utf8_str.append(uni_ch.utf8(), step);
} else {
return "";
}
}
return utf8_str;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/unichar.cpp
|
C++
|
apache-2.0
| 7,731
|
///////////////////////////////////////////////////////////////////////
// File: unicharcompress.cpp
// Description: Unicode re-encoding using a sequence of smaller numbers in
// place of a single large code for CJK, similarly for Indic,
// and dissection of ligatures for other scripts.
// Author: Ray Smith
//
// (C) Copyright 2015, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "unicharcompress.h"
#include <algorithm>
#include <memory>
#include "tprintf.h"
namespace tesseract {
// String used to represent the null_id in direct_set.
static const char *kNullChar = "<nul>";
// Radix to make unique values from the stored radical codes.
const int kRadicalRadix = 29;

// Pre-hash for a radical code sequence: folds the codes into one integer by
// treating them as digits base kRadicalRadix (NOT a plain sum, as the old
// comment claimed). The result indexes a hash map of ints so we never have
// to hash the vectors themselves. Long sequences can wrap the accumulator;
// the caller only uses this to count duplicates, so rare collisions merely
// merge counts.
static int RadicalPreHash(const std::vector<int> &rs) {
  size_t result = 0; // unsigned: wraparound is well defined.
  for (int radical : rs) {
    result *= kRadicalRadix;
    result += radical;
  }
  // Explicit cast; previously this size_t -> int narrowing was implicit.
  return static_cast<int>(result);
}
// A hash map to convert unicodes to radical encoding.
using RSMap = std::unordered_map<int, std::unique_ptr<std::vector<int>>>;
// A hash map to count occurrences of each radical encoding.
using RSCounts = std::unordered_map<int, int>;
static bool DecodeRadicalLine(std::string &radical_data_line, RSMap *radical_map) {
if (radical_data_line.empty() || (radical_data_line)[0] == '#') {
return true;
}
std::vector<std::string> entries = split(radical_data_line, ' ');
if (entries.size() < 2) {
return false;
}
char *end = nullptr;
int unicode = strtol(&entries[0][0], &end, 10);
if (*end != '\0') {
return false;
}
std::unique_ptr<std::vector<int>> radicals(new std::vector<int>);
for (size_t i = 1; i < entries.size(); ++i) {
int radical = strtol(&entries[i][0], &end, 10);
if (*end != '\0') {
return false;
}
radicals->push_back(radical);
}
(*radical_map)[unicode] = std::move(radicals);
return true;
}
// Helper function builds the RSMap from the radical-stroke file, which has
// already been read into a string. Returns false on error.
// The radical_stroke_table is non-const because it gets split and the caller
// is unlikely to want to use it again.
static bool DecodeRadicalTable(std::string &radical_data, RSMap *radical_map) {
  std::vector<std::string> lines = split(radical_data, '\n');
  for (unsigned i = 0; i < lines.size(); ++i) {
    if (!DecodeRadicalLine(lines[i], radical_map)) {
      // BUGFIX: i is unsigned, so the conversion must be %u (was %d, which
      // is undefined behavior per the printf specification).
      tprintf("Invalid format in radical table at line %u: %s\n", i, lines[i].c_str());
      return false;
    }
  }
  return true;
}
// Starts with an empty encoding: code_range_ 0 means no codes assigned yet.
UnicharCompress::UnicharCompress() : code_range_(0) {}
// Copy construction delegates to operator=, which copies encoder_ and
// rebuilds the derived decoder tables.
UnicharCompress::UnicharCompress(const UnicharCompress &src) {
  *this = src;
}
// Cleanup() releases the heap-allocated next/final code vectors.
UnicharCompress::~UnicharCompress() {
  Cleanup();
}
// Copies encoder_ and code_range_ from src and rebuilds the derived decoding
// tables. Only encoder_/code_range_ are copied; decoder_, next_codes_ and
// final_codes_ are always recomputed via SetupDecoder().
UnicharCompress &UnicharCompress::operator=(const UnicharCompress &src) {
  // Self-assignment guard: skips a pointless teardown (Cleanup) and rebuild
  // (SetupDecoder) of tables that would end up identical anyway.
  if (this != &src) {
    Cleanup();
    encoder_ = src.encoder_;
    code_range_ = src.code_range_;
    SetupDecoder();
  }
  return *this;
}
// Computes the encoding for the given unicharset. It is a requirement that
// the file training/langdata/radical-stroke.txt have been read into the
// input string radical_stroke_table.
// Returns false if the encoding cannot be constructed (radical table is
// malformed, a unichar needs more than kMaxCodeLen codes, or the direct
// code space grows beyond the original unicharset).
bool UnicharCompress::ComputeEncoding(const UNICHARSET &unicharset, int null_id,
                                      std::string *radical_stroke_table) {
  RSMap radical_map;
  if (radical_stroke_table != nullptr && !DecodeRadicalTable(*radical_stroke_table, &radical_map)) {
    return false;
  }
  encoder_.clear();
  UNICHARSET direct_set;
  // To avoid unused codes, clear the special codes from the direct_set.
  direct_set.clear();
  // Always keep space as 0;
  direct_set.unichar_insert(" ", OldUncleanUnichars::kTrue);
  // Null char is next if we have one.
  if (null_id >= 0) {
    direct_set.unichar_insert(kNullChar);
  }
  RSCounts radical_counts;
  // In the initial map, codes [0, unicharset.size()) are
  // reserved for non-han/hangul sequences of 1 or more unicodes.
  int hangul_offset = unicharset.size();
  // Hangul takes the next range [hangul_offset, hangul_offset + kTotalJamos).
  const int kTotalJamos = kLCount + kVCount + kTCount;
  // Han takes the codes beyond hangul_offset + kTotalJamos. Since it is hard
  // to measure the number of radicals and strokes, initially we use the same
  // code range for all 3 Han code positions, and fix them after.
  int han_offset = hangul_offset + kTotalJamos;
  for (unsigned u = 0; u <= unicharset.size(); ++u) {
    // We special-case allow null_id to be equal to unicharset.size() in case
    // there is no space in unicharset for it.
    if (u == unicharset.size() && static_cast<int>(u) != null_id) {
      break; // Finished
    }
    RecodedCharID code;
    // Convert to unicodes.
    std::vector<char32> unicodes;
    std::string cleaned;
    if (u < unicharset.size()) {
      cleaned = UNICHARSET::CleanupString(unicharset.id_to_unichar(u));
    }
    if (u < unicharset.size() && (unicodes = UNICHAR::UTF8ToUTF32(cleaned.c_str())).size() == 1) {
      // Check single unicodes for Hangul/Han and encode if so.
      int unicode = unicodes[0];
      int leading, vowel, trailing;
      auto it = radical_map.find(unicode);
      if (it != radical_map.end()) {
        // This is Han. Use the radical codes directly.
        int num_radicals = it->second->size();
        for (int c = 0; c < num_radicals; ++c) {
          code.Set(c, han_offset + (*it->second)[c]);
        }
        // Disambiguate unichars that share the same radical sequence by
        // appending a per-sequence occurrence count.
        int pre_hash = RadicalPreHash(*it->second);
        int num_samples = radical_counts[pre_hash]++;
        if (num_samples > 0) {
          code.Set(num_radicals, han_offset + num_samples + kRadicalRadix);
        }
      } else if (DecomposeHangul(unicode, &leading, &vowel, &trailing)) {
        // This is Hangul. Since we know the exact size of each part at compile
        // time, it gets the bottom set of codes.
        code.Set3(leading + hangul_offset, vowel + kLCount + hangul_offset,
                  trailing + kLCount + kVCount + hangul_offset);
      }
    }
    // If the code is still empty, it wasn't Han or Hangul.
    if (code.empty()) {
      // Special cases.
      if (u == UNICHAR_SPACE) {
        code.Set(0, 0); // Space.
      } else if (static_cast<int>(u) == null_id ||
                 (unicharset.has_special_codes() && u < SPECIAL_UNICHAR_CODES_COUNT)) {
        code.Set(0, direct_set.unichar_to_id(kNullChar));
      } else {
        // Add the direct_set unichar-ids of the unicodes in sequence to the
        // code.
        for (int uni : unicodes) {
          int position = code.length();
          if (position >= RecodedCharID::kMaxCodeLen) {
            // BUGFIX: u is unsigned so the conversion must be %u (was %d,
            // undefined behavior per the printf specification).
            tprintf("Unichar %u=%s is too long to encode!!\n", u, unicharset.id_to_unichar(u));
            return false;
          }
          UNICHAR unichar(uni);
          char *utf8 = unichar.utf8_str();
          if (!direct_set.contains_unichar(utf8)) {
            direct_set.unichar_insert(utf8);
          }
          code.Set(position, direct_set.unichar_to_id(utf8));
          delete[] utf8;
          if (direct_set.size() > unicharset.size() + !unicharset.has_special_codes()) {
            // Code space got bigger!
            tprintf("Code space expanded from original unicharset!!\n");
            return false;
          }
        }
      }
    }
    encoder_.push_back(code);
  }
  // Now renumber Han to make all codes unique. We already added han_offset to
  // all Han. Now separate out the radical, stroke, and count codes for Han.
  int code_offset = 0;
  for (int i = 0; i < RecodedCharID::kMaxCodeLen; ++i) {
    int max_offset = 0;
    for (unsigned u = 0; u < unicharset.size(); ++u) {
      RecodedCharID *code = &encoder_[u];
      if (code->length() <= i) {
        continue;
      }
      max_offset = std::max(max_offset, (*code)(i)-han_offset);
      code->Set(i, (*code)(i) + code_offset);
    }
    if (max_offset == 0) {
      break; // No code uses position i, so later positions are unused too.
    }
    code_offset += max_offset + 1;
  }
  DefragmentCodeValues(null_id >= 0 ? 1 : -1);
  SetupDecoder();
  return true;
}
// Sets up an encoder that doesn't change the unichars at all, so it just
// passes them through unchanged.
void UnicharCompress::SetupPassThrough(const UNICHARSET &unicharset) {
std::vector<RecodedCharID> codes;
for (unsigned u = 0; u < unicharset.size(); ++u) {
RecodedCharID code;
code.Set(0, u);
codes.push_back(code);
}
if (!unicharset.has_special_codes()) {
RecodedCharID code;
code.Set(0, unicharset.size());
codes.push_back(code);
}
SetupDirect(codes);
}
// Sets up an encoder directly using the given encoding vector, which maps
// unichar_ids to the given codes.
void UnicharCompress::SetupDirect(const std::vector<RecodedCharID> &codes) {
  encoder_ = codes;
  // The derived tables are always recomputed from encoder_, never copied.
  ComputeCodeRange();
  SetupDecoder();
}
// Renumbers codes to eliminate unused values.
void UnicharCompress::DefragmentCodeValues(int encoded_null) {
  // There may not be any Hangul, but even if there is, it is possible that not
  // all codes are used. Likewise with the Han encoding, it is possible that not
  // all numbers of strokes are used.
  ComputeCodeRange();
  // NOTE: offsets[] is reused in two phases: first as a used/unused flag per
  // code value, then as the (negative) shift to apply to each used value.
  std::vector<int> offsets(code_range_);
  // Find which codes are used
  for (auto &code : encoder_) {
    for (int i = 0; i < code.length(); ++i) {
      offsets[code(i)] = 1;
    }
  }
  // Compute offsets based on code use.
  int offset = 0;
  for (unsigned i = 0; i < offsets.size(); ++i) {
    // If not used, decrement everything above here.
    // We are moving encoded_null to the end, so it is not "used".
    if (offsets[i] == 0 || i == static_cast<unsigned>(encoded_null)) {
      --offset;
    } else {
      offsets[i] = offset;
    }
  }
  if (encoded_null >= 0) {
    // The encoded_null is moving to the end, for the benefit of TensorFlow,
    // which is offsets.size() + offsets.back().
    offsets[encoded_null] = offsets.size() + offsets.back() - encoded_null;
  }
  // Now apply the offsets.
  for (auto &c : encoder_) {
    RecodedCharID *code = &c;
    for (int i = 0; i < code->length(); ++i) {
      int value = (*code)(i);
      code->Set(i, value + offsets[value]);
    }
  }
  // Re-derive code_range_ from the compacted values.
  ComputeCodeRange();
}
// Encodes a single unichar_id. Returns the length of the code, or zero if
// the id is out of range, and the encoding itself in *code.
int UnicharCompress::EncodeUnichar(unsigned unichar_id, RecodedCharID *code) const {
  if (unichar_id < encoder_.size()) {
    *code = encoder_[unichar_id];
    return code->length();
  }
  return 0; // Unknown id: *code is left untouched.
}
// Decodes code, returning the original unichar-id, or
// INVALID_UNICHAR_ID if the input is invalid (bad length or unknown code).
int UnicharCompress::DecodeUnichar(const RecodedCharID &code) const {
  const int len = code.length();
  if (0 < len && len <= RecodedCharID::kMaxCodeLen) {
    const auto it = decoder_.find(code);
    if (it != decoder_.end()) {
      return it->second;
    }
  }
  return INVALID_UNICHAR_ID;
}
// Writes to the given file. Returns false in case of error.
// Only encoder_ is persisted; every other table is derived on load.
bool UnicharCompress::Serialize(TFile *fp) const {
  return fp->Serialize(encoder_);
}
// Reads from the given file. Returns false in case of error.
bool UnicharCompress::DeSerialize(TFile *fp) {
  if (!fp->DeSerialize(encoder_)) {
    return false;
  }
  // Rebuild code_range_ and the decoding tables from the loaded encoder_.
  ComputeCodeRange();
  SetupDecoder();
  return true;
}
// Returns a string containing a text file that describes the encoding thus:
// <index>[,<index>]*<tab><UTF8-str><newline>
// In words, a comma-separated list of one or more indices, followed by a tab
// and the UTF-8 string that the code represents per line. Most simple scripts
// will encode a single index to a UTF8-string, but Chinese, Japanese, Korean
// and the Indic scripts will contain a many-to-many mapping.
// See the class comment above for details.
std::string UnicharCompress::GetEncodingAsString(const UNICHARSET &unicharset) const {
  std::string encoding;
  for (unsigned c = 0; c < encoder_.size(); ++c) {
    const RecodedCharID &code = encoder_[c];
    const bool duplicate_special =
        0 < c && c < SPECIAL_UNICHAR_CODES_COUNT && code == encoder_[c - 1];
    if (duplicate_special) {
      continue; // Don't show the duplicate entry.
    }
    // Comma-separated code values.
    encoding += std::to_string(code(0));
    for (int i = 1; i < code.length(); ++i) {
      encoding += ",";
      encoding += std::to_string(code(i));
    }
    encoding += "\t";
    // The text the code stands for; special/null entries print as <nul>.
    if (c >= unicharset.size() ||
        (0 < c && c < SPECIAL_UNICHAR_CODES_COUNT && unicharset.has_special_codes())) {
      encoding += kNullChar;
    } else {
      encoding += unicharset.id_to_unichar(c);
    }
    encoding += "\n";
  }
  return encoding;
}
// Helper decomposes a Hangul unicode to 3 parts, leading, vowel, trailing.
// Note that the returned values are 0-based indices, NOT unicode Jamo.
// Returns false if the input is not in the Hangul unicode range.
/* static */
bool UnicharCompress::DecomposeHangul(int unicode, int *leading, int *vowel, int *trailing) {
  const int offset = unicode - kFirstHangul;
  if (offset < 0 || offset >= kNumHangul) {
    return false; // Outside the precomposed Hangul block.
  }
  // Each leading consonant spans kVCount * kTCount codepoints.
  const int kNCount = kVCount * kTCount;
  *leading = offset / kNCount;
  *vowel = (offset % kNCount) / kTCount;
  *trailing = offset % kTCount;
  return true;
}
// Computes the value of code_range_ from the encoder_: one more than the
// largest code value in use (0 when the encoder is empty).
void UnicharCompress::ComputeCodeRange() {
  int max_code = -1;
  for (const auto &code : encoder_) {
    for (int i = 0; i < code.length(); ++i) {
      max_code = std::max(max_code, code(i));
    }
  }
  code_range_ = max_code + 1;
}
// Initializes the decoding hash_map from the encoding array.
void UnicharCompress::SetupDecoder() {
  Cleanup();
  is_valid_start_.clear();
  is_valid_start_.resize(code_range_);
  for (unsigned c = 0; c < encoder_.size(); ++c) {
    const RecodedCharID &code = encoder_[c];
    decoder_[code] = c;
    is_valid_start_[code(0)] = true;
    // Register every proper prefix of the code, so that during decoding the
    // valid continuations (next_codes_) and valid completions (final_codes_)
    // of a partial code can be looked up directly.
    RecodedCharID prefix = code;
    int len = code.length() - 1;
    prefix.Truncate(len);
    auto final_it = final_codes_.find(prefix);
    if (final_it == final_codes_.end()) {
      // First completion seen for this prefix: start its final list, then
      // walk shorter prefixes, recording each link as a valid "next" code.
      auto *code_list = new std::vector<int>;
      code_list->push_back(code(len));
      final_codes_[prefix] = code_list;
      while (--len >= 0) {
        prefix.Truncate(len);
        auto next_it = next_codes_.find(prefix);
        if (next_it == next_codes_.end()) {
          auto *code_list = new std::vector<int>;
          code_list->push_back(code(len));
          next_codes_[prefix] = code_list;
        } else {
          // We still have to search the list as we may get here via multiple
          // lengths of code.
          if (!contains(*next_it->second, code(len))) {
            next_it->second->push_back(code(len));
          }
          break; // This prefix has been processed.
        }
      }
    } else {
      // Prefix already known: just add this completion if it is new.
      if (!contains(*final_it->second, code(len))) {
        final_it->second->push_back(code(len));
      }
    }
  }
}
// Frees allocated memory and clears all derived decoding tables
// (encoder_ itself is untouched).
void UnicharCompress::Cleanup() {
  decoder_.clear();
  is_valid_start_.clear();
  // next_codes_ and final_codes_ own their vectors; release before clearing.
  for (auto &entry : next_codes_) {
    delete entry.second;
  }
  next_codes_.clear();
  for (auto &entry : final_codes_) {
    delete entry.second;
  }
  final_codes_.clear();
}
} // namespace tesseract.
|
2301_81045437/tesseract
|
src/ccutil/unicharcompress.cpp
|
C++
|
apache-2.0
| 15,872
|
///////////////////////////////////////////////////////////////////////
// File: unicharcompress.h
// Description: Unicode re-encoding using a sequence of smaller numbers in
// place of a single large code for CJK, similarly for Indic,
// and dissection of ligatures for other scripts.
// Author: Ray Smith
//
// (C) Copyright 2015, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_UNICHARCOMPRESS_H_
#define TESSERACT_CCUTIL_UNICHARCOMPRESS_H_
#include <unordered_map>
#include <vector>
#include "serialis.h"
#include "unicharset.h"
namespace tesseract {
// Trivial class to hold the code for a recoded unichar-id.
class RecodedCharID {
public:
  // The maximum length of a code.
  static const int kMaxCodeLen = 9;

  RecodedCharID() : self_normalized_(1), length_(0) {
    memset(code_, 0, sizeof(code_));
  }
  // Truncates the code to the given length (dropped values are not cleared).
  void Truncate(int length) {
    length_ = length;
  }
  // Sets the code value at the given index in the code.
  void Set(int index, int value) {
    code_[index] = value;
    if (length_ <= index) {
      length_ = index + 1;
    }
  }
  // Shorthand for setting codes of length 3, as all Hangul and Han codes are
  // length 3.
  void Set3(int code0, int code1, int code2) {
    length_ = 3;
    code_[0] = code0;
    code_[1] = code1;
    code_[2] = code2;
  }
  bool empty() const {
    return length_ == 0;
  }
  // Accessors
  int length() const {
    return length_;
  }
  int operator()(int index) const {
    return code_[index];
  }
  // Writes to the given file. Returns false in case of error.
  bool Serialize(TFile *fp) const {
    return fp->Serialize(&self_normalized_) && fp->Serialize(&length_) &&
           fp->Serialize(&code_[0], length_);
  }
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp) {
    if (!fp->DeSerialize(&self_normalized_) || !fp->DeSerialize(&length_)) {
      return false;
    }
    // BUGFIX: validate length_ before using it as an element count, so a
    // corrupt or malicious model file cannot drive a read past the end of
    // code_ (previously length_ was used unchecked).
    if (length_ < 0 || length_ > kMaxCodeLen) {
      return false;
    }
    return fp->DeSerialize(&code_[0], length_);
  }
  // Codes compare equal iff they have the same length and values.
  bool operator==(const RecodedCharID &other) const {
    if (length_ != other.length_) {
      return false;
    }
    for (int i = 0; i < length_; ++i) {
      if (code_[i] != other.code_[i]) {
        return false;
      }
    }
    return true;
  }
  // Hash functor for RecodedCharID.
  struct RecodedCharIDHash {
    uint64_t operator()(const RecodedCharID &code) const {
      uint64_t result = 0;
      for (int i = 0; i < code.length_; ++i) {
        result ^= static_cast<uint64_t>(code(i)) << (7 * i);
      }
      return result;
    }
  };

private:
  // True if this code is self-normalizing, ie is the master entry for indices
  // that map to the same code. Has boolean value, but int8_t for serialization.
  int8_t self_normalized_;
  // The number of elements in use in code_;
  int32_t length_;
  // The re-encoded form of the unichar-id to which this RecodedCharID relates.
  int32_t code_[kMaxCodeLen];
};
// Class holds a "compression" of a unicharset to simplify the learning problem
// for a neural-network-based classifier.
// Objectives:
// 1 (CJK): Ids of a unicharset with a large number of classes are expressed as
// a sequence of 3 codes with much fewer values.
// This is achieved using the Jamo coding for Hangul and the Unicode
// Radical-Stroke-index for Han.
// 2 (Indic): Instead of thousands of codes with one for each grapheme, re-code
// as the unicode sequence (but coded in a more compact space).
// 3 (the rest): Eliminate multi-path problems with ligatures and fold confusing
// and not significantly distinct shapes (quotes) together, ie
// represent the fi ligature as the f-i pair, and fold u+2019 and
// friends all onto ascii single '
// 4 The null character and mapping to target activations:
// To save horizontal coding space, the compressed codes are generally mapped
// to target network activations without intervening null characters, BUT
// in the case of ligatures, such as ff, null characters have to be included
// so existence of repeated codes is detected at codebook-building time, and
// null characters are embedded directly into the codes, so the rest of the
// system doesn't need to worry about the problem (much). There is still an
// effect on the range of ways in which the target activations can be
// generated.
//
// The computed code values are compact (no unused values), and, for CJK,
// unique (each code position uses a disjoint set of values from each other code
// position). For non-CJK, the same code value CAN be used in multiple
// positions, eg the ff ligature is converted to <f> <nullchar> <f>, where <f>
// is the same code as is used for the single f.
class TESS_API UnicharCompress {
public:
  UnicharCompress();
  UnicharCompress(const UnicharCompress &src);
  ~UnicharCompress();
  UnicharCompress &operator=(const UnicharCompress &src);

  // The 1st Hangul unicode.
  static const int kFirstHangul = 0xac00;
  // The number of Hangul unicodes.
  static const int kNumHangul = 11172;
  // The number of Jamos for each of the 3 parts of a Hangul character, being
  // the Leading consonant, Vowel and Trailing consonant.
  static const int kLCount = 19;
  static const int kVCount = 21;
  static const int kTCount = 28;

  // Computes the encoding for the given unicharset. It is a requirement that
  // the file training/langdata/radical-stroke.txt have been read into the
  // input string radical_stroke_table.
  // Returns false if the encoding cannot be constructed.
  bool ComputeEncoding(const UNICHARSET &unicharset, int null_id, std::string *radical_stroke_table);
  // Sets up an encoder that doesn't change the unichars at all, so it just
  // passes them through unchanged.
  void SetupPassThrough(const UNICHARSET &unicharset);
  // Sets up an encoder directly using the given encoding vector, which maps
  // unichar_ids to the given codes.
  void SetupDirect(const std::vector<RecodedCharID> &codes);
  // Returns the number of different values that can be used in a code, ie
  // 1 + the maximum value that will ever be used by an RecodedCharID code in
  // any position in its array.
  int code_range() const {
    return code_range_;
  }
  // Encodes a single unichar_id. Returns the length of the code, (or zero if
  // invalid input), and the encoding itself in code.
  int EncodeUnichar(unsigned unichar_id, RecodedCharID *code) const;
  // Decodes code, returning the original unichar-id, or
  // INVALID_UNICHAR_ID if the input is invalid.
  int DecodeUnichar(const RecodedCharID &code) const;
  // Returns true if the given code is a valid start or single code.
  bool IsValidFirstCode(int code) const {
    return is_valid_start_[code];
  }
  // Returns a list of valid non-final next codes for a given prefix code,
  // which may be empty. The returned pointer (may be nullptr) is owned by
  // this class; do not delete it.
  const std::vector<int> *GetNextCodes(const RecodedCharID &code) const {
    auto it = next_codes_.find(code);
    return it == next_codes_.end() ? nullptr : it->second;
  }
  // Returns a list of valid final codes for a given prefix code, which may
  // be empty. The returned pointer (may be nullptr) is owned by this class;
  // do not delete it.
  const std::vector<int> *GetFinalCodes(const RecodedCharID &code) const {
    auto it = final_codes_.find(code);
    return it == final_codes_.end() ? nullptr : it->second;
  }
  // Writes to the given file. Returns false in case of error.
  bool Serialize(TFile *fp) const;
  // Reads from the given file. Returns false in case of error.
  bool DeSerialize(TFile *fp);
  // Returns a string containing a text file that describes the encoding thus:
  // <index>[,<index>]*<tab><UTF8-str><newline>
  // In words, a comma-separated list of one or more indices, followed by a tab
  // and the UTF-8 string that the code represents per line. Most simple scripts
  // will encode a single index to a UTF8-string, but Chinese, Japanese, Korean
  // and the Indic scripts will contain a many-to-many mapping.
  // See the class comment above for details.
  std::string GetEncodingAsString(const UNICHARSET &unicharset) const;

  // Helper decomposes a Hangul unicode to 3 parts, leading, vowel, trailing.
  // Note that the returned values are 0-based indices, NOT unicode Jamo.
  // Returns false if the input is not in the Hangul unicode range.
  static bool DecomposeHangul(int unicode, int *leading, int *vowel, int *trailing);

private:
  // Renumbers codes to eliminate unused values.
  void DefragmentCodeValues(int encoded_null);
  // Computes the value of code_range_ from the encoder_.
  void ComputeCodeRange();
  // Initializes the decoding hash_map from the encoder_ array.
  void SetupDecoder();
  // Frees allocated memory.
  void Cleanup();

  // The encoder that maps a unichar-id to a sequence of small codes.
  // encoder_ is the only part that is serialized. The rest is computed on load.
  std::vector<RecodedCharID> encoder_;
  // Decoder converts the output of encoder back to a unichar-id.
  std::unordered_map<RecodedCharID, int, RecodedCharID::RecodedCharIDHash> decoder_;
  // True if the index is a valid single or start code.
  std::vector<bool> is_valid_start_;
  // Maps a prefix code to a list of valid next codes.
  // The map owns the vectors.
  std::unordered_map<RecodedCharID, std::vector<int> *, RecodedCharID::RecodedCharIDHash>
      next_codes_;
  // Maps a prefix code to a list of valid final codes.
  // The map owns the vectors.
  std::unordered_map<RecodedCharID, std::vector<int> *, RecodedCharID::RecodedCharIDHash>
      final_codes_;
  // Max of any value in encoder_ + 1.
  int code_range_;
};
} // namespace tesseract.
#endif // TESSERACT_CCUTIL_UNICHARCOMPRESS_H_
|
2301_81045437/tesseract
|
src/ccutil/unicharcompress.h
|
C++
|
apache-2.0
| 10,206
|
///////////////////////////////////////////////////////////////////////
// File: unicharmap.cpp
// Description: Unicode character/ligature to integer id class.
// Author: Thomas Kielbus
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "unicharmap.h"
#include <tesseract/unichar.h>
#include <cassert>
namespace tesseract {
// Create an empty map: the root node array is allocated lazily by insert().
UNICHARMAP::UNICHARMAP() {
  nodes = nullptr;
}
// Destroy the map. Deleting the root array recursively tears down the whole
// trie via UNICHARMAP_NODE's destructor.
UNICHARMAP::~UNICHARMAP() {
  delete[] nodes;
}
// Look up the given unichar representation in the trie, consuming at most
// `length` bytes. Each byte of the string indexes one 256-wide node array.
// Preconditions (asserted): the representation is non-empty and length is
// in (0, UNICHAR_LEN].
UNICHAR_ID UNICHARMAP::unichar_to_id(const char *const unichar_repr, int length) const {
  UNICHARMAP_NODE *level = nodes;
  assert(*unichar_repr != '\0');
  assert(length > 0 && length <= UNICHAR_LEN);
  if (length <= 0 || unichar_repr[0] == '\0') {
    return INVALID_UNICHAR_ID;
  }
  for (int pos = 0;; ++pos) {
    const auto byte = static_cast<unsigned char>(unichar_repr[pos]);
    // The final consumable byte carries the id; earlier bytes descend a level.
    if (pos + 1 >= length || unichar_repr[pos + 1] == '\0') {
      return level[byte].id;
    }
    level = level[byte].children;
  }
}
// Search the given unichar representation in the tree, creating the possibly
// missing nodes. Once the right place has been found, insert the given id and
// update the inserted flag to keep track of the insert. Each character in the
// string is interpreted as an index in an array of nodes.
void UNICHARMAP::insert(const char *const unichar_repr, UNICHAR_ID id) {
const char *current_char = unichar_repr;
if (*current_char == '\0') {
return;
}
UNICHARMAP_NODE **current_nodes_pointer = &nodes;
do {
if (*current_nodes_pointer == nullptr) {
*current_nodes_pointer = new UNICHARMAP_NODE[256];
}
if (current_char[1] == '\0') {
(*current_nodes_pointer)[static_cast<unsigned char>(*current_char)].id = id;
return;
}
current_nodes_pointer =
&((*current_nodes_pointer)[static_cast<unsigned char>(*current_char)].children);
++current_char;
} while (true);
}
// Search the given unichar representation in the tree, using length characters
// from it maximum. Each character in the string is interpreted as an index in
// an array of nodes. Stop once the tree does not have anymore nodes or once we
// found the right unichar_repr.
bool UNICHARMAP::contains(const char *const unichar_repr, int length) const {
if (unichar_repr == nullptr || *unichar_repr == '\0') {
return false;
}
if (length <= 0 || length > UNICHAR_LEN) {
return false;
}
int index = 0;
if (unichar_repr[index] == '\0') {
return false;
}
UNICHARMAP_NODE *current_nodes = nodes;
while (current_nodes != nullptr && index + 1 < length && unichar_repr[index + 1] != '\0') {
current_nodes = current_nodes[static_cast<unsigned char>(unichar_repr[index])].children;
++index;
}
return current_nodes != nullptr && (index + 1 >= length || unichar_repr[index + 1] == '\0') &&
current_nodes[static_cast<unsigned char>(unichar_repr[index])].id >= 0;
}
// Return the minimum number of characters that must be used from this string
// to obtain a match in the UNICHARMAP.
int UNICHARMAP::minmatch(const char *const unichar_repr) const {
const char *current_char = unichar_repr;
if (*current_char == '\0') {
return 0;
}
UNICHARMAP_NODE *current_nodes = nodes;
while (current_nodes != nullptr && *current_char != '\0') {
if (current_nodes[static_cast<unsigned char>(*current_char)].id >= 0) {
return current_char + 1 - unichar_repr;
}
current_nodes = current_nodes[static_cast<unsigned char>(*current_char)].children;
++current_char;
}
return 0;
}
// Clear the UNICHARMAP. All previous data is lost. Deleting the root array
// recursively frees the whole trie; the map is then reusable.
void UNICHARMAP::clear() {
  delete[] nodes;
  nodes = nullptr;
}
// A fresh node has no child array and no id (-1 means "nothing stored here").
UNICHARMAP::UNICHARMAP_NODE::UNICHARMAP_NODE() : children(nullptr), id(-1) {}
// Recursively delete the children: delete[] runs this destructor on every
// element of the child array, tearing the subtree down depth-first.
UNICHARMAP::UNICHARMAP_NODE::~UNICHARMAP_NODE() {
  delete[] children;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/unicharmap.cpp
|
C++
|
apache-2.0
| 4,672
|
///////////////////////////////////////////////////////////////////////
// File: unicharmap.h
// Description: Unicode character/ligature to integer id class.
// Author: Thomas Kielbus
// Created: Wed Jun 28 17:05:01 PDT 2006
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_UNICHARMAP_H_
#define TESSERACT_CCUTIL_UNICHARMAP_H_
#include <tesseract/unichar.h>
namespace tesseract {
// A UNICHARMAP stores unique unichars. Each of them is associated with one
// UNICHAR_ID. Internally it is a 256-way trie indexed by the successive
// bytes of the UTF-8 representation.
class TESS_API UNICHARMAP {
public:
  // Create an empty UNICHARMAP
  UNICHARMAP();
  ~UNICHARMAP();
  // Insert the given unichar representation in the UNICHARMAP and associate it
  // with the given id. The length of the representation MUST be non-zero.
  void insert(const char *const unichar_repr, UNICHAR_ID id);
  // Return the id associated with the given unichar representation,
  // this representation MUST exist within the UNICHARMAP. The first
  // length characters (maximum) from unichar_repr are used. The length
  // MUST be non-zero.
  UNICHAR_ID unichar_to_id(const char *const unichar_repr, int length) const;
  // Return true if the given unichar representation is already present in the
  // UNICHARMAP. The first length characters (maximum) from unichar_repr are
  // used. The length MUST be non-zero.
  bool contains(const char *const unichar_repr, int length) const;
  // Return the minimum number of characters that must be used from this string
  // to obtain a match in the UNICHARMAP (0 when no prefix matches).
  int minmatch(const char *const unichar_repr) const;
  // Clear the UNICHARMAP. All previous data is lost.
  void clear();

private:
  // The UNICHARMAP is represented as a tree whose nodes are of type
  // UNICHARMAP_NODE: one trie level per byte of the representation.
  struct UNICHARMAP_NODE {
    UNICHARMAP_NODE();
    ~UNICHARMAP_NODE();
    UNICHARMAP_NODE *children; // Owned 256-entry array, or nullptr.
    UNICHAR_ID id;             // Id stored at this node, or -1 if none.
  };
  UNICHARMAP_NODE *nodes; // Owned root level of the trie, lazily built.
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_UNICHARMAP_H_
|
2301_81045437/tesseract
|
src/ccutil/unicharmap.h
|
C++
|
apache-2.0
| 2,598
|
///////////////////////////////////////////////////////////////////////
// File: unicharset.cpp
// Description: Unicode character/ligature set class.
// Author: Thomas Kielbus
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "unicharset.h"
#include "params.h"
#include <tesseract/unichar.h>
#include "serialis.h"
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstring>
#include <iomanip> // for std::setw
#include <locale> // for std::locale::classic
#include <sstream> // for std::istringstream, std::ostringstream
namespace tesseract {
// Special character used in representing character fragments.
static const char kSeparator = '|';
// Special character used in representing 'natural' character fragments.
static const char kNaturalFlag = 'n';
// Bit masks used by get_properties()/save_to_string() to pack the five
// classification flags into a single properties word.
static const int ISALPHA_MASK = 0x1;
static const int ISLOWER_MASK = 0x2;
static const int ISUPPER_MASK = 0x4;
static const int ISDIGIT_MASK = 0x8;
static const int ISPUNCTUATION_MASK = 0x10;
// Y coordinate threshold for determining cap-height vs x-height.
// TODO(rays) Bring the global definition down to the ccutil library level,
// so this constant is relative to some other constants.
static const int kMeanlineThreshold = 220;
// Let C be the number of alpha chars for which all tops exceed
// kMeanlineThreshold, and X the number of alpha chars for which all
// tops are below kMeanlineThreshold, then if X > C *
// kMinXHeightFraction and C > X * kMinCapHeightFraction or more than
// half the alpha characters have upper or lower case, then the
// unicharset "has x-height".
const double kMinXHeightFraction = 0.25;
const double kMinCapHeightFraction = 0.05;
/* static */
// Ligatures mapped into the Unicode private use area so each can be held as
// a single unichar; id_to_unichar_ext() maps them back to the pair.
const char *UNICHARSET::kCustomLigatures[][2] = {
    {"ct", "\uE003"}, // c + t -> U+E003
    {"ſh", "\uE006"}, // long-s + h -> U+E006
    {"ſi", "\uE007"}, // long-s + i -> U+E007
    {"ſl", "\uE008"}, // long-s + l -> U+E008
    {"ſſ", "\uE009"}, // long-s + long-s -> U+E009
    {nullptr, nullptr}};
// List of mappings to make when ingesting strings from the outside.
// The substitutions clean up text that should exist for rendering of
// synthetic data, but not in the recognition set.
const char *UNICHARSET::kCleanupMaps[][2] = {
    {"\u0640", ""},   // TATWEEL is deleted.
    {"\ufb01", "fi"}, // fi ligature->fi pair.
    {"\ufb02", "fl"}, // fl ligature->fl pair.
    {nullptr, nullptr}};
// List of strings for the SpecialUnicharCodes. Keep in sync with the enum.
const char *UNICHARSET::kSpecialUnicharCodes[SPECIAL_UNICHAR_CODES_COUNT] = {
    " ", "Joined", "|Broken|0|1"};
// Script name assigned to unichars whose script is unknown.
const char *UNICHARSET::null_script = "NULL";
// Constructor delegates to Init() so the defaults live in one place.
UNICHARSET::UNICHAR_PROPERTIES::UNICHAR_PROPERTIES() {
  Init();
}
// Reset every property to its default: no classification flags, wide-open
// metric ranges, the NULL script, id-0 case/mirror, empty normalization and
// no fragment.
void UNICHARSET::UNICHAR_PROPERTIES::Init() {
  isalpha = islower = isupper = isdigit = ispunctuation = false;
  isngram = false;
  enabled = false;
  SetRangesOpen();
  script_id = 0;
  other_case = 0;
  mirror = 0;
  normed.clear();
  direction = UNICHARSET::U_LEFT_TO_RIGHT;
  fragment = nullptr;
}
// Sets all ranges wide open. Initialization default in case there are
// no useful values available.
void UNICHARSET::UNICHAR_PROPERTIES::SetRangesOpen() {
  // Tops/bottoms span the full byte range, so nothing is excluded.
  min_bottom = 0;
  max_bottom = UINT8_MAX;
  min_top = 0;
  max_top = UINT8_MAX;
  // No horizontal metrics recorded yet.
  width = width_sd = 0.0f;
  bearing = bearing_sd = 0.0f;
  advance = advance_sd = 0.0f;
}
// Sets all ranges to empty. Used before expanding with font-based data.
void UNICHARSET::UNICHAR_PROPERTIES::SetRangesEmpty() {
  // Inverted (min > max) top/bottom intervals: any real sample will widen
  // them via UpdateRange.
  min_bottom = UINT8_MAX;
  max_bottom = 0;
  min_top = UINT8_MAX;
  max_top = 0;
  width = width_sd = 0.0f;
  bearing = bearing_sd = 0.0f;
  advance = advance_sd = 0.0f;
}
// Returns true if any of the top/bottom/width/bearing/advance ranges/stats
// is empty.
// NOTE(review): only width and advance are actually tested; a zero value in
// either is treated as "metrics never populated".
bool UNICHARSET::UNICHAR_PROPERTIES::AnyRangeEmpty() const {
  return width == 0.0f || advance == 0.0f;
}
// Expands the ranges with the ranges from the src properties.
void UNICHARSET::UNICHAR_PROPERTIES::ExpandRangesFrom(
    const UNICHAR_PROPERTIES &src) {
  // Widen the vertical intervals to cover src's intervals as well.
  UpdateRange(src.min_bottom, &min_bottom, &max_bottom);
  UpdateRange(src.max_bottom, &min_bottom, &max_bottom);
  UpdateRange(src.min_top, &min_top, &max_top);
  UpdateRange(src.max_top, &min_top, &max_top);
  // For each horizontal stat, keep whichever side has the larger standard
  // deviation; the value and its sd always travel together.
  auto take_wider = [](float src_val, float src_sd, float *val, float *sd) {
    if (src_sd > *sd) {
      *val = src_val;
      *sd = src_sd;
    }
  };
  take_wider(src.width, src.width_sd, &width, &width_sd);
  take_wider(src.bearing, src.bearing_sd, &bearing, &bearing_sd);
  take_wider(src.advance, src.advance_sd, &advance, &advance_sd);
}
// Copies the properties from src into this.
// The fragment pointer is preserved: ownership of the fragment stays with
// this object and must not be overwritten by the copy.
void UNICHARSET::UNICHAR_PROPERTIES::CopyFrom(const UNICHAR_PROPERTIES &src) {
  // Apart from the fragment, everything else can be done with a default copy.
  CHAR_FRAGMENT *saved_fragment = fragment;
  *this = src; // Member-wise copy-assignment (normed is a std::string).
  fragment = saved_fragment;
}
// Builds the minimal set: clears all state, then registers the special
// codes (space, "Joined", "|Broken|0|1") that every unicharset contains.
UNICHARSET::UNICHARSET()
    : ids(), script_table(nullptr), script_table_size_used(0) {
  clear();
  for (int i = 0; i < SPECIAL_UNICHAR_CODES_COUNT; ++i) {
    unichar_insert(kSpecialUnicharCodes[i]);
    if (i == UNICHAR_JOINED) {
      // The "Joined" pseudo-character is flagged as an n-gram.
      set_isngram(i, true);
    }
  }
}
// Destructor releases all per-unichar state and the script table via clear().
UNICHARSET::~UNICHARSET() {
  clear();
}
// Returns the id of the given representation, or INVALID_UNICHAR_ID when it
// is not present. Old-style sets keep codes verbatim; otherwise the input is
// normalized with CleanupString first.
UNICHAR_ID
UNICHARSET::unichar_to_id(const char *const unichar_repr) const {
  std::string key;
  if (old_style_included_) {
    key = unichar_repr;
  } else {
    key = CleanupString(unichar_repr);
  }
  if (!ids.contains(key.data(), key.size())) {
    return INVALID_UNICHAR_ID;
  }
  return ids.unichar_to_id(key.data(), key.size());
}
// As above, but only the first `length` bytes of unichar_repr are used.
// length must be in (0, UNICHAR_LEN] (asserted).
UNICHAR_ID UNICHARSET::unichar_to_id(const char *const unichar_repr,
                                     int length) const {
  assert(length > 0 && length <= UNICHAR_LEN);
  std::string key(unichar_repr, length);
  if (!old_style_included_) {
    key = CleanupString(unichar_repr, length);
  }
  if (!ids.contains(key.data(), key.size())) {
    return INVALID_UNICHAR_ID;
  }
  return ids.unichar_to_id(key.data(), key.size());
}
// Return the minimum number of bytes that matches a legal UNICHAR_ID,
// while leaving the rest of the string encodable. Returns 0 if the
// beginning of the string is not encodable.
// WARNING: this function now encodes the whole string for precision.
// Use encode_string in preference to repeatedly calling step.
int UNICHARSET::step(const char *str) const {
std::vector<UNICHAR_ID> encoding;
std::vector<char> lengths;
encode_string(str, true, &encoding, &lengths, nullptr);
if (encoding.empty() || encoding[0] == INVALID_UNICHAR_ID) {
return 0;
}
return lengths[0];
}
// Return whether the given UTF-8 string is encodable with this UNICHARSET.
// If not encodable, write the first byte offset which cannot be converted
// into the second (return) argument.
bool UNICHARSET::encodable_string(const char *str,
                                  unsigned *first_bad_position) const {
  // The encoding itself is discarded; only success and the failure offset
  // matter here.
  std::vector<UNICHAR_ID> scratch_encoding;
  return encode_string(str, true, &scratch_encoding, nullptr,
                       first_bad_position);
}
// Encodes the given UTF-8 string with this UNICHARSET.
// Returns true if the encoding succeeds completely, false if there is at
// least one INVALID_UNICHAR_ID in the returned encoding, but in this case
// the rest of the string is still encoded.
// If lengths is not nullptr, then it is filled with the corresponding
// byte length of each encoded UNICHAR_ID.
// WARNING: Caller must guarantee that str has already been cleaned of codes
// that do not belong in the unicharset, or encoding may fail.
// Use CleanupString to perform the cleaning.
bool UNICHARSET::encode_string(const char *str, bool give_up_on_failure,
                               std::vector<UNICHAR_ID> *encoding,
                               std::vector<char> *lengths,
                               unsigned *encoded_length) const {
  std::vector<UNICHAR_ID> working_encoding;
  std::vector<char> working_lengths;
  std::vector<char> best_lengths;
  encoding->clear(); // Just in case str is empty.
  auto str_length = strlen(str);
  unsigned str_pos = 0;
  bool perfect = true;
  while (str_pos < str_length) {
    // Recursive search for the longest encodable run starting at str_pos;
    // on return str_pos is the end of the best run and *encoding holds it.
    encode_string(str, str_pos, str_length, &working_encoding, &working_lengths,
                  &str_pos, encoding, &best_lengths);
    if (str_pos < str_length) {
      // This is a non-match. Skip one utf-8 character.
      perfect = false;
      if (give_up_on_failure) {
        break;
      }
      int step = UNICHAR::utf8_step(str + str_pos);
      if (step == 0) {
        // Invalid utf-8 at this byte: resynchronize by one raw byte.
        step = 1;
      }
      encoding->push_back(INVALID_UNICHAR_ID);
      best_lengths.push_back(step);
      str_pos += step;
      // Resume the search with the failure recorded in the running state.
      working_encoding = *encoding;
      working_lengths = best_lengths;
    }
  }
  if (lengths != nullptr) {
    *lengths = std::move(best_lengths);
  }
  if (encoded_length != nullptr) {
    // Number of input bytes consumed; equals strlen(str) on success.
    *encoded_length = str_pos;
  }
  return perfect;
}
// Returns the stored UTF-8 representation of the given id. An invalid id
// maps to the canonical INVALID_UNICHAR marker string.
const char *UNICHARSET::id_to_unichar(UNICHAR_ID id) const {
  if (id != INVALID_UNICHAR_ID) {
    ASSERT_HOST(static_cast<unsigned>(id) < this->size());
    return unichars[id].representation;
  }
  return INVALID_UNICHAR;
}
// Like id_to_unichar, but private-use codepoints that stand for custom
// ligatures are mapped back to their original character sequence for
// external consumption.
const char *UNICHARSET::id_to_unichar_ext(UNICHAR_ID id) const {
  if (id == INVALID_UNICHAR_ID) {
    return INVALID_UNICHAR;
  }
  ASSERT_HOST(static_cast<unsigned>(id) < this->size());
  if (get_isprivate(id)) {
    // Scan the nullptr-terminated ligature table for this private code.
    const char *stored = id_to_unichar(id);
    for (int i = 0; kCustomLigatures[i][0] != nullptr; ++i) {
      if (strcmp(stored, kCustomLigatures[i][1]) == 0) {
        return kCustomLigatures[i][0];
      }
    }
  }
  // Not a known ligature: hand back the stored representation.
  return unichars[id].representation;
}
// Return a string that reformats the utf8 str into the str followed
// by its hex unicodes.
std::string UNICHARSET::debug_utf8_str(const char *str) {
  std::string result = str;
  result += " [";
  int step = 1;
  // Chop into unicodes and code each as hex.
  for (int i = 0; str[i] != '\0'; i += step) {
    char hex[sizeof(int) * 2 + 1];
    step = UNICHAR::utf8_step(str + i);
    if (step == 0) {
      // Invalid UTF-8: dump the raw byte and resynchronize one byte on.
      // Cast to unsigned char so a high byte prints as e.g. "c3" instead
      // of the sign-extended "ffffffc3" produced by passing a plain char.
      step = 1;
      snprintf(hex, sizeof(hex), "%x", static_cast<unsigned char>(str[i]));
    } else {
      UNICHAR ch(str + i, step);
      snprintf(hex, sizeof(hex), "%x", ch.first_uni());
    }
    result += hex;
    result += " ";
  }
  result += "]";
  return result;
}
// Return a string containing debug information on the unichar, including
// the id_to_unichar, its hex unicodes and the properties.
std::string UNICHARSET::debug_str(UNICHAR_ID id) const {
  if (id == INVALID_UNICHAR_ID) {
    return std::string(id_to_unichar(id));
  }
  const CHAR_FRAGMENT *fragment = this->get_fragment(id);
  if (fragment) {
    // Fragments print their own structured description.
    return fragment->to_string();
  }
  std::string result = debug_utf8_str(id_to_unichar(id));
  // Classification suffix: 'a' lower, 'A' upper, 'x' other alpha,
  // '0' digit, 'p' punctuation.
  if (get_isalpha(id)) {
    if (get_islower(id)) {
      result += 'a';
    } else if (get_isupper(id)) {
      result += 'A';
    } else {
      result += 'x';
    }
  }
  if (get_isdigit(id)) {
    result += '0';
  }
  if (get_ispunctuation(id)) {
    result += 'p';
  }
  return result;
}
// Sets the normed_ids vector from the normed string. normed_ids is not
// stored in the file, and needs to be set when the UNICHARSET is loaded.
void UNICHARSET::set_normed_ids(UNICHAR_ID unichar_id) {
  auto &norm_ids = unichars[unichar_id].properties.normed_ids;
  norm_ids.clear();
  if (unichar_id == UNICHAR_SPACE && id_to_unichar(unichar_id)[0] == ' ') {
    // The space char normalizes to itself.
    norm_ids.push_back(UNICHAR_SPACE);
  } else if (!encode_string(unichars[unichar_id].properties.normed.c_str(),
                            true, &norm_ids, nullptr, nullptr)) {
    // Normed string is not encodable: fall back to the id itself.
    norm_ids.clear();
    norm_ids.push_back(unichar_id);
  }
}
// Returns whether the unichar id represents a unicode value in the private
// use area. We use this range only internally to represent uncommon
// ligatures (eg. 'ct') that do not have regular unicode values.
bool UNICHARSET::get_isprivate(UNICHAR_ID unichar_id) const {
  UNICHAR uc(id_to_unichar(unichar_id), -1);
  const int codepoint = uc.first_uni();
  // BMP Private Use Area: U+E000..U+F8FF.
  return codepoint >= 0xE000 && codepoint <= 0xF8FF;
}
// Sets all ranges to empty, so they can be expanded to set the values.
void UNICHARSET::set_ranges_empty() {
  // Collapse every glyph's metric ranges; later data expands them.
  for (size_t i = 0; i < unichars.size(); ++i) {
    unichars[i].properties.SetRangesEmpty();
  }
}
// Sets all the properties for this unicharset given a src unicharset with
// everything set. The unicharsets don't have to be the same, and graphemes
// are correctly accounted for.
// Only ids >= start_index are touched; script_id, other_case and mirror are
// translated from src's id space into this set's id space.
void UNICHARSET::PartialSetPropertiesFromOther(int start_index,
                                               const UNICHARSET &src) {
  for (unsigned ch = start_index; ch < unichars.size(); ++ch) {
    const char *utf8 = id_to_unichar(ch);
    UNICHAR_PROPERTIES properties;
    if (src.GetStrProperties(utf8, &properties)) {
      // Setup the script_id, other_case, and mirror properly.
      const char *script = src.get_script_from_script_id(properties.script_id);
      properties.script_id = add_script(script);
      const char *other_case = src.id_to_unichar(properties.other_case);
      if (contains_unichar(other_case)) {
        properties.other_case = unichar_to_id(other_case);
      } else {
        // Case partner is absent from this set: point at self.
        properties.other_case = ch;
      }
      const char *mirror_str = src.id_to_unichar(properties.mirror);
      if (contains_unichar(mirror_str)) {
        properties.mirror = unichar_to_id(mirror_str);
      } else {
        // Mirror is absent from this set: point at self.
        properties.mirror = ch;
      }
      unichars[ch].properties.CopyFrom(properties);
      set_normed_ids(ch);
    }
  }
}
// Expands the tops and bottoms and widths for this unicharset given a
// src unicharset with ranges in it. The unicharsets don't have to be the
// same, and graphemes are correctly accounted for.
void UNICHARSET::ExpandRangesFromOther(const UNICHARSET &src) {
for (unsigned ch = 0; ch < unichars.size(); ++ch) {
const char *utf8 = id_to_unichar(ch);
UNICHAR_PROPERTIES properties;
if (src.GetStrProperties(utf8, &properties)) {
// Expand just the ranges from properties.
unichars[ch].properties.ExpandRangesFrom(properties);
}
}
}
// Makes this a copy of src. Clears this completely first, so the automatic
// ids will not be present in this if not in src. Does NOT reorder the set!
void UNICHARSET::CopyFrom(const UNICHARSET &src) {
  clear();
  // First pass: replicate every unichar and its metric ranges in order.
  for (unsigned src_id = 0; src_id < src.unichars.size(); ++src_id) {
    unichar_insert_backwards_compatible(src.id_to_unichar(src_id));
    unichars[src_id].properties.ExpandRangesFrom(
        src.unichars[src_id].properties);
  }
  // Second pass: set properties, including mirror and other_case, WITHOUT
  // reordering the unicharset.
  PartialSetPropertiesFromOther(0, src);
}
// For each id in src, if it does not occur in this, add it, as in
// SetPropertiesFromOther, otherwise expand the ranges, as in
// ExpandRangesFromOther.
void UNICHARSET::AppendOtherUnicharset(const UNICHARSET &src) {
  const int initial_used = unichars.size();
  for (unsigned src_id = 0; src_id < src.unichars.size(); ++src_id) {
    const UNICHAR_PROPERTIES &src_props = src.unichars[src_id].properties;
    const char *utf8 = src.id_to_unichar(src_id);
    if (contains_unichar(utf8)) {
      // Already present: just widen the existing ranges.
      unichars[unichar_to_id(utf8)].properties.ExpandRangesFrom(src_props);
    } else {
      // New char: append it with empty ranges, ready for later expansion.
      const int new_id = unichars.size();
      unichar_insert_backwards_compatible(utf8);
      unichars[new_id].properties.SetRangesEmpty();
    }
  }
  // Set properties, including mirror and other_case, WITHOUT reordering
  // the unicharset.
  PartialSetPropertiesFromOther(initial_used, src);
}
// Returns true if the acceptable ranges of the tops of the characters do
// not overlap, making their x-height calculations distinct.
bool UNICHARSET::SizesDistinct(UNICHAR_ID id1, UNICHAR_ID id2) const {
  const auto &p1 = unichars[id1].properties;
  const auto &p2 = unichars[id2].properties;
  // Distinct iff the [min_top, max_top] intervals have no positive overlap.
  const int lo = std::max(p1.min_top, p2.min_top);
  const int hi = std::min(p1.max_top, p2.max_top);
  return hi - lo <= 0;
}
// Internal recursive version of encode_string above.
// Seeks to encode the given string as a sequence of UNICHAR_IDs such that
// each UNICHAR_ID uses the least possible part of the utf8 str.
// It does this by depth-first tail recursion on increasing length matches
// to the UNICHARSET, saving the first encountered result that encodes the
// maximum total length of str. It stops on a failure to encode to make
// the overall process of encoding a partially failed string more efficient.
// See unicharset.h for definition of the args.
void UNICHARSET::encode_string(const char *str, int str_index, int str_length,
                               std::vector<UNICHAR_ID> *encoding,
                               std::vector<char> *lengths,
                               unsigned *best_total_length,
                               std::vector<UNICHAR_ID> *best_encoding,
                               std::vector<char> *best_lengths) const {
  if (str_index > static_cast<int>(*best_total_length)) {
    // This is the best result so far.
    *best_total_length = str_index;
    *best_encoding = *encoding;
    if (best_lengths != nullptr) {
      *best_lengths = *lengths;
    }
  }
  if (str_index == str_length) {
    return; // Whole string consumed: nothing left to try.
  }
  // Remember where to truncate back to after a failed sub-branch.
  int encoding_index = encoding->size();
  // Find the length of the first matching unicharset member.
  int length = ids.minmatch(str + str_index);
  if (length == 0 || str_index + length > str_length) {
    return;
  }
  do {
    if (ids.contains(str + str_index, length)) {
      // Successful encoding so far.
      UNICHAR_ID id = ids.unichar_to_id(str + str_index, length);
      encoding->push_back(id);
      lengths->push_back(length);
      encode_string(str, str_index + length, str_length, encoding, lengths,
                    best_total_length, best_encoding, best_lengths);
      if (static_cast<int>(*best_total_length) == str_length) {
        return; // Tail recursion success!
      }
      // Failed with that length, truncate back and try again.
      encoding->resize(encoding_index);
      lengths->resize(encoding_index);
    }
    // Extend the candidate match by one utf-8 character (one raw byte when
    // the input is not valid utf-8 at this point).
    int step = UNICHAR::utf8_step(str + str_index + length);
    if (step == 0) {
      step = 1;
    }
    length += step;
  } while (length <= UNICHAR_LEN && str_index + length <= str_length);
}
// Gets the properties for a grapheme string, combining properties for
// multiple characters in a meaningful way where possible.
// Returns false if no valid match was found in the unicharset.
// NOTE that script_id, mirror, and other_case refer to this unicharset on
// return and will need translation if the target unicharset is different.
bool UNICHARSET::GetStrProperties(const char *utf8_str,
                                  UNICHAR_PROPERTIES *props) const {
  props->Init();
  props->SetRangesEmpty();
  int total_unicodes = 0;
  std::vector<UNICHAR_ID> encoding;
  if (!encode_string(utf8_str, true, &encoding, nullptr, nullptr)) {
    return false; // Some part was invalid.
  }
  for (auto it : encoding) {
    int id = it;
    const UNICHAR_PROPERTIES &src_props = unichars[id].properties;
    // Logical OR all the bools.
    if (src_props.isalpha) {
      props->isalpha = true;
    }
    if (src_props.islower) {
      props->islower = true;
    }
    if (src_props.isupper) {
      props->isupper = true;
    }
    if (src_props.isdigit) {
      props->isdigit = true;
    }
    if (src_props.ispunctuation) {
      props->ispunctuation = true;
    }
    if (src_props.isngram) {
      props->isngram = true;
    }
    if (src_props.enabled) {
      props->enabled = true;
    }
    // Min/max the tops/bottoms.
    UpdateRange(src_props.min_bottom, &props->min_bottom, &props->max_bottom);
    UpdateRange(src_props.max_bottom, &props->min_bottom, &props->max_bottom);
    UpdateRange(src_props.min_top, &props->min_top, &props->max_top);
    UpdateRange(src_props.max_top, &props->min_top, &props->max_top);
    // Track the smallest pen-start offset over components; advances
    // accumulate as the components are laid side by side.
    float bearing = props->advance + src_props.bearing;
    if (total_unicodes == 0 || bearing < props->bearing) {
      props->bearing = bearing;
      props->bearing_sd = props->advance_sd + src_props.bearing_sd;
    }
    props->advance += src_props.advance;
    props->advance_sd += src_props.advance_sd;
    // With a single width, just use the widths stored in the unicharset.
    props->width = src_props.width;
    props->width_sd = src_props.width_sd;
    // Use the first script id, other_case, mirror, direction.
    // Note that these will need translation, except direction.
    if (total_unicodes == 0) {
      props->script_id = src_props.script_id;
      props->other_case = src_props.other_case;
      props->mirror = src_props.mirror;
      props->direction = src_props.direction;
    }
    // The normed string for the compound character is the concatenation of
    // the normed versions of the individual characters.
    props->normed += src_props.normed;
    ++total_unicodes;
  }
  if (total_unicodes > 1) {
    // Estimate the total widths from the advance - bearing.
    props->width = props->advance - props->bearing;
    props->width_sd = props->advance_sd + props->bearing_sd;
  }
  return total_unicodes > 0;
}
// TODO(rays) clean-up the order of functions to match unicharset.h.
// Packs the five classification flags of the given id into one bitmask
// using the ISxxx_MASK constants.
unsigned int UNICHARSET::get_properties(UNICHAR_ID id) const {
  unsigned int flags = 0;
  flags |= this->get_isalpha(id) ? ISALPHA_MASK : 0;
  flags |= this->get_islower(id) ? ISLOWER_MASK : 0;
  flags |= this->get_isupper(id) ? ISUPPER_MASK : 0;
  flags |= this->get_isdigit(id) ? ISDIGIT_MASK : 0;
  flags |= this->get_ispunctuation(id) ? ISPUNCTUATION_MASK : 0;
  return flags;
}
// Returns a single classification letter for the id: 'A' upper, 'a' lower,
// 'x' other alpha, '0' digit, 'p' punctuation, or 0 for none of these.
// Check order matters: upper beats lower beats generic alpha.
char UNICHARSET::get_chartype(UNICHAR_ID id) const {
  if (this->get_isupper(id)) {
    return 'A';
  } else if (this->get_islower(id)) {
    return 'a';
  } else if (this->get_isalpha(id)) {
    return 'x';
  } else if (this->get_isdigit(id)) {
    return '0';
  } else if (this->get_ispunctuation(id)) {
    return 'p';
  }
  return 0;
}
// Inserts the given unichar into the set, unless it is empty after cleanup,
// already present, or (for clean sets) already encodable as a sequence of
// existing unichars.
void UNICHARSET::unichar_insert(const char *const unichar_repr,
                                OldUncleanUnichars old_style) {
  if (old_style == OldUncleanUnichars::kTrue) {
    // Once any old-style code is admitted, cleanup is disabled for good.
    old_style_included_ = true;
  }
  std::string cleaned =
      old_style_included_ ? unichar_repr : CleanupString(unichar_repr);
  if (!cleaned.empty() && !ids.contains(cleaned.data(), cleaned.size())) {
    const char *str = cleaned.c_str();
    std::vector<int> encoding;
    // If the string can already be encoded from existing unichars, do not
    // add it as a new compound unichar.
    if (!old_style_included_ &&
        encode_string(str, true, &encoding, nullptr, nullptr)) {
      return;
    }
    unichars.emplace_back();
    auto &u = unichars.back();
    int index = 0;
    // Copy the utf-8 bytes into the fixed-size representation buffer.
    do {
      if (index >= UNICHAR_LEN) {
        // NOTE(review): on overflow the just-appended entry stays in
        // unichars half-initialized — presumably valid inputs never hit
        // this path; confirm before relying on it.
        fprintf(stderr, "Utf8 buffer too big, size>%d for %s\n", UNICHAR_LEN,
                unichar_repr);
        return;
      }
      u.representation[index++] = *str++;
    } while (*str != '\0');
    u.representation[index] = '\0';
    this->set_script(unichars.size() - 1, null_script);
    // If the given unichar_repr represents a fragmented character, set
    // fragment property to a pointer to CHAR_FRAGMENT class instance with
    // information parsed from the unichar representation. Use the script
    // of the base unichar for the fragmented character if possible.
    CHAR_FRAGMENT *frag = CHAR_FRAGMENT::parse_from_string(u.representation);
    u.properties.fragment = frag;
    if (frag != nullptr && this->contains_unichar(frag->get_unichar())) {
      u.properties.script_id = this->get_script(frag->get_unichar());
    }
    u.properties.enabled = true;
    // Register the representation -> id mapping.
    ids.insert(u.representation, unichars.size() - 1);
  }
}
// Returns true if the (cleaned, unless old-style) representation is present.
bool UNICHARSET::contains_unichar(const char *const unichar_repr) const {
  std::string key;
  if (old_style_included_) {
    key = unichar_repr;
  } else {
    key = CleanupString(unichar_repr);
  }
  return ids.contains(key.data(), key.size());
}
// As above, but only the first `length` bytes of unichar_repr are used.
// A zero length never matches.
bool UNICHARSET::contains_unichar(const char *const unichar_repr,
                                  int length) const {
  if (length == 0) {
    return false;
  }
  std::string key(unichar_repr, length);
  if (!old_style_included_) {
    key = CleanupString(unichar_repr, length);
  }
  return ids.contains(key.data(), key.size());
}
// True iff the stored representation of unichar_id matches unichar_repr
// exactly (byte-for-byte).
bool UNICHARSET::eq(UNICHAR_ID unichar_id,
                    const char *const unichar_repr) const {
  const char *stored = this->id_to_unichar(unichar_id);
  return strcmp(stored, unichar_repr) == 0;
}
// Serializes the full unicharset into str in the text format that
// load_via_fgets() reads back. Always returns true.
bool UNICHARSET::save_to_string(std::string &str) const {
  const int kFileBufSize = 1024;
  char buffer[kFileBufSize + 1];
  // First line: the number of unichars.
  snprintf(buffer, kFileBufSize, "%zu\n", this->size());
  str = buffer;
  for (unsigned id = 0; id < this->size(); ++id) {
    int min_bottom, max_bottom, min_top, max_top;
    get_top_bottom(id, &min_bottom, &max_bottom, &min_top, &max_top);
    float width, width_sd;
    get_width_stats(id, &width, &width_sd);
    float bearing, bearing_sd;
    get_bearing_stats(id, &bearing, &bearing_sd);
    float advance, advance_sd;
    get_advance_stats(id, &advance, &advance_sd);
    unsigned int properties = this->get_properties(id);
    if (strcmp(this->id_to_unichar(id), " ") == 0) {
      // The space character is written as the literal "NULL" so the file
      // stays parseable with whitespace-delimited reads.
      snprintf(buffer, kFileBufSize, "%s %x %s %d\n", "NULL", properties,
               this->get_script_from_script_id(this->get_script(id)),
               this->get_other_case(id));
      str += buffer;
    } else {
      // Use the classic locale so numeric formatting is load-compatible
      // regardless of the process locale.
      std::ostringstream stream;
      stream.imbue(std::locale::classic());
      // Write properties in hex: the loader parses this field with
      // std::hex, and the "NULL" branch above uses "%x" as well.
      stream << this->id_to_unichar(id) << ' ' << std::hex << properties
             << std::dec << ' '
             << min_bottom << ',' << max_bottom << ',' << min_top << ','
             << max_top << ',' << width << ',' << width_sd << ',' << bearing
             << ',' << bearing_sd << ',' << advance << ',' << advance_sd << ' '
             << this->get_script_from_script_id(this->get_script(id)) << ' '
             << this->get_other_case(id) << ' ' << this->get_direction(id)
             << ' ' << this->get_mirror(id) << ' '
             << this->get_normed_unichar(id) << "\t# "
             << this->debug_str(id).c_str() << '\n';
      str += stream.str().c_str();
    }
  }
  return true;
}
// Thin adapter that exposes a FILE* through an fgets-shaped member function
// so it can be bound into the std::function<char *(char *, int)> callback
// consumed by load_via_fgets. Non-owning: the caller keeps the FILE*.
class LocalFilePointer {
public:
  LocalFilePointer(FILE *stream) : file_(stream) {}
  char *fgets(char *dst, int size) {
    return ::fgets(dst, size, file_);
  }

private:
  FILE *file_; // Borrowed stream handle.
};
bool UNICHARSET::load_from_file(FILE *file, bool skip_fragments) {
LocalFilePointer lfp(file);
using namespace std::placeholders; // for _1, _2
std::function<char *(char *, int)> fgets_cb =
std::bind(&LocalFilePointer::fgets, &lfp, _1, _2);
bool success = load_via_fgets(fgets_cb, skip_fragments);
return success;
}
// Loads the unicharset from a tesseract TFile, adapting TFile::FGets to the
// line-reader callback expected by load_via_fgets.
bool UNICHARSET::load_from_file(tesseract::TFile *file, bool skip_fragments) {
  std::function<char *(char *, int)> fgets_cb =
      [file](char *dst, int size) { return file->FGets(dst, size); };
  return load_via_fgets(fgets_cb, skip_fragments);
}
// Loads the unicharset from a text stream delivered one line at a time by
// fgets_cb.  Line 1 is the entry count; each following line describes one
// unichar.  Newer files carry more trailing fields than older ones, so the
// parser tries the richest format first and progressively falls back to
// older, shorter formats by rewinding the same line.
// Returns false on a malformed header or entry line.
bool UNICHARSET::load_via_fgets(
    const std::function<char *(char *, int)> &fgets_cb, bool skip_fragments) {
  int unicharset_size;
  char buffer[256];
  this->clear();
  // First line: the number of unichar entries that follow.
  if (fgets_cb(buffer, sizeof(buffer)) == nullptr ||
      sscanf(buffer, "%d", &unicharset_size) != 1) {
    return false;
  }
  for (UNICHAR_ID id = 0; id < unicharset_size; ++id) {
    char unichar[256];
    unsigned int properties;
    char script[64];
    // Defaults for fields that older file formats omit.
    strncpy(script, null_script, sizeof(script) - 1);
    int min_bottom = 0;
    int max_bottom = UINT8_MAX;
    int min_top = 0;
    int max_top = UINT8_MAX;
    float width = 0.0f;
    float width_sd = 0.0f;
    float bearing = 0.0f;
    float bearing_sd = 0.0f;
    float advance = 0.0f;
    float advance_sd = 0.0f;
    // TODO(eger): check that this default it ok
    // after enabling BiDi iterator for Arabic.
    int direction = UNICHARSET::U_LEFT_TO_RIGHT;
    // unicharset_size serves as the "unset" sentinel; it is mapped back to
    // `id` itself when the properties are stored below.
    UNICHAR_ID other_case = unicharset_size;
    UNICHAR_ID mirror = unicharset_size;
    if (fgets_cb(buffer, sizeof(buffer)) == nullptr) {
      return false;
    }
    char normed[64];
    normed[0] = '\0';
    std::istringstream stream(buffer);
    // The classic locale keeps numeric parsing independent of the user's
    // locale settings.
    stream.imbue(std::locale::classic());
    // 标 1 0,255,0,255,0,0,0,0,0,0 Han 68 0 68 标 # 标 [6807 ]x
    // stream.flags(std::ios::hex);
    // The utf8 form and the hex properties word are present in all formats.
    stream >> std::setw(255) >> unichar >> std::hex >> properties >> std::dec;
    // stream.flags(std::ios::dec);
    if (stream.fail()) {
      fprintf(stderr, "%s:%u failed\n", __FILE__, __LINE__);
      return false;
    }
    // Remember where the optional fields start so every fallback attempt
    // below can rewind and reparse the remainder of the line.
    auto position = stream.tellg();
    stream.seekg(position);
    char c1, c2, c3, c4, c5, c6, c7, c8, c9;
    // Attempt 1: full format - box ranges, float stats, script, case,
    // direction, mirror and normed form.  c1..c9 must all be commas.
    stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >>
        max_top >> c4 >> width >> c5 >> width_sd >> c6 >> bearing >> c7 >>
        bearing_sd >> c8 >> advance >> c9 >> advance_sd >> std::setw(63) >>
        script >> other_case >> direction >> mirror >> std::setw(63) >> normed;
    if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',' || c4 != ',' ||
        c5 != ',' || c6 != ',' || c7 != ',' || c8 != ',' || c9 != ',') {
      stream.clear();
      stream.seekg(position);
      // Attempt 2: as above but without the trailing normed form.
      stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >>
          max_top >> c4 >> width >> c5 >> width_sd >> c6 >> bearing >> c7 >>
          bearing_sd >> c8 >> advance >> c9 >> advance_sd >> std::setw(63) >>
          script >> other_case >> direction >> mirror;
      if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',' || c4 != ',' ||
          c5 != ',' || c6 != ',' || c7 != ',' || c8 != ',' || c9 != ',') {
        stream.clear();
        stream.seekg(position);
        // Attempt 3: box ranges only (no float stats), with direction/mirror.
        stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >>
            max_top >> std::setw(63) >> script >> other_case >> direction >>
            mirror;
        if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',') {
          stream.clear();
          stream.seekg(position);
          // Attempt 4: box ranges, script and other_case only.
          stream >> min_bottom >> c1 >> max_bottom >> c2 >> min_top >> c3 >>
              max_top >> std::setw(63) >> script >> other_case;
          if (stream.fail() || c1 != ',' || c2 != ',' || c3 != ',') {
            stream.clear();
            stream.seekg(position);
            // Attempt 5: script and other_case only.
            stream >> std::setw(63) >> script >> other_case;
            if (stream.fail()) {
              stream.clear();
              stream.seekg(position);
              // Attempt 6 (oldest format): script name only.
              stream >> std::setw(63) >> script;
            }
          }
        }
      }
    }
    // Skip fragments if needed.
    CHAR_FRAGMENT *frag = nullptr;
    if (skip_fragments && (frag = CHAR_FRAGMENT::parse_from_string(unichar))) {
      int num_pieces = frag->get_total();
      delete frag;
      // Skip multi-element fragments, but keep singles like UNICHAR_BROKEN in.
      if (num_pieces > 1) {
        continue;
      }
    }
    // Insert unichar into unicharset and set its properties.
    // "NULL" is the on-disk spelling of the space character.
    if (strcmp(unichar, "NULL") == 0) {
      this->unichar_insert(" ");
    } else {
      this->unichar_insert_backwards_compatible(unichar);
    }
    this->set_isalpha(id, properties & ISALPHA_MASK);
    this->set_islower(id, properties & ISLOWER_MASK);
    this->set_isupper(id, properties & ISUPPER_MASK);
    this->set_isdigit(id, properties & ISDIGIT_MASK);
    this->set_ispunctuation(id, properties & ISPUNCTUATION_MASK);
    this->set_isngram(id, false);
    this->set_script(id, script);
    this->unichars[id].properties.enabled = true;
    this->set_top_bottom(id, min_bottom, max_bottom, min_top, max_top);
    this->set_width_stats(id, width, width_sd);
    this->set_bearing_stats(id, bearing, bearing_sd);
    this->set_advance_stats(id, advance, advance_sd);
    this->set_direction(id, static_cast<UNICHARSET::Direction>(direction));
    // Map the "unset" sentinel back to the id itself.
    this->set_other_case(id, (other_case < unicharset_size) ? other_case : id);
    this->set_mirror(id, (mirror < unicharset_size) ? mirror : id);
    this->set_normed(id, normed[0] != '\0' ? normed : unichar);
  }
  post_load_setup();
  return true;
}
// Sets up internal data after loading the file, based on the char
// properties. Called from load_from_file, but also needs to be run
// during set_unicharset_properties.
void UNICHARSET::post_load_setup() {
  // Number of alpha chars with the case property minus those without,
  // in order to determine that half the alpha chars have case.
  int net_case_alphas = 0;
  int x_height_alphas = 0;
  int cap_height_alphas = 0;
  top_bottom_set_ = false;
  for (unsigned id = 0; id < unichars.size(); ++id) {
    int min_bottom = 0;
    int max_bottom = UINT8_MAX;
    int min_top = 0;
    int max_top = UINT8_MAX;
    get_top_bottom(id, &min_bottom, &max_bottom, &min_top, &max_top);
    // Any non-zero min_top means the file carried real top/bottom ranges.
    if (min_top > 0) {
      top_bottom_set_ = true;
    }
    if (get_isalpha(id)) {
      if (get_islower(id) || get_isupper(id)) {
        ++net_case_alphas;
      } else {
        --net_case_alphas;
      }
      // Classify the alpha as x-height or cap-height when its whole top
      // range lies on one side of the meanline threshold.
      if (min_top < kMeanlineThreshold && max_top < kMeanlineThreshold) {
        ++x_height_alphas;
      } else if (min_top > kMeanlineThreshold && max_top > kMeanlineThreshold) {
        ++cap_height_alphas;
      }
    }
    set_normed_ids(id);
  }

  script_has_upper_lower_ = net_case_alphas > 0;
  script_has_xheight_ =
      script_has_upper_lower_ ||
      (x_height_alphas > cap_height_alphas * kMinXHeightFraction &&
       cap_height_alphas > x_height_alphas * kMinCapHeightFraction);

  // Cache the ids of the well-known scripts for fast comparisons.
  null_sid_ = get_script_id_from_name(null_script);
  ASSERT_HOST(null_sid_ == 0);
  common_sid_ = get_script_id_from_name("Common");
  latin_sid_ = get_script_id_from_name("Latin");
  cyrillic_sid_ = get_script_id_from_name("Cyrillic");
  greek_sid_ = get_script_id_from_name("Greek");
  han_sid_ = get_script_id_from_name("Han");
  hiragana_sid_ = get_script_id_from_name("Hiragana");
  katakana_sid_ = get_script_id_from_name("Katakana");
  thai_sid_ = get_script_id_from_name("Thai");
  hangul_sid_ = get_script_id_from_name("Hangul");

  // Compute default script. Use the highest-counting alpha script, that is
  // not the common script, as that still contains some "alphas".
  // std::vector replaces the previous raw new[]/delete[]: RAII makes the
  // buffer leak-proof and zero-initialization explicit.
  std::vector<int> script_counts(script_table_size_used, 0);
  for (unsigned id = 0; id < unichars.size(); ++id) {
    if (get_isalpha(id)) {
      ++script_counts[get_script(id)];
    }
  }
  default_sid_ = 0;
  for (int s = 1; s < script_table_size_used; ++s) {
    if (script_counts[s] > script_counts[default_sid_] && s != common_sid_) {
      default_sid_ = s;
    }
  }
}
// Returns true if right_to_left scripts are significant in the unicharset,
// but without being so sensitive that "universal" unicharsets containing
// characters from many scripts, like orientation and script detection,
// look like they are right_to_left.
bool UNICHARSET::major_right_to_left() const {
  // Tally strongly-directional unichars and let the majority decide.
  int num_ltr = 0;
  int num_rtl = 0;
  for (unsigned id = 0; id < unichars.size(); ++id) {
    switch (get_direction(id)) {
      case UNICHARSET::U_LEFT_TO_RIGHT:
        ++num_ltr;
        break;
      case UNICHARSET::U_RIGHT_TO_LEFT:
      case UNICHARSET::U_RIGHT_TO_LEFT_ARABIC:
      case UNICHARSET::U_ARABIC_NUMBER:
        ++num_rtl;
        break;
      default:
        // Neutral/weak directions don't vote.
        break;
    }
  }
  return num_rtl > num_ltr;
}
// Set a whitelist and/or blacklist of characters to recognize.
// An empty or nullptr whitelist enables everything (minus any blacklist).
// An empty or nullptr blacklist disables nothing.
// An empty or nullptr unblacklist has no effect.
void UNICHARSET::set_black_and_whitelist(const char *blacklist,
const char *whitelist,
const char *unblacklist) {
bool def_enabled = whitelist == nullptr || whitelist[0] == '\0';
// Set everything to default
for (auto &uc : unichars) {
uc.properties.enabled = def_enabled;
}
if (!def_enabled) {
// Enable the whitelist.
std::vector<UNICHAR_ID> encoding;
encode_string(whitelist, false, &encoding, nullptr, nullptr);
for (auto it : encoding) {
if (it != INVALID_UNICHAR_ID) {
unichars[it].properties.enabled = true;
}
}
}
if (blacklist != nullptr && blacklist[0] != '\0') {
// Disable the blacklist.
std::vector<UNICHAR_ID> encoding;
encode_string(blacklist, false, &encoding, nullptr, nullptr);
for (auto it : encoding) {
if (it != INVALID_UNICHAR_ID) {
unichars[it].properties.enabled = false;
}
}
}
if (unblacklist != nullptr && unblacklist[0] != '\0') {
// Re-enable the unblacklist.
std::vector<UNICHAR_ID> encoding;
encode_string(unblacklist, false, &encoding, nullptr, nullptr);
for (auto it : encoding) {
if (it != INVALID_UNICHAR_ID) {
unichars[it].properties.enabled = true;
}
}
}
}
// Returns true if there are any repeated unicodes in the normalized
// text of any unichar-id in the unicharset.
bool UNICHARSET::AnyRepeatedUnicodes() const {
int start_id = 0;
if (has_special_codes()) {
start_id = SPECIAL_UNICHAR_CODES_COUNT;
}
for (unsigned id = start_id; id < unichars.size(); ++id) {
// Convert to unicodes.
std::vector<char32> unicodes = UNICHAR::UTF8ToUTF32(get_normed_unichar(id));
for (size_t u = 1; u < unicodes.size(); ++u) {
if (unicodes[u - 1] == unicodes[u]) {
return true;
}
}
}
return false;
}
// Returns the index of the given script name in the script table, adding a
// heap-allocated copy of it (freed in clear()) if not already present.
// The table grows by doubling, starting at 8 slots.
int UNICHARSET::add_script(const char *script) {
  // Reuse the existing slot if the script is already registered.
  for (int i = 0; i < script_table_size_used; ++i) {
    if (strcmp(script, script_table[i]) == 0) {
      return i;
    }
  }
  // Grow the pointer table if it is full (or not yet allocated).
  if (script_table_size_reserved == 0) {
    script_table_size_reserved = 8;
    script_table = new char *[script_table_size_reserved];
  } else if (script_table_size_used >= script_table_size_reserved) {
    assert(script_table_size_used == script_table_size_reserved);
    // Double the capacity and move the existing pointers over.
    script_table_size_reserved += script_table_size_reserved;
    char **new_script_table = new char *[script_table_size_reserved];
    memcpy(new_script_table, script_table,
           script_table_size_used * sizeof(char *));
    delete[] script_table;
    script_table = new_script_table;
  }
  // Append an owned copy of the name and return its new index.
  script_table[script_table_size_used] = new char[strlen(script) + 1];
  strcpy(script_table[script_table_size_used], script);
  return script_table_size_used++;
}
// Returns the string that represents a fragment
// with the given unichar, pos and total.
std::string CHAR_FRAGMENT::to_string(const char *unichar, int pos, int total,
                                     bool natural) {
  // A whole character (total == 1) is represented by itself.
  if (total == 1) {
    return std::string(unichar);
  }
  // Fragment form: <sep><unichar><sep><pos><sep-or-natural-flag><total>.
  std::string result(1, kSeparator);
  result += unichar;
  result += kSeparator;
  result += std::to_string(pos);
  result += natural ? kNaturalFlag : kSeparator;
  result += std::to_string(total);
  return result;
}
// Parses a string of the form <sep><unichar><sep><pos><sep|natural><total>
// (see the header comment for examples).  Returns a newly allocated
// CHAR_FRAGMENT that the caller owns, or nullptr if the string is not a
// valid fragment representation.
CHAR_FRAGMENT *CHAR_FRAGMENT::parse_from_string(const char *string) {
  const char *ptr = string;
  int len = strlen(string);
  // A fragment must start with the separator and be at least kMinLen long.
  if (len < kMinLen || *ptr != kSeparator) {
    return nullptr; // this string cannot represent a fragment
  }
  ptr++; // move to the next character
  // Measure the utf8 character between the first two separators.
  int step = 0;
  while ((ptr + step) < (string + len) && *(ptr + step) != kSeparator) {
    step += UNICHAR::utf8_step(ptr + step);
  }
  if (step == 0 || step > UNICHAR_LEN) {
    return nullptr; // no character for unichar or the character is too long
  }
  char unichar[UNICHAR_LEN + 1];
  strncpy(unichar, ptr, step);
  unichar[step] = '\0'; // null terminate unichar
  ptr += step; // move to the next fragment separator
  int pos = 0;
  int total = 0;
  bool natural = false;
  char *end_ptr = nullptr;
  // Parse "<sep>pos<sep>total".  The separator before total may instead be
  // the natural flag, marking a fragment that was already a separate
  // connected component.
  for (int i = 0; i < 2; i++) {
    if (ptr > string + len || *ptr != kSeparator) {
      if (i == 1 && *ptr == kNaturalFlag) {
        natural = true;
      } else {
        return nullptr; // Failed to parse fragment representation.
      }
    }
    ptr++; // move to the next character
    // First pass reads pos, second pass reads total.
    i == 0 ? pos = static_cast<int>(strtol(ptr, &end_ptr, 10))
           : total = static_cast<int>(strtol(ptr, &end_ptr, 10));
    ptr = end_ptr;
  }
  // The two numbers must consume the rest of the string exactly.
  if (ptr != string + len) {
    return nullptr; // malformed fragment representation
  }
  // Caller owns the returned object.
  auto *fragment = new CHAR_FRAGMENT();
  fragment->set_all(unichar, pos, total, natural);
  return fragment;
}
// Returns the index of script_name in the script table, or 0 (the null
// script) when the name is unknown.
int UNICHARSET::get_script_id_from_name(const char *script_name) const {
  // Linear scan; the table is small and slot 0 doubles as "not found".
  int sid = 0;
  while (sid < script_table_size_used &&
         strcmp(script_name, script_table[sid]) != 0) {
    ++sid;
  }
  return sid < script_table_size_used ? sid : 0; // 0 is always the null_script
}
// Removes/replaces content that belongs in rendered text, but not in the
// unicharset.  Processes at most `length` bytes of utf8_str (stopping
// earlier at a NUL), mapping any prefix found in kCleanupMaps to its
// replacement and copying all other bytes through unchanged.
/* static */
std::string UNICHARSET::CleanupString(const char *utf8_str, size_t length) {
  std::string result;
  result.reserve(length);
  while (length > 0 && *utf8_str != '\0') {
    // Look for a cleanup-map key starting at the current position.
    int key_index = 0;
    const char *key;
    while ((key = kCleanupMaps[key_index][0]) != nullptr) {
      int match = 0;
      // Safe even near the end of the buffer: key bytes are non-NUL, so the
      // comparison stops at utf8_str's terminator.
      while (key[match] != '\0' && key[match] == utf8_str[match]) {
        ++match;
      }
      if (key[match] == '\0') {
        // Whole key matched: skip it; its replacement is appended below.
        utf8_str += match;
        // Bug fix: consume all `match` bytes from the length budget, not
        // just one, so a multi-byte key cannot carry processing past the
        // requested length.
        length = static_cast<size_t>(match) >= length
                     ? 0
                     : length - static_cast<size_t>(match);
        break;
      }
      ++key_index;
    }
    if (key == nullptr) {
      // No key matched: copy one byte through verbatim.
      result.push_back(*utf8_str);
      ++utf8_str;
      --length;
    } else {
      result.append(kCleanupMaps[key_index][1]);
    }
  }
  return result;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/ccutil/unicharset.cpp
|
C++
|
apache-2.0
| 42,266
|
///////////////////////////////////////////////////////////////////////
// File: unicharset.h
// Description: Unicode character/ligature set class.
// Author: Thomas Kielbus
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_UNICHARSET_H_
#define TESSERACT_CCUTIL_UNICHARSET_H_
#include "errcode.h"
#include "unicharmap.h"
#include <tesseract/unichar.h>
#include "helpers.h"
#include "serialis.h"
#include <functional> // for std::function
namespace tesseract {
// Enum holding special values of unichar_id. Every unicharset has these.
// Warning! Keep in sync with kSpecialUnicharCodes.
enum SpecialUnicharCodes {
  UNICHAR_SPACE,
  UNICHAR_JOINED,
  UNICHAR_BROKEN,

  // Number of special codes above; also the first ordinary unichar id in a
  // set that has the special codes.
  SPECIAL_UNICHAR_CODES_COUNT
};

// Boolean flag for unichar_insert. It's a bit of a double negative to allow
// the default value to be false.
enum class OldUncleanUnichars {
  kFalse,
  kTrue,
};
// Represents one piece (chunk) of a character that was split into several
// fragments, identified by the source unichar, the chunk position and the
// total number of chunks.  Serialized as e.g. |m|1|2 (see parse_from_string).
class TESS_API CHAR_FRAGMENT {
public:
  // Minimum number of characters used for fragment representation.
  static const int kMinLen = 6;
  // Maximum number of characters used for fragment representation.
  static const int kMaxLen = 3 + UNICHAR_LEN + 2;
  // Maximum number of fragments per character.
  static const int kMaxChunks = 5;

  // Setters and Getters.
  inline void set_all(const char *unichar, int pos, int total, bool natural) {
    set_unichar(unichar);
    set_pos(pos);
    set_total(total);
    set_natural(natural);
  }
  inline void set_unichar(const char *uch) {
    // Copy at most UNICHAR_LEN bytes and force NUL-termination.
    strncpy(this->unichar, uch, sizeof(this->unichar));
    this->unichar[UNICHAR_LEN] = '\0';
  }
  inline void set_pos(int p) {
    this->pos = p;
  }
  inline void set_total(int t) {
    this->total = t;
  }
  inline const char *get_unichar() const {
    return this->unichar;
  }
  inline int get_pos() const {
    return this->pos;
  }
  inline int get_total() const {
    return this->total;
  }

  // Returns the string that represents a fragment
  // with the given unichar, pos and total.
  static std::string to_string(const char *unichar, int pos, int total,
                               bool natural);
  // Returns the string that represents this fragment.
  std::string to_string() const {
    return to_string(unichar, pos, total, natural);
  }

  // Checks whether a fragment has the same unichar,
  // position and total as the given inputs.
  inline bool equals(const char *other_unichar, int other_pos,
                     int other_total) const {
    return (strcmp(this->unichar, other_unichar) == 0 &&
            this->pos == other_pos && this->total == other_total);
  }
  inline bool equals(const CHAR_FRAGMENT *other) const {
    return this->equals(other->get_unichar(), other->get_pos(),
                        other->get_total());
  }

  // Checks whether a given fragment is a continuation of this fragment.
  // Assumes that the given fragment pointer is not nullptr.
  inline bool is_continuation_of(const CHAR_FRAGMENT *fragment) const {
    return (strcmp(this->unichar, fragment->get_unichar()) == 0 &&
            this->total == fragment->get_total() &&
            this->pos == fragment->get_pos() + 1);
  }

  // Returns true if this fragment is a beginning fragment.
  inline bool is_beginning() const {
    return this->pos == 0;
  }

  // Returns true if this fragment is an ending fragment.
  inline bool is_ending() const {
    return this->pos == this->total - 1;
  }

  // Returns true if the fragment was a separate component to begin with,
  // ie did not need chopping to be isolated, but may have been separated
  // out from a multi-outline blob.
  inline bool is_natural() const {
    return natural;
  }
  void set_natural(bool value) {
    natural = value;
  }

  // Parses the string to see whether it represents a character fragment
  // (rather than a regular character). If so, allocates memory for a new
  // CHAR_FRAGMENT instance and fills it in with the corresponding fragment
  // information. Fragments are of the form:
  // |m|1|2, meaning chunk 1 of 2 of character m, or
  // |:|1n2, meaning chunk 1 of 2 of character :, and no chopping was needed
  // to divide the parts, as they were already separate connected components.
  //
  // If parsing succeeded returns the pointer to the allocated CHAR_FRAGMENT
  // instance, otherwise (if the string does not represent a fragment or it
  // looks like it does, but parsing it as a fragment fails) returns nullptr.
  //
  // Note: The caller is responsible for deallocating memory
  // associated with the returned pointer.
  static CHAR_FRAGMENT *parse_from_string(const char *str);

private:
  // The utf8 form of the character this is a fragment of.
  char unichar[UNICHAR_LEN + 1];
  // True if the fragment was a separate component to begin with,
  // ie did not need chopping to be isolated, but may have been separated
  // out from a multi-outline blob.
  bool natural;
  int16_t pos;   // fragment position in the character
  int16_t total; // total number of fragments in the character
};
// The UNICHARSET class is an utility class for Tesseract that holds the
// set of characters that are used by the engine. Each character is identified
// by a unique number, from 0 to (size - 1).
class TESS_API UNICHARSET {
public:
// Custom list of characters and their ligature forms (UTF8)
// These map to unicode values in the private use area (PUC) and are supported
// by only few font families (eg. Wyld, Adobe Caslon Pro).
static const char *kCustomLigatures[][2];
// List of strings for the SpecialUnicharCodes. Keep in sync with the enum.
static const char *kSpecialUnicharCodes[SPECIAL_UNICHAR_CODES_COUNT];
// ICU 2.0 UCharDirection enum (from icu/include/unicode/uchar.h)
enum Direction {
U_LEFT_TO_RIGHT = 0,
U_RIGHT_TO_LEFT = 1,
U_EUROPEAN_NUMBER = 2,
U_EUROPEAN_NUMBER_SEPARATOR = 3,
U_EUROPEAN_NUMBER_TERMINATOR = 4,
U_ARABIC_NUMBER = 5,
U_COMMON_NUMBER_SEPARATOR = 6,
U_BLOCK_SEPARATOR = 7,
U_SEGMENT_SEPARATOR = 8,
U_WHITE_SPACE_NEUTRAL = 9,
U_OTHER_NEUTRAL = 10,
U_LEFT_TO_RIGHT_EMBEDDING = 11,
U_LEFT_TO_RIGHT_OVERRIDE = 12,
U_RIGHT_TO_LEFT_ARABIC = 13,
U_RIGHT_TO_LEFT_EMBEDDING = 14,
U_RIGHT_TO_LEFT_OVERRIDE = 15,
U_POP_DIRECTIONAL_FORMAT = 16,
U_DIR_NON_SPACING_MARK = 17,
U_BOUNDARY_NEUTRAL = 18,
U_FIRST_STRONG_ISOLATE = 19,
U_LEFT_TO_RIGHT_ISOLATE = 20,
U_RIGHT_TO_LEFT_ISOLATE = 21,
U_POP_DIRECTIONAL_ISOLATE = 22,
#ifndef U_HIDE_DEPRECATED_API
U_CHAR_DIRECTION_COUNT
#endif // U_HIDE_DEPRECATED_API
};
// Create an empty UNICHARSET
UNICHARSET();
~UNICHARSET();
// Return the UNICHAR_ID of a given unichar representation within the
// UNICHARSET.
UNICHAR_ID unichar_to_id(const char *const unichar_repr) const;
// Return the UNICHAR_ID of a given unichar representation within the
// UNICHARSET. Only the first length characters from unichar_repr are used.
UNICHAR_ID unichar_to_id(const char *const unichar_repr, int length) const;
// Return the minimum number of bytes that matches a legal UNICHAR_ID,
// while leaving the rest of the string encodable. Returns 0 if the
// beginning of the string is not encodable.
// WARNING: this function now encodes the whole string for precision.
// Use encode_string in preference to repeatedly calling step.
int step(const char *str) const;
// Returns true if the given UTF-8 string is encodable with this UNICHARSET.
// If not encodable, write the first byte offset which cannot be converted
// into the second (return) argument.
bool encodable_string(const char *str, unsigned *first_bad_position) const;
// Encodes the given UTF-8 string with this UNICHARSET.
// Any part of the string that cannot be encoded (because the utf8 can't
// be broken up into pieces that are in the unicharset) then:
// if give_up_on_failure, stops and returns a partial encoding,
// else continues and inserts an INVALID_UNICHAR_ID in the returned encoding.
// Returns true if the encoding succeeds completely, false if there is at
// least one failure.
// If lengths is not nullptr, then it is filled with the corresponding
// byte length of each encoded UNICHAR_ID.
// If encoded_length is not nullptr then on return it contains the length of
// str that was encoded. (if give_up_on_failure the location of the first
// failure, otherwise strlen(str).)
// WARNING: Caller must guarantee that str has already been cleaned of codes
// that do not belong in the unicharset, or encoding may fail.
// Use CleanupString to perform the cleaning.
bool encode_string(const char *str, bool give_up_on_failure,
std::vector<UNICHAR_ID> *encoding,
std::vector<char> *lengths,
unsigned *encoded_length) const;
// Return the unichar representation corresponding to the given UNICHAR_ID
// within the UNICHARSET.
const char *id_to_unichar(UNICHAR_ID id) const;
// Return the UTF8 representation corresponding to the given UNICHAR_ID after
// resolving any private encodings internal to Tesseract. This method is
// preferable to id_to_unichar for outputting text that will be visible to
// external applications.
const char *id_to_unichar_ext(UNICHAR_ID id) const;
// Return a string that reformats the utf8 str into the str followed
// by its hex unicodes.
static std::string debug_utf8_str(const char *str);
  // Removes/replaces content that belongs in rendered text, but not in the
  // unicharset.
  static std::string CleanupString(const char *utf8_str) {
    // Convenience overload: clean the whole NUL-terminated string.
    return CleanupString(utf8_str, strlen(utf8_str));
  }
  static std::string CleanupString(const char *utf8_str, size_t length);

  // Return a string containing debug information on the unichar, including
  // the id_to_unichar, its hex unicodes and the properties.
  std::string debug_str(UNICHAR_ID id) const;
  std::string debug_str(const char *unichar_repr) const {
    return debug_str(unichar_to_id(unichar_repr));
  }

  // Adds a unichar representation to the set. If old_style is true, then
  // TATWEEL characters are kept and n-grams are allowed. Otherwise TATWEEL
  // characters are ignored/skipped as if they don't exist and n-grams that
  // can already be encoded are not added.
  void unichar_insert(const char *const unichar_repr,
                      OldUncleanUnichars old_style);
  // Default insert: new-style behavior (old unclean unichars disallowed).
  void unichar_insert(const char *const unichar_repr) {
    unichar_insert(unichar_repr, OldUncleanUnichars::kFalse);
  }
// Adds a unichar representation to the set. Avoids setting old_style to true,
// unless it is necessary to make the new unichar get added.
void unichar_insert_backwards_compatible(const char *const unichar_repr) {
std::string cleaned = CleanupString(unichar_repr);
if (cleaned != unichar_repr) {
unichar_insert(unichar_repr, OldUncleanUnichars::kTrue);
} else {
auto old_size = size();
unichar_insert(unichar_repr, OldUncleanUnichars::kFalse);
if (size() == old_size) {
unichar_insert(unichar_repr, OldUncleanUnichars::kTrue);
}
}
}
  // Return true if the given unichar id exists within the set.
  // Relies on the fact that unichar ids are contiguous in the unicharset.
  bool contains_unichar_id(UNICHAR_ID unichar_id) const {
    // The size_t cast also rejects negative ids (they wrap to huge values).
    return static_cast<size_t>(unichar_id) < unichars.size();
  }

  // Return true if the given unichar representation exists within the set.
  bool contains_unichar(const char *const unichar_repr) const;
  bool contains_unichar(const char *const unichar_repr, int length) const;

  // Return true if the given unichar representation corresponds to the given
  // UNICHAR_ID within the set.
  bool eq(UNICHAR_ID unichar_id, const char *const unichar_repr) const;

  // Delete CHAR_FRAGMENTs stored in properties of unichars array.
  void delete_pointers_in_unichars() {
    for (auto &unichar : unichars) {
      delete unichar.properties.fragment;
      unichar.properties.fragment = nullptr;
    }
  }
  // Clear the UNICHARSET (all the previous data is lost).
  void clear() {
    // Free the owned script-name table.
    if (script_table != nullptr) {
      for (int i = 0; i < script_table_size_used; ++i) {
        delete[] script_table[i];
      }
      delete[] script_table;
      script_table = nullptr;
      script_table_size_used = 0;
    }
    script_table_size_reserved = 0;
    // Free per-unichar fragment pointers before dropping the containers.
    delete_pointers_in_unichars();
    unichars.clear();
    ids.clear();
    top_bottom_set_ = false;
    script_has_upper_lower_ = false;
    script_has_xheight_ = false;
    old_style_included_ = false;
    // Reset all cached script ids back to the null script (0).
    null_sid_ = 0;
    common_sid_ = 0;
    latin_sid_ = 0;
    cyrillic_sid_ = 0;
    greek_sid_ = 0;
    han_sid_ = 0;
    hiragana_sid_ = 0;
    katakana_sid_ = 0;
    thai_sid_ = 0;
    hangul_sid_ = 0;
    default_sid_ = 0;
  }
  // Return the size of the set (the number of different UNICHAR it holds).
  size_t size() const {
    return unichars.size();
  }

  // Opens the file indicated by filename and saves unicharset to that file.
  // Returns true if the operation is successful.
  bool save_to_file(const char *const filename) const {
    FILE *file = fopen(filename, "w+b");
    if (file == nullptr) {
      return false;
    }
    bool result = save_to_file(file);
    fclose(file);
    return result;
  }

  // Saves the content of the UNICHARSET to the given file.
  // Returns true if the operation is successful.
  bool save_to_file(FILE *file) const {
    // Serialize through an in-memory string so all writers share one format.
    std::string str;
    return save_to_string(str) &&
           tesseract::Serialize(file, &str[0], str.length());
  }
  // As above, but writing through a tesseract::TFile.
  bool save_to_file(tesseract::TFile *file) const {
    std::string str;
    return save_to_string(str) && file->Serialize(&str[0], str.length());
  }
  // Saves the content of the UNICHARSET to the given string.
  // Returns true if the operation is successful.
  bool save_to_string(std::string &str) const;

  // Opens the file indicated by filename and loads the UNICHARSET
  // from the given file. The previous data is lost.
  // Returns true if the operation is successful.
  bool load_from_file(const char *const filename, bool skip_fragments) {
    FILE *file = fopen(filename, "rb");
    if (file == nullptr) {
      return false;
    }
    bool result = load_from_file(file, skip_fragments);
    fclose(file);
    return result;
  }
  // Convenience overload: loads everything (keeps fragments).
  // returns true if the operation is successful.
  bool load_from_file(const char *const filename) {
    return load_from_file(filename, false);
  }

  // Loads the UNICHARSET from the given file. The previous data is lost.
  // Returns true if the operation is successful.
  bool load_from_file(FILE *file, bool skip_fragments);
  bool load_from_file(FILE *file) {
    return load_from_file(file, false);
  }
  bool load_from_file(tesseract::TFile *file, bool skip_fragments);
// Sets up internal data after loading the file, based on the char
// properties. Called from load_from_file, but also needs to be run
// during set_unicharset_properties.
void post_load_setup();
// Returns true if right_to_left scripts are significant in the unicharset,
// but without being so sensitive that "universal" unicharsets containing
// characters from many scripts, like orientation and script detection,
// look like they are right_to_left.
bool major_right_to_left() const;
// Set a whitelist and/or blacklist of characters to recognize.
// An empty or nullptr whitelist enables everything (minus any blacklist).
// An empty or nullptr blacklist disables nothing.
// An empty or nullptr unblacklist has no effect.
// The blacklist overrides the whitelist.
// The unblacklist overrides the blacklist.
// Each list is a string of utf8 character strings. Boundaries between
// unicharset units are worked out automatically, and characters not in
// the unicharset are silently ignored.
void set_black_and_whitelist(const char *blacklist, const char *whitelist,
const char *unblacklist);
  // Property setters.  Note: unlike the getters below, these do not range
  // check unichar_id; callers pass ids obtained from insertion or loading.

  // Set the isalpha property of the given unichar to the given value.
  void set_isalpha(UNICHAR_ID unichar_id, bool value) {
    unichars[unichar_id].properties.isalpha = value;
  }

  // Set the islower property of the given unichar to the given value.
  void set_islower(UNICHAR_ID unichar_id, bool value) {
    unichars[unichar_id].properties.islower = value;
  }

  // Set the isupper property of the given unichar to the given value.
  void set_isupper(UNICHAR_ID unichar_id, bool value) {
    unichars[unichar_id].properties.isupper = value;
  }

  // Set the isdigit property of the given unichar to the given value.
  void set_isdigit(UNICHAR_ID unichar_id, bool value) {
    unichars[unichar_id].properties.isdigit = value;
  }

  // Set the ispunctuation property of the given unichar to the given value.
  void set_ispunctuation(UNICHAR_ID unichar_id, bool value) {
    unichars[unichar_id].properties.ispunctuation = value;
  }

  // Set the isngram property of the given unichar to the given value.
  void set_isngram(UNICHAR_ID unichar_id, bool value) {
    unichars[unichar_id].properties.isngram = value;
  }

  // Set the script name of the given unichar to the given value.
  // Value is copied and thus can be a temporary.
  void set_script(UNICHAR_ID unichar_id, const char *value) {
    unichars[unichar_id].properties.script_id = add_script(value);
  }

  // Set other_case unichar id in the properties for the given unichar id.
  void set_other_case(UNICHAR_ID unichar_id, UNICHAR_ID other_case) {
    unichars[unichar_id].properties.other_case = other_case;
  }

  // Set the direction property of the given unichar to the given value.
  void set_direction(UNICHAR_ID unichar_id, UNICHARSET::Direction value) {
    unichars[unichar_id].properties.direction = value;
  }

  // Set mirror unichar id in the properties for the given unichar id.
  void set_mirror(UNICHAR_ID unichar_id, UNICHAR_ID mirror) {
    unichars[unichar_id].properties.mirror = mirror;
  }

  // Record normalized version of unichar with the given unichar_id.
  // Invalidates the cached normed_ids; see set_normed_ids.
  void set_normed(UNICHAR_ID unichar_id, const char *normed) {
    unichars[unichar_id].properties.normed = normed;
    unichars[unichar_id].properties.normed_ids.clear();
  }

  // Sets the normed_ids vector from the normed string. normed_ids is not
  // stored in the file, and needs to be set when the UNICHARSET is loaded.
  void set_normed_ids(UNICHAR_ID unichar_id);
  // Property getters.  Each one returns false for INVALID_UNICHAR_ID and
  // asserts that any other id is actually in the set.

  // Return the isalpha property of the given unichar.
  bool get_isalpha(UNICHAR_ID unichar_id) const {
    if (INVALID_UNICHAR_ID == unichar_id) {
      return false;
    }
    ASSERT_HOST(contains_unichar_id(unichar_id));
    return unichars[unichar_id].properties.isalpha;
  }

  // Return the islower property of the given unichar.
  bool get_islower(UNICHAR_ID unichar_id) const {
    if (INVALID_UNICHAR_ID == unichar_id) {
      return false;
    }
    ASSERT_HOST(contains_unichar_id(unichar_id));
    return unichars[unichar_id].properties.islower;
  }

  // Return the isupper property of the given unichar.
  bool get_isupper(UNICHAR_ID unichar_id) const {
    if (INVALID_UNICHAR_ID == unichar_id) {
      return false;
    }
    ASSERT_HOST(contains_unichar_id(unichar_id));
    return unichars[unichar_id].properties.isupper;
  }

  // Return the isdigit property of the given unichar.
  bool get_isdigit(UNICHAR_ID unichar_id) const {
    if (INVALID_UNICHAR_ID == unichar_id) {
      return false;
    }
    ASSERT_HOST(contains_unichar_id(unichar_id));
    return unichars[unichar_id].properties.isdigit;
  }

  // Return the ispunctuation property of the given unichar.
  bool get_ispunctuation(UNICHAR_ID unichar_id) const {
    if (INVALID_UNICHAR_ID == unichar_id) {
      return false;
    }
    ASSERT_HOST(contains_unichar_id(unichar_id));
    return unichars[unichar_id].properties.ispunctuation;
  }

  // Return the isngram property of the given unichar.
  bool get_isngram(UNICHAR_ID unichar_id) const {
    if (INVALID_UNICHAR_ID == unichar_id) {
      return false;
    }
    ASSERT_HOST(contains_unichar_id(unichar_id));
    return unichars[unichar_id].properties.isngram;
  }

  // Returns whether the unichar id represents a unicode value in the private
  // use area.
  bool get_isprivate(UNICHAR_ID unichar_id) const;

  // Returns true if the ids have useful min/max top/bottom values.
  bool top_bottom_useful() const {
    return top_bottom_set_;
  }

  // Sets all ranges to empty, so they can be expanded to set the values.
  void set_ranges_empty();

  // Sets all the properties for this unicharset given a src_unicharset with
  // everything set. The unicharsets don't have to be the same, and graphemes
  // are correctly accounted for.
  void SetPropertiesFromOther(const UNICHARSET &src) {
    PartialSetPropertiesFromOther(0, src);
  }
// Sets properties from Other, starting only at the given index.
void PartialSetPropertiesFromOther(int start_index, const UNICHARSET &src);
// Expands the tops and bottoms and widths for this unicharset given a
// src_unicharset with ranges in it. The unicharsets don't have to be the
// same, and graphemes are correctly accounted for.
void ExpandRangesFromOther(const UNICHARSET &src);
// Makes this a copy of src. Clears this completely first, so the automattic
// ids will not be present in this if not in src.
void CopyFrom(const UNICHARSET &src);
// For each id in src, if it does not occur in this, add it, as in
// SetPropertiesFromOther, otherwise expand the ranges, as in
// ExpandRangesFromOther.
void AppendOtherUnicharset(const UNICHARSET &src);
// Returns true if the acceptable ranges of the tops of the characters do
// not overlap, making their x-height calculations distinct.
bool SizesDistinct(UNICHAR_ID id1, UNICHAR_ID id2) const;
// Returns the min and max bottom and top of the given unichar in
// baseline-normalized coordinates, ie, where the baseline is
// kBlnBaselineOffset and the meanline is kBlnBaselineOffset + kBlnXHeight
// (See normalis.h for the definitions).
void get_top_bottom(UNICHAR_ID unichar_id, int *min_bottom, int *max_bottom,
int *min_top, int *max_top) const {
if (INVALID_UNICHAR_ID == unichar_id) {
*min_bottom = *min_top = 0;
*max_bottom = *max_top = 256; // kBlnCellHeight
return;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
*min_bottom = unichars[unichar_id].properties.min_bottom;
*max_bottom = unichars[unichar_id].properties.max_bottom;
*min_top = unichars[unichar_id].properties.min_top;
*max_top = unichars[unichar_id].properties.max_top;
}
void set_top_bottom(UNICHAR_ID unichar_id, int min_bottom, int max_bottom,
int min_top, int max_top) {
unichars[unichar_id].properties.min_bottom =
ClipToRange<int>(min_bottom, 0, UINT8_MAX);
unichars[unichar_id].properties.max_bottom =
ClipToRange<int>(max_bottom, 0, UINT8_MAX);
unichars[unichar_id].properties.min_top =
ClipToRange<int>(min_top, 0, UINT8_MAX);
unichars[unichar_id].properties.max_top =
ClipToRange<int>(max_top, 0, UINT8_MAX);
}
// Returns the width stats (as mean, sd) of the given unichar relative to the
// median advance of all characters in the character set.
void get_width_stats(UNICHAR_ID unichar_id, float *width,
float *width_sd) const {
if (INVALID_UNICHAR_ID == unichar_id) {
*width = 0.0f;
*width_sd = 0.0f;
return;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
*width = unichars[unichar_id].properties.width;
*width_sd = unichars[unichar_id].properties.width_sd;
}
void set_width_stats(UNICHAR_ID unichar_id, float width, float width_sd) {
unichars[unichar_id].properties.width = width;
unichars[unichar_id].properties.width_sd = width_sd;
}
// Returns the stats of the x-bearing (as mean, sd) of the given unichar
// relative to the median advance of all characters in the character set.
void get_bearing_stats(UNICHAR_ID unichar_id, float *bearing,
float *bearing_sd) const {
if (INVALID_UNICHAR_ID == unichar_id) {
*bearing = *bearing_sd = 0.0f;
return;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
*bearing = unichars[unichar_id].properties.bearing;
*bearing_sd = unichars[unichar_id].properties.bearing_sd;
}
void set_bearing_stats(UNICHAR_ID unichar_id, float bearing,
float bearing_sd) {
unichars[unichar_id].properties.bearing = bearing;
unichars[unichar_id].properties.bearing_sd = bearing_sd;
}
// Returns the stats of the x-advance of the given unichar (as mean, sd)
// relative to the median advance of all characters in the character set.
void get_advance_stats(UNICHAR_ID unichar_id, float *advance,
float *advance_sd) const {
if (INVALID_UNICHAR_ID == unichar_id) {
*advance = *advance_sd = 0;
return;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
*advance = unichars[unichar_id].properties.advance;
*advance_sd = unichars[unichar_id].properties.advance_sd;
}
void set_advance_stats(UNICHAR_ID unichar_id, float advance,
float advance_sd) {
unichars[unichar_id].properties.advance = advance;
unichars[unichar_id].properties.advance_sd = advance_sd;
}
// Returns true if the font metrics properties are empty.
bool PropertiesIncomplete(UNICHAR_ID unichar_id) const {
return unichars[unichar_id].properties.AnyRangeEmpty();
}
// Returns true if the script of the given id is space delimited.
// Returns false for Han and Thai scripts.
bool IsSpaceDelimited(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return true;
}
int script_id = get_script(unichar_id);
return script_id != han_sid_ && script_id != thai_sid_ &&
script_id != hangul_sid_ && script_id != hiragana_sid_ &&
script_id != katakana_sid_;
}
// Return the script name of the given unichar.
// The returned pointer will always be the same for the same script, it's
// managed by unicharset and thus MUST NOT be deleted
int get_script(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return null_sid_;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
return unichars[unichar_id].properties.script_id;
}
  // Return the character properties, eg. alpha/upper/lower/digit/punct,
  // as a bit field of unsigned int.
  unsigned int get_properties(UNICHAR_ID unichar_id) const;
  // Return the character property as a single char. If a character has
  // multiple attributes, the main property is defined by the following order:
  //   upper_case : 'A'
  //   lower_case : 'a'
  //   alpha      : 'x'
  //   digit      : '0'
  //   punctuation: 'p'
  char get_chartype(UNICHAR_ID unichar_id) const;
// Get other_case unichar id in the properties for the given unichar id.
UNICHAR_ID get_other_case(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return INVALID_UNICHAR_ID;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
return unichars[unichar_id].properties.other_case;
}
// Returns the direction property of the given unichar.
Direction get_direction(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return UNICHARSET::U_OTHER_NEUTRAL;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
return unichars[unichar_id].properties.direction;
}
// Get mirror unichar id in the properties for the given unichar id.
UNICHAR_ID get_mirror(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return INVALID_UNICHAR_ID;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
return unichars[unichar_id].properties.mirror;
}
// Returns UNICHAR_ID of the corresponding lower-case unichar.
UNICHAR_ID to_lower(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return INVALID_UNICHAR_ID;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
if (unichars[unichar_id].properties.islower) {
return unichar_id;
}
return unichars[unichar_id].properties.other_case;
}
// Returns UNICHAR_ID of the corresponding upper-case unichar.
UNICHAR_ID to_upper(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return INVALID_UNICHAR_ID;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
if (unichars[unichar_id].properties.isupper) {
return unichar_id;
}
return unichars[unichar_id].properties.other_case;
}
// Returns true if this UNICHARSET has the special codes in
// SpecialUnicharCodes available. If false then there are normal unichars
// at these codes and they should not be used.
bool has_special_codes() const {
return get_fragment(UNICHAR_BROKEN) != nullptr &&
strcmp(id_to_unichar(UNICHAR_BROKEN),
kSpecialUnicharCodes[UNICHAR_BROKEN]) == 0;
}
// Returns true if there are any repeated unicodes in the normalized
// text of any unichar-id in the unicharset.
bool AnyRepeatedUnicodes() const;
// Return a pointer to the CHAR_FRAGMENT class if the given
// unichar id represents a character fragment.
const CHAR_FRAGMENT *get_fragment(UNICHAR_ID unichar_id) const {
if (INVALID_UNICHAR_ID == unichar_id) {
return nullptr;
}
ASSERT_HOST(contains_unichar_id(unichar_id));
return unichars[unichar_id].properties.fragment;
}
// Return the isalpha property of the given unichar representation.
bool get_isalpha(const char *const unichar_repr) const {
return get_isalpha(unichar_to_id(unichar_repr));
}
// Return the islower property of the given unichar representation.
bool get_islower(const char *const unichar_repr) const {
return get_islower(unichar_to_id(unichar_repr));
}
// Return the isupper property of the given unichar representation.
bool get_isupper(const char *const unichar_repr) const {
return get_isupper(unichar_to_id(unichar_repr));
}
// Return the isdigit property of the given unichar representation.
bool get_isdigit(const char *const unichar_repr) const {
return get_isdigit(unichar_to_id(unichar_repr));
}
// Return the ispunctuation property of the given unichar representation.
bool get_ispunctuation(const char *const unichar_repr) const {
return get_ispunctuation(unichar_to_id(unichar_repr));
}
// Return the character properties, eg. alpha/upper/lower/digit/punct,
// of the given unichar representation
unsigned int get_properties(const char *const unichar_repr) const {
return get_properties(unichar_to_id(unichar_repr));
}
char get_chartype(const char *const unichar_repr) const {
return get_chartype(unichar_to_id(unichar_repr));
}
// Return the script name of the given unichar representation.
// The returned pointer will always be the same for the same script, it's
// managed by unicharset and thus MUST NOT be deleted
int get_script(const char *const unichar_repr) const {
return get_script(unichar_to_id(unichar_repr));
}
// Return a pointer to the CHAR_FRAGMENT class struct if the given
// unichar representation represents a character fragment.
const CHAR_FRAGMENT *get_fragment(const char *const unichar_repr) const {
if (unichar_repr == nullptr || unichar_repr[0] == '\0' ||
!ids.contains(unichar_repr, false)) {
return nullptr;
}
return get_fragment(unichar_to_id(unichar_repr));
}
// Return the isalpha property of the given unichar representation.
// Only the first length characters from unichar_repr are used.
bool get_isalpha(const char *const unichar_repr, int length) const {
return get_isalpha(unichar_to_id(unichar_repr, length));
}
// Return the islower property of the given unichar representation.
// Only the first length characters from unichar_repr are used.
bool get_islower(const char *const unichar_repr, int length) const {
return get_islower(unichar_to_id(unichar_repr, length));
}
// Return the isupper property of the given unichar representation.
// Only the first length characters from unichar_repr are used.
bool get_isupper(const char *const unichar_repr, int length) const {
return get_isupper(unichar_to_id(unichar_repr, length));
}
// Return the isdigit property of the given unichar representation.
// Only the first length characters from unichar_repr are used.
bool get_isdigit(const char *const unichar_repr, int length) const {
return get_isdigit(unichar_to_id(unichar_repr, length));
}
// Return the ispunctuation property of the given unichar representation.
// Only the first length characters from unichar_repr are used.
bool get_ispunctuation(const char *const unichar_repr, int length) const {
return get_ispunctuation(unichar_to_id(unichar_repr, length));
}
// Returns normalized version of unichar with the given unichar_id.
const char *get_normed_unichar(UNICHAR_ID unichar_id) const {
if (unichar_id == UNICHAR_SPACE) {
return " ";
}
return unichars[unichar_id].properties.normed.c_str();
}
// Returns a vector of UNICHAR_IDs that represent the ids of the normalized
// version of the given id. There may be more than one UNICHAR_ID in the
// vector if unichar_id represents a ligature.
const std::vector<UNICHAR_ID> &normed_ids(UNICHAR_ID unichar_id) const {
return unichars[unichar_id].properties.normed_ids;
}
// Return the script name of the given unichar representation.
// Only the first length characters from unichar_repr are used.
// The returned pointer will always be the same for the same script, it's
// managed by unicharset and thus MUST NOT be deleted
int get_script(const char *const unichar_repr, int length) const {
return get_script(unichar_to_id(unichar_repr, length));
}
// Return the (current) number of scripts in the script table
int get_script_table_size() const {
return script_table_size_used;
}
// Return the script string from its id
const char *get_script_from_script_id(int id) const {
if (id >= script_table_size_used || id < 0) {
return null_script;
}
return script_table[id];
}
// Returns the id from the name of the script, or 0 if script is not found.
// Note that this is an expensive operation since it involves iteratively
// comparing strings in the script table. To avoid dependency on STL, we
// won't use a hash. Instead, the calling function can use this to lookup
// and save the ID for relevant scripts for fast comparisons later.
int get_script_id_from_name(const char *script_name) const;
// Return true if the given script is the null script
bool is_null_script(const char *script) const {
return script == null_script;
}
// Uniquify the given script. For two scripts a and b, if strcmp(a, b) == 0,
// then the returned pointer will be the same.
// The script parameter is copied and thus can be a temporary.
int add_script(const char *script);
// Return the enabled property of the given unichar.
bool get_enabled(UNICHAR_ID unichar_id) const {
ASSERT_HOST(contains_unichar_id(unichar_id));
return unichars[unichar_id].properties.enabled;
}
  // Convenience accessors for the cached script ids below (initialized when
  // the unicharset file is loaded).
  // Id of the null/unknown script.
  int null_sid() const {
    return null_sid_;
  }
  // Id of the Common script.
  int common_sid() const {
    return common_sid_;
  }
  // Id of the Latin script.
  int latin_sid() const {
    return latin_sid_;
  }
  // Id of the Cyrillic script.
  int cyrillic_sid() const {
    return cyrillic_sid_;
  }
  // Id of the Greek script.
  int greek_sid() const {
    return greek_sid_;
  }
  // Id of the Han script.
  int han_sid() const {
    return han_sid_;
  }
  // Id of the Hiragana script.
  int hiragana_sid() const {
    return hiragana_sid_;
  }
  // Id of the Katakana script.
  int katakana_sid() const {
    return katakana_sid_;
  }
  // Id of the Thai script.
  int thai_sid() const {
    return thai_sid_;
  }
  // Id of the Hangul script.
  int hangul_sid() const {
    return hangul_sid_;
  }
  // Id of the most frequently occurring script in the charset.
  int default_sid() const {
    return default_sid_;
  }
  // Returns true if the unicharset has the concept of upper/lower case.
  bool script_has_upper_lower() const {
    return script_has_upper_lower_;
  }
  // Returns true if the unicharset has the concept of x-height.
  // script_has_xheight can be true even if script_has_upper_lower is not,
  // when the script has a sufficiently predominant top line with ascenders,
  // such as Devanagari and Thai.
  bool script_has_xheight() const {
    return script_has_xheight_;
  }
private:
  // Per-unichar property record.
  // NOTE(review): member layout appears to back file loading/saving via the
  // Partial/Set/Copy routines above — confirm before reordering fields.
  struct TESS_API UNICHAR_PROPERTIES {
    UNICHAR_PROPERTIES();
    // Initializes all properties to sensible default values.
    void Init();
    // Sets all ranges wide open. Initialization default in case there are
    // no useful values available.
    void SetRangesOpen();
    // Sets all ranges to empty. Used before expanding with font-based data.
    void SetRangesEmpty();
    // Returns true if any of the top/bottom/width/bearing/advance ranges/stats
    // is empty.
    bool AnyRangeEmpty() const;
    // Expands the ranges with the ranges from the src properties.
    void ExpandRangesFrom(const UNICHAR_PROPERTIES &src);
    // Copies the properties from src into this.
    void CopyFrom(const UNICHAR_PROPERTIES &src);
    // Basic character-class flags (see the get_is* accessors above).
    bool isalpha;
    bool islower;
    bool isupper;
    bool isdigit;
    bool ispunctuation;
    bool isngram;
    // Whether this unichar is enabled; read by get_enabled().
    bool enabled;
    // Possible limits of the top and bottom of the bounding box in
    // baseline-normalized coordinates, ie, where the baseline is
    // kBlnBaselineOffset and the meanline is kBlnBaselineOffset + kBlnXHeight
    // (See normalis.h for the definitions).
    uint8_t min_bottom;
    uint8_t max_bottom;
    uint8_t min_top;
    uint8_t max_top;
    // Statistics of the widths of bounding box, relative to the median advance.
    float width;
    float width_sd;
    // Stats of the x-bearing and advance, also relative to the median advance.
    float bearing;
    float bearing_sd;
    float advance;
    float advance_sd;
    // Id of the script this unichar belongs to (see get_script()).
    int script_id;
    UNICHAR_ID other_case; // id of the corresponding upper/lower case unichar
    Direction direction;   // direction of this unichar
    // Mirror property is useful for reverse DAWG lookup for words in
    // right-to-left languages (e.g. "(word)" would be in
    // '[open paren]' 'w' 'o' 'r' 'd' '[close paren]' in a UTF8 string.
    // However, what we want in our DAWG is
    // '[open paren]', 'd', 'r', 'o', 'w', '[close paren]' not
    // '[close paren]', 'd', 'r', 'o', 'w', '[open paren]'.
    UNICHAR_ID mirror;
    // A string of unichar_ids that represent the corresponding normed string.
    // For awkward characters like em-dash, this gives hyphen.
    // For ligatures, this gives the string of normal unichars.
    std::vector<UNICHAR_ID> normed_ids;
    std::string normed; // normalized version of this unichar
    // Contains meta information about the fragment if a unichar represents
    // a fragment of a character, otherwise should be set to nullptr.
    // It is assumed that character fragments are added to the unicharset
    // after the corresponding 'base' characters.
    CHAR_FRAGMENT *fragment;
  };
  // One entry of the unichars table: the character's text (fixed-size
  // buffer) plus its properties.
  struct UNICHAR_SLOT {
    char representation[UNICHAR_LEN + 1];
    UNICHAR_PROPERTIES properties;
  };
  // Internal recursive version of encode_string above.
  // str is the start of the whole string.
  // str_index is the current position in str.
  // str_length is the length of str.
  // encoding is a working encoding of str.
  // lengths is a working set of lengths of each element of encoding.
  // best_total_length is the longest length of str that has been successfully
  // encoded so far.
  // On return:
  // best_encoding contains the encoding that used the longest part of str.
  // best_lengths (may be null) contains the lengths of best_encoding.
  void encode_string(const char *str, int str_index, int str_length,
                     std::vector<UNICHAR_ID> *encoding,
                     std::vector<char> *lengths, unsigned *best_total_length,
                     std::vector<UNICHAR_ID> *best_encoding,
                     std::vector<char> *best_lengths) const;
  // Gets the properties for a grapheme string, combining properties for
  // multiple characters in a meaningful way where possible.
  // Returns false if no valid match was found in the unicharset.
  // NOTE that script_id, mirror, and other_case refer to this unicharset on
  // return and will need redirecting if the target unicharset is different.
  bool GetStrProperties(const char *utf8_str, UNICHAR_PROPERTIES *props) const;
  // Load ourselves from a "file" where our only interface to the file is
  // an implementation of fgets(). This is the parsing primitive accessed by
  // the public routines load_from_file().
  bool load_via_fgets(const std::function<char *(char *, int)> &fgets_cb,
                      bool skip_fragments);
  // List of mappings to make when ingesting strings from the outside.
  // The substitutions clean up text that should exist for rendering of
  // synthetic data, but not in the recognition set.
  static const char *kCleanupMaps[][2];
  // Script string returned for out-of-range ids by
  // get_script_from_script_id().
  static const char *null_script;
  // The table of unichars, indexed by UNICHAR_ID.
  std::vector<UNICHAR_SLOT> unichars;
  // Lookup structure from unichar text to ids (used eg. by the
  // get_fragment(const char *) guard).
  UNICHARMAP ids;
  // Uniquified table of script names; see add_script and
  // get_script_from_script_id.
  char **script_table;
  int script_table_size_used;
  int script_table_size_reserved;
  // True if the unichars have their tops/bottoms set.
  bool top_bottom_set_;
  // True if the unicharset has significant upper/lower case chars.
  bool script_has_upper_lower_;
  // True if the unicharset has a significant mean-line with significant
  // ascenders above that.
  bool script_has_xheight_;
  // True if the set contains chars that would be changed by the cleanup.
  bool old_style_included_;
  // A few convenient script name-to-id mapping without using hash.
  // These are initialized when unicharset file is loaded. Anything
  // missing from this list can be looked up using get_script_id_from_name.
  int null_sid_;
  int common_sid_;
  int latin_sid_;
  int cyrillic_sid_;
  int greek_sid_;
  int han_sid_;
  int hiragana_sid_;
  int katakana_sid_;
  int thai_sid_;
  int hangul_sid_;
  // The most frequently occurring script in the charset.
  int default_sid_;
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_UNICHARSET_H_
|
2301_81045437/tesseract
|
src/ccutil/unicharset.h
|
C++
|
apache-2.0
| 42,757
|
///////////////////////////////////////////////////////////////////////
// File: unicity_table.h
// Description: a class to uniquify objects, manipulating them using integers
// ids.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_UNICITY_TABLE_H_
#define TESSERACT_CCUTIL_UNICITY_TABLE_H_
#include "errcode.h"
#include "genericvector.h"
#include <functional> // for std::function
namespace tesseract {
// A class to uniquify objects, manipulating them using integers ids.
// T requirements:
// operator= to add an element
// default-constructible: allocating the internal table will call the default
// constructor.
template <typename T>
class UnicityTable {
public:
/// Clear the structures and deallocate internal structures.
~UnicityTable() {
clear();
}
/// Reserve some memory. If there is size or more elements, the table will
/// then allocate size * 2 elements.
void reserve(int size) {
table_.reserve(size);
}
/// Return the size used.
int size() const {
return table_.size();
}
/// Return the object from an id.
const T &at(int id) const {
return table_.at(id);
}
// Return the pointer to an object with the given id.
T &at(int id) {
return table_.at(id);
}
T &operator[](size_t id) {
return table_[id];
}
const T &operator[](size_t id) const {
return table_[id];
}
/// Return the id of the T object.
/// This method NEEDS a compare_callback to be passed to
/// set_compare_callback.
int get_index(T object) const {
return table_.get_index(object);
}
/// Add an element in the table
int push_back(T object) {
auto idx = get_index(object);
if (idx == -1) {
idx = table_.push_back(object);
}
return idx;
}
/// Add a callback to be called to delete the elements when the table took
/// their ownership.
void set_clear_callback(const std::function<void(T)> &cb) {
table_.set_clear_callback(cb);
}
/// Clear the table, calling the callback function if any.
/// All the owned Callbacks are also deleted.
/// If you don't want the Callbacks to be deleted, before calling clear, set
/// the callback to nullptr.
void clear() {
table_.clear();
}
/// This method clear the current object, then, does a shallow copy of
/// its argument, and finally invalidate its argument.
void move(UnicityTable<T> *from) {
table_.move(&from->table_);
}
/// Read/Write the table to a file. This does _NOT_ read/write the callbacks.
/// The Callback given must be permanent since they will be called more than
/// once. The given callback will be deleted at the end.
/// Returns false on read/write error.
bool write(FILE *f, const std::function<bool(FILE *, const T &)> &cb) const {
return table_.write(f, cb);
}
bool read(tesseract::TFile *f, const std::function<bool(tesseract::TFile *, T *)> &cb) {
return table_.read(f, cb);
}
private:
GenericVector<T> table_;
};
} // namespace tesseract
#endif // TESSERACT_CCUTIL_UNICITY_TABLE_H_
|
2301_81045437/tesseract
|
src/ccutil/unicity_table.h
|
C++
|
apache-2.0
| 3,714
|
///////////////////////////////////////////////////////////////////////
// File: universalambigs.h
// Description: Data for a universal ambigs file that is useful for
// any language.
// Author: Ray Smith
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CCUTIL_UNIVERSALAMBIGS_H_
#define TESSERACT_CCUTIL_UNIVERSALAMBIGS_H_
namespace tesseract {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Woverlength-strings"
inline const char kUniversalAmbigsFile[] = {
"v2\n"
"'' \" 1\n"
"`' \" 1\n"
"'` \" 1\n"
"‘' \" 1\n"
"'‘ \" 1\n"
"’' \" 1\n"
"'’ \" 1\n"
"`` \" 1\n"
"`‘ \" 1\n"
"‘` \" 1\n"
"`’ \" 1\n"
"’` \" 1\n"
"‘‘ “ 1\n"
"‘’ \" 1\n"
"’‘ \" 1\n"
"’’ ” 1\n"
",, „ 1\n"
"m rn 0\n"
"rn m 0\n"
"m in 0\n"
"in m 0\n"
"d cl 0\n"
"cl d 0\n"
"nn rm 0\n"
"rm nn 0\n"
"n ri 0\n"
"ri n 0\n"
"li h 0\n"
"lr h 0\n"
"ii u 0\n"
"ii n 0\n"
"ni m 0\n"
"iii m 0\n"
"ll H 0\n"
"I-I H 0\n"
"vv w 0\n"
"VV W 0\n"
"t f 0\n"
"f t 0\n"
"a o 0\n"
"o a 0\n"
"e c 0\n"
"c e 0\n"
"rr n 0\n"
"E fi 0\n"
"l< k 0\n"
"ld ki 0\n"
"lx h 0\n"
"xn m 0\n"
"ux in 0\n"
"r t 0\n"
"d tl 0\n"
"di th 0\n"
"ur in 0\n"
"un im 0\n"
"u a 0\n"
"o ó 0\n"
"ó o 0\n"
"i í 0\n"
"í i 0\n"
"a á 0\n"
"á a 0\n"
"e é 0\n"
"é e 0\n"
"u ú 0\n"
"ú u 0\n"
"n ñ 0\n"
"ñ n 0\n"
"0 o 0\n"
"d tr 0\n"
"n tr 0\n"
"ñ fi 0\n"
"u ti 0\n"
"ñ ti 0\n"
"d ti 0\n"
"d tí 0\n"
"d rí 0\n"
"a à 0\n"
"e è 0\n"
"n ij 0\n"
"g ij 0\n"
"o ò 0\n"
"E É 0\n"
"E È 0\n"
"u ü 0\n"
"xnE an 1\n"
"mYx me 1\n"
"qtE nt 1\n"
"Tlb le 1\n"
"vxN va 1\n"
"gjQ ng 1\n"
"jpF ij 1\n"
"Yrl le 1\n"
"aqY an 1\n"
"zvJ va 1\n"
"fbL be 1\n"
"Nvk va 1\n"
"fJp pr 1\n"
"wxC wa 1\n"
"cuJ qu 1\n"
"Qzt ta 1\n"
"qKw wa 1\n"
"scJ st 1\n"
"pXp po 1\n"
"Vqi ti 1\n"
"Uxk ka 1\n"
"kJv ka 1\n"
"Ykd ka 1\n"
"vpX va 1\n"
"iBv ti 1\n"
"zRb sz 1\n"
"yTm mi 1\n"
"mKp pr 1\n"
"Vzq qu 1\n"
"Xtp ti 1\n"
"mvD va 1\n"
"mDq me 1\n"
"jxP ij 1\n"
"Bxv va 1\n"
"oIu qu 1\n"
"Rvc va 1\n"
"uCj qu 1\n"
"oAo vo 1\n"
"quB tu 1\n"
"btV ti 1\n"
"Lmc me 1\n"
"tVw ti 1\n"
"Yxv va 1\n"
"Hxm me 1\n"
"dVh th 1\n"
"xYc ch 1\n"
"uPj tu 1\n"
"fTf fo 1\n"
"Rjw ij 1\n"
"xdA di 1\n"
"jzN ij 1\n"
"mxL me 1\n"
"ygJ ng 1\n"
"Vvg va 1\n"
"rjK ij 1\n"
"yuV tu 1\n"
"sWk ku 1\n"
"Pgz sz 1\n"
"jHm me 1\n"
"zkU ku 1\n"
"gvG va 1\n"
"hdP th 1\n"
"mVb me 1\n"
"Qgd di 1\n"
"zcZ ch 1\n"
"zqj ij 1\n"
"zsJ sz 1\n"
"dfN di 1\n"
"dgW di 1\n"
"wNr ri 1\n"
"zvC va 1\n"
"qYw qu 1\n"
"uHy tu 1\n"
"tNq th 1\n"
"lxJ li 1\n"
"Hbk ku 1\n"
"xsG st 1\n"
"vSb va 1\n"
"xFb bu 1\n"
"Ntg th 1\n"
"oBj ij 1\n"
"qkv qu 1\n"
"bVj ij 1\n"
"zjT ij 1\n"
"bvX va 1\n"
"oZf to 1\n"
"kcU ko 1\n"
"fFm me 1\n"
"Xbj ij 1\n"
"Kqv va 1\n"
"Rwj ij 1\n"
"dvJ va 1\n"
"znJ sz 1\n"
"qqV qu 1\n"
"pxM po 1\n"
"eBj ij 1\n"
"mJx me 1\n"
"xnM ng 1\n"
"aCq va 1\n"
"pHj ij 1\n"
"tfQ th 1\n"
"wqn qu 1\n"
"mSs is 1\n"
"sBw st 1\n"
"Fhn th 1\n"
"zNb sz 1\n"
"Mvb va 1\n"
"bVt th 1\n"
"qHt th 1\n"
"qLv qu 1\n"
"kgF ng 1\n"
"vxW va 1\n"
"cdY ch 1\n"
"Xrz sz 1\n"
"Efh th 1\n"
"lqI qu 1\n"
"Lzq qu 1\n"
"zhX th 1\n"
"ghZ th 1\n"
"lFg ng 1\n"
"vVc va 1\n"
"lMr er 1\n"
"Tqj qu 1\n"
"jAx ij 1\n"
"iMt th 1\n"
"Nlv va 1\n"
"zbP sz 1\n"
"kVx ka 1\n"
"eQl te 1\n"
"sWb st 1\n"
"Bqy qu 1\n"
"dXk ka 1\n"
"vUc va 1\n"
"vOb va 1\n"
"uHf qu 1\n"
"qNr qu 1\n"
"uFz qu 1\n"
"Mlr er 1\n"
"kmZ ka 1\n"
"sRt th 1\n"
"Wqv qu 1\n"
"hfK th 1\n"
"vxQ va 1\n"
"lCq qu 1\n"
"fYw wa 1\n"
"tfS th 1\n"
"qdO qu 1\n"
"dQd de 1\n"
"xdX de 1\n"
"mNx me 1\n"
"kFz sz 1\n"
"wjS ij 1\n"
"yPp pr 1\n"
"wcW ch 1\n"
"Njz sz 1\n"
"dVp de 1\n"
"dqD qu 1\n"
"rJs sz 1\n"
"xpH po 1\n"
"xqR qu 1\n"
"gVr er 1\n"
"Btq th 1\n"
"nmB nt 1\n"
"zcM sz 1\n"
"cfG ch 1\n"
"mfO me 1\n"
"Yhc th 1\n"
"bZm me 1\n"
"mzB sz 1\n"
"vRw va 1\n"
"yDh th 1\n"
"Zgf ng 1\n"
"kqT qu 1\n"
"Iuz qu 1\n"
"rbW er 1\n"
"Jmq qu 1\n"
"Kvj va 1\n"
"zcD ch 1\n"
"xgC ng 1\n"
"jCx ij 1\n"
"bWg ng 1\n"
"ywW wa 1\n"
"Jkc ch 1\n"
"xGs sz 1\n"
"vbH va 1\n"
"lTz sz 1\n"
"eCb er 1\n"
"jVv va 1\n"
"jDq qu 1\n"
"joQ po 1\n"
"qtM th 1\n"
"Rqk qu 1\n"
"Hvg va 1\n"
"uAz qu 1\n"
"mfW me 1\n"
"tgS th 1\n"
"cqD qu 1\n"
"sfY sz 1\n"
"Yhv th 1\n"
"uqM qu 1\n"
"xpK pr 1\n"
"Jzh th 1\n"
"cQk ch 1\n"
"tjO th 1\n"
"qxZ qu 1\n"
"zPv sz 1\n"
"qNk qu 1\n"
"lvQ va 1\n"
"kGw ka 1\n"
"xuD qu 1\n"
"Jvy va 1\n"
"jYe te 1\n"
"fZu qu 1\n"
"qYo qu 1\n"
"vhI th 1\n"
"fxY fo 1\n"
"yPf fo 1\n"
"fGj ij 1\n"
"dmT me 1\n"
"vfX va 1\n"
"xQt th 1\n"
"cxS ch 1\n"
"vzA va 1\n"
"qaA qu 1\n"
"Jbx be 1\n"
"kVd ka 1\n"
"Xjv va 1\n"
"hkI th 1\n"
"vQu qu 1\n"
"vhK th 1\n"
"Dvj va 1\n"
"Vbm me 1\n"
"fpN pr 1\n"
"pkG ka 1\n"
"bLc ch 1\n"
"tJc th 1\n"
"wwJ wa 1\n"
"Zrw er 1\n"
"wdW de 1\n"
"Wgf ng 1\n"
"Pqz qu 1\n"
"wgN ng 1\n"
"zHt th 1\n"
"xTl le 1\n"
"Dvt th 1\n"
"wmU me 1\n"
"xhm th 1\n"
"hCx th 1\n"
"vwV va 1\n"
"zvL va 1\n"
"nGf nt 1\n"
"jjC ij 1\n"
"Ucg ch 1\n"
"pWf pr 1\n"
"jxG ij 1\n"
"Mqn qu 1\n"
"yvW va 1\n"
"lWk ka 1\n"
"mdO me 1\n"
"qNm qu 1\n"
"Rwg ng 1\n"
"xfv va 1\n"
"uOw qu 1\n"
"xhZ th 1\n"
"jLr er 1\n"
"fBy fo 1\n"
"nUj nt 1\n"
"lTg ng 1\n"
"jlP ij 1\n"
"wrR er 1\n"
"rXw er 1\n"
"eVw ve 1\n"
"zWn ng 1\n"
"mJs sz 1\n"
"Mgy ng 1\n"
"uZq qu 1\n"
"Tdg ng 1\n"
"mqI qu 1\n"
"Dhp th 1\n"
"pmK me 1\n"
"Ssf sz 1\n"
"sWl sz 1\n"
"iqK qu 1\n"
"gjG ng 1\n"
"djB ij 1\n"
"wKv va 1\n"
"wvI va 1\n"
"tcU th 1\n"
"tkG th 1\n"
"zUe te 1\n"
"lUh th 1\n"
"nBg nt 1\n"
"dHx de 1\n"
"Wbz sz 1\n"
"vuQ qu 1\n"
"Hpl le 1\n"
"oVj ij 1\n"
"vBb va 1\n"
"Tdz sz 1\n"
"pfV pr 1\n"
"qgN qu 1\n"
"pcU ch 1\n"
"gcN ch 1\n"
"vkA va 1\n"
"cQf ch 1\n"
"Yzx sz 1\n"
"ypF pr 1\n"
"vBw va 1\n"
"pPd de 1\n"
"qmU qu 1\n"
"eWf ve 1\n"
"jZr er 1\n"
"Hwl le 1\n"
"yyI ny 1\n"
"Zfh th 1\n"
"Lgw ng 1\n"
"uqp qu 1\n"
"xOj ij 1\n"
"dkJ ko 1\n"
"dqM qu 1\n"
"sbW is 1\n"
"zMp sz 1\n"
"nJz ng 1\n"
"kMc ko 1\n"
"zqW qu 1\n"
"vQk va 1\n"
"eqD qu 1\n"
"hFn th 1\n"
"vcZ ch 1\n"
"xGk ka 1\n"
"kzf sz 1\n"
"xZx xe 1\n"
"qvN qu 1\n"
"ykY ka 1\n"
"brH er 1\n"
"Wrh th 1\n"
"wjE ij 1\n"
"kjQ ka 1\n"
"fLj ij 1\n"
"mgE ng 1\n"
"xwI wa 1\n"
"iDw ti 1\n"
"Btx th 1\n"
"vPz va 1\n"
"yqH qu 1\n"
"wFe er 1\n"
"lQy le 1\n"
"gBp ng 1\n"
"jdY de 1\n"
"tvQ th 1\n"
"ljO le 1\n"
"Nsq qu 1\n"
"xdO de 1\n"
"gzW ng 1\n"
"wtM th 1\n"
"qfR qu 1\n"
"jZh th 1\n"
"Wcb ch 1\n"
"dvQ va 1\n"
"jHb ij 1\n"
"xbM be 1\n"
"nWg nt 1\n"
"Ywj ij 1\n"
"Xwj ij 1\n"
"pxK pr 1\n"
"ybQ be 1\n"
"Wvm va 1\n"
"Lgz ng 1\n"
"btS th 1\n"
"jRl le 1\n"
"qqJ qu 1\n"
"Cnq qu 1\n"
"Fmw me 1\n"
"dvP va 1\n"
"vqB qu 1\n"
"djI de 1\n"
"jVq qu 1\n"
"fvZ va 1\n"
"Cwt th 1\n"
"Uyb be 1\n"
"Ffc ch 1\n"
"soX sz 1\n"
"qhR th 1\n"
"fWz sz 1\n"
"vrX va 1\n"
"eOq qu 1\n"
"bwZ be 1\n"
"dnV ng 1\n"
"Gbw be 1\n"
"xGd de 1\n"
"mnZ ng 1\n"
"bpN pr 1\n"
"dzX de 1\n"
"Bxq qu 1\n"
"zpx sz 1\n"
"dqZ qu 1\n"
"xTf fo 1\n"
"wPv va 1\n"
"cxq qu 1\n"
"hdT th 1\n"
"ywX wa 1\n"
"Uvv va 1\n"
"rKp er 1\n"
"sdF de 1\n"
"Jcg ch 1\n"
"xzO sz 1\n"
"xTt th 1\n"
"djP de 1\n"
"gTn ng 1\n"
"Gtp th 1\n"
"xgA ng 1\n"
"bdL de 1\n"
"wzO sz 1\n"
"fhI th 1\n"
"Wmp me 1\n"
"Qdt th 1\n"
"uYq qu 1\n"
"pbJ pr 1\n"
"jRd de 1\n"
"Xsx sz 1\n"
"zgI ng 1\n"
"qhY th 1\n"
"Ggj ng 1\n"
"Fjq qu 1\n"
"Qwk ka 1\n"
"zxW sz 1\n"
"vCc ch 1\n"
"ccL ch 1\n"
"Kxs sz 1\n"
"mYr er 1\n"
"rQt er 1\n"
"Zxs sz 1\n"
"hdQ th 1\n"
"dwH de 1\n"
"Yml le 1\n"
"qVz qu 1\n"
"Rvl va 1\n"
"yHk ka 1\n"
"Wjt th 1\n"
"hMw th 1\n"
"pzU sz 1\n"
"gcL ch 1\n"
"qOa qu 1\n"
"eqI qu 1\n"
"iYp ti 1\n"
"vCq qu 1\n"
"uoV ro 1\n"
"fZx fo 1\n"
"qQd qu 1\n"
"qdE qu 1\n"
"qWx qu 1\n"
"Ykj ij 1\n"
"Fpj ij 1\n"
"zGv va 1\n"
"rwO er 1\n"
"Qzq qu 1\n"
"Kqb qu 1\n"
"zgT ng 1\n"
"jsZ sz 1\n"
"aHq qu 1\n"
"yjL ij 1\n"
"Ycw ch 1\n"
"bnP an 1\n"
"vWn an 1\n"
"zyY sz 1\n"
"zRs st 1\n"
"wuP qu 1\n"
"vjB va 1\n"
"jrT er 1\n"
"vwJ va 1\n"
"dVj de 1\n"
"zvW va 1\n"
"dZk de 1\n"
"nrG an 1\n"
"qsU qu 1\n"
"Pvs va 1\n"
"lLh th 1\n"
"qCz qu 1\n"
"dvV de 1\n"
"Pjw ij 1\n"
"Kmj ij 1\n"
"Jfh th 1\n"
"nwY an 1\n"
"gwC ng 1\n"
"vGb va 1\n"
"qWr qu 1\n"
"qpW qu 1\n"
"dKk de 1\n"
"yWb be 1\n"
"jmN ij 1\n"
"gpV ng 1\n"
"qzS qu 1\n"
"oZh th 1\n"
"Qmt th 1\n"
"mNk me 1\n"
"ypM pr 1\n"
"lwH le 1\n"
"zHs sz 1\n"
"jzC jo 1\n"
"oJh th 1\n"
"Lqh th 1\n"
"hXg th 1\n"
"xEf fo 1\n"
"uWx qu 1\n"
"kvT va 1\n"
"zsG sz 1\n"
"lSx le 1\n"
"qKb qu 1\n"
"Qye de 1\n"
"xHk ka 1\n"
"Cwp pr 1\n"
"zmJ sz 1\n"
"xuL qu 1\n"
"bdH de 1\n"
"Pbw wa 1\n"
"qdX qu 1\n"
"lVc ch 1\n"
"bqL qu 1\n"
"wNs sz 1\n"
"vzN va 1\n"
"qjA qu 1\n"
"Zhf th 1\n"
"ypJ pr 1\n"
"xMq qu 1\n"
"bTk ka 1\n"
"tLf th 1\n"
"xgR ng 1\n"
"kQz sz 1\n"
"Rjp ij 1\n"
"xhG th 1\n"
"bCc ch 1\n"
"hbF th 1\n"
"rxQ er 1\n"
"qVp qu 1\n"
"bkY ka 1\n"
"qPl qu 1\n"
"jQk ij 1\n"
"Ovq qu 1\n"
"sVv va 1\n"
"pmU me 1\n"
"uFv qu 1\n"
"xaZ va 1\n"
"gGn an 1\n"
"pgI ng 1\n"
"zTj sz 1\n"
"lvC va 1\n"
"wGv va 1\n"
"rNv va 1\n"
"Qtq th 1\n"
"vNh th 1\n"
"lPv va 1\n"
"Jdq qu 1\n"
"Xdj de 1\n"
"yqk qu 1\n"
"iwY ti 1\n"
"Nmq qu 1\n"
"fTp pr 1\n"
"qzQ qu 1\n"
"pjA ij 1\n"
"pvH va 1\n"
"xLj ij 1\n"
"qWh th 1\n"
"vVq qu 1\n"
"gQd de 1\n"
"svY va 1\n"
"fLf fo 1\n"
"qzB qu 1\n"
"Dxg ng 1\n"
"uzY qu 1\n"
"gVz sz 1\n"
"hZb th 1\n"
"Gpx pr 1\n"
"xqh th 1\n"
"gcX ch 1\n"
"Hxd de 1\n"
"tUq th 1\n"
"bKp pr 1\n"
"iGx ti 1\n"
"xvQ va 1\n"
"lxA le 1\n"
"sjH st 1\n"
"Gqo qu 1\n"
"dgQ de 1\n"
"yDk ka 1\n"
"Znv va 1\n"
"vfU va 1\n"
"vuD qu 1\n"
"oQj ij 1\n"
"bhD th 1\n"
"qLj qu 1\n"
"mdY de 1\n"
"rZb er 1\n"
"kDv va 1\n"
"fsK sz 1\n"
"Kqf qu 1\n"
"yWl le 1\n"
"mVw me 1\n"
"mcV ch 1\n"
"tDf th 1\n"
"lAo le 1\n"
"fzR sz 1\n"
"Xrq qu 1\n"
"jrZ er 1\n"
"qmN qu 1\n"
"Jnp an 1\n"
"jhC th 1\n"
"kqR qu 1\n"
"dWn de 1\n"
"Wmw me 1\n"
"Rgy ng 1\n"
"uvN qu 1\n"
"jiY ti 1\n"
"xWc ch 1\n"
"yJr er 1\n"
"oHq qu 1\n"
"yvw va 1\n"
"Ydn de 1\n"
"Nvq qu 1\n"
"Gmv va 1\n"
"xxZ xe 1\n"
"Xdf de 1\n"
"xYh th 1\n"
"Vnv an 1\n"
"jNz sz 1\n"
"Wnq qu 1\n"
"Xwk ka 1\n"
"qWz qu 1\n"
"mQs sz 1\n"
"Vxb be 1\n"
"xwG wa 1\n"
"wvp va 1\n"
"gmV ng 1\n"
"Rzq qu 1\n"
"Cpw pr 1\n"
"Gyy ny 1\n"
"xzA sz 1\n"
"wGx wa 1\n"
"bqS qu 1\n"
"whR th 1\n"
"jPc ch 1\n"
"iqG qu 1\n"
"djK de 1\n"
"cVk ch 1\n"
"rwT er 1\n"
"Vhn th 1\n"
"Hfw wa 1\n"
"bnJ an 1\n"
"Cpd de 1\n"
"Nmd de 1\n"
"dnO an 1\n"
"qWc qu 1\n"
"aVq qu 1\n"
"qOn qu 1\n"
"Qlr er 1\n"
"qnN qu 1\n"
"rLq qu 1\n"
"wtE th 1\n"
"jgR ng 1\n"
"Yqp qu 1\n"
"Hwg ng 1\n"
"nWk an 1\n"
"wqB qu 1\n"
"fAp pr 1\n"
"hZv th 1\n"
"Kzp sz 1\n"
"fNk ka 1\n"
"Tkd de 1\n"
"uYm qu 1\n"
"kcR ch 1\n"
"xNl le 1\n"
"kHk ka 1\n"
"bJk ka 1\n"
"jjD ij 1\n"
"Nlq qu 1\n"
"dhB th 1\n"
"jXl le 1\n"
"nwB an 1\n"
"Hzb sz 1\n"
"qQz qu 1\n"
"fKc ch 1\n"
"jVw ij 1\n"
"ylU le 1\n"
"Lzj sz 1\n"
"sXu qu 1\n"
"wBw wa 1\n"
"Iqg qu 1\n"
"wjV ij 1\n"
"wxt th 1\n"
"jzK sz 1\n"
"rDd de 1\n"
"uQy qu 1\n"
"qGw qu 1\n"
"tbU th 1\n"
"kUo ka 1\n"
"dVm de 1\n"
"Ddn an 1\n"
"vqC vo 1\n"
"jkZ ij 1\n"
"Lvz va 1\n"
"tPy th 1\n"
"Vfj ij 1\n"
"Qhb th 1\n"
"whB th 1\n"
"Fqf qu 1\n"
"hCv th 1\n"
"Fjf ij 1\n"
"Qfr er 1\n"
"zwF sz 1\n"
"Fwf wa 1\n"
"pvU va 1\n"
"whC th 1\n"
"hTk th 1\n"
"dlQ de 1\n"
"wzL sz 1\n"
"zqS qu 1\n"
"qtP th 1\n"
"yhC th 1\n"
"yjB ij 1\n"
"iTd de 1\n"
"kLx ka 1\n"
"Rqi qu 1\n"
"qjS qu 1\n"
"vjI va 1\n"
"pGz sz 1\n"
"wnV an 1\n"
"lQx le 1\n"
"uvS qu 1\n"
"Zge de 1\n"
"gJv ng 1\n"
"Ydb de 1\n"
"wDh th 1\n"
"zwV sz 1\n"
"hNm th 1\n"
"zwQ sz 1\n"
"fRr er 1\n"
"wVr er 1\n"
"nKg an 1\n"
"Tgg ng 1\n"
"bYp pr 1\n"
"lBn an 1\n"
"zjp sz 1\n"
"qAf qu 1\n"
"zmK me 1\n"
"wqK qu 1\n"
"vjT va 1\n"
"Lql qu 1\n"
"snC an 1\n"
"fzY sz 1\n"
"vqU qu 1\n"
"mGb me 1\n"
"fkP ka 1\n"
"wQg ng 1\n"
"Fqt th 1\n"
"bVm me 1\n"
"Wcx ch 1\n"
"wpY wa 1\n"
"lFv va 1\n"
"gwD ng 1\n"
"gWp ng 1\n"
"fjT ij 1\n"
"pFt th 1\n"
"iIp in 1\n"
"tbD th 1\n"
"Xqc qu 1\n"
"Qkc ch 1\n"
"qeZ qu 1\n"
"qPb qu 1\n"
"gwL ng 1\n"
"fHi in 1\n"
"xwP wa 1\n"
"xvB va 1\n"
"jSw ij 1\n"
"pzF sz 1\n"
"wYp wa 1\n"
"dDx de 1\n"
"nBx an 1\n"
"cNv ch 1\n"
"Ubm me 1\n"
"xXu qu 1\n"
"dRl de 1\n"
"dBz de 1\n"
"Xvh th 1\n"
"Xld de 1\n"
"mwY me 1\n"
"whQ th 1\n"
"Mzl le 1\n"
"Aqj qu 1\n"
"uDp qu 1\n"
"cjZ ch 1\n"
"Vkf ka 1\n"
"uGq qu 1\n"
"hBs th 1\n"
"qLh th 1\n"
"tfW th 1\n"
"cPn an 1\n"
"xoN on 1\n"
"Ydx de 1\n"
"Lxk ka 1\n"
"ccZ ch 1\n"
"uJh th 1\n"
"sVp sz 1\n"
"wrE er 1\n"
"xgP ng 1\n"
"hPp th 1\n"
"euU qu 1\n"
"sZh th 1\n"
"qnK qu 1\n"
"Bgh th 1\n"
"slQ le 1\n"
"gxA ng 1\n"
"jLd de 1\n"
"znD an 1\n"
"kXk ka 1\n"
"tfV th 1\n"
"Vwl le 1\n"
"xWd do 1\n"
"xnH an 1\n"
"cOq ch 1\n"
"Lkk ka 1\n"
"Nvy va 1\n"
"xIh th 1\n"
"xkK ka 1\n"
"rMr er 1\n"
"rmQ er 1\n"
"bPn an 1\n"
"fAa an 1\n"
"vQv va 1\n"
"fHr er 1\n"
"Pmv va 1\n"
"vzJ sz 1\n"
"wTg ng 1\n"
"bWc ch 1\n"
"Zwg ng 1\n"
"gKx ng 1\n"
"Gbq qu 1\n"
"wMk ka 1\n"
"Nfx fo 1\n"
"fAo on 1\n"
"dHb de 1\n"
"lxH le 1\n"
"dqO qu 1\n"
"Tlq qu 1\n"
"Yjj ij 1\n"
"Iyh th 1\n"
"uoY qu 1\n"
"mhH th 1\n"
"lMj le 1\n"
"fzF sz 1\n"
"frR er 1\n"
"yNl le 1\n"
"aPv an 1\n"
"ywG wa 1\n"
"Cmw me 1\n"
"svK va 1\n"
"srO er 1\n"
"Uhz th 1\n"
"vPn an 1\n"
"zTq qu 1\n"
"kzH sz 1\n"
"Iox on 1\n"
"fQa an 1\n"
"wZr er 1\n"
"nqU an 1\n"
"wPb wa 1\n"
"Tzg ng 1\n"
"pnR an 1\n"
"vfJ va 1\n"
"vyX va 1\n"
"fLz sz 1\n"
"zjP sz 1\n"
"pmR me 1\n"
"ePq qu 1\n"
"jyT ij 1\n"
"mjP ij 1\n"
"fsH sz 1\n"
"vwB va 1\n"
"Ynr an 1\n"
"Tqh th 1\n"
"Lvv va 1\n"
"tCf th 1\n"
"wpB wa 1\n"
"wXh th 1\n"
"mhX th 1\n"
"kYd de 1\n"
"Dpg ng 1\n"
"ygR ng 1\n"
"Rfp pr 1\n"
"Jyq qu 1\n"
"yxq qu 1\n"
"pPc ch 1\n"
"aOj an 1\n"
"Zww wa 1\n"
"fFx fo 1\n"
"bDh th 1\n"
"qKx qu 1\n"
"wHx wa 1\n"
"hrX th 1\n"
"rFh th 1\n"
"lLx le 1\n"
"aYj an 1\n"
"kCs sz 1\n"
"lWt th 1\n"
"pdY de 1\n"
"swI sz 1\n"
"bLw wa 1\n"
"Mzx sz 1\n"
"cKk ch 1\n"
"hMz th 1\n"
"Jcu qu 1\n"
"wjB ij 1\n"
"Mqe qu 1\n"
"rxW er 1\n"
"gZv ng 1\n"
"Rfn an 1\n"
"pwD wa 1\n"
"lhX th 1\n"
"fVg ng 1\n"
"vfW va 1\n"
"lxP le 1\n"
"Yyj ij 1\n"
"hPg th 1\n"
"Uxq qu 1\n"
"bdO de 1\n"
"bRz sz 1\n"
"dXq qu 1\n"
"Rjq qu 1\n"
"fgV ng 1\n"
"xAf fo 1\n"
"wXn an 1\n"
"Kvv va 1\n"
"svL va 1\n"
"fWv va 1\n"
"drQ er 1\n"
"Lpv va 1\n"
"qKp qu 1\n"
"eCv er 1\n"
"xwH wa 1\n"
"cvC ch 1\n"
"kUf ka 1\n"
"oPx on 1\n"
"tjJ th 1\n"
"bBk ka 1\n"
"vpI va 1\n"
"gzY ng 1\n"
"oZs on 1\n"
"pKc ch 1\n"
"xKs sz 1\n"
"qcH qu 1\n"
"Vfm me 1\n"
"svM va 1\n"
"Vjx ij 1\n"
"lVw le 1\n"
"wWf wa 1\n"
"Xpx pr 1\n"
"lcA ch 1\n"
"tLc th 1\n"
"lDg ng 1\n"
"Xjh th 1\n"
"Xdh th 1\n"
"rKm er 1\n"
"fnW an 1\n"
"Tcb ch 1\n"
"qgX qu 1\n"
"qZo qu 1\n"
"eJv er 1\n"
"Yxy ny 1\n"
"kfM ka 1\n"
"qKe qu 1\n"
"vMf va 1\n"
"dgY de 1\n"
"gGd ng 1\n"
"Vcj ch 1\n"
"Sfw wa 1\n"
"xDk ka 1\n"
"fTc ch 1\n"
"qRw qu 1\n"
"tOa th 1\n"
"guQ qu 1\n"
"mgJ ng 1\n"
"bRd de 1\n"
"kYq qu 1\n"
"xwD wa 1\n"
"vXs va 1\n"
"zlC le 1\n"
"kmH ka 1\n"
"jhZ th 1\n"
"Wxo on 1\n"
"vtX th 1\n"
"iWm in 1\n"
"qVx qu 1\n"
"Hjv va 1\n"
"Pxs sz 1\n"
"bYi in 1\n"
"wgG ng 1\n"
"Jvs va 1\n"
"gHh th 1\n"
"Kzy sz 1\n"
"xjI ij 1\n"
"uVb qu 1\n"
"Pzq qu 1\n"
"hxC th 1\n"
"wPy wa 1\n"
"bXh th 1\n"
"jzY sz 1\n"
"fqJ qu 1\n"
"qxX qu 1\n"
"vfB va 1\n"
"pPm me 1\n"
"bpC pr 1\n"
"hFv th 1\n"
"Cql qu 1\n"
"dwI de 1\n"
"Tcq ch 1\n"
"Zjx ij 1\n"
"wOz sz 1\n"
"Jfj ij 1\n"
"iZr in 1\n"
"Vxf fo 1\n"
"Lpx pr 1\n"
"fHt th 1\n"
"hFy th 1\n"
"lcD ch 1\n"
"vMc ch 1\n"
"xyU ny 1\n"
"mGq qu 1\n"
"wJv va 1\n"
"zKs sz 1\n"
"lMm le 1\n"
"mqU qu 1\n"
"vHg ng 1\n"
"lGc ch 1\n"
"eIj te 1\n"
"Vdh th 1\n"
"rCk er 1\n"
"wQh th 1\n"
"Ywf wa 1\n"
"zUf sz 1\n"
"qZs qu 1\n"
"vNt th 1\n"
"Dxj ij 1\n"
"cYr ch 1\n"
"dKt th 1\n"
"vDp va 1\n"
"qnF an 1\n"
"Lsj sz 1\n"
"xHv va 1\n"
"jCt th 1\n"
"bnX an 1\n"
"fBx fo 1\n"
"jVt th 1\n"
"qOy qu 1\n"
"uqD qu 1\n"
"Rfw wa 1\n"
"cjS ch 1\n"
"ufX qu 1\n"
"fvI va 1\n"
"Owx wa 1\n"
"gXw ng 1\n"
"oCv va 1\n"
"Mrx er 1\n"
"cIb ch 1\n"
"fJj ij 1\n"
"kqM qu 1\n"
"zqL qu 1\n"
"rPz er 1\n"
"iwW in 1\n"
"cMp ch 1\n"
"lVt th 1\n"
"vTb va 1\n"
"Iwf wa 1\n"
"xlZ le 1\n"
"vjQ va 1\n"
"iPb in 1\n"
"Whk th 1\n"
"Wvh th 1\n"
"mzD sz 1\n"
"Hqk qu 1\n"
"jqB qu 1\n"
"qhM th 1\n"
"prR er 1\n"
"nlV an 1\n"
"qYk qu 1\n"
"zVp sz 1\n"
"vpO va 1\n"
"Rvr er 1\n"
"scY ch 1\n"
"qdA qu 1\n"
"vLk va 1\n"
"svI va 1\n"
"mdE de 1\n"
"hBx th 1\n"
"Zrv er 1\n"
"jWt th 1\n"
"fTx fo 1\n"
"Ypc ch 1\n"
"mMk ka 1\n"
"fdq qu 1\n"
"hcK th 1\n"
"xCy ny 1\n"
"fVr er 1\n"
"aPx an 1\n"
"fpU pr 1\n"
"Vkb ka 1\n"
"tbM th 1\n"
"zQt th 1\n"
"gxV ng 1\n"
"Sfg ng 1\n"
"pYl le 1\n"
"gWt th 1\n"
"xEb be 1\n"
"mXy me 1\n"
"lnQ an 1\n"
"qmL qu 1\n"
"Vky ka 1\n"
"wwX wa 1\n"
"Uwx wa 1\n"
"cfB ch 1\n"
"Gxp pr 1\n"
"fpL pr 1\n"
"jTx ij 1\n"
"cZv ch 1\n"
"zlK le 1\n"
"hBc th 1\n"
"Wqi qu 1\n"
"lGs le 1\n"
"Dqz qu 1\n"
"Jgw ng 1\n"
"gCx ng 1\n"
"cNj ch 1\n"
"cqJ ch 1\n"
"blD le 1\n"
"qXr qu 1\n"
"kXr er 1\n"
"khK th 1\n"
"xZh th 1\n"
"jSs sz 1\n"
"yjx ij 1\n"
"Hwf wa 1\n"
"fXs sz 1\n"
"qgz qu 1\n"
"Xdw de 1\n"
"hcN th 1\n"
"jJd de 1\n"
"cmQ ch 1\n"
"mvV va 1\n"
"Nqe qu 1\n"
"zxS sz 1\n"
"kGt th 1\n"
"tFg th 1\n"
"fzM sz 1\n"
"Xrr er 1\n"
"dcJ ch 1\n"
"dQa an 1\n"
"qNy qu 1\n"
"hxT th 1\n"
"twB th 1\n"
"Bqj qu 1\n"
"prK er 1\n"
"zdC de 1\n"
"yAo on 1\n"
"dLt st 1\n"
"pgF ng 1\n"
"vgW ng 1\n"
"vpN va 1\n"
"Ivx va 1\n"
"vYl le 1\n"
"xRg ng 1\n"
"jPu qu 1\n"
"Oqr qu 1\n"
"vjg ng 1\n"
"dpH de 1\n"
"yDp pr 1\n"
"xfJ fo 1\n"
"fqV qu 1\n"
"eBf er 1\n"
"Zkw ka 1\n"
"qHp qu 1\n"
"Aqz qu 1\n"
"bNw wa 1\n"
"fjX ij 1\n"
"fqS qu 1\n"
"ljK le 1\n"
"Gkf ka 1\n"
"bSf be 1\n"
"Mxg ng 1\n"
"Dqm qu 1\n"
"hKp th 1\n"
"wFq qu 1\n"
"wmJ me 1\n"
"vzT va 1\n"
"rhJ th 1\n"
"nHf an 1\n"
"jJo on 1\n"
"qWy qu 1\n"
"Wvk va 1\n"
"gkB ng 1\n"
"mEw me 1\n"
"Ugx ng 1\n"
"Qmy me 1\n"
"Ljq qu 1\n"
"bGp pr 1\n"
"lHg ng 1\n"
"cGg ch 1\n"
"gFk ng 1\n"
"xnV an 1\n"
"eFy er 1\n"
"Nfm me 1\n"
"hSf th 1\n"
"gXj ng 1\n"
"xHf fo 1\n"
"uqj qu 1\n"
"wXa an 1\n"
"vcT ch 1\n"
"uJw qu 1\n"
"pWx pr 1\n"
"qpQ qu 1\n"
"hqE th 1\n"
"Yfn an 1\n"
"jrI er 1\n"
"cgK ch 1\n"
"yyP ny 1\n"
"Zmg ng 1\n"
"Lkc ch 1\n"
"eUq qu 1\n"
"jrY er 1\n"
"kFs sz 1\n"
"sUq qu 1\n"
"jlZ le 1\n"
"cnV ch 1\n"
"aPj an 1\n"
"mjE ij 1\n"
"pZl le 1\n"
"uFs qu 1\n"
"Knf an 1\n"
"Fpc ch 1\n"
"hfR th 1\n"
"qnC an 1\n"
"Dlq qu 1\n"
"frM er 1\n"
"sfB sz 1\n"
"Gxk ka 1\n"
"Fkj ij 1\n"
"vGk va 1\n"
"gRm ng 1\n"
"rWf er 1\n"
"rYv er 1\n"
"qEd qu 1\n"
"qHr qu 1\n"
"Smv va 1\n"
"lFp le 1\n"
"kDs sz 1\n"
"dSd de 1\n"
"rLw er 1\n"
"cnZ an 1\n"
"Wjp ij 1\n"
"pTq qu 1\n"
"Kcx ch 1\n"
"vKs va 1\n"
"bcK ch 1\n"
"vwy va 1\n"
"Ujx ij 1\n"
"Qvr er 1\n"
"dcV ch 1\n"
"xVf fo 1\n"
"uIk qu 1\n"
"jlN le 1\n"
"vwL va 1\n"
"fWp pr 1\n"
"Pxr er 1\n"
"rRb er 1\n"
"bfD be 1\n"
"yCx ny 1\n"
"nJs an 1\n"
"dCm de 1\n"
"cbG ch 1\n"
"gCf ng 1\n"
"tmV th 1\n"
"qeC qu 1\n"
"knS an 1\n"
"gwY ng 1\n"
"Wjl le 1\n"
"mIw me 1\n"
"qjW qu 1\n"
"gwv ng 1\n"
"qJw wa 1\n"
"cnA an 1\n"
"bBm me 1\n"
"gFw ng 1\n"
"wDn an 1\n"
"qgL qu 1\n"
"lUa an 1\n"
"hDn th 1\n"
"kHx ka 1\n"
"wXm me 1\n"
"qyY qu 1\n"
"pkD ka 1\n"
"sLz st 1\n"
"zxF sz 1\n"
"vMx va 1\n"
"plR le 1\n"
"pwZ pr 1\n"
"pYd de 1\n"
"zfL sz 1\n"
"ztK th 1\n"
"mTm me 1\n"
"dCp de 1\n"
"bwx wa 1\n"
"xCs sz 1\n"
"tfF th 1\n"
"Lnq an 1\n"
"dYi in 1\n"
"pWq qu 1\n"
"oIx on 1\n"
"ywE wa 1\n"
"wNk ka 1\n"
"jwO ij 1\n"
"xZz sz 1\n"
"wGm me 1\n"
"cVw ch 1\n"
"bjK ij 1\n"
"Gzg ng 1\n"
"kwz sz 1\n"
"pBn an 1\n"
"cTx ch 1\n"
"rHq qu 1\n"
"Wsg ng 1\n"
"xEh th 1\n"
"yrK er 1\n"
"mMb me 1\n"
"pHw pr 1\n"
"cjN ch 1\n"
"nXn an 1\n"
"bwO wa 1\n"
"flB le 1\n"
"Qqj qu 1\n"
"mKv va 1\n"
"fFn an 1\n"
"wfG wa 1\n"
"wfB wa 1\n"
"Jqk qu 1\n"
"bwK wa 1\n"
"hhI th 1\n"
"lUe er 1\n"
"wFd de 1\n"
"vkT va 1\n"
"xLg ng 1\n"
"fhB th 1\n"
"wmV me 1\n"
"tmF th 1\n"
"Rtc th 1\n"
"dyY de 1\n"
"jyw ij 1\n"
"kRf ka 1\n"
"fXz sz 1\n"
"Znz an 1\n"
"wqX qu 1\n"
"uMx qu 1\n"
"gwV ng 1\n"
"Pbh th 1\n"
"dcM ch 1\n"
"nPz an 1\n"
"cwU ch 1\n"
"vJt th 1\n"
"gyQ ng 1\n"
"fXi in 1\n"
"bsZ sz 1\n"
"Bqi qu 1\n"
"vGn an 1\n"
"knN an 1\n"
"wYq qu 1\n"
"tTb th 1\n"
"bmP me 1\n"
"jpZ ij 1\n"
"Mqw qu 1\n"
"vjM va 1\n"
"qVh th 1\n"
"juY qu 1\n"
"rBk er 1\n"
"juI qu 1\n"
"zEq qu 1\n"
"zWg ng 1\n"
"fzH sz 1\n"
"tLx th 1\n"
"Ncf ch 1\n"
"kfN ka 1\n"
"uUo qu 1\n"
"fCs sz 1\n"
"tCv th 1\n"
"sUy sz 1\n"
"pBf pr 1\n"
"jBz sz 1\n"
"vDc ch 1\n"
"qmx qu 1\n"
"qtK th 1\n"
"qcS ch 1\n"
"vPt th 1\n"
"gQm ng 1\n"
"hzR th 1\n"
"dcL ch 1\n"
"xrI er 1\n"
"dvN va 1\n"
"Cwv va 1\n"
"xhQ th 1\n"
"Gzu qu 1\n"
"pdO de 1\n"
"Bqr qu 1\n"
"vLn an 1\n"
"lxf le 1\n"
"vYk va 1\n"
"wSq qu 1\n"
"pkS ka 1\n"
"zKg ng 1\n"
"tPm th 1\n"
"Pmj ij 1\n"
"lWu qu 1\n"
"Xuu qu 1\n"
"jcX ch 1\n"
"xzQ sz 1\n"
"Gzw sz 1\n"
"ePm er 1\n"
"fwW wa 1\n"
"qwA qu 1\n"
"vQt th 1\n"
"bxP be 1\n"
"dmD de 1\n"
"awQ an 1\n"
"fVf fo 1\n"
"bwY wa 1\n"
"Zxt th 1\n"
"Xhk th 1\n"
"gYk ng 1\n"
"zCf sz 1\n"
"yfQ ny 1\n"
"zGw sz 1\n"
"gvE ng 1\n"
"gCv ng 1\n"
"oPf on 1\n"
"zXi in 1\n"
"hvI th 1\n"
"hzS th 1\n"
"mfX me 1\n"
"dPd de 1\n"
"Lrf er 1\n"
"lrG er 1\n"
"mYf me 1\n"
"hNj th 1\n"
"qAj qu 1\n"
"sxQ st 1\n"
"kTl le 1\n"
"qOf qu 1\n"
"Jdx de 1\n"
"swK sz 1\n"
"jQb ij 1\n"
"Dqp qu 1\n"
"cWv ch 1\n"
"dxE de 1\n"
"sXj sz 1\n"
"nvB an 1\n"
"wXf wa 1\n"
"Cqi qu 1\n"
"bzW sz 1\n"
"rRf er 1\n"
"mZj ij 1\n"
"bnF an 1\n"
"qaG an 1\n"
"Bqs qu 1\n"
"lMn an 1\n"
"wHp pr 1\n"
"Ljc ch 1\n"
"Mwf wa 1\n"
"pzK sz 1\n"
"mPb me 1\n"
"qjE qu 1\n"
"wRr er 1\n"
"xZf fo 1\n"
"nqG an 1\n"
"vVb va 1\n"
"pjC ij 1\n"
"uHl qu 1\n"
"jDn an 1\n"
"pqX qu 1\n"
"pqk qu 1\n"
"xgU ng 1\n"
"wJx wa 1\n"
"znK an 1\n"
"rhB th 1\n"
"vDq qu 1\n"
"sJc ch 1\n"
"Xkh th 1\n"
"lnJ an 1\n"
"bRq qu 1\n"
"fzA sz 1\n"
"bQe er 1\n"
"Txw wa 1\n"
"bkG ka 1\n"
"ywZ wa 1\n"
"zWc ch 1\n"
"lhL th 1\n"
"gmF ng 1\n"
"sfQ sz 1\n"
"zmG sz 1\n"
"Ogz ng 1\n"
"xuA qu 1\n"
"qAq qu 1\n"
"zDw sz 1\n"
"lVu qu 1\n"
"xRw wa 1\n"
"xmM me 1\n"
"pxB pr 1\n"
"ztT th 1\n"
"kzJ sz 1\n"
"nFz an 1\n"
"uVz qu 1\n"
"pnQ an 1\n"
"pGt th 1\n"
"Xdn an 1\n"
"fVz sz 1\n"
"Mhg th 1\n"
"Xqo qu 1\n"
"sHq qu 1\n"
"jwC ij 1\n"
"vkG va 1\n"
"Xkx ka 1\n"
"tRg th 1\n"
"nvV an 1\n"
"qwG qu 1\n"
"Vhh th 1\n"
"zwO sz 1\n"
"qQb qu 1\n"
"crR ch 1\n"
"Mrq qu 1\n"
"oQe er 1\n"
"mBt th 1\n"
"vUy va 1\n"
"twW th 1\n"
"Qgn an 1\n"
"Nxu qu 1\n"
"qhF th 1\n"
"xpX pr 1\n"
"fvD va 1\n"
"Cvy va 1\n"
"oHj on 1\n"
"Qqo qu 1\n"
"vYd de 1\n"
"xhV th 1\n"
"fZf fo 1\n"
"yKm me 1\n"
"xYq qu 1\n"
"fcU ch 1\n"
"qEp qu 1\n"
"jXd de 1\n"
"mlQ le 1\n"
"Ggz ng 1\n"
"cLp ch 1\n"
"yxU ny 1\n"
"gvJ ng 1\n"
"wqD qu 1\n"
"vsN sz 1\n"
"Ijf ij 1\n"
"jbJ ij 1\n"
"bMx be 1\n"
"kXs sz 1\n"
"grT ng 1\n"
"wOd de 1\n"
"pGw pr 1\n"
"Gkd de 1\n"
"qCj qu 1\n"
"hqY th 1\n"
"rDp er 1\n"
"nQt th 1\n"
"kdV de 1\n"
"bgS ng 1\n"
"Tqo qu 1\n"
"fEj ij 1\n"
"hZs th 1\n"
"jYn an 1\n"
"bPx be 1\n"
"hgY th 1\n"
"Pvy va 1\n"
"fxK fo 1\n"
"Hww wa 1\n"
"xRk ka 1\n"
"dmP de 1\n"
"mcY ch 1\n"
"bxR be 1\n"
"Lsl le 1\n"
"hRl th 1\n"
"iwQ in 1\n"
"Wqx qu 1\n"
"kfV ka 1\n"
"qwN qu 1\n"
"Qpv va 1\n"
"mrO er 1\n"
"iFc ti 1\n"
"wzD sz 1\n"
"qbF qu 1\n"
"xfS fo 1\n"
"Pqh th 1\n"
"xYb be 1\n"
"lDh th 1\n"
"vtG th 1\n"
"Xzu qu 1\n"
"xjK ij 1\n"
"jDx ij 1\n"
"nCj an 1\n"
"mCk ka 1\n"
"qxP qu 1\n"
"oMv on 1\n"
"cgY ch 1\n"
"Wqt th 1\n"
"kkQ ka 1\n"
"tqO th 1\n"
"jnC an 1\n"
"fGq qu 1\n"
"Bfv va 1\n"
"vYi in 1\n"
"pcL ch 1\n"
"Fgp ng 1\n"
"jtR th 1\n"
"vhF th 1\n"
"wUi in 1\n"
"nNj an 1\n"
"jTw ij 1\n"
"qsM qu 1\n"
"aJg an 1\n"
"jQe er 1\n"
"Gnj an 1\n"
"fmM me 1\n"
"zqM qu 1\n"
"gjZ ng 1\n"
"nxH an 1\n"
"cdO ch 1\n"
"aAx an 1\n"
"tUv th 1\n"
"hXk th 1\n"
"qBx qu 1\n"
"tgK th 1\n"
"fZy ny 1\n"
"Jkx ka 1\n"
"pvD va 1\n"
"bmT me 1\n"
"oYx on 1\n"
"hwV th 1\n"
"mjB ij 1\n"
"bYn an 1\n"
"iHx in 1\n"
"lYh th 1\n"
"qCi in 1\n"
"fhR th 1\n"
"nDf an 1\n"
"hCd th 1\n"
"lxB le 1\n"
"eXj er 1\n"
"fvW va 1\n"
"ccW ch 1\n"
"dTc ch 1\n"
"sqA qu 1\n"
"fNt th 1\n"
"zkM sz 1\n"
"lRv le 1\n"
"qnI an 1\n"
"xwC wa 1\n"
"zqY qu 1\n"
"yQb be 1\n"
"xrC er 1\n"
"xFm me 1\n"
"oeQ er 1\n"
"mLl le 1\n"
"jwT ij 1\n"
"fwD wa 1\n"
"vpE va 1\n"
"flY le 1\n"
"sRg ng 1\n"
"vSd de 1\n"
"wuR qu 1\n"
"wrI er 1\n"
"Ysn st 1\n"
"Vhj th 1\n"
"Cqh th 1\n"
"Ygb ng 1\n"
"hPq th 1\n"
"mkB ka 1\n"
"tRq th 1\n"
"ajQ an 1\n"
"hcR th 1\n"
"vDw va 1\n"
"pQn an 1\n"
"xeU er 1\n"
"vcM ch 1\n"
"zVc ch 1\n"
"bRh th 1\n"
"uFx qu 1\n"
"fbW be 1\n"
"uUv qu 1\n"
"Nhv th 1\n"
"Ykx ka 1\n"
"Wtp th 1\n"
"Mzj sz 1\n"
"npT in 1\n"
"Xqk qu 1\n"
"xwN wa 1\n"
"hXw th 1\n"
"zLb sz 1\n"
"Gxy ny 1\n"
"dDq qu 1\n"
"Bfy ny 1\n"
"fkx ka 1\n"
"jOq qu 1\n"
"Ddk de 1\n"
"Njp ij 1\n"
"xjJ ij 1\n"
"qhS th 1\n"
"Qwm me 1\n"
"yWj ij 1\n"
"nFv an 1\n"
"pLb pr 1\n"
"qbB qu 1\n"
"smX sz 1\n"
"tnZ th 1\n"
"zQh th 1\n"
"Fzb sz 1\n"
"cNb ch 1\n"
"hpV th 1\n"
"Bxz sz 1\n"
"xgG ng 1\n"
"Rlj le 1\n"
"iHq in 1\n"
"swN sz 1\n"
"Njv va 1\n"
"wPk ka 1\n"
"oRv on 1\n"
"pJs sz 1\n"
"kZw ka 1\n"
"vVs st 1\n"
"Vbw wa 1\n"
"Ffh th 1\n"
"mzQ sz 1\n"
"Gvl le 1\n"
"Pgq qu 1\n"
"lPp le 1\n"
"vCv va 1\n"
"kNf ka 1\n"
"bmD me 1\n"
"mWt th 1\n"
"slF le 1\n"
"qiX in 1\n"
"yRt th 1\n"
"lqx qu 1\n"
"qlj qu 1\n"
"sfZ sz 1\n"
"Wfy ny 1\n"
"vrO er 1\n"
"gxT ng 1\n"
"lwE le 1\n"
"qdJ qu 1\n"
"Ypk ka 1\n"
"Qpf pr 1\n"
"Znw an 1\n"
"bfJ be 1\n"
"qQy qu 1\n"
"qAy qu 1\n"
"aqW an 1\n"
"qqI qu 1\n"
"Lwg ng 1\n"
"Nnw an 1\n"
"cLv ch 1\n"
"Wtx th 1\n"
"qcq ch 1\n"
"sjR sz 1\n"
"lWn an 1\n"
"Zmx me 1\n"
"qZg qu 1\n"
"tYz th 1\n"
"gVx ng 1\n"
"mXt th 1\n"
"nwJ an 1\n"
"jwZ ij 1\n"
"lwL le 1\n"
"eGx er 1\n"
"Sqk qu 1\n"
"gBg ng 1\n"
"zsS sz 1\n"
"knQ an 1\n"
"Nnf an 1\n"
"qmT qu 1\n"
"Sqp qu 1\n"
"ffQ fo 1\n"
"Vcv ch 1\n"
"fmD me 1\n"
"zYg ng 1\n"
"bAx be 1\n"
"nbW an 1\n"
"gJm ng 1\n"
"Jwn an 1\n"
"mxJ me 1\n"
"xbC be 1\n"
"Rbq qu 1\n"
"xZc ch 1\n"
"bJy be 1\n"
"Xyk ka 1\n"
"zkV sz 1\n"
"uoF qu 1\n"
"bcU ch 1\n"
"cZq ch 1\n"
"rPm er 1\n"
"rGn an 1\n"
"lcL ch 1\n"
"rVt th 1\n"
"Cgw ng 1\n"
"Ctq th 1\n"
"eGv er 1\n"
"Rzs st 1\n"
"Qhz th 1\n"
"sLv va 1\n"
"Vqm qu 1\n"
"ydJ de 1\n"
"xVr er 1\n"
"tLk th 1\n"
"qfy qu 1\n"
"wxV wa 1\n"
"yRq qu 1\n"
"Vxq qu 1\n"
"qYz qu 1\n"
"zhM th 1\n"
"mLn an 1\n"
"Zvt th 1\n"
"Fvm va 1\n"
"hcM th 1\n"
"Mwp wa 1\n"
"cTg ch 1\n"
"lXr er 1\n"
"fQe er 1\n"
"Jbw wa 1\n"
"yfG ny 1\n"
"phK th 1\n"
"gjH ng 1\n"
"Wdg de 1\n"
"pPn an 1\n"
"Bwg ng 1\n"
"znB an 1\n"
"fwJ wa 1\n"
"utQ th 1\n"
"cjC ch 1\n"
"fVd de 1\n"
"cTm ch 1\n"
"wMv va 1\n"
"Kgk ng 1\n"
"nRd an 1\n"
"mMt th 1\n"
"xjQ ij 1\n"
"qYt th 1\n"
"sYj st 1\n"
"jNc ch 1\n"
"qXt th 1\n"
"wzB sz 1\n"
"Sjq qu 1\n"
"qtF th 1\n"
"wYi in 1\n"
"glT ng 1\n"
"Uug ng 1\n"
"uOp qu 1\n"
"iBx in 1\n"
"Rqt th 1\n"
"zWj sz 1\n"
"Hcx ch 1\n"
"jNd de 1\n"
"zQr er 1\n"
"iHd in 1\n"
"Wpx pr 1\n"
"nfY an 1\n"
"Rkz sz 1\n"
"Kqg qu 1\n"
"Gfv va 1\n"
"krC er 1\n"
"Whc th 1\n"
"ljM le 1\n"
"yxG ny 1\n"
"fpW pr 1\n"
"bcF ch 1\n"
"krx er 1\n"
"uDt th 1\n"
"Fzo on 1\n"
"wPn an 1\n"
"Lfj ij 1\n"
"Bkp ka 1\n"
"Xkq qu 1\n"
"jxH ij 1\n"
"vIj va 1\n"
"gTc ch 1\n"
"hEj th 1\n"
"fqB qu 1\n"
"jlD le 1\n"
"tFf th 1\n"
"Nfw wa 1\n"
"Fqe qu 1\n"
"Tzp sz 1\n"
"sJr er 1\n"
"qIt th 1\n"
"dFb de 1\n"
"qzE qu 1\n"
"mVv va 1\n"
"Vqa an 1\n"
"bqM qu 1\n"
"mdJ de 1\n"
"dIp de 1\n"
"Znx an 1\n"
"jkK ij 1\n"
"rfQ er 1\n"
"xkI ku 1\n"
"fIo ro 1\n"
"lqV qu 1\n"
"Qpd de 1\n"
"pAx pr 1\n"
"rrQ er 1\n"
"bIu qu 1\n"
"xDw wa 1\n"
"oHx on 1\n"
"wJw wa 1\n"
"Cqv qu 1\n"
"yvB va 1\n"
"yqU qu 1\n"
"rLx er 1\n"
"Fzx sz 1\n"
"dZf de 1\n"
"Nqh th 1\n"
"Rnz an 1\n"
"hTc th 1\n"
"bVb be 1\n"
"Fdm de 1\n"
"vfv va 1\n"
"hwS th 1\n"
"zPt th 1\n"
"Gxv va 1\n"
"Fvt th 1\n"
"mZr er 1\n"
"zVr er 1\n"
"mBc ch 1\n"
"fXq qu 1\n"
"Plw le 1\n"
"Nlx le 1\n"
"jCd de 1\n"
"Kwv va 1\n"
"Jqa an 1\n"
"zGs st 1\n"
"fuV qu 1\n"
"pzL sz 1\n"
"iFx in 1\n"
"fTm me 1\n"
"yWd de 1\n"
"cHv ch 1\n"
"fFk ka 1\n"
"mqd qu 1\n"
"aQk an 1\n"
"uDf qu 1\n"
"Vbf be 1\n"
"pgJ ng 1\n"
"fkN ka 1\n"
"pBm me 1\n"
"Bdv de 1\n"
"jmW ij 1\n"
"Jvv va 1\n"
"Xpk ka 1\n"
"qQc ch 1\n"
"kdG de 1\n"
"qkP qu 1\n"
"cSd ch 1\n"
"Fdc ch 1\n"
"qgK qu 1\n"
"qdH qu 1\n"
"uNv qu 1\n"
"eVt th 1\n"
"dfA de 1\n"
"Hzy sz 1\n"
"lWc ch 1\n"
"vxH va 1\n"
"hxW th 1\n"
"Khp th 1\n"
"xQb be 1\n"
"pwT pr 1\n"
"Lwf wa 1\n"
"zDq qu 1\n"
"kxK ka 1\n"
"mtY th 1\n"
"bhT th 1\n"
"ywR wa 1\n"
"jIa an 1\n"
"Wze er 1\n"
"hqK th 1\n"
"flZ le 1\n"
"qMi in 1\n"
"wpR wa 1\n"
"qHh th 1\n"
"aOw an 1\n"
"dkU de 1\n"
"vRr er 1\n"
"vjX va 1\n"
"cuQ ch 1\n"
"qmJ qu 1\n"
"uuJ ou 1\n"
"yWx ny 1\n"
"hUf th 1\n"
"vzP va 1\n"
"rSx er 1\n"
"qgy qu 1\n"
"Rzf sz 1\n"
"zjB sz 1\n"
"Sjx ij 1\n"
"xfA fo 1\n"
"fHj ij 1\n"
"qkB qu 1\n"
"cdF ch 1\n"
"fWj ij 1\n"
"jbA ij 1\n"
"Bmb me 1\n"
"yjg ng 1\n"
"rxZ er 1\n"
"Vmr er 1\n"
"iIq in 1\n"
"Wgl ng 1\n"
"mRp me 1\n"
"wvS va 1\n"
"Uvy va 1\n"
"ypQ pr 1\n"
"vFw vo 1\n"
"fqE qu 1\n"
"swJ st 1\n"
"Jrx er 1\n"
"cxE ch 1\n"
"lZk le 1\n"
"fVn an 1\n"
"bhZ th 1\n"
"jhR th 1\n"
"vSq qu 1\n"
"yQz sz 1\n"
"fHv va 1\n"
"vuN qu 1\n"
"jpG ij 1\n"
"Pkz sz 1\n"
"gQb ng 1\n"
"pFs st 1\n"
"Gjq qu 1\n"
"hsK th 1\n"
"twx th 1\n"
"yyQ ny 1\n"
"dqF qu 1\n"
"bHh th 1\n"
"qMq qu 1\n"
"qKv qu 1\n"
"zLg ng 1\n"
"jmO ij 1\n"
"wBk ka 1\n"
"pjQ ij 1\n"
"xZv va 1\n"
"qIu un 1\n"
"ycY ch 1\n"
"mDf me 1\n"
"yJs st 1\n"
"Isx st 1\n"
"Qqr qu 1\n"
"Fkw ka 1\n"
"Cpj ij 1\n"
"Yvq qu 1\n"
"zjG sz 1\n"
"gGc ch 1\n"
"Xdm de 1\n"
"hBv th 1\n"
"Wxj ij 1\n"
"Ywb ow 1\n"
"Vtq th 1\n"
"tjY th 1\n"
"jDj ij 1\n"
"uGd qu 1\n"
"wvF va 1\n"
"uqg qu 1\n"
"Rwp pr 1\n"
"Bgb ng 1\n"
"mnU an 1\n"
"dpI de 1\n"
"wKd de 1\n"
"yXz sz 1\n"
"kLd de 1\n"
"gYx ng 1\n"
"qxk qu 1\n"
"Hhy th 1\n"
"fpJ pr 1\n"
"cVc ch 1\n"
"kVv va 1\n"
"Jzs st 1\n"
"nDw an 1\n"
"tjF th 1\n"
"bZj ij 1\n"
"mqL qu 1\n"
"hFt th 1\n"
"nNw an 1\n"
"wFv va 1\n"
"gHc ch 1\n"
"qRx qu 1\n"
"Jxh th 1\n"
"Vpv va 1\n"
"nMk an 1\n"
"tjN th 1\n"
"fhQ th 1\n"
"bpD pr 1\n"
"Dfg ng 1\n"
"jyO ij 1\n"
"jhV th 1\n"
"kVk ka 1\n"
"nKc an 1\n"
"jkJ ij 1\n"
"cwS ch 1\n"
"oDf on 1\n"
"mkY ka 1\n"
"gdV ng 1\n"
"Xhb th 1\n"
"jUq qu 1\n"
"aJf an 1\n"
"Qxg ng 1\n"
"xzS sz 1\n"
"vUw va 1\n"
"hTj th 1\n"
"oVt th 1\n"
"zdq qu 1\n"
"fHs st 1\n"
"xKk ka 1\n"
"bFc ch 1\n"
"gWq qu 1\n"
"Yqa an 1\n"
"dmH de 1\n"
"Ttq th 1\n"
"iQc ch 1\n"
"jFh ij 1\n"
"fcY ch 1\n"
"fsR st 1\n"
"iWg in 1\n"
"Xyj ij 1\n"
"Xjs st 1\n"
"xpb pr 1\n"
"lzY le 1\n"
"pzg ng 1\n"
"dVw de 1\n"
"Ijc ch 1\n"
"fvq qu 1\n"
"Vnb an 1\n"
"zdH de 1\n"
"cDd ch 1\n"
"wqI qu 1\n"
"yfU ny 1\n"
"qoH qu 1\n"
"xkw ka 1\n"
"Kck ch 1\n"
"mUq qu 1\n"
"zWm sz 1\n"
"Bfj ij 1\n"
"rQj er 1\n"
"qeW qu 1\n"
"qpC qu 1\n"
"oqM qu 1\n"
"pzO sz 1\n"
"cjQ ch 1\n"
"zTx sz 1\n"
"gRw ng 1\n"
"kdQ de 1\n"
"wbQ wa 1\n"
"Qpj ij 1\n"
"zIc ch 1\n"
"yxN ny 1\n"
"nCk an 1\n"
"Jqz qu 1\n"
"dEq qu 1\n"
"gdE ng 1\n"
"wCg ng 1\n"
"pQt th 1\n"
"vKe er 1\n"
"Tjm ij 1\n"
"Zcy ch 1\n"
"kmR ka 1\n"
"cTp ch 1\n"
"bqE qu 1\n"
"vvZ va 1\n"
"cLw ch 1\n"
"oIw on 1\n"
"xjG ij 1\n"
"vtU th 1\n"
"hcH th 1\n"
"xgT ng 1\n"
"vqR qu 1\n"
"wuM qu 1\n"
"xsY st 1\n"
"jCu qu 1\n"
"Fbn an 1\n"
"cqH ch 1\n"
"Xjz ij 1\n"
"fgR ng 1\n"
"yiX in 1\n"
"qnO an 1\n"
"wmN me 1\n"
"wgH ng 1\n"
"tbZ th 1\n"
"Xks st 1\n"
"pzC po 1\n"
"lfX le 1\n"
"qBu un 1\n"
"mLw me 1\n"
"pmY me 1\n"
"xqE qu 1\n"
"rjY er 1\n"
"vrH er 1\n"
"Iuf qu 1\n"
"yfD ny 1\n"
"clG ch 1\n"
"cdZ ch 1\n"
"eTd er 1\n"
"lXv le 1\n"
"kpV ka 1\n"
"sZq qu 1\n"
"Wxc ch 1\n"
"vmJ va 1\n"
"hkE th 1\n"
"pUw pr 1\n"
"Cqd qu 1\n"
"wCn an 1\n"
"pxQ pr 1\n"
"Ywp pr 1\n"
"xwb wa 1\n"
"Wjm ij 1\n"
"zqQ qu 1\n"
"gTp ng 1\n"
"uZv qu 1\n"
"mdH de 1\n"
"juQ qu 1\n"
"gVm ng 1\n"
"zjY ij 1\n"
"fhN th 1\n"
"wfD wa 1\n"
"Zjc ch 1\n"
"iPv in 1\n"
"mzW sz 1\n"
"vXm va 1\n"
"fEq qu 1\n"
"Ozq qu 1\n"
"gEp ng 1\n"
"kDj ij 1\n"
"Zlw le 1\n"
"zbR sz 1\n"
"zCt th 1\n"
"woY on 1\n"
"pkT ka 1\n"
"kbI ka 1\n"
"hdW de 1\n"
"Hsx st 1\n"
"zpX sz 1\n"
"zfV sz 1\n"
"Dhk th 1\n"
"wMp pr 1\n"
"hzJ th 1\n"
"Lwp pr 1\n"
"zmN sz 1\n"
"xfq qu 1\n"
"sjQ sz 1\n"
"zkK sz 1\n"
"bBv va 1\n"
"bdE de 1\n"
"Qxn an 1\n"
"jqt th 1\n"
"jhG th 1\n"
"fYv va 1\n"
"xhE th 1\n"
"cbF ch 1\n"
"Jnb an 1\n"
"jxN ij 1\n"
"fYx fo 1\n"
"hJp th 1\n"
"cRt th 1\n"
"qnS an 1\n"
"vLp va 1\n"
"cBd ch 1\n"
"qqU qu 1\n"
"Sdd de 1\n"
"xeZ er 1\n"
"Jwo on 1\n"
"dPf de 1\n"
"fNl le 1\n"
"kIb ka 1\n"
"cbL ch 1\n"
"Qdr er 1\n"
"Mfb be 1\n"
"jJl le 1\n"
"mxY me 1\n"
"lFd le 1\n"
"twT th 1\n"
"kFk ka 1\n"
"crB ch 1\n"
"jRr er 1\n"
"Htz th 1\n"
"pYf pr 1\n"
"rVc er 1\n"
"vRf va 1\n"
"wVq qu 1\n"
"zpA sz 1\n"
"glY le 1\n"
"sNj ij 1\n"
"vKx va 1\n"
"tvB th 1\n"
"Yjf ij 1\n"
"mwP me 1\n"
"Jyb be 1\n"
"tBc th 1\n"
"gSb ng 1\n"
"cMl ch 1\n"
"gjJ ng 1\n"
"dYz de 1\n"
"zPg ng 1\n"
"kqB qu 1\n"
"sFv st 1\n"
"xkH ka 1\n"
"fZt th 1\n"
"yhR th 1\n"
"bwN wa 1\n"
"qjG qu 1\n"
"nQm an 1\n"
"qMr qu 1\n"
"jcW ch 1\n"
"qJv qu 1\n"
"gTm ng 1\n"
"kmQ ka 1\n"
"Wlc ch 1\n"
"kYf ka 1\n"
"eJp er 1\n"
"Tkb ka 1\n"
"hfM th 1\n"
"nxY an 1\n"
"pDl le 1\n"
"wcN ch 1\n"
"pQa an 1\n"
"ohZ th 1\n"
"xRz sz 1\n"
"lbV le 1\n"
"lKc ch 1\n"
"wxB wa 1\n"
"Lww wa 1\n"
"fqQ qu 1\n"
"kkZ ka 1\n"
"iwO in 1\n"
"dgU ng 1\n"
"dvO de 1\n"
"pDt th 1\n"
"kvK ka 1\n"
"jlV le 1\n"
"xXd de 1\n"
"ykF ku 1\n"
"iyT in 1\n"
"Ufx fo 1\n"
"nzU an 1\n"
"xbH bu 1\n"
"lSb le 1\n"
"Xpf pr 1\n"
"Uvf va 1\n"
"yyF ny 1\n"
"fxP fo 1\n"
"jYu qu 1\n"
"qjb qu 1\n"
"gxL ng 1\n"
"pwI pr 1\n"
"jUe er 1\n"
"rFc ch 1\n"
"fsF st 1\n"
"cdW ch 1\n"
"Xwp pr 1\n"
"xdH de 1\n"
"jYs ij 1\n"
"bFd de 1\n"
"qIh th 1\n"
"yIg ng 1\n"
"vTd de 1\n"
"wfE wa 1\n"
"qRb qu 1\n"
"yhK th 1\n"
"kMn an 1\n"
"cpB ch 1\n"
"txN th 1\n"
"kPd de 1\n"
"nbB an 1\n"
"skQ st 1\n"
"uKw qu 1\n"
"wQf wa 1\n"
"kWf ka 1\n"
"wqA qu 1\n"
"cwA ch 1\n"
"vJk ka 1\n"
"hcD th 1\n"
"nfK an 1\n"
"uXf qu 1\n"
"cgA ch 1\n"
"Pjd de 1\n"
"Lqs qu 1\n"
"zwC sz 1\n"
"ljN le 1\n"
"vkP ka 1\n"
"Rqp qu 1\n"
"zGx sz 1\n"
"jPg ng 1\n"
"kbT ka 1\n"
"kpQ ka 1\n"
"Mzq qu 1\n"
"Gjs st 1\n"
"kDl le 1\n"
"jwR ij 1\n"
"Wyq qu 1\n"
"qxS qu 1\n"
"qGt th 1\n"
"Wvr er 1\n"
"zNx sz 1\n"
"vCm va 1\n"
"hlD th 1\n"
"vBp va 1\n"
"mJc ch 1\n"
"hFb th 1\n"
"vDm va 1\n"
"pfC pr 1\n"
"Lpy pr 1\n"
"Fhd th 1\n"
"dxS de 1\n"
"wWg ng 1\n"
"Fgn an 1\n"
"nFf an 1\n"
"cxF ch 1\n"
"aVh th 1\n"
"Sqx qu 1\n"
"Vjz ij 1\n"
"znC an 1\n"
"qqv qu 1\n"
"zrZ er 1\n"
"bNl le 1\n"
"nvW an 1\n"
"Qyb be 1\n"
"Fht th 1\n"
"jGv ij 1\n"
"gLp ng 1\n"
"gLb ng 1\n"
"qKj qu 1\n"
"hJd th 1\n"
"Zjg ng 1\n"
"nQq an 1\n"
"npX an 1\n"
"qiO in 1\n"
"vvG va 1\n"
"jOx ij 1\n"
"hhE th 1\n"
"vdN de 1\n"
"Czz sz 1\n"
"gjU ng 1\n"
"hVb th 1\n"
"Kcg ch 1\n"
"dvH de 1\n"
"wtD th 1\n"
"jIo on 1\n"
"jQa an 1\n"
"Fyj ij 1\n"
"cpU ch 1\n"
"hxY th 1\n"
"qbD qu 1\n"
"svJ st 1\n"
"vjW ij 1\n"
"gpY ng 1\n"
"qnR an 1\n"
"gQn an 1\n"
"Cvh th 1\n"
"ykB ka 1\n"
"xgB ng 1\n"
"zfD sz 1\n"
"yHw wa 1\n"
"qdG qu 1\n"
"qTn an 1\n"
"lTm le 1\n"
"jgB ng 1\n"
"gxS ng 1\n"
"qPe qu 1\n"
"ppQ pr 1\n"
"yxW ny 1\n"
"Hjk ij 1\n"
"kNk ka 1\n"
"cnJ an 1\n"
"uHd qu 1\n"
"jvH ij 1\n"
"Ggn ng 1\n"
"lbS le 1\n"
"Qcx ch 1\n"
"cqR ch 1\n"
"Jyc ch 1\n"
"wRp pr 1\n"
"nfA an 1\n"
"lXw le 1\n"
"cmJ ch 1\n"
"Ysw st 1\n"
"qQs qu 1\n"
"gsX ng 1\n"
"cIq ch 1\n"
"jjZ ij 1\n"
"Llb le 1\n"
"mMv va 1\n"
"lVh th 1\n"
"Fph th 1\n"
"Zmm me 1\n"
"xMd de 1\n"
"Gwb wa 1\n"
"Qjv ij 1\n"
"lqZ qu 1\n"
"zJh th 1\n"
"Wky ka 1\n"
"hDk th 1\n"
"yLg ng 1\n"
"dYw de 1\n"
"dCq qu 1\n"
"Gmj ij 1\n"
"xTq qu 1\n"
"wkF ka 1\n"
"hFp th 1\n"
"qnB an 1\n"
"xyJ ny 1\n"
"nIj an 1\n"
"xYd de 1\n"
"Wqr qu 1\n"
"xqV qu 1\n"
"wYk ka 1\n"
"Qdz de 1\n"
"fbN be 1\n"
"qwY qu 1\n"
"Ubx be 1\n"
"wtL th 1\n"
"nQw an 1\n"
"jJk ij 1\n"
"Nzs st 1\n"
"dCn an 1\n"
"Nfv va 1\n"
"Hgh th 1\n"
"Hcq ch 1\n"
"Xvb va 1\n"
"sxJ st 1\n"
"wMx wa 1\n"
"qFn an 1\n"
"Gzf sz 1\n"
"qfJ qu 1\n"
"zdQ de 1\n"
"Xgz ng 1\n"
"fkI ka 1\n"
"pvK va 1\n"
"Cqr qu 1\n"
"zFd de 1\n"
"oHm on 1\n"
"aJj an 1\n"
"Fzd de 1\n"
"dWk de 1\n"
"wmE me 1\n"
"sMl le 1\n"
"tBp th 1\n"
"vNw va 1\n"
"Qdh th 1\n"
"whG th 1\n"
"qAp qu 1\n"
"jrM er 1\n"
"rHw er 1\n"
"Lvc ch 1\n"
"gRn an 1\n"
"yjV ij 1\n"
"hRk th 1\n"
"bkV ka 1\n"
"jWm ij 1\n"
"yYz sz 1\n"
"vTy va 1\n"
"dxV de 1\n"
"mKy me 1\n"
"Qlq qu 1\n"
"Upx pr 1\n"
"Qpq qu 1\n"
"Lwm me 1\n"
"yXr er 1\n"
"gTk ng 1\n"
"qnT an 1\n"
"Vlq qu 1\n"
"Qqd qu 1\n"
"Zdd de 1\n"
"Xqt th 1\n"
"Dfb be 1\n"
"oeO on 1\n"
"nCx an 1\n"
"lXd le 1\n"
"vHc ch 1\n"
"vAb va 1\n"
"Ybw wa 1\n"
"zDn an 1\n"
"dGk de 1\n"
"plH le 1\n"
"lxG le 1\n"
"Hgp ng 1\n"
"jRz ij 1\n"
"dTs de 1\n"
"mCj ij 1\n"
"lHf le 1\n"
"lLj le 1\n"
"tNb th 1\n"
"mKk ka 1\n"
"gGj ng 1\n"
"jlQ le 1\n"
"Yyg ng 1\n"
"fDv va 1\n"
"zXg ng 1\n"
"qzZ qu 1\n"
"fEg ng 1\n"
"lhS th 1\n"
"mzM sz 1\n"
"xqT qu 1\n"
"Ycj ch 1\n"
"fbF be 1\n"
"Xsj ij 1\n"
"Lnc an 1\n"
"Gqp qu 1\n"
"fjO ij 1\n"
"zhI th 1\n"
"zgH ng 1\n"
"gWc ch 1\n"
"yKf ny 1\n"
"uQd qu 1\n"
"Kwl le 1\n"
"dxG de 1\n"
"Yqw qu 1\n"
"tKc th 1\n"
"cWn an 1\n"
"hcI th 1\n"
"wfY wa 1\n"
"rBp er 1\n"
"cJd ch 1\n"
"sYf sz 1\n"
"Sqj qu 1\n"
"kQv ka 1\n"
"xpF pr 1\n"
"fcX ch 1\n"
"yfK ny 1\n"
"jQo on 1\n"
"gTg ng 1\n"
"Qwn an 1\n"
"Pnx an 1\n"
"yZt th 1\n"
"wPz sz 1\n"
"juX qu 1\n"
"Lxv va 1\n"
"iXr in 1\n"
"pcE ch 1\n"
"Nqy qu 1\n"
"hjI th 1\n"
"hzV th 1\n"
"nmF an 1\n"
"pvW va 1\n"
"eJw er 1\n"
"Iqd qu 1\n"
"gXy ng 1\n"
"wfW wa 1\n"
"Vdw de 1\n"
"qJx qu 1\n"
"Pdq qu 1\n"
"Bjb ij 1\n"
"qLl qu 1\n"
"zdW de 1\n"
"fQr er 1\n"
"xzW sz 1\n"
"vwQ va 1\n"
"rwU er 1\n"
"qPn an 1\n"
"bFw wa 1\n"
"vHl le 1\n"
"hWl th 1\n"
"wgO ng 1\n"
"hLk th 1\n"
"Jkb ka 1\n"
"zBh th 1\n"
"Dhx th 1\n"
"Fgv ng 1\n"
"bpA pr 1\n"
"zxC sz 1\n"
"gfS ng 1\n"
"Mvx va 1\n"
"uPk qu 1\n"
"Vqn an 1\n"
"yqC qu 1\n"
"vMk ka 1\n"
"wqL qu 1\n"
"wrJ er 1\n"
"cdN ch 1\n"
"pwR pr 1\n"
"hMf th 1\n"
"jPf ij 1\n"
"Vbv va 1\n"
"qzF qu 1\n"
"qNc ch 1\n"
"Jbq qu 1\n"
"fTk ka 1\n"
"Zff fo 1\n"
"Fzt th 1\n"
"Kcw ch 1\n"
"eKf er 1\n"
"pqZ qu 1\n"
"Wpb pr 1\n"
"jkF ij 1\n"
"Vxp pr 1\n"
"hGq th 1\n"
"qBc ch 1\n"
"fcT ch 1\n"
"jMq qu 1\n"
"kZv ka 1\n"
"qkG qu 1\n"
"Ifp pr 1\n"
"dRw de 1\n"
"Zlj le 1\n"
"Kwj ij 1\n"
"fNb be 1\n"
"dYy de 1\n"
"hZl th 1\n"
"wtP th 1\n"
"hPz th 1\n"
"Ykc ch 1\n"
"Jlw le 1\n"
"jNt th 1\n"
"yrW er 1\n"
"gWd ng 1\n"
"yXd de 1\n"
"fQl le 1\n"
"jfF ij 1\n"
"Ejx ij 1\n"
"fGk ka 1\n"
"Zjz ij 1\n"
"wdM de 1\n"
"jlF le 1\n"
"cxZ ch 1\n"
"Zgk ng 1\n"
"mcJ ch 1\n"
"slE le 1\n"
"nYq an 1\n"
"Wfg ng 1\n"
"zJk ka 1\n"
"bvF va 1\n"
"Hnz an 1\n"
"Wkv ka 1\n"
"Mvq qu 1\n"
"Dxh th 1\n"
"Bvt th 1\n"
"sMj ij 1\n"
"wRf wa 1\n"
"vLb va 1\n"
"zGq qu 1\n"
"mFp me 1\n"
"gNb ng 1\n"
"pCg ng 1\n"
"xFs sz 1\n"
"jKf ij 1\n"
"qJb qu 1\n"
"pzI sz 1\n"
"jgG ng 1\n"
"pKs sz 1\n"
"fqD qu 1\n"
"gxQ ng 1\n"
"fvG va 1\n"
"wgF ng 1\n"
"Xxz sz 1\n"
"Lwu qu 1\n"
"dlX le 1\n"
"lPz le 1\n"
"Wqk qu 1\n"
"Xzj ij 1\n"
"uHj qu 1\n"
"uFj qu 1\n"
"jvV ij 1\n"
"jXe le 1\n"
"Zfm me 1\n"
"qIm qu 1\n"
"zbB sz 1\n"
"yZf ny 1\n"
"sKk sz 1\n"
"zpL sz 1\n"
"qKg qu 1\n"
"Ibj ij 1\n"
"iQb in 1\n"
"Fxu qu 1\n"
"Fpb pr 1\n"
"Wva an 1\n"
"fzD sz 1\n"
"bkT ka 1\n"
"Ykt th 1\n"
"njG an 1\n"
"Uvh th 1\n"
"gfT ng 1\n"
"zcI ch 1\n"
"bDq qu 1\n"
"Jdh th 1\n"
"xMg ng 1\n"
"Jby be 1\n"
"lwJ le 1\n"
"sWw sz 1\n"
"Svw va 1\n"
"nrX an 1\n"
"uvV qu 1\n"
"jVr er 1\n"
"tqB th 1\n"
"bVr er 1\n"
"kQl le 1\n"
"fbG be 1\n"
"rqM qu 1\n"
"zHj ij 1\n"
"fhY th 1\n"
"Yzr er 1\n"
"vFf va 1\n"
"Qpg ng 1\n"
"uAq qu 1\n"
"zxP sz 1\n"
"jCn an 1\n"
"qaM an 1\n"
"xlY le 1\n"
"cTf ch 1\n"
"kBf ka 1\n"
"cQc ch 1\n"
"Rbj ij 1\n"
"kVs sz 1\n"
"bGv va 1\n"
"wdN de 1\n"
"gfN ng 1\n"
"bPj ij 1\n"
"gcI ch 1\n"
"gxj ng 1\n"
"rHb er 1\n"
"pVr er 1\n"
"rVj er 1\n"
"vgS ng 1\n"
"Fqz qu 1\n"
"xMk ka 1\n"
"qQm qu 1\n"
"jZc ch 1\n"
"jBc ch 1\n"
"uwY qu 1\n"
"rHf er 1\n"
"czX ch 1\n"
"zcT ch 1\n"
"bFj ij 1\n"
"qcB ch 1\n"
"hfT th 1\n"
"xqO qu 1\n"
"qfp qu 1\n"
"xjU ij 1\n"
"bhR th 1\n"
"tWv th 1\n"
"iqE in 1\n"
"gpU ng 1\n"
"iWb in 1\n"
"tlP th 1\n"
"tYq th 1\n"
"bCv va 1\n"
"oKc ch 1\n"
"Sgj ng 1\n"
"hvq th 1\n"
"kfY ka 1\n"
"zbM sz 1\n"
"zvA sz 1\n"
"cHp ch 1\n"
"vvK va 1\n"
"fpZ pr 1\n"
"dfX de 1\n"
"wrK er 1\n"
"xeE er 1\n"
"fkY ka 1\n"
"sbX sz 1\n"
"fcS ch 1\n"
"vKh th 1\n"
"Qlx le 1\n"
"Zqh th 1\n"
"qWg qu 1\n"
"cdL ch 1\n"
"jvG ij 1\n"
"Mgx ng 1\n"
"gwF ng 1\n"
"kdP de 1\n"
"uMr qu 1\n"
"tcD th 1\n"
"qrL qu 1\n"
"Mtm th 1\n"
"bQz sz 1\n"
"Hpx pr 1\n"
"zpI sz 1\n"
"jkR ij 1\n"
"khH th 1\n"
"mSq qu 1\n"
"pFz sz 1\n"
"juO qu 1\n"
"Xyq qu 1\n"
"jGd de 1\n"
"Yzd de 1\n"
"wbC wa 1\n"
"wSb wa 1\n"
"sZd de 1\n"
"Rzx sz 1\n"
"Flx le 1\n"
"bqC qu 1\n"
"lcH ch 1\n"
"wmG me 1\n"
"zCj ij 1\n"
"xaD an 1\n"
"iwH in 1\n"
"qDp qu 1\n"
"sGx sz 1\n"
"Xhy th 1\n"
"eVc ch 1\n"
"wkJ wa 1\n"
"Lcf ch 1\n"
"lgQ ng 1\n"
"Dhh th 1\n"
"zfO sz 1\n"
"kVc ch 1\n"
"hmL th 1\n"
"Owf wa 1\n"
"wZc ch 1\n"
"dnN an 1\n"
"Mzp sz 1\n"
"mYw me 1\n"
"yLh th 1\n"
"Xxr er 1\n"
"qwI qu 1\n"
"Txs sz 1\n"
"yKp pr 1\n"
"bjX ij 1\n"
"pbS pr 1\n"
"zrP er 1\n"
"hJm th 1\n"
"qgA qu 1\n"
"zwY sz 1\n"
"rXk er 1\n"
"nDx an 1\n"
"vGz sz 1\n"
"mQq qu 1\n"
"upY qu 1\n"
"rLn an 1\n"
"Vfk ka 1\n"
"wCv va 1\n"
"cgx ch 1\n"
"kZq qu 1\n"
"Wjw ij 1\n"
"Qax an 1\n"
"grG ng 1\n"
"bJd de 1\n"
"dJx de 1\n"
"cMd ch 1\n"
"Qcs ch 1\n"
"mkK ka 1\n"
"jNx ij 1\n"
"mrY er 1\n"
"Xwx wa 1\n"
"rZl er 1\n"
"gxU ng 1\n"
"Lnv an 1\n"
"ygC ng 1\n"
"Dqh th 1\n"
"lLn an 1\n"
"mnQ an 1\n"
"kjU ij 1\n"
"bvO va 1\n"
"oVm on 1\n"
"vWt th 1\n"
"rGq qu 1\n"
"tbJ th 1\n"
"fSv va 1\n"
"wJn an 1\n"
"fJv va 1\n"
"oQv on 1\n"
"Vws sz 1\n"
"pnU an 1\n"
"Nmh th 1\n"
"cTq ch 1\n"
"Edx de 1\n"
"uqw qu 1\n"
"Yrh th 1\n"
"Qnx an 1\n"
"mJf me 1\n"
"kDq qu 1\n"
"Xhd th 1\n"
"nLx an 1\n"
"xkU ka 1\n"
"fqT qu 1\n"
"qYh th 1\n"
"bFv va 1\n"
"xbQ be 1\n"
"vcS ch 1\n"
"qqT qu 1\n"
"gkF ng 1\n"
"zFh th 1\n"
"kpE ka 1\n"
"Gxb be 1\n"
"Ztw th 1\n"
"qIl qu 1\n"
"Qkd de 1\n"
"wdV de 1\n"
"rwP er 1\n"
"aCg an 1\n"
"Zrs er 1\n"
"zmW sz 1\n"
"vfO va 1\n"
"hBj th 1\n"
"tbH th 1\n"
"Dxv va 1\n"
"zdD de 1\n"
"nBw an 1\n"
"lrV er 1\n"
"gQq ng 1\n"
"tlK th 1\n"
"ztP th 1\n"
"yqV qu 1\n"
"nRm an 1\n"
"jVz sz 1\n"
"Crq er 1\n"
"fFg ng 1\n"
"Xjg ng 1\n"
"Cml le 1\n"
"qWj qu 1\n"
"jzO ij 1\n"
"Mdq qu 1\n"
"mtQ th 1\n"
"rGv er 1\n"
"kGn an 1\n"
"mLg ng 1\n"
"uWj qu 1\n"
"Rcq ch 1\n"
"cVp ch 1\n"
"bWk ka 1\n"
"Xzx sz 1\n"
"Wkb ka 1\n"
"xzH sz 1\n"
"quP un 1\n"
"dHv de 1\n"
"Dmq qu 1\n"
"Dgv ng 1\n"
"tgY th 1\n"
"jtM th 1\n"
"tMz th 1\n"
"bHm me 1\n"
"Zfk ka 1\n"
"xZp pr 1\n"
"jkH ij 1\n"
"rNp er 1\n"
"xMv va 1\n"
"wpF pr 1\n"
"djD de 1\n"
"bxV be 1\n"
"hgS th 1\n"
"Pkh th 1\n"
"Dxq qu 1\n"
"mMx me 1\n"
"dGj de 1\n"
"kbH ka 1\n"
"Lhg th 1\n"
"Dvq qu 1\n"
"qrT qu 1\n"
"Ijw ij 1\n"
"wuI qu 1\n"
"Zwn an 1\n"
"dhJ th 1\n"
"qcR ch 1\n"
"whM th 1\n"
"pgP ng 1\n"
"qkR qu 1\n"
"sqR qu 1\n"
"lxY le 1\n"
"vVw va 1\n"
"lKd le 1\n"
"Nly le 1\n"
"yKz sz 1\n"
"qBb qu 1\n"
"wQx wa 1\n"
"kYw ka 1\n"
"fQd de 1\n"
"svW sz 1\n"
"yGp pr 1\n"
"ytB th 1\n"
"jvU ij 1\n"
"kjz ka 1\n"
"jVc ch 1\n"
"Qbz sz 1\n"
"pqM qu 1\n"
"vwu ku 1\n"
"Qww wa 1\n"
"dcZ ch 1\n"
"lhG th 1\n"
"gmS ng 1\n"
"Iqz qu 1\n"
"zZf sz 1\n"
"hLn th 1\n"
"eMf er 1\n"
"xNq qu 1\n"
"mPm um 1\n"
"pMg ng 1\n"
"wzW sz 1\n"
"kRl le 1\n"
"hzK th 1\n"
"fbO be 1\n"
"Xxt th 1\n"
"Fnx an 1\n"
"Bvn an 1\n"
"bjZ ij 1\n"
"tcY th 1\n"
"dmB de 1\n"
"qFe qu 1\n"
"kxB ka 1\n"
"qBz qu 1\n"
"pVp pr 1\n"
"boQ on 1\n"
"xoH on 1\n"
"dWg de 1\n"
"Tdq qu 1\n"
"zNq qu 1\n"
"vYp va 1\n"
"pDf pr 1\n"
"lwG le 1\n"
"hDq th 1\n"
"Jdy de 1\n"
"snZ an 1\n"
"mzU sz 1\n"
"zKx sz 1\n"
"rvC er 1\n"
"wuS qu 1\n"
"dnQ an 1\n"
"vCy va 1\n"
"Udw wa 1\n"
"bTl le 1\n"
"qbC qu 1\n"
"tbT th 1\n"
"iDk ka 1\n"
"Whb th 1\n"
"tbX th 1\n"
"tfO th 1\n"
"Tfq qu 1\n"
"dbW de 1\n"
"Bdy de 1\n"
"vjR ij 1\n"
"cbC ch 1\n"
"wuW qu 1\n"
"wCw wa 1\n"
"Wdq qu 1\n"
"vRb va 1\n"
"bWm me 1\n"
"vZw va 1\n"
"dJj de 1\n"
"qZy qu 1\n"
"Jgq ng 1\n"
"zbH sz 1\n"
"hJl th 1\n"
"Xhg th 1\n"
"nVp an 1\n"
"dVc ch 1\n"
"qCc ch 1\n"
"oYg ng 1\n"
"kwH ka 1\n"
"vwN va 1\n"
"zfw sz 1\n"
"vlO le 1\n"
"ztX ti 1\n"
"dKx de 1\n"
"xQs sz 1\n"
"cDl ch 1\n"
"yVv va 1\n"
"zpN sz 1\n"
"xkG ka 1\n"
"eqW qu 1\n"
"jdD di 1\n"
"fQm me 1\n"
"Yhl th 1\n"
"tBf th 1\n"
"qEf qu 1\n"
"whX th 1\n"
"Vgv ng 1\n"
"Lsq qu 1\n"
"dfJ de 1\n"
"Zdp de 1\n"
"rZc ch 1\n"
"tZh ch 1\n"
"mtC th 1\n"
"zxQ sz 1\n"
"Vnj an 1\n"
"sHg ng 1\n"
"wYl le 1\n"
"Bqb qu 1\n"
"yrV er 1\n"
"Ycs ch 1\n"
"jRw ij 1\n"
"iWt th 1\n"
"hVw th 1\n"
"wZs sz 1\n"
"Cqo qu 1\n"
"Gfn an 1\n"
"rBv er 1\n"
"Ojz sz 1\n"
"zGf sz 1\n"
"bZc ch 1\n"
"Fvd de 1\n"
"Zgs ng 1\n"
"Rfg ng 1\n"
"Rww wa 1\n"
"Yrp er 1\n"
"iFp in 1\n"
"bVx be 1\n"
"zfM sz 1\n"
"qdV qu 1\n"
"bGm me 1\n"
"tnJ th 1\n"
"pdR de 1\n"
"gBc ch 1\n"
"gzC ng 1\n"
"Pwc ch 1\n"
"uAw qu 1\n"
"znX an 1\n"
"vgT ng 1\n"
"oAw ko 1\n"
"xBm me 1\n"
"dNf de 1\n"
"Pqs qu 1\n"
"Npd di 1\n"
"oUy ko 1\n"
"fpD pr 1\n"
"Rfx fo 1\n"
"lXm le 1\n"
"qWs qu 1\n"
"gWv vi 1\n"
"Fwv va 1\n"
"Lqj qu 1\n"
"fvQ va 1\n"
"zgB ng 1\n"
"kJl le 1\n"
"vWo on 1\n"
"Xvc ch 1\n"
"yDq qu 1\n"
"bdP de 1\n"
"jVf ij 1\n"
"wPw wa 1\n"
"dwA de 1\n"
"Oqp qu 1\n"
"qiZ in 1\n"
"xdV de 1\n"
"qFg ng 1\n"
"qzI qu 1\n"
"ywL wa 1\n"
"sWv sz 1\n"
"Tpy pr 1\n"
"wbf wa 1\n"
"uPg ng 1\n"
"Knw an 1\n"
"iuO in 1\n"
"Qdn an 1\n"
"Yfv va 1\n"
"wuK qu 1\n"
"xLn an 1\n"
"yJg ng 1\n"
"Nfk ka 1\n"
"Yql qu 1\n"
"qsH qu 1\n"
"Rzv sz 1\n"
"bIp pr 1\n"
"sQt th 1\n"
"tgC th 1\n"
"qSa an 1\n"
"fxQ fo 1\n"
"hcZ th 1\n"
"wbJ wa 1\n"
"qRl qu 1\n"
"Gcy ch 1\n"
"vZm va 1\n"
"Xzl le 1\n"
"wgR ng 1\n"
"dlO le 1\n"
"tCb th 1\n"
"qmY qu 1\n"
"qZx qu 1\n"
"Lbp pr 1\n"
"Dgq ng 1\n"
"Vkj ij 1\n"
"wqU qu 1\n"
"Mqk qu 1\n"
"wUv va 1\n"
"qgC ng 1\n"
"sbD sz 1\n"
"Sqy qu 1\n"
"bMq qu 1\n"
"Bzt th 1\n"
"sIq qu 1\n"
"cVj ch 1\n"
"wJt th 1\n"
"Xjm ij 1\n"
"Hmg ng 1\n"
"aQd an 1\n"
"iHt th 1\n"
"fMm me 1\n"
"wWc ch 1\n"
"fuE qu 1\n"
"mCf me 1\n"
"qnP an 1\n"
"zLn an 1\n"
"kRt th 1\n"
"Mvl le 1\n"
"mRd de 1\n"
"yfJ ny 1\n"
"xCb be 1\n"
"sQb sz 1\n"
"quC un 1\n"
"Ctc th 1\n"
"pPv va 1\n"
"zjI sz 1\n"
"xmC me 1\n"
"xdJ de 1\n"
"nXv an 1\n"
"vsO sz 1\n"
"pRd de 1\n"
"vbF va 1\n"
"wNl le 1\n"
"kHq qu 1\n"
"rwM er 1\n"
"gxD ng 1\n"
"Qhi th 1\n"
"mqB qu 1\n"
"pnL an 1\n"
"bKb be 1\n"
"iqN in 1\n"
"dkX de 1\n"
"bQd de 1\n"
"bNj ij 1\n"
"Tlk le 1\n"
"Nlg ng 1\n"
"Cxh th 1\n"
"Mqf qu 1\n"
"Pvj ij 1\n"
"zwZ sz 1\n"
"pGb pr 1\n"
"nrF an 1\n"
"bkS ka 1\n"
"dRv de 1\n"
"jJm ij 1\n"
"iqF in 1\n"
"fGc ch 1\n"
"nxW an 1\n"
"xsW sz 1\n"
"mfQ me 1\n"
"fgP ng 1\n"
"jlH le 1\n"
"nrI an 1\n"
"kXv ka 1\n"
"Vpq qu 1\n"
"zMk sz 1\n"
"pHf pr 1\n"
"jdM de 1\n"
"bqJ qu 1\n"
"Ckt th 1\n"
"zKv sz 1\n"
"jzG sz 1\n"
"uIx qu 1\n"
"yNm me 1\n"
"jYt th 1\n"
"fwL wa 1\n"
"dZx de 1\n"
"vgF ng 1\n"
"wXi in 1\n"
"vZt th 1\n"
"Ctf th 1\n"
"xqC qu 1\n"
"qOc ch 1\n"
"ygX ng 1\n"
"kWk ka 1\n"
"grF ng 1\n"
"qnX an 1\n"
"xUi in 1\n"
"pmC me 1\n"
"uzE qu 1\n"
"Ivw va 1\n"
"gvI ng 1\n"
"knZ an 1\n"
"lxZ le 1\n"
"Xwf wa 1\n"
"Dqb qu 1\n"
"yKg ng 1\n"
"Vwg ng 1\n"
"xSb be 1\n"
"Hwp pr 1\n"
"yNx ny 1\n"
"yoQ on 1\n"
"cSx ch 1\n"
"Evq qu 1\n"
"tIw th 1\n"
"dfZ de 1\n"
"hzP th 1\n"
"xBk ka 1\n"
"kqr qu 1\n"
"yBm me 1\n"
"lJj le 1\n"
"cjq ch 1\n"
"drW er 1\n"
"qaD an 1\n"
"wDf wa 1\n"
"Lxz sz 1\n"
"zQf fo 1\n"
"Jtq th 1\n"
"qRv qu 1\n"
"Gfc ch 1\n"
"Xbt th 1\n"
"wZb wa 1\n"
"srQ er 1\n"
"gJq ng 1\n"
"jFt th 1\n"
"gNc ch 1\n"
"Rkr er 1\n"
"pzJ sz 1\n"
"lbA le 1\n"
"cBq ch 1\n"
"Kyq qu 1\n"
"xcO ch 1\n"
"zXr er 1\n"
"cVs ch 1\n"
"rYm er 1\n"
"kVm ka 1\n"
"fcZ ch 1\n"
"fzC sz 1\n"
"tKp th 1\n"
"gPz ng 1\n"
"qcL ch 1\n"
"Yjr er 1\n"
"zxU sz 1\n"
"xbT be 1\n"
"nvX an 1\n"
"qmR qu 1\n"
"bxL be 1\n"
"Xww wa 1\n"
"jSf ij 1\n"
"lNf le 1\n"
"zTs sz 1\n"
"kFq qu 1\n"
"qLz qu 1\n"
"rrX er 1\n"
"wXg ng 1\n"
"zvE sz 1\n"
"Hwx wa 1\n"
"qFm qu 1\n"
"cgR ch 1\n"
"pDp pr 1\n"
"Oqb qu 1\n"
"sVc ch 1\n"
"Xtx th 1\n"
"Qwt th 1\n"
"Wfe er 1\n"
"Pcx ch 1\n"
"bpO pr 1\n"
"Cwg ng 1\n"
"wxO wa 1\n"
"bVs sz 1\n"
"jFw ij 1\n"
"fnF an 1\n"
"kxH ka 1\n"
"Yws sz 1\n"
"gdD ng 1\n"
"jWx ij 1\n"
"cTl ch 1\n"
"kmW ka 1\n"
"mhW th 1\n"
"bzT sz 1\n"
"rvJ er 1\n"
"xcJ ch 1\n"
"vkS ka 1\n"
"sXr er 1\n"
"sCv sz 1\n"
"Ntp th 1\n"
"oHh lo 1\n"
"Yvs sz 1\n"
"pVf pr 1\n"
"kEq qu 1\n"
"qfE qu 1\n"
"oWm on 1\n"
"tMw th 1\n"
"zYp sz 1\n"
"nFw an 1\n"
"yQc ch 1\n"
"zQj sz 1\n"
"wKq qu 1\n"
"mKf me 1\n"
"uLr qu 1\n"
"wIb wa 1\n"
"wrH er 1\n"
"pgL ng 1\n"
"Lbt th 1\n"
"zjF sz 1\n"
"qFp qu 1\n"
"zdX de 1\n"
"wTc ch 1\n"
"Jwl le 1\n"
"lxU le 1\n"
"hjA th 1\n"
"iPg in 1\n"
"Xns an 1\n"
"wkW ka 1\n"
"pfP pr 1\n"
"Dyq qu 1\n"
"jWu qu 1\n"
"qzR qu 1\n"
"Yjz sz 1\n"
"twX th 1\n"
"Nwj ij 1\n"
"jbB ij 1\n"
"qwR qu 1\n"
"Ytf th 1\n"
"blX le 1\n"
"xZk ka 1\n"
"Ymw me 1\n"
"wfX wa 1\n"
"Vqy qu 1\n"
"Xqn an 1\n"
"yUw wa 1\n"
"jzT jo 1\n"
"kNt th 1\n"
"pmQ me 1\n"
"dXr er 1\n"
"ylq qu 1\n"
"tWz th 1\n"
"Kvr er 1\n"
"bhQ th 1\n"
"uJn an 1\n"
"pbT pr 1\n"
"aBf an 1\n"
"Rhj th 1\n"
"uAx qu 1\n"
"Bgx ng 1\n"
"jqN qu 1\n"
"jdC ij 1\n"
"fBs st 1\n"
"cXk ch 1\n"
"nmM an 1\n"
"xRr er 1\n"
"Hkz sz 1\n"
"dhZ th 1\n"
"Fyp pr 1\n"
"kGm ka 1\n"
"sGq qu 1\n"
"jKh th 1\n"
"vDz sz 1\n"
"vLq qu 1\n"
"lJs le 1\n"
"zNn an 1\n"
"Wgj ng 1\n"
"jmL ij 1\n"
"gVt th 1\n"
"wFz sz 1\n"
"zbD sz 1\n"
"kTd de 1\n"
"dwX de 1\n"
"xRl le 1\n"
"Azv sz 1\n"
"bQh th 1\n"
"qQf qu 1\n"
"yoZ on 1\n"
"jPs sz 1\n"
"jyG ij 1\n"
"kXj ka 1\n"
"yBv va 1\n"
"nwP an 1\n"
"xnA an 1\n"
"bKf be 1\n"
"qbP qu 1\n"
"vGs sz 1\n"
"jjG ij 1\n"
"Kqc ch 1\n"
"zVt th 1\n"
"wSg ng 1\n"
"sWm sz 1\n"
"fDg ng 1\n"
"pHz sz 1\n"
"fYp pr 1\n"
"zrW er 1\n"
"lDx le 1\n"
"hQh th 1\n"
"Bdp de 1\n"
"fqZ qu 1\n"
"oQm on 1\n"
"Qsq qu 1\n"
"xjq qu 1\n"
"Mfv va 1\n"
"zbQ sz 1\n"
"quR un 1\n"
"cMb ch 1\n"
"zqD qu 1\n"
"dXf de 1\n"
"rHh th 1\n"
"jhF th 1\n"
"nNf an 1\n"
"wHb wa 1\n"
"Tpq qu 1\n"
"bjY ij 1\n"
"cJq ch 1\n"
"lCk le 1\n"
"Pfp pr 1\n"
"Oqn an 1\n"
"fmR me 1\n"
"Qpu qu 1\n"
"Ncv ch 1\n"
"qYr qu 1\n"
"sfA sz 1\n"
"frS er 1\n"
"Gpf pr 1\n"
"jmD ij 1\n"
"hwI th 1\n"
"Rbz sz 1\n"
"jhB th 1\n"
"xXj ij 1\n"
"qYd qu 1\n"
"sVf sz 1\n"
"cCz ch 1\n"
"qMl qu 1\n"
"fpK pr 1\n"
"hVy th 1\n"
"lcJ ch 1\n"
"Okj ij 1\n"
"qJg ng 1\n"
"jLp ij 1\n"
"nYf an 1\n"
"npF on 1\n"
"rWk er 1\n"
"mcP ch 1\n"
"nZm an 1\n"
"fYb fo 1\n"
"zbC sz 1\n"
"nBq an 1\n"
"fjy ij 1\n"
"bIx be 1\n"
"twN th 1\n"
"Ggk ng 1\n"
"Czm sz 1\n"
"jtO th 1\n"
"nRl an 1\n"
"jyC ij 1\n"
"yEh th 1\n"
"vmH va 1\n"
"wtQ th 1\n"
"wIf wa 1\n"
"jIf ij 1\n"
"qbM qu 1\n"
"Rwq qu 1\n"
"fqF qu 1\n"
"Wfj ij 1\n"
"jfW ij 1\n"
"wWm me 1\n"
"Wpp pr 1\n"
"Mgj ng 1\n"
"dSf de 1\n"
"wYv va 1\n"
"ccI ch 1\n"
"ylT le 1\n"
"Gqh th 1\n"
"Cmz sz 1\n"
"Hfk ka 1\n"
"qBt th 1\n"
"yCf ny 1\n"
"qzO qu 1\n"
"ydF de 1\n"
"Vdt th 1\n"
"pJd de 1\n"
"sfR sz 1\n"
"dlV le 1\n"
"jOd de 1\n"
"nfF an 1\n"
"wTt th 1\n"
"rGk er 1\n"
"xAw wa 1\n"
"vfF va 1\n"
"Dzg ng 1\n"
"kFp ka 1\n"
"jTm ij 1\n"
"nNq an 1\n"
"qcN ch 1\n"
"Jjx ij 1\n"
"tKf th 1\n"
"Zrq qu 1\n"
"hmK th 1\n"
"Mqz qu 1\n"
"xfR fo 1\n"
"wQq qu 1\n"
"mqG qu 1\n"
"xUr er 1\n"
"oiU in 1\n"
"qsS qu 1\n"
"qGg ng 1\n"
"qtO th 1\n"
"tPb th 1\n"
"Rqm qu 1\n"
"vkX ka 1\n"
"Wsb st 1\n"
"cxR ch 1\n"
"fZr er 1\n"
"yQg ng 1\n"
"ziU in 1\n"
"xvW va 1\n"
"aDx an 1\n"
"bQj ij 1\n"
"jxC ij 1\n"
"Twk ka 1\n"
"sQh th 1\n"
"Bfx fo 1\n"
"aGj an 1\n"
"Pgc ch 1\n"
"Hzh th 1\n"
"qgW ng 1\n"
"kdF de 1\n"
"kbY ka 1\n"
"Qjx ij 1\n"
"Hxj ij 1\n"
"tVx th 1\n"
"nxZ an 1\n"
"oVd on 1\n"
"Hlq qu 1\n"
"jKz sz 1\n"
"qAi in 1\n"
"dNl le 1\n"
"pqA qu 1\n"
"eIv er 1\n"
"xmW me 1\n"
"ycK ch 1\n"
"mQd de 1\n"
"hmU th 1\n"
"nlF an 1\n"
"Gkl le 1\n"
"qBq qu 1\n"
"rhQ th 1\n"
"Znk an 1\n"
"Vfp pr 1\n"
"nBn an 1\n"
"qvL qu 1\n"
"aqN an 1\n"
"kLf ka 1\n"
"zJr er 1\n"
"tQw th 1\n"
"sWq qu 1\n"
"bwW wa 1\n"
"vzB sz 1\n"
"yyR ny 1\n"
"qqN qu 1\n"
"wyI ny 1\n"
"jzJ sz 1\n"
"qgI qu 1\n"
"bgQ ng 1\n"
"yLt th 1\n"
"Vqq qu 1\n"
"Xnr an 1\n"
"wHg ng 1\n"
"aQg an 1\n"
"cFh th 1\n"
"zjQ sz 1\n"
"gpD ng 1\n"
"xzN sz 1\n"
"iIw in 1\n"
"dQg ng 1\n"
"pQy pr 1\n"
"Xyx ny 1\n"
"sWc ch 1\n"
"jFd de 1\n"
"bpF pr 1\n"
"Vsv st 1\n"
"Qql qu 1\n"
"wzT sz 1\n"
"sqQ qu 1\n"
"Kzm sz 1\n"
"oFq qu 1\n"
"gkJ ng 1\n"
"hkH th 1\n"
"qLg ng 1\n"
"bmU me 1\n"
"crJ ch 1\n"
"slX le 1\n"
"Tzx sz 1\n"
"qbx qu 1\n"
"kpI ka 1\n"
"xCf fo 1\n"
"Fml le 1\n"
"Qhj th 1\n"
"tQs th 1\n"
"vRd de 1\n"
"Ycb ch 1\n"
"cjP ch 1\n"
"yuE qu 1\n"
"gIi in 1\n"
"kWg ng 1\n"
"Jwh th 1\n"
"fVy ny 1\n"
"jqy qu 1\n"
"Wzp sz 1\n"
"Cwc ch 1\n"
"qEy qu 1\n"
"jrX er 1\n"
"Kqi in 1\n"
"lYv le 1\n"
"dGv de 1\n"
"Cwj ij 1\n"
"nDv an 1\n"
"Ojm ij 1\n"
"Dnx an 1\n"
"vrF er 1\n"
"Jmr er 1\n"
"zfI sz 1\n"
"bqT qu 1\n"
"Xvj ij 1\n"
"nPp an 1\n"
"aVw an 1\n"
"wBv va 1\n"
"kVb ka 1\n"
"gcH ch 1\n"
"Xbs sz 1\n"
"tRd th 1\n"
"mQz sz 1\n"
"Hxe er 1\n"
"Dnw an 1\n"
"xWg ng 1\n"
"pGc ch 1\n"
"hgI th 1\n"
"ywP wa 1\n"
"nrW an 1\n"
"iVq di 1\n"
"xzE sz 1\n"
"Vxd de 1\n"
"Lzc ch 1\n"
"Jwp pr 1\n"
"gCq ng 1\n"
"Otq th 1\n"
"wvP va 1\n"
"cNr ch 1\n"
"iXq in 1\n"
"Qnl in 1\n"
"tPz th 1\n"
"hIb th 1\n"
"aPg an 1\n"
"zvw sz 1\n"
"nqO an 1\n"
"sqO qu 1\n"
"bjQ ij 1\n"
"lwQ le 1\n"
"pEq qu 1\n"
"bWj ij 1\n"
"swT sz 1\n"
"gmY ng 1\n"
"gRk ng 1\n"
"dZr er 1\n"
"fMr er 1\n"
"lxO le 1\n"
"kbQ ka 1\n"
"yfN ny 1\n"
"ymq qu 1\n"
"jpK ij 1\n"
"Wjn an 1\n"
"fmW me 1\n"
"rKx er 1\n"
"dlH le 1\n"
"kcK ch 1\n"
"vbV va 1\n"
"qNl qu 1\n"
"pHt th 1\n"
"hlT th 1\n"
"lBv le 1\n"
"oaF an 1\n"
"xfM fo 1\n"
"rZd er 1\n"
"jgW ng 1\n"
"Hvh th 1\n"
"Fkf ka 1\n"
"cDc ch 1\n"
"hLh th 1\n"
"qQp qu 1\n"
"zhJ th 1\n"
"ivQ in 1\n"
"Ukq qu 1\n"
"bpV pr 1\n"
"bJq qu 1\n"
"aPw an 1\n"
"sdK de 1\n"
"cGf ch 1\n"
"Ljw ij 1\n"
"qhP th 1\n"
"mFw me 1\n"
"fIu qu 1\n"
"zhB th 1\n"
"fuH qu 1\n"
"bFq qu 1\n"
"Wgk ng 1\n"
"Fqh th 1\n"
"zmf sz 1\n"
"Zpf pr 1\n"
"nFh th 1\n"
"yBw wa 1\n"
"gIj ng 1\n"
"qBf fo 1\n"
"Uwl le 1\n"
"zrM er 1\n"
"yBd de 1\n"
"Rlf le 1\n"
"Pzh ch 1\n"
"rZx er 1\n"
"qVs qu 1\n"
"dxJ de 1\n"
"Lcz ch 1\n"
"gFn an 1\n"
"vIm va 1\n"
"qtG th 1\n"
"qbG qu 1\n"
"bHg ng 1\n"
"xrY er 1\n"
"tBd th 1\n"
"nKq an 1\n"
"Nkt th 1\n"
"jCq qu 1\n"
"byX be 1\n"
"oBp on 1\n"
"Wjz sz 1\n"
"zfP sz 1\n"
"aQz an 1\n"
"sjx ij 1\n"
"nfW an 1\n"
"nXw an 1\n"
"bJw wa 1\n"
"aSf an 1\n"
"iRf in 1\n"
"yMd de 1\n"
"fBc ch 1\n"
"vxR va 1\n"
"Llx le 1\n"
"yGs sz 1\n"
"Jsy sz 1\n"
"Lvx va 1\n"
"eFh th 1\n"
"wbM wa 1\n"
"uOq qu 1\n"
"wWl le 1\n"
"bvU va 1\n"
"fnO an 1\n"
"mzI sz 1\n"
"Vcf ch 1\n"
"mhE th 1\n"
"vgQ ng 1\n"
"jgP ng 1\n"
"qbj qu 1\n"
"bZf be 1\n"
"Xtj th 1\n"
"yYq qu 1\n"
"jdK de 1\n"
"jzB sz 1\n"
"Yys sz 1\n"
"wUg ng 1\n"
"yBb be 1\n"
"qjM qu 1\n"
"sXw sz 1\n"
"Xqw qu 1\n"
"cTb ch 1\n"
"jrE er 1\n"
"sNp sz 1\n"
"Zhm th 1\n"
"xVs sz 1\n"
"jGz sz 1\n"
"Jqh th 1\n"
"zTm sz 1\n"
"vhE th 1\n"
"dQi in 1\n"
"Tmv va 1\n"
"qxD qu 1\n"
"fzE sz 1\n"
"vMr er 1\n"
"Cqx qu 1\n"
"twY th 1\n"
"nVz an 1\n"
"lRk le 1\n"
"Owq qu 1\n"
"qYj qu 1\n"
"yQk ka 1\n"
"Nlf le 1\n"
"qDn an 1\n"
"bHw wa 1\n"
"cjA ch 1\n"
"sgU ng 1\n"
"kQi in 1\n"
"yNf ny 1\n"
"lwZ le 1\n"
"vGd de 1\n"
"Vmn an 1\n"
"tpB th 1\n"
"cFd ch 1\n"
"xHm me 1\n"
"bSg ng 1\n"
"hEq th 1\n"
"ewQ er 1\n"
"eWd er 1\n"
"jfR ij 1\n"
"zpY sz 1\n"
"cvQ ch 1\n"
"hXr th 1\n"
"cJw ch 1\n"
"wEp pr 1\n"
"Nxl le 1\n"
"qMf qu 1\n"
"vGc ch 1\n"
"pyQ pr 1\n"
"jpU ij 1\n"
"xoA on 1\n"
"gXn an 1\n"
"qqG qu 1\n"
"pXn an 1\n"
"vlP le 1\n"
"Lzv sz 1\n"
"jxB ij 1\n"
"cJc ch 1\n"
"jcT ch 1\n"
"Wtm th 1\n"
"cLg ch 1\n"
"kUx ka 1\n"
"nFp an 1\n"
"Jsw sz 1\n"
"sBg ng 1\n"
"jFn an 1\n"
"gvC ng 1\n"
"fFy ny 1\n"
"qnA an 1\n"
"Zbb be 1\n"
"Pzx sz 1\n"
"psJ sz 1\n"
"lZq qu 1\n"
"yfP ny 1\n"
"gYv ng 1\n"
"bfC be 1\n"
"dMx de 1\n"
"hlN th 1\n"
"wRl le 1\n"
"qjH qu 1\n"
"Wjc ch 1\n"
"uQp qu 1\n"
"zTb sz 1\n"
"qUr qu 1\n"
"zqp qu 1\n"
"vlR le 1\n"
"jqX qu 1\n"
"swR sz 1\n"
"qMy ny 1\n"
"zkT sz 1\n"
"yqX qu 1\n"
"nlR an 1\n"
"Hqn an 1\n"
"aaJ an 1\n"
"lKw le 1\n"
"bzB sz 1\n"
"Vgk ng 1\n"
"aVm an 1\n"
"dnR an 1\n"
"txQ th 1\n"
"Qzi in 1\n"
"zxV sz 1\n"
"xgQ ng 1\n"
"tvZ th 1\n"
"jwN ij 1\n"
"Eqj qu 1\n"
"Bxj ij 1\n"
"hzH th 1\n"
"Qfy ny 1\n"
"Ppj ij 1\n"
"Aqp qu 1\n"
"zJn an 1\n"
"szF st 1\n"
"qfX qu 1\n"
"pzV sz 1\n"
"tgN th 1\n"
"xsS sz 1\n"
"nQz an 1\n"
"tkF th 1\n"
"Qhq th 1\n"
"gJc ch 1\n"
"uOa an 1\n"
"rqW qu 1\n"
"fYz sz 1\n"
"uFc ch 1\n"
"Ncx ch 1\n"
"lMw le 1\n"
"cjI ch 1\n"
"Jcw ch 1\n"
"vEo on 1\n"
"eQy er 1\n"
"Sxc ch 1\n"
"bUx mb 1\n"
"zdJ sz 1\n"
"lpN le 1\n"
"Rkq qu 1\n"
"vvI va 1\n"
"Qmq qu 1\n"
"tgJ th 1\n"
"gfE ng 1\n"
"qcX ch 1\n"
"klT le 1\n"
"bbV be 1\n"
"pmZ me 1\n"
"uqA qu 1\n"
"cYy ch 1\n"
"wmY me 1\n"
"zlB le 1\n"
"zNd sz 1\n"
"cvZ ch 1\n"
"dvL de 1\n"
"wLz sz 1\n"
"qcG ch 1\n"
"Qjl le 1\n"
"nqf an 1\n"
"gxY ng 1\n"
"aqI an 1\n"
"Kqa an 1\n"
"Xqp qu 1\n"
"Yvg ng 1\n"
"qqF qu 1\n"
"yHh th 1\n"
"nHc an 1\n"
"Uqq qu 1\n"
"zfN sz 1\n"
"mXq qu 1\n"
"Fgj ng 1\n"
"Dsx sz 1\n"
"xRv va 1\n"
"wbZ wa 1\n"
"Hnp an 1\n"
"fUx fo 1\n"
"cYd ch 1\n"
"qTg ng 1\n"
"Bgq ng 1\n"
"pCn an 1\n"
"Xmh th 1\n"
"vjJ ij 1\n"
"tdG th 1\n"
"Zhk th 1\n"
"xFn an 1\n"
"dkQ de 1\n"
"Lcg ch 1\n"
"mIu qu 1\n"
"Iwd de 1\n"
"wjw ij 1\n"
"zbX sz 1\n"
"Yhp th 1\n"
"cvH ch 1\n"
"Lcx ch 1\n"
"Wfn an 1\n"
"Nfq qu 1\n"
"qMv qu 1\n"
"Uvw va 1\n"
"Qnh th 1\n"
"nbG an 1\n"
"sFg ng 1\n"
"xlJ le 1\n"
"bPb be 1\n"
"xpI pr 1\n"
"mrV er 1\n"
"Fwu qu 1\n"
"wOy wa 1\n"
"Pmh th 1\n"
"Jhq th 1\n"
"Zbx be 1\n"
"pgY ng 1\n"
"Rbw wa 1\n"
"Awx wa 1\n"
"mcB ch 1\n"
"gkG ng 1\n"
"xkW ka 1\n"
"Pnw in 1\n"
"bNs sz 1\n"
"nXr an 1\n"
"Vmt th 1\n"
"eUv er 1\n"
"yQv va 1\n"
"kxr er 1\n"
"Ksw sz 1\n"
"bpW pr 1\n"
"qeD qu 1\n"
"Qvh th 1\n"
"bRm me 1\n"
"qJm qu 1\n"
"csY ch 1\n"
"qwH qu 1\n"
"Cqc ch 1\n"
"lYq qu 1\n"
"dPp de 1\n"
"oAe er 1\n"
"dcS ch 1\n"
"uwU qu 1\n"
"zjL sz 1\n"
"oZx on 1\n"
"kjR ij 1\n"
"cDy ch 1\n"
"fSs sz 1\n"
"eQf le 1\n"
"qBm qu 1\n"
"mLb me 1\n"
"Zrj er 1\n"
"Gkx ka 1\n"
"pkX ka 1\n"
"vTk ka 1\n"
"Zgp ng 1\n"
"dhP th 1\n"
"nPv an 1\n"
"xnQ an 1\n"
"bHp pr 1\n"
"Xgf ng 1\n"
"Cwf wa 1\n"
"lbN le 1\n"
"jNm ij 1\n"
"xNt th 1\n"
"rJp er 1\n"
"oJd on 1\n"
"Ryq qu 1\n"
"lvL le 1\n"
"qvY qu 1\n"
"vwC va 1\n"
"kFj ij 1\n"
"qHd qu 1\n"
"wcB ch 1\n"
"xTs sz 1\n"
"fQz sz 1\n"
"Dlf le 1\n"
"wLt th 1\n"
"Fbh th 1\n"
"rqJ qu 1\n"
"hhO th 1\n"
"xOi in 1\n"
"mqz qu 1\n"
"qmQ me 1\n"
"qQj qu 1\n"
"ovQ on 1\n"
"gfR ng 1\n"
"Pmq qu 1\n"
"Tcj ch 1\n"
"mqQ qu 1\n"
"mwV me 1\n"
"bXw wa 1\n"
"jlA le 1\n"
"fjG ij 1\n"
"jxY ij 1\n"
"qwM qu 1\n"
"kvU ka 1\n"
"Bkq qu 1\n"
"gfA ng 1\n"
"Awc ch 1\n"
"Vmv va 1\n"
"Qhl th 1\n"
"Wmj ij 1\n"
"cMq ch 1\n"
"tHp th 1\n"
"lPb le 1\n"
"vlK le 1\n"
"Ygk ng 1\n"
"gJs ng 1\n"
"tWl th 1\n"
"xVw wa 1\n"
"srN er 1\n"
"Uhb th 1\n"
"vfR va 1\n"
"kFf ka 1\n"
"Jlz le 1\n"
"fKq qu 1\n"
"mRq qu 1\n"
"kWw ka 1\n"
"zvO sz 1\n"
"Xqz qu 1\n"
"dIj de 1\n"
"wJm me 1\n"
"Fqv qu 1\n"
"wNt th 1\n"
"lxL le 1\n"
"xLm me 1\n"
"dqN qu 1\n"
"wRj ij 1\n"
"Ljt th 1\n"
"wRw wa 1\n"
"cxB ch 1\n"
"cjH ch 1\n"
"Vqj qu 1\n"
"qJs qu 1\n"
"cFk ch 1\n"
"xqd qu 1\n"
"Eqh th 1\n"
"qRd qu 1\n"
"vfT va 1\n"
"Zqb qu 1\n"
"mGc ch 1\n"
"Sbd de 1\n"
"iwV in 1\n"
"jfI ij 1\n"
"nWz an 1\n"
"Ljg ng 1\n"
"rjG er 1\n"
"cFb ch 1\n"
"uqZ qu 1\n"
"mVm me 1\n"
"jgK ng 1\n"
"dZh th 1\n"
"Bqx qu 1\n"
"quG un 1\n"
"lCv le 1\n"
"lxW le 1\n"
"gGb ng 1\n"
"gvY ng 1\n"
"mjF ij 1\n"
"ptX th 1\n"
"pYy pr 1\n"
"Yrf er 1\n"
"mVd de 1\n"
"zpR sz 1\n"
"xKw wa 1\n"
"wpM pr 1\n"
"cLk ch 1\n"
"Sqz qu 1\n"
"gWn an 1\n"
"sWz st 1\n"
"srS er 1\n"
"cVx ch 1\n"
"xNb be 1\n"
"hPb th 1\n"
"bGq qu 1\n"
"tdH th 1\n"
"yJl le 1\n"
"vUk ka 1\n"
"dJz sz 1\n"
"qhI th 1\n"
"mtP th 1\n"
"lGb le 1\n"
"hDx th 1\n"
"zfW sz 1\n"
"Nml le 1\n"
"Hsw st 1\n"
"pfG pr 1\n"
"dMj de 1\n"
"kKq qu 1\n"
"rjS er 1\n"
"Qlg ng 1\n"
"Nfy ny 1\n"
"cqM ch 1\n"
"hWm th 1\n"
"fuO qu 1\n"
"zfF sz 1\n"
"qgH ng 1\n"
"bpZ pr 1\n"
"btY th 1\n"
"uqB qu 1\n"
"qyA qu 1\n"
"Xrp er 1\n"
"ytX th 1\n"
"dHm de 1\n"
"vBg ng 1\n"
"yyN ny 1\n"
"Qrj er 1\n"
"gKd ng 1\n"
"bfU be 1\n"
"Qft th 1\n"
"bqP qu 1\n"
"qOz qu 1\n"
"Xhc th 1\n"
"dqY qu 1\n"
"hjQ th 1\n"
"Yfu qu 1\n"
"aXk an 1\n"
"pbV pr 1\n"
"vjP ij 1\n"
"Ybp pr 1\n"
"Jmb me 1\n"
"qFq qu 1\n"
"yPq qu 1\n"
"yWw wa 1\n"
"vhX th 1\n"
"iwT in 1\n"
"qZf qu 1\n"
"uqU qu 1\n"
"uFk qu 1\n"
"cpW ch 1\n"
"Lpq qu 1\n"
"kfL ka 1\n"
"pQe er 1\n"
"gwz ng 1\n"
"jpM ij 1\n"
"Qkm ka 1\n"
"jgH ng 1\n"
"xjP ij 1\n"
"xgL ng 1\n"
"jLm ij 1\n"
"dxN de 1\n"
"vWs st 1\n"
"Jjh th 1\n"
"hhG th 1\n"
"Yvc ch 1\n"
"xrE er 1\n"
"bZw wa 1\n"
"Lvw va 1\n"
"eNw er 1\n"
"fjB ij 1\n"
"dcQ ch 1\n"
"lZt th 1\n"
"Jwq qu 1\n"
"qPg ng 1\n"
"xMb be 1\n"
"hfD th 1\n"
"jzQ sz 1\n"
"Uuf qu 1\n"
"zGk sz 1\n"
"zCc ch 1\n"
"npC an 1\n"
"tWd th 1\n"
"hjF th 1\n"
"Pzs st 1\n"
"wuA qu 1\n"
"Qhg th 1\n"
"Mqm qu 1\n"
"fsI st 1\n"
"fdU de 1\n"
"Xrm er 1\n"
"qQg ng 1\n"
"bkW ka 1\n"
"dHg ng 1\n"
"rcB ch 1\n"
"hWu th 1\n"
"nIq an 1\n"
"rYq qu 1\n"
"xXv va 1\n"
"wqP qu 1\n"
"xmN me 1\n"
"sJf st 1\n"
"yMf ny 1\n"
"Sfk ka 1\n"
"qzW qu 1\n"
"cvT ch 1\n"
"kmX ka 1\n"
"xqU qu 1\n"
"cnG an 1\n"
"Jpi in 1\n"
"frX er 1\n"
"yLf ny 1\n"
"uyU qu 1\n"
"Ddw de 1\n"
"Tgj ng 1\n"
"qeH qu 1\n"
"fEz sz 1\n"
"pCk ka 1\n"
"qmf qu 1\n"
"rjH er 1\n"
"xMp pr 1\n"
"Ywo on 1\n"
"zgD ng 1\n"
"Pqx qu 1\n"
"nqM on 1\n"
"wdX de 1\n"
"Bpz sz 1\n"
"lhM th 1\n"
"Epb pr 1\n"
"bhJ th 1\n"
"kvQ ka 1\n"
"Rsq qu 1\n"
"xbP be 1\n"
"nMm an 1\n"
"xuC qu 1\n"
"wjs sz 1\n"
"fxX fo 1\n"
"hvT th 1\n"
"uPx qu 1\n"
"Jmy me 1\n"
"Qzd de 1\n"
"Nsz st 1\n"
"vWd de 1\n"
"hfX th 1\n"
"jCg ng 1\n"
"yQx ny 1\n"
"whJ th 1\n"
"wrq qu 1\n"
"xgW ng 1\n"
"Jhj th 1\n"
"lhC th 1\n"
"Pwf ow 1\n"
"ljC le 1\n"
"vvB va 1\n"
"mcN ch 1\n"
"yHx ny 1\n"
"bBj ij 1\n"
"qRz qu 1\n"
"glH ng 1\n"
"cZp ch 1\n"
"qJh th 1\n"
"tSg th 1\n"
"xVm me 1\n"
"uWs qu 1\n"
"Vxo on 1\n"
"fjM ij 1\n"
"zhK th 1\n"
"Cjh th 1\n"
"vZr er 1\n"
"bCs sz 1\n"
"rwY er 1\n"
"xEi in 1\n"
"dUv de 1\n"
"fRg ng 1\n"
"Gcu ch 1\n"
"jDf ij 1\n"
"djH de 1\n"
"vlU le 1\n"
"qyG qu 1\n"
"kfq qu 1\n"
"lXg ng 1\n"
"lbC le 1\n"
"Pwg ng 1\n"
"Oae an 1\n"
"pbC pr 1\n"
"dWt th 1\n"
"lzU le 1\n"
"wJz sz 1\n"
"dYj de 1\n"
"cBj ch 1\n"
"fRv va 1\n"
"djG de 1\n"
"mYg ng 1\n"
"Qbc ch 1\n"
"gnX an 1\n"
"wPm me 1\n"
"wvN va 1\n"
"qGm qu 1\n"
"qNh th 1\n"
"mRg ng 1\n"
"Uqv qu 1\n"
"Qxm me 1\n"
"fzX sz 1\n"
"zjM sz 1\n"
"xqA qu 1\n"
"bMs sz 1\n"
"vmL me 1\n"
"Eyx ny 1\n"
"hHj th 1\n"
"jGp ij 1\n"
"mfD me 1\n"
"Jfw wa 1\n"
"Wjh th 1\n"
"bZs sz 1\n"
"Iyk ka 1\n"
"zRn an 1\n"
"cdU ch 1\n"
"mJh th 1\n"
"Qjy ij 1\n"
"Qao an 1\n"
"bXv va 1\n"
"hSg th 1\n"
"rAo er 1\n"
"hLs th 1\n"
"lCs le 1\n"
"qkJ qu 1\n"
"Rxu qu 1\n"
"xdN de 1\n"
"yYx ny 1\n"
"dkN de 1\n"
"Rgw ng 1\n"
"zgL sz 1\n"
"Rcj ch 1\n"
"iWz in 1\n"
"dLk de 1\n"
"mpX me 1\n"
"Gbd de 1\n"
"bnH an 1\n"
"kdM de 1\n"
"wqG qu 1\n"
"vMz sz 1\n"
"zwH sz 1\n"
"wgx ng 1\n"
"Ljk ij 1\n"
"tlG th 1\n"
"tgE th 1\n"
"Wcw ch 1\n"
"Vby be 1\n"
"mVz sz 1\n"
"Hgc ch 1\n"
"gqP ng 1\n"
"hhB th 1\n"
"nFx an 1\n"
"yBf ny 1\n"
"Wmx me 1\n"
"vNb va 1\n"
"Mnv an 1\n"
"Zmc ch 1\n"
"bzS sz 1\n"
"yfC ny 1\n"
"Epx pr 1\n"
"ljG le 1\n"
"wUa an 1\n"
"Qgo ng 1\n"
"pqb qu 1\n"
"Jkm ka 1\n"
"Wvy va 1\n"
"Bjp ij 1\n"
"vfZ va 1\n"
"wxT wa 1\n"
"Vxw wa 1\n"
"dRt th 1\n"
"nVq an 1\n"
"iWf in 1\n"
"Smq qu 1\n"
"jwG ij 1\n"
"vcW ch 1\n"
"Qgz ng 1\n"
"Wkq qu 1\n"
"xrL er 1\n"
"tVh ch 1\n"
"Zlr er 1\n"
"zDt th 1\n"
"yxP ny 1\n"
"Yyw wa 1\n"
"zPk sz 1\n"
"Bgg ng 1\n"
"xOk ka 1\n"
"oXq qu 1\n"
"tQf th 1\n"
"fxF fo 1\n"
"dOq qu 1\n"
"Vtp th 1\n"
"jhP th 1\n"
"vhZ th 1\n"
"Gqq qu 1\n"
"dFg ng 1\n"
"eCg ng 1\n"
"kjH ij 1\n"
"vqQ qu 1\n"
"jpL ij 1\n"
"hgZ th 1\n"
"xFd de 1\n"
"Qjd de 1\n"
"xKm me 1\n"
"zQc ch 1\n"
"Nhw th 1\n"
"Kqo qu 1\n"
"hwO th 1\n"
"oYn an 1\n"
"Wnf an 1\n"
"vSc ch 1\n"
"Afq qu 1\n"
"jqJ qu 1\n"
"jEg ng 1\n"
"dKp de 1\n"
"nmK an 1\n"
"wXw wa 1\n"
"vjC ij 1\n"
"dXb de 1\n"
"tQn th 1\n"
"qoR qu 1\n"
"bRf be 1\n"
"yyL ny 1\n"
"kSj ij 1\n"
"Xyu qu 1\n"
"vmA va 1\n"
"Zgm ng 1\n"
"Lbx be 1\n"
"bIv va 1\n"
"Zdq qu 1\n"
"gHn an 1\n"
"bYq qu 1\n"
"Mqd qu 1\n"
"qMk qu 1\n"
"Qsv st 1\n"
"zXx sz 1\n"
"hQf th 1\n"
"wcV ch 1\n"
"Xfz sz 1\n"
"Mhc th 1\n"
"kBz sz 1\n"
"bWp pr 1\n"
"Wzu qu 1\n"
"hWw th 1\n"
"yNp pr 1\n"
"xbZ be 1\n"
"mTb me 1\n"
"Kdf de 1\n"
"pfQ pr 1\n"
"vCd de 1\n"
"Pqf qu 1\n"
"ofZ on 1\n"
"wYd de 1\n"
"Tfc ch 1\n"
"Gnb an 1\n"
"Zdx de 1\n"
"zVj sz 1\n"
"Tqw qu 1\n"
"fzV sz 1\n"
"Igq ng 1\n"
"Qvv vi 1\n"
"Pmf me 1\n"
"qHe qu 1\n"
"ybR be 1\n"
"cFg ch 1\n"
"Kvf va 1\n"
"Zxm me 1\n"
"oVc ch 1\n"
"Yhb th 1\n"
"bwP wa 1\n"
"Vvz sz 1\n"
"sdW de 1\n"
"gFz ng 1\n"
"mRl le 1\n"
"bqN qu 1\n"
"bhU th 1\n"
"tBw th 1\n"
"Hbb be 1\n"
"Jzp sz 1\n"
"zrS er 1\n"
"mkZ me 1\n"
"bKw wa 1\n"
"jPx ij 1\n"
"Xqa an 1\n"
"fGz sz 1\n"
"xLk ka 1\n"
"nrV an 1\n"
"Tmx me 1\n"
"zvZ sz 1\n"
"gWl ng 1\n"
"Yxb be 1\n"
"yWt th 1\n"
"lqN qu 1\n"
"tWu th 1\n"
"xZt th 1\n"
"iqI in 1\n"
"cpQ ch 1\n"
"zPf sz 1\n"
"bqG qu 1\n"
"gmI ng 1\n"
"Wkc ch 1\n"
"Zvs sz 1\n"
"qdN qu 1\n"
"hYf th 1\n"
"sBn an 1\n"
"Dwb ow 1\n"
"Wzq qu 1\n"
"Qdw de 1\n"
"svR sz 1\n"
"Nvv va 1\n"
"jRc ch 1\n"
"qDv qu 1\n"
"qGe qu 1\n"
"cwT ch 1\n"
"fTy ny 1\n"
"Cvv va 1\n"
"flQ le 1\n"
"mWg ng 1\n"
"twS th 1\n"
"npM an 1\n"
"Ufq qu 1\n"
"fuG qu 1\n"
"oCj on 1\n"
"txF th 1\n"
"Yft th 1\n"
"qwy qu 1\n"
"Vdz de 1\n"
"Vgq ng 1\n"
"Rkg ng 1\n"
"Pxz sz 1\n"
"mCn an 1\n"
"whZ th 1\n"
"fgB ng 1\n"
"jvW ij 1\n"
"kdL de 1\n"
"Lxi in 1\n"
"svB sz 1\n"
"xuH qu 1\n"
"gFy ng 1\n"
"oVv on 1\n"
"Zhq th 1\n"
"oqG qu 1\n"
"oJp on 1\n"
"gIf ng 1\n"
"bwF wa 1\n"
"vLh th 1\n"
"jgX ng 1\n"
"qKi in 1\n"
"xRh th 1\n"
"qwV qu 1\n"
"mNl le 1\n"
"Gvv va 1\n"
"pQf pr 1\n"
"xbV be 1\n"
"dpZ de 1\n"
"fHq qu 1\n"
"bBd de 1\n"
"vUh th 1\n"
"hzA th 1\n"
"Mnz an 1\n"
"pBt th 1\n"
"oaE an 1\n"
"slK le 1\n"
"Wlg ng 1\n"
"jhK th 1\n"
"xvX va 1\n"
"Ffx fo 1\n"
"gXh th 1\n"
"cWf ch 1\n"
"Gpy pr 1\n"
"xmS me 1\n"
"gZn an 1\n"
"djX de 1\n"
"bkX ka 1\n"
"xlP le 1\n"
"hCt th 1\n"
"Yhj th 1\n"
"gwQ ng 1\n"
"klD le 1\n"
"Rhq th 1\n"
"aEj an 1\n"
"jpY ij 1\n"
"pVn an 1\n"
"nJx an 1\n"
"zdV de 1\n"
"Rvf va 1\n"
"Oqy qu 1\n"
"zpT sz 1\n"
"Pzc ch 1\n"
"qTm qu 1\n"
"jfq ij 1\n"
"ztY th 1\n"
"Zqv qu 1\n"
"nZb an 1\n"
"pHl le 1\n"
"Qcr ch 1\n"
"zVm sz 1\n"
"pNm me 1\n"
"Xhj th 1\n"
"oYy on 1\n"
"Flq qu 1\n"
"lwj le 1\n"
"rwH er 1\n"
"oWq qu 1\n"
"Bwm me 1\n"
"jXs sz 1\n"
"Lkt th 1\n"
"lVn an 1\n"
"jXa an 1\n"
"hkB th 1\n"
"qrQ qu 1\n"
"dqK qu 1\n"
"Zxn an 1\n"
"ygZ ng 1\n"
"Fgt th 1\n"
"nwM an 1\n"
"Wzx sz 1\n"
"qgb ng 1\n"
"Ygv ng 1\n"
"Xdd de 1\n"
"xjM ij 1\n"
"qHb qu 1\n"
"zKz sz 1\n"
"dvM de 1\n"
"Zpx pr 1\n"
"wPt th 1\n"
"qiA in 1\n"
"jyV ij 1\n"
"jyR ij 1\n"
"Uox on 1\n"
"Qkz ka 1\n"
"Lxq qu 1\n"
"fpq qu 1\n"
"Xmf me 1\n"
"kRx ka 1\n"
"jFk ij 1\n"
"nZc an 1\n"
"hCp th 1\n"
"Hbw wa 1\n"
"zlF le 1\n"
"kqI qu 1\n"
"wWj ij 1\n"
"qKk qu 1\n"
"Jpf pr 1\n"
"lbR le 1\n"
"rbJ er 1\n"
"zfK sz 1\n"
"gVk ng 1\n"
"bZx be 1\n"
"znQ an 1\n"
"gZb ga 1\n"
"wtI th 1\n"
"bvW va 1\n"
"qhG th 1\n"
"xrV er 1\n"
"pYc ch 1\n"
"bQq qu 1\n"
"qpV qu 1\n"
"pFm me 1\n"
"zdO de 1\n"
"Jvj ij 1\n"
"mQl le 1\n"
"xWm me 1\n"
"Dtz th 1\n"
"lKz le 1\n"
"dkI de 1\n"
"fSx fo 1\n"
"yCp pr 1\n"
"whF th 1\n"
"lVm le 1\n"
"yHv va 1\n"
"Plm le 1\n"
"Jpm me 1\n"
"hEw ha 1\n"
"zHz sz 1\n"
"uIj qu 1\n"
"gzB ng 1\n"
"qsV qu 1\n"
"pbX pr 1\n"
"jyY ij 1\n"
"mjq qu 1\n"
"zDd de 1\n"
"Tqc ch 1\n"
"fTg ng 1\n"
"qbh th 1\n"
"Cjq qu 1\n"
"pcW ch 1\n"
"Xhp th 1\n"
"fwR wa 1\n"
"dQm de 1\n"
"xCk ka 1\n"
"yhM th 1\n"
"glQ ng 1\n"
"gVb ng 1\n"
"Pdy de 1\n"
"yOj ij 1\n"
"jZg ng 1\n"
"oqZ qu 1\n"
"bqI qu 1\n"
"jkX ij 1\n"
"Kfh th 1\n"
"xpQ pr 1\n"
"rhX th 1\n"
"wjI ij 1\n"
"Bqf qu 1\n"
"aCp an 1\n"
"ccX ch 1\n"
"vGm ma 1\n"
"paU an 1\n"
"xUh th 1\n"
"gLd ng 1\n"
"tfJ th 1\n"
"fwH wa 1\n"
"Pnq an 1\n"
"kxV ka 1\n"
"Nbk ka 1\n"
"sqE qu 1\n"
"Cjp ij 1\n"
"kcZ ka 1\n"
"Wqj ij 1\n"
"tzY th 1\n"
"nqX an 1\n"
"Yyc ch 1\n"
"Lzd de 1\n"
"xZy ny 1\n"
"sdY de 1\n"
"jXn an 1\n"
"Nbm me 1\n"
"wLr er 1\n"
"Nqr qu 1\n"
"Zwx wa 1\n"
"yvH va 1\n"
"ylC le 1\n"
"qyh th 1\n"
"Jnz an 1\n"
"hHv th 1\n"
"zUq qu 1\n"
"xgI ng 1\n"
"Ztp th 1\n"
"Vvb va 1\n"
"tGn th 1\n"
"Ujq qu 1\n"
"jHs sz 1\n"
"bWq qu 1\n"
"bXr er 1\n"
"hFg th 1\n"
"gdT ng 1\n"
"qHc ch 1\n"
"lCj le 1\n"
"mVg ng 1\n"
"pQq qu 1\n"
"vWl le 1\n"
"yFq qu 1\n"
"djY de 1\n"
"btQ th 1\n"
"vlM le 1\n"
"Iwt th 1\n"
"Pdb de 1\n"
"jtQ th 1\n"
"xjR ij 1\n"
"dhW th 1\n"
"zXs sz 1\n"
"fbE be 1\n"
"Hqr qu 1\n"
"vLt th 1\n"
"kbD ka 1\n"
"vUd de 1\n"
"yZc ch 1\n"
"Qke le 1\n"
"fhG th 1\n"
"eHt th 1\n"
"vHj ij 1\n"
"Tfg ng 1\n"
"uoA qu 1\n"
"zCx sz 1\n"
"zLk sz 1\n"
"jdW de 1\n"
"Cgn an 1\n"
"Lrq qu 1\n"
"yOi in 1\n"
"qOw qu 1\n"
"fqs qu 1\n"
"ltQ th 1\n"
"nwU an 1\n"
"zYq qu 1\n"
"Gzs st 1\n"
"nWv an 1\n"
"lNx le 1\n"
"Wql qu 1\n"
"dcD ch 1\n"
"vfD va 1\n"
"qVd qu 1\n"
"Wzz sz 1\n"
"jfH ij 1\n"
"Rrt th 1\n"
"qDr qu 1\n"
"lOh th 1\n"
"wwZ wa 1\n"
"mQw me 1\n"
"nqK an 1\n"
"Uvl le 1\n"
"kRq qu 1\n"
"Vhg th 1\n"
"xsD st 1\n"
"Ldd de 1\n"
"sQv st 1\n"
"qMj qu 1\n"
"hbQ th 1\n"
"cjX ch 1\n"
"nbT an 1\n"
"xNf fo 1\n"
"wCt th 1\n"
"jnX an 1\n"
"tZf th 1\n"
"qCk qu 1\n"
"dHk de 1\n"
"Ccq ch 1\n"
"uMf qu 1\n"
"bvG va 1\n"
"zPz sz 1\n"
"yIy ny 1\n"
"lHx le 1\n"
"fnB an 1\n"
"Ebx be 1\n"
"rGc ch 1\n"
"mgD ng 1\n"
"hJg th 1\n"
"jcG ch 1\n"
"Ybd de 1\n"
"oDq qu 1\n"
"jRx ij 1\n"
"kJf ka 1\n"
"tFv th 1\n"
"Gdv de 1\n"
"fHn an 1\n"
"Uqp qu 1\n"
"cYh th 1\n"
"kHp ka 1\n"
"qhZ th 1\n"
"wZh th 1\n"
"kQt th 1\n"
"hwH th 1\n"
"xzU sz 1\n"
"tQg th 1\n"
"Qbj ij 1\n"
"zVl le 1\n"
"qJd qu 1\n"
"Xrf er 1\n"
"fMv va 1\n"
"qJc ch 1\n"
"Dqy qu 1\n"
"qMs qu 1\n"
"fzl le 1\n"
"Wdx de 1\n"
"Tdw wa 1\n"
"mcT ch 1\n"
"fOd de 1\n"
"Kgj ng 1\n"
"yrT er 1\n"
"bqA qu 1\n"
"snq an 1\n"
"Lzt th 1\n"
"gLw ng 1\n"
"dLq qu 1\n"
"Qzr er 1\n"
"Qrn an 1\n"
"eFn an 1\n"
"Nmw wa 1\n"
"pxE pr 1\n"
"Cqk qu 1\n"
"Wcd ch 1\n"
"fXw wa 1\n"
"fbU be 1\n"
"aeO an 1\n"
"svV st 1\n"
"yVt th 1\n"
"sRp st 1\n"
"rxU er 1\n"
"qhK th 1\n"
"uQw qu 1\n"
"oXw on 1\n"
"Jvw va 1\n"
"kvH ka 1\n"
"zVy sz 1\n"
"rOq qu 1\n"
"cWx ch 1\n"
"iXv in 1\n"
"cBk ch 1\n"
"xkM ka 1\n"
"vHb va 1\n"
"jbW ij 1\n"
"mYq qu 1\n"
"fnH an 1\n"
"zRj sz 1\n"
"hvN th 1\n"
"oMh th 1\n"
"yqO qu 1\n"
"fBf fo 1\n"
"oPj on 1\n"
"fFc ch 1\n"
"lVq qu 1\n"
"ptJ th 1\n"
"Ntj th 1\n"
"rwL er 1\n"
"cFz ch 1\n"
"jVd de 1\n"
"Gbv va 1\n"
"oJn an 1\n"
"wkL ka 1\n"
"qoT qu 1\n"
"Qxk ka 1\n"
"rZj ij 1\n"
"Cgd ng 1\n"
"gvW ng 1\n"
"kYv ka 1\n"
"qjR qu 1\n"
"Vnq an 1\n"
"yJt th 1\n"
"xWy ny 1\n"
"bXl le 1\n"
"xVk ka 1\n"
"xuG qu 1\n"
"Hzs st 1\n"
"uDq qu 1\n"
"Ywk ka 1\n"
"Jkh th 1\n"
"Gdm de 1\n"
"qcO ch 1\n"
"hlH th 1\n"
"Jfv va 1\n"
"cLn an 1\n"
"wzG sz 1\n"
"yhF th 1\n"
"kfD ka 1\n"
"kbJ ka 1\n"
"Nqp qu 1\n"
"gYq ng 1\n"
"ztM th 1\n"
"jcD ch 1\n"
"wgY ng 1\n"
"qdT da 1\n"
"vTw va 1\n"
"cNz ch 1\n"
"Jbc ch 1\n"
"Xcj ch 1\n"
"rUw er 1\n"
"gXv ng 1\n"
"dRf de 1\n"
"bJz sz 1\n"
"aqA an 1\n"
"uOz qu 1\n"
"wPj ij 1\n"
"uDw qu 1\n"
"mqF qu 1\n"
"cXr ch 1\n"
"yrL er 1\n"
"nJk an 1\n"
"hsY th 1\n"
"Zqs qu 1\n"
"qeS qu 1\n"
"bLv va 1\n"
"jEo on 1\n"
"pmE me 1\n"
"jIt th 1\n"
"vzZ sz 1\n"
"Qhd th 1\n"
"cnN an 1\n"
"bPq qu 1\n"
"pZw pr 1\n"
"iwR in 1\n"
"oJv ko 1\n"
"ufI qu 1\n"
"wKm me 1\n"
"uWv qu 1\n"
"fCf fo 1\n"
"wBn an 1\n"
"Uyf ny 1\n"
"uVx qu 1\n"
"kKf ka 1\n"
"mrZ er 1\n"
"lXb le 1\n"
"zJm sz 1\n"
"wYr er 1\n"
"Hkw ka 1\n"
"Ewz sz 1\n"
"xJy ny 1\n"
"Emx me 1\n"
"cqL ch 1\n"
"zVk sz 1\n"
"yPb be 1\n"
"zcC ch 1\n"
"Ndq qu 1\n"
"uWf qu 1\n"
"kcM ch 1\n"
"tkB th 1\n"
"yhq th 1\n"
"qaP an 1\n"
"rVs er 1\n"
"dLd de 1\n"
"Sgm ng 1\n"
"Xhx th 1\n"
"xqH qu 1\n"
"Kqy qu 1\n"
"yRw wa 1\n"
"Wdw de 1\n"
"qcQ ch 1\n"
"zbp sz 1\n"
"dtY th 1\n"
"cwB ch 1\n"
"nfV an 1\n"
"cgP ch 1\n"
"pwW pr 1\n"
"pqf qu 1\n"
"Xkp ka 1\n"
"izJ in 1\n"
"cYw ch 1\n"
"iQl in 1\n"
"Qvy va 1\n"
"ylR le 1\n"
"sFp st 1\n"
"Lqg ng 1\n"
"xnP an 1\n"
"gYl ng 1\n"
"wIr er 1\n"
"fqR qu 1\n"
"Qpk ka 1\n"
"qXz qu 1\n"
"Lrr er 1\n"
"sjI st 1\n"
"iyX in 1\n"
"Zfq qu 1\n"
"vtH th 1\n"
"cZf ch 1\n"
"hXp th 1\n"
"rJw er 1\n"
"gbP ng 1\n"
"Qug ng 1\n"
"jRt th 1\n"
"lXh th 1\n"
"pVc ch 1\n"
"kGc ch 1\n"
"Nxr er 1\n"
"yKk ka 1\n"
"xAo on 1\n"
"oUx on 1\n"
"nWx an 1\n"
"fwU wa 1\n"
"mKg ng 1\n"
"qhO th 1\n"
"sGg ng 1\n"
"Wwu qu 1\n"
"cnE an 1\n"
"tjS th 1\n"
"Qyd de 1\n"
"yWm me 1\n"
"Qdj de 1\n"
"jSd de 1\n"
"Ioy on 1\n"
"Xpp pr 1\n"
"xJb be 1\n"
"xvT va 1\n"
"cdT ch 1\n"
"khX th 1\n"
"hVp th 1\n"
"cjT ch 1\n"
"Hqf qu 1\n"
"nbP an 1\n"
"Uwb wa 1\n"
"Kcb ch 1\n"
"qsQ qu 1\n"
"tkZ th 1\n"
"zrX er 1\n"
"zbN sz 1\n"
"mYi in 1\n"
"gLx ng 1\n"
"sGc ch 1\n"
"Pbv va 1\n"
"gcV ch 1\n"
"Qjf ij 1\n"
"wvB va 1\n"
"gKp ng 1\n"
"jZy ij 1\n"
"qhW th 1\n"
"vCg ng 1\n"
"Lrk er 1\n"
"fRw wa 1\n"
"cMj ch 1\n"
"ohK th 1\n"
"frK er 1\n"
"dQq qu 1\n"
"Hdj de 1\n"
"Bkx ka 1\n"
"yXv va 1\n"
"fdO de 1\n"
"sWg ng 1\n"
"Xtf th 1\n"
"rUx ar 1\n"
"qHm qu 1\n"
"kQh th 1\n"
"wzU sz 1\n"
"vTt th 1\n"
"zkN sz 1\n"
"Fqp qu 1\n"
"xJc ch 1\n"
"wkQ ka 1\n"
"wxF wa 1\n"
"vRj ij 1\n"
"jzD sz 1\n"
"Zqu un 1\n"
"zWw sz 1\n"
"zgU ng 1\n"
"ugX ng 1\n"
"pmB me 1\n"
"gzA ng 1\n"
"Zjj ij 1\n"
"xIj ij 1\n"
"xoK on 1\n"
"Gqx qu 1\n"
"uLq qu 1\n"
"lGw le 1\n"
"tZq th 1\n"
"zcN ch 1\n"
"yPz sz 1\n"
"rqN qu 1\n"
"pwG pr 1\n"
"vfP va 1\n"
"vIy va 1\n"
"vEj ij 1\n"
"jqD qu 1\n"
"Hxu qu 1\n"
"qLs qu 1\n"
"Jpy pr 1\n"
"pRw pr 1\n"
"fZs st 1\n"
"Vvx va 1\n"
"zkB sz 1\n"
"yGk ka 1\n"
"kvZ ka 1\n"
"cqW ch 1\n"
"wLg ng 1\n"
"Ypg ng 1\n"
"jrR er 1\n"
"vwZ va 1\n"
"gVd ng 1\n"
"iCw ij 1\n"
"Fxw wa 1\n"
"qyZ qu 1\n"
"qgT qu 1\n"
"xLs st 1\n"
"pXg ng 1\n"
"gNv ng 1\n"
"Hgz ng 1\n"
"zJv sz 1\n"
"Hvm va 1\n"
"uXb qu 1\n"
"lLz le 1\n"
"dwP de 1\n"
"gvN ng 1\n"
"cpF ch 1\n"
"vZj ij 1\n"
"Pfv va 1\n"
"xcI ch 1\n"
"yVp pr 1\n"
"fdC de 1\n"
"pbE pr 1\n"
"jQm ij 1\n"
"Tqt th 1\n"
"wMh th 1\n"
"Gkq qu 1\n"
"tdV th 1\n"
"xIk ka 1\n"
"hHp th 1\n"
"Lsb st 1\n"
"Wvs st 1\n"
"Qcw ch 1\n"
"gfQ ng 1\n"
"Fjt th 1\n"
"xBz sz 1\n"
"fLx fo 1\n"
"zkR sz 1\n"
"kjA ij 1\n"
"Fcw ch 1\n"
"fhT th 1\n"
"qiK qu 1\n"
"wQv va 1\n"
"pXl le 1\n"
"hLg th 1\n"
"jJw ij 1\n"
"sOj st 1\n"
"vWb va 1\n"
"Ajq qu 1\n"
"vKc ch 1\n"
"iIy in 1\n"
"pJy pr 1\n"
"Lqc ch 1\n"
"wBd de 1\n"
"kRb ka 1\n"
"Lcp ch 1\n"
"gfB ng 1\n"
"zVn an 1\n"
"qWf qu 1\n"
"Qyf ny 1\n"
"puF qu 1\n"
"fIe er 1\n"
"wGb wa 1\n"
"jjL ij 1\n"
"hcE th 1\n"
"qhp th 1\n"
"gxN ng 1\n"
"tMd th 1\n"
"Rzt th 1\n"
"cgO ch 1\n"
"vmT va 1\n"
"Dcq ch 1\n"
"qoI qu 1\n"
"Nqz qu 1\n"
"vhM th 1\n"
"gBq ng 1\n"
"jWv ij 1\n"
"xmE me 1\n"
"qcd ch 1\n"
"lYj le 1\n"
"dDc ch 1\n"
"xUa an 1\n"
"kVl le 1\n"
"wqN qu 1\n"
"uuI qu 1\n"
"Wzf sz 1\n"
"yvX va 1\n"
"Pyq qu 1\n"
"wuU qu 1\n"
"hLp th 1\n"
"qqL qu 1\n"
"cVh th 1\n"
"Fgs ng 1\n"
"xjF ij 1\n"
"wkG ka 1\n"
"qJr qu 1\n"
"Gzq qu 1\n"
"Ixv va 1\n"
"hMv th 1\n"
"dfQ de 1\n"
"eOx er 1\n"
"mHq qu 1\n"
"Zkn an 1\n"
"nqW an 1\n"
"nJd an 1\n"
"pEh th 1\n"
"gVg ng 1\n"
"Zyf ny 1\n"
"nmT an 1\n"
"csQ ch 1\n"
"Pkq qu 1\n"
"tdP th 1\n"
"fkz sz 1\n"
"Qnc an 1\n"
"pBj ij 1\n"
"Mjv ij 1\n"
"ymJ me 1\n"
"Mxs st 1\n"
"hbL th 1\n"
"vQh th 1\n"
"xDy ny 1\n"
"djC de 1\n"
"cdQ ch 1\n"
"bnL an 1\n"
"Yjl le 1\n"
"qUc ch 1\n"
"mjW ij 1\n"
"zWs st 1\n"
"xvF va 1\n"
"Gqi qu 1\n"
"fGm me 1\n"
"Xuw qu 1\n"
"qCs qu 1\n"
"Kxm me 1\n"
"lNn an 1\n"
"sdL de 1\n"
"Vtn th 1\n"
"sJj st 1\n"
"kQj ij 1\n"
"xfX fo 1\n"
"Nqk qu 1\n"
"cBs ch 1\n"
"yzP sz 1\n"
"xUv va 1\n"
"lbT le 1\n"
"wyV wa 1\n"
"Xkm ka 1\n"
"Wdv de 1\n"
"qQn an 1\n"
"sqZ qu 1\n"
"sfW st 1\n"
"gfM ng 1\n"
"Vlp le 1\n"
"Xjx ij 1\n"
"hIj th 1\n"
"Jws st 1\n"
"xZr er 1\n"
"iKw in 1\n"
"Tbd de 1\n"
"zQv sz 1\n"
"nmZ an 1\n"
"bpE pr 1\n"
"zSv sz 1\n"
"Fgi ng 1\n"
"uIw qu 1\n"
"Zvx va 1\n"
"rqR qu 1\n"
"vjZ ij 1\n"
"Njr er 1\n"
"kwF ka 1\n"
"Ovw va 1\n"
"hwZ th 1\n"
"Mvk ka 1\n"
"Dvf va 1\n"
"xsP st 1\n"
"gZq ng 1\n"
"vXv va 1\n"
"wGt th 1\n"
"qlO qu 1\n"
"fNz sz 1\n"
"Nvw va 1\n"
"zdZ de 1\n"
"vxV va 1\n"
"Nhz th 1\n"
"tZm th 1\n"
"iyS in 1\n"
"qZa an 1\n"
"xrZ er 1\n"
"qly qu 1\n"
"cjM ch 1\n"
"kYj ij 1\n"
"iyF in 1\n"
"Cdq qu 1\n"
"xwE wa 1\n"
"xfV fo 1\n"
"wbF wa 1\n"
"wuO qu 1\n"
"Rlh th 1\n"
"fCj ij 1\n"
"bcZ ch 1\n"
"Gjv ij 1\n"
"gLl ng 1\n"
"wLc ch 1\n"
"zmP sz 1\n"
"cYo ch 1\n"
"Rhk th 1\n"
"grM ng 1\n"
"fDh th 1\n"
"Yyb be 1\n"
"uyW un 1\n"
"kGb ka 1\n"
"iwK in 1\n"
"qkN qu 1\n"
"qXd qu 1\n"
"zCb sz 1\n"
"rQf er 1\n"
"xrO er 1\n"
"Fzh th 1\n"
"wSj ij 1\n"
"yPw wa 1\n"
"Bqw qu 1\n"
"kWc ch 1\n"
"qhX th 1\n"
"kBw ka 1\n"
"yvL va 1\n"
"xcT ch 1\n"
"Fbz sz 1\n"
"cEb ch 1\n"
"vEk ka 1\n"
"uQh th 1\n"
"sHw us 1\n"
"Fvf va 1\n"
"wkO ka 1\n"
"wiY in 1\n"
"sPm st 1\n"
"dFn an 1\n"
"qQx qu 1\n"
"Rsg ng 1\n"
"fUj ij 1\n"
"tLw th 1\n"
"sRk st 1\n"
"zkP sz 1\n"
"mvF va 1\n"
"jYb ij 1\n"
"swY is 1\n"
"rRc ch 1\n"
"rHd er 1\n"
"bDk ka 1\n"
"lWv le 1\n"
"vqv qu 1\n"
"qoN qu 1\n"
"zMl le 1\n"
"pfJ pr 1\n"
"Dmz sz 1\n"
"obQ on 1\n"
"Vfz sz 1\n"
"bVd de 1\n"
"Cjv ij 1\n"
"mKz sz 1\n"
"jjE ij 1\n"
"Aqc ch 1\n"
"Cxn an 1\n"
"vpH va 1\n"
"Lxa an 1\n"
"zpH sz 1\n"
"qoF qu 1\n"
"hRz th 1\n"
"yYw wa 1\n"
"dUx de 1\n"
"Kxl le 1\n"
"xUo on 1\n"
"hDp th 1\n"
"zDf sz 1\n"
"Wsq qu 1\n"
"jzZ sz 1\n"
"mGf me 1\n"
"jjV ij 1\n"
"pfR pr 1\n"
"bPd de 1\n"
"wjq qu 1\n"
"Rjx ij 1\n"
"Lwq qu 1\n"
"fqH qu 1\n"
"jRs sz 1\n"
"sfT sz 1\n"
"Grw er 1\n"
"zGn an 1\n"
"ycW ch 1\n"
"lUq qu 1\n"
"pRq qu 1\n"
"nZq an 1\n"
"Svx va 1\n"
"Phf th 1\n"
"Fvj ij 1\n"
"Qlm le 1\n"
"jgS ng 1\n"
"Mmv va 1\n"
"xPd de 1\n"
"qqw qu 1\n"
"rWp er 1\n"
"qIr qu 1\n"
"Cxf fo 1\n"
"wtG th 1\n"
"cKb ch 1\n"
"btL th 1\n"
"pRx pr 1\n"
"zsB sz 1\n"
"nbD an 1\n"
"jKg ng 1\n"
"bhL th 1\n"
"Yhw th 1\n"
"yYr er 1\n"
"jCm ij 1\n"
"xzK sz 1\n"
"pJl le 1\n"
"Qrr er 1\n"
"uvG qu 1\n"
"cfJ ch 1\n"
"iqX in 1\n"
"vNd de 1\n"
"qcM ch 1\n"
"Wvj ij 1\n"
"vmS va 1\n"
"vWp va 1\n"
"aIj an 1\n"
"jmS ij 1\n"
"Fmk ka 1\n"
"iyN in 1\n"
"bZu qu 1\n"
"Kzj sz 1\n"
"Vwd de 1\n"
"Ulx le 1\n"
"rCv er 1\n"
"wvq qu 1\n"
"Qkr ri 1\n"
"fjC ij 1\n"
"tRr th 1\n"
"pCy pr 1\n"
"fbC be 1\n"
"fQc ch 1\n"
"Xkf ka 1\n"
"Dqr qu 1\n"
"fgE ng 1\n"
"vMm va 1\n"
"dPb de 1\n"
"vjL ij 1\n"
"wKc ch 1\n"
"Pyw wa 1\n"
"eXv er 1\n"
"nVw an 1\n"
"Jww wa 1\n"
"Dfq qu 1\n"
"tCc th 1\n"
"qtH th 1\n"
"Xqm qu 1\n"
"Bhc th 1\n"
"tcX th 1\n"
"xKp pr 1\n"
"tfN th 1\n"
"ibZ in 1\n"
"Nzb sz 1\n"
"Wnj an 1\n"
"vXy va 1\n"
"iVf in 1\n"
"dxT de 1\n"
"jxQ ij 1\n"
"Ddv de 1\n"
"mXd de 1\n"
"fUq qu 1\n"
"wgQ ng 1\n"
"Lgj ng 1\n"
"mgY ng 1\n"
"qMw qu 1\n"
"gpJ ng 1\n"
"sZx st 1\n"
"nXz an 1\n"
"Wve er 1\n"
"lVk le 1\n"
"wCb wa 1\n"
"xvI va 1\n"
"mfJ me 1\n"
"tQq th 1\n"
"dTt th 1\n"
"fqk qu 1\n"
"nVt th 1\n"
"wIh th 1\n"
"Qvp va 1\n"
"vfN va 1\n"
"gQs ng 1\n"
"iVp in 1\n"
"jGl le 1\n"
"xMf fo 1\n"
"xvw wi 1\n"
"zIl le 1\n"
"zfR sz 1\n"
"zWv sz 1\n"
"ehV th 1\n"
"dZq qu 1\n"
"tmK th 1\n"
"cLt th 1\n"
"pZb pr 1\n"
"vnJ an 1\n"
"fvk ka 1\n"
"Xhv th 1\n"
"Vjn an 1\n"
"tgI th 1\n"
"xaJ an 1\n"
"mSf me 1\n"
"Xzm sz 1\n"
"dTz de 1\n"
"xXm me 1\n"
"pQz sz 1\n"
"Cqg ng 1\n"
"bSs st 1\n"
"prW er 1\n"
"hDb th 1\n"
"sXt th 1\n"
"kcD ch 1\n"
"kgZ ng 1\n"
"Tzt th 1\n"
"zcR ch 1\n"
"Xwu qu 1\n"
"kXg ng 1\n"
"Ywv wi 1\n"
"rpK er 1\n"
"wPs is 1\n"
"Kjz sz 1\n"
"fDb be 1\n"
"jrF er 1\n"
"bbQ be 1\n"
"Qdb de 1\n"
"rKt th 1\n"
"vYf va 1\n"
"vxA va 1\n"
"fhM th 1\n"
"jsU st 1\n"
"zXk sz 1\n"
"uwO qu 1\n"
"jsR st 1\n"
"kHn an 1\n"
"xWv va 1\n"
"vfS va 1\n"
"pIv va 1\n"
"bcW ch 1\n"
"zdM sz 1\n"
"gCz ng 1\n"
"hzN th 1\n"
"bQw wa 1\n"
"ojX on 1\n"
"Vqv qu 1\n"
"qWb qu 1\n"
"Ykb ka 1\n"
"xnJ an 1\n"
"sJz st 1\n"
"hRr th 1\n"
"tXs th 1\n"
"Qeb er 1\n"
"Uwd de 1\n"
"nYg an 1\n"
"Yfx fo 1\n"
"xrG er 1\n"
"eZr le 1\n"
"ufV us 1\n"
"rXm er 1\n"
"qZv qu 1\n"
"vQz sz 1\n"
"Tnq an 1\n"
"Rmj ij 1\n"
"jlM le 1\n"
"cqO ch 1\n"
"xWf fo 1\n"
"jcZ ch 1\n"
"jfV ij 1\n"
"Zmj ij 1\n"
"bxM be 1\n"
"fFd de 1\n"
"gjP ng 1\n"
"hMs th 1\n"
"Ysq qu 1\n"
"qkV qu 1\n"
"Kmc ch 1\n"
"xYy ny 1\n"
"dvX de 1\n"
"rwC er 1\n"
"gwW wa 1\n"
"Qpy pr 1\n"
"jXy ij 1\n"
"qOj qu 1\n"
"Qmz sz 1\n"
"Eqq qu 1\n"
"zJs st 1\n"
"fHy ny 1\n"
"hDt th 1\n"
"sDh th 1\n"
"Vkq qu 1\n"
"yLc ch 1\n"
"vHm va 1\n"
"vnX an 1\n"
"jxS ij 1\n"
"Jtj th 1\n"
"qgE ng 1\n"
"bpH pr 1\n"
"Iqy qu 1\n"
"qMn an 1\n"
"dmE de 1\n"
"Hfq qu 1\n"
"pSb pr 1\n"
"xhI th 1\n"
"Qjt th 1\n"
"yfX ny 1\n"
"vuF qu 1\n"
"wFw wa 1\n"
"znS an 1\n"
"zlV le 1\n"
"lkK le 1\n"
"Fvz sz 1\n"
"qjT qu 1\n"
"zoQ on 1\n"
"Wvx va 1\n"
"hMn th 1\n"
"dMw de 1\n"
"gcF ch 1\n"
"dbB de 1\n"
"Cqj qu 1\n"
"mCv va 1\n"
"pJx pr 1\n"
"Dfv va 1\n"
"sjL st 1\n"
"qiG in 1\n"
"Zls le 1\n"
"Vsf st 1\n"
"Fgd ng 1\n"
"wmD me 1\n"
"Dxo on 1\n"
"qrk qu 1\n"
"pJr er 1\n"
"cLx ch 1\n"
"jdB de 1\n"
"ybM be 1\n"
"mvM va 1\n"
"jtX th 1\n"
"cnB an 1\n"
"wtW th 1\n"
"Ksd st 1\n"
"wql wa 1\n"
"mhU th 1\n"
"oJy on 1\n"
"Ghp th 1\n"
"qoX qu 1\n"
"xsI st 1\n"
"vFs st 1\n"
"fYe er 1\n"
"lnV an 1\n"
"uXn an 1\n"
"Eoh th 1\n"
"wcM wa 1\n"
"jwK ij 1\n"
"Gke er 1\n"
"uFq qu 1\n"
"Ycg ch 1\n"
"xqy qu 1\n"
"btM th 1\n"
"jHw ij 1\n"
"qeU qu 1\n"
"Qjz sz 1\n"
"nuQ an 1\n"
"Fcx ch 1\n"
"Kqt th 1\n"
"Lqv qu 1\n"
"mwU me 1\n"
"fQs st 1\n"
"kSd de 1\n"
"nYv an 1\n"
"wGj ij 1\n"
"gvZ ng 1\n"
"mqN qu 1\n"
"Fhp th 1\n"
"pMq qu 1\n"
"dBh ch 1\n"
"bXk ka 1\n"
"fqK qu 1\n"
"Yyq qu 1\n"
"Krq qu 1\n"
"Rnv an 1\n"
"uuE qu 1\n"
"Xsz st 1\n"
"fKb be 1\n"
"yIh th 1\n"
"Ncd ch 1\n"
"mLr er 1\n"
"cSs ch 1\n"
"lbE le 1\n"
"xaW an 1\n"
"Rtd th 1\n"
"rbF er 1\n"
"vgR ng 1\n"
"scZ ch 1\n"
"rHp er 1\n"
"eYw er 1\n"
"Lxj ij 1\n"
"qRg ng 1\n"
"jpN ij 1\n"
"rjW er 1\n"
"lgK ng 1\n"
"mCc ch 1\n"
"fGu qu 1\n"
"xzT sz 1\n"
"wQw wa 1\n"
"klJ li 1\n"
"cqk ch 1\n"
"lMh th 1\n"
"pYs st 1\n"
"hQk th 1\n"
"Hxz sz 1\n"
"feY er 1\n"
"fhF th 1\n"
"fBm me 1\n"
"fVt th 1\n"
"zfh th 1\n"
"sbT st 1\n"
"dQy de 1\n"
"Fmc ch 1\n"
"vhL th 1\n"
"Jtb th 1\n"
"Vrx er 1\n"
"yqZ qu 1\n"
"jDm ij 1\n"
"mfV me 1\n"
"oSx on 1\n"
"Jxg ng 1\n"
"wOq qu 1\n"
"dJq qu 1\n"
"Vvc ch 1\n"
"Eqe qu 1\n"
"jqO qu 1\n"
"zxI sz 1\n"
"qKf qu 1\n"
"fdW de 1\n"
"ccM ch 1\n"
"gcW ch 1\n"
"lFn an 1\n"
"Rvq qu 1\n"
"znN an 1\n"
"zbU sz 1\n"
"tNw th 1\n"
"wjK ij 1\n"
"Jbd de 1\n"
"Bfc ch 1\n"
"qeX le 1\n"
"tXk th 1\n"
"slJ le 1\n"
"cKd ch 1\n"
"nCf an 1\n"
"qgV ng 1\n"
"Mhx th 1\n"
"sKf st 1\n"
"hqZ th 1\n"
"Fdt th 1\n"
"qzJ qu 1\n"
"sNn an 1\n"
"tjW th 1\n"
"xcN ch 1\n"
"fcJ ch 1\n"
"djU de 1\n"
"Ygh th 1\n"
"woI on 1\n"
"Yyz sz 1\n"
"kQc ch 1\n"
"hfQ th 1\n"
"nrL an 1\n"
"lQs le 1\n"
"mtF th 1\n"
"wbX wa 1\n"
"gmR ng 1\n"
"Zsq qu 1\n"
"ytQ th 1\n"
"mbF me 1\n"
"fgT ng 1\n"
"cWu ch 1\n"
"gxG ng 1\n"
"hNv th 1\n"
"dfW de 1\n"
"zrC er 1\n"
"woX on 1\n"
"wjT ij 1\n"
"Pqw qu 1\n"
"vkf ka 1\n"
"nLz an 1\n"
"cjV ch 1\n"
"fcP ch 1\n"
"vlQ le 1\n"
"Fgq ng 1\n"
"hgP th 1\n"
"Gqy qu 1\n"
"tKs th 1\n"
"Xfv va 1\n"
"yZq qu 1\n"
"yiZ in 1\n"
"rXv er 1\n"
"Ycy ch 1\n"
"fvA va 1\n"
"Tqs qu 1\n"
"hZy th 1\n"
"xwc ch 1\n"
"qVf qu 1\n"
"Mhq th 1\n"
"zSj sz 1\n"
"vhQ th 1\n"
"tzX th 1\n"
"Gvm va 1\n"
"cqU ch 1\n"
"Hhp th 1\n"
"gQk ng 1\n"
"pwL pr 1\n"
"sNw st 1\n"
"qEt th 1\n"
"Nzq qu 1\n"
"zsD st 1\n"
"mDg ng 1\n"
"Rtq th 1\n"
"jLf ij 1\n"
"wTp pr 1\n"
"xJh th 1\n"
"Vqo qu 1\n"
"Zqk qu 1\n"
"qqQ qu 1\n"
"hrY th 1\n"
"Wqo qu 1\n"
"mIy me 1\n"
"Ipk ka 1\n"
"xjC ij 1\n"
"lLp le 1\n"
"hqF th 1\n"
"cWg ch 1\n"
"qYc qu 1\n"
"cjU ch 1\n"
"qXk qu 1\n"
"hqL th 1\n"
"zxT sz 1\n"
"dnX an 1\n"
"zBt th 1\n"
"Qls le 1\n"
"khC th 1\n"
"uqX qu 1\n"
"Zbf be 1\n"
"iDx li 1\n"
"Znp an 1\n"
"Jxq qu 1\n"
"jqY qu 1\n"
"vbU va 1\n"
"qRr qu 1\n"
"qpj qu 1\n"
"wlG le 1\n"
"Wgx ng 1\n"
"Vxj ij 1\n"
"zSw sz 1\n"
"ihW th 1\n"
"kzT sz 1\n"
"aeZ an 1\n"
"hKj th 1\n"
"tWs th 1\n"
"gLc ch 1\n"
"gpK ng 1\n"
"yJz sz 1\n"
"Gvt th 1\n"
"fEo on 1\n"
"sKd st 1\n"
"xhN th 1\n"
"aMq an 1\n"
"ehX th 1\n"
"kfZ ku 1\n"
"Wwc ch 1\n"
"Ymz sz 1\n"
"Vkd de 1\n"
"bzD sz 1\n"
"Xkg ng 1\n"
"Vzz sz 1\n"
"xvV va 1\n"
"pHh th 1\n"
"rKq qu 1\n"
"vmM va 1\n"
"Qxj ij 1\n"
"zNr er 1\n"
"bqB qu 1\n"
"Jqw qu 1\n"
"zqB qu 1\n"
"Xvm va 1\n"
"lBf le 1\n"
"qqB qu 1\n"
"gCs ng 1\n"
"rRg ng 1\n"
"Rnm an 1\n"
"Lzw sz 1\n"
"iwN in 1\n"
"pfN pr 1\n"
"hCw wa 1\n"
"uHz qu 1\n"
"cLc ch 1\n"
"lwD le 1\n"
"qjB qu 1\n"
"Ojy ij 1\n"
"dmV di 1\n"
"cCw ch 1\n"
"lXs le 1\n"
"smR st 1\n"
"mxO me 1\n"
"Jrt th 1\n"
"zjN sz 1\n"
"bBn an 1\n"
"cxQ ch 1\n"
"Kdp de 1\n"
"Dlb le 1\n"
"pqD qu 1\n"
"qqC qu 1\n"
"Spz sz 1\n"
"tCd th 1\n"
"gfP ng 1\n"
"uGj qu 1\n"
"xbE be 1\n"
"Xpv va 1\n"
"Xzt th 1\n"
"gqG qu 1\n"
"kqq qu 1\n"
"Kvq qu 1\n"
"qWi qu 1\n"
"mxZ me 1\n"
"qoY qu 1\n"
"Sgf ng 1\n"
"cRv ch 1\n"
"Wgi ng 1\n"
"eDx er 1\n"
"cWw ch 1\n"
"vFq qu 1\n"
"Kxv va 1\n"
"iWp in 1\n"
"fRx fo 1\n"
"wtB th 1\n"
"swW st 1\n"
"grK ng 1\n"
"Hfe er 1\n"
"gfZ ng 1\n"
"xqX qu 1\n"
"oKj on 1\n"
"vfq qu 1\n"
"pWw pr 1\n"
"uWc ch 1\n"
"lCg ng 1\n"
"qkg qu 1\n"
"cDh th 1\n"
"Sfz sz 1\n"
"uYx qu 1\n"
"xvR va 1\n"
"eAo er 1\n"
"pYg ng 1\n"
"dRx de 1\n"
"iWd in 1\n"
"gGx ng 1\n"
"bXz sz 1\n"
"kcP ch 1\n"
"hcJ th 1\n"
"lCf le 1\n"
"gmW ng 1\n"
"Hkf ka 1\n"
"rhL th 1\n"
"jqP qu 1\n"
"rQp er 1\n"
"vCn an 1\n"
"dWj de 1\n"
"Hrx er 1\n"
"sTz st 1\n"
"aVt th 1\n"
"qwK qu 1\n"
"vvE va 1\n"
"wKp pr 1\n"
"xcY ch 1\n"
"vpM va 1\n"
"jlC le 1\n"
"dlG le 1\n"
"oTq qu 1\n"
"iLp in 1\n"
"xsL st 1\n"
"lFz le 1\n"
"vhC th 1\n"
"ylX le 1\n"
"pmO me 1\n"
"Ycc ch 1\n"
"Ynp an 1\n"
"Ybm me 1\n"
"Qln an 1\n"
"bxA be 1\n"
"tFs th 1\n"
"Lqw qu 1\n"
"zcU ch 1\n"
"vfK va 1\n"
"vpQ va 1\n"
"Dtf th 1\n"
"bTj ij 1\n"
"Vvw va 1\n"
"Qbx be 1\n"
"zWk sz 1\n"
"bSx be 1\n"
"zpK sz 1\n"
"wTb wa 1\n"
"mkC ka 1\n"
"cRh th 1\n"
"nBk an 1\n"
"xGv va 1\n"
"hnQ th 1\n"
"aqQ an 1\n"
"zhZ th 1\n"
"zwP sz 1\n"
"vqL qu 1\n"
"scU ch 1\n"
"glS ng 1\n"
"pjE ij 1\n"
"qqD qu 1\n"
"lRx le 1\n"
"qVr qu 1\n"
"Xuh th 1\n"
"brB er 1\n"
"Qyc ch 1\n"
"Sgx ng 1\n"
"dqk qu 1\n"
"bYj ij 1\n"
"mPx me 1\n"
"Fdv de 1\n"
"Xmd de 1\n"
"cPj ch 1\n"
"Pqg qu 1\n"
"vYh th 1\n"
"bJx be 1\n"
"dQt th 1\n"
"fxj ij 1\n"
"Hwq qu 1\n"
"vgC ng 1\n"
"kjK ij 1\n"
"nrC an 1\n"
"vqX qu 1\n"
"Bgk ng 1\n"
"Cbv va 1\n"
"Uww wa 1\n"
"wcJ ch 1\n"
"gBf ng 1\n"
"zTv va 1\n"
"zwX sz 1\n"
"lWg le 1\n"
"qOs qu 1\n"
"fbB be 1\n"
"xqG qu 1\n"
"jQj ij 1\n"
"voQ on 1\n"
"yjW ij 1\n"
"qvO qu 1\n"
"xbF be 1\n"
"nWu an 1\n"
"yjQ ij 1\n"
"cjK ch 1\n"
"Sxn an 1\n"
"ybX be 1\n"
"eYg ng 1\n"
"Bmn an 1\n"
"fDt th 1\n"
"jXm ij 1\n"
"nMt th 1\n"
"Sxb be 1\n"
"lHm le 1\n"
"gfY ng 1\n"
"nwG an 1\n"
"gHl ng 1\n"
"Wpm me 1\n"
"wFj ij 1\n"
"hGm th 1\n"
"wwC wa 1\n"
"Mlf le 1\n"
"cJb ch 1\n"
"bnC an 1\n"
"Fvp va 1\n"
"tGc th 1\n"
"fhZ th 1\n"
"Vkh th 1\n"
"jwg ng 1\n"
"xbK be 1\n"
"zVq qu 1\n"
"qTz qu 1\n"
"vrD er 1\n"
"fRt th 1\n"
"fFs st 1\n"
"hWg th 1\n"
"lzE le 1\n"
"lwX le 1\n"
"jHy ij 1\n"
"Qqt th 1\n"
"Dqi in 1\n"
"Tvj ij 1\n"
"gPb ng 1\n"
"dPz sz 1\n"
"zdT sz 1\n"
"mvA va 1\n"
"Zvh th 1\n"
"qaU an 1\n"
"fwQ wa 1\n"
"Rsw st 1\n"
"klB le 1\n"
"vlN le 1\n"
"Gvx va 1\n"
"pdJ de 1\n"
"lcB ch 1\n"
"vTq qu 1\n"
"yhV th 1\n"
"jLv ij 1\n"
"pzR sz 1\n"
"Xyw wa 1\n"
"Xlq qu 1\n"
"Rqw wa 1\n"
"zhP th 1\n"
"sgT ng 1\n"
"gpG ng 1\n"
"tkY th 1\n"
"dqE qu 1\n"
"Qcg ch 1\n"
"bfB be 1\n"
"Wpv va 1\n"
"Wxl le 1\n"
"Xbq qu 1\n"
"yFh th 1\n"
"Rfq qu 1\n"
"hhL th 1\n"
"jxz sz 1\n"
"bKh th 1\n"
"ptU th 1\n"
"cXe ch 1\n"
"zXm sz 1\n"
"Ghw th 1\n"
"dzY sz 1\n"
"dXn an 1\n"
"kxW ka 1\n"
"vVr er 1\n"
"Jxu un 1\n"
"bbX be 1\n"
"rPb er 1\n"
"qCm qu 1\n"
"qiJ qu 1\n"
"Xgw ng 1\n"
"Nhq th 1\n"
"cGp po 1\n"
"hPw th 1\n"
"bTz sz 1\n"
"qIg ng 1\n"
"pJh th 1\n"
"wcE ch 1\n"
"mCb me 1\n"
"bJc ch 1\n"
"nzQ an 1\n"
"yqR qu 1\n"
"xHw wa 1\n"
"bwH wa 1\n"
"qCr qu 1\n"
"Uqe qu 1\n"
"qxM qu 1\n"
"fpO pr 1\n"
"kcN ch 1\n"
"ykV ka 1\n"
"mQb me 1\n"
"Yqs qu 1\n"
"yVk ka 1\n"
"vbX va 1\n"
"mTd de 1\n"
"jXo on 1\n"
"wqJ qu 1\n"
"kKt th 1\n"
"fkS ka 1\n"
"Wvz sz 1\n"
"Iyv va 1\n"
"hGk th 1\n"
"Fze er 1\n"
"bhM th 1\n"
"qvI qu 1\n"
"nXq an 1\n"
"nXc an 1\n"
"kJt th 1\n"
"Nqc ch 1\n"
"Yjc ch 1\n"
"Fhb th 1\n"
"jyK ij 1\n"
"Jzj sz 1\n"
"yqc ch 1\n"
"wmZ me 1\n"
"zbF sz 1\n"
"spq qu 1\n"
"gPn an 1\n"
"jSg ng 1\n"
"gMh th 1\n"
"fXt th 1\n"
"Fyw wa 1\n"
"Fwg ng 1\n"
"hmN th 1\n"
"hNl th 1\n"
"tqY th 1\n"
"pGm me 1\n"
"mXz sz 1\n"
"qYy qu 1\n"
"Rmq qu 1\n"
"Dqa an 1\n"
"Wkx ka 1\n"
"dpT de 1\n"
"jyJ ij 1\n"
"Jqj qu 1\n"
"wjZ ij 1\n"
"xNr er 1\n"
"qAm qu 1\n"
"hBn th 1\n"
"qpJ qu 1\n"
"ygW ng 1\n"
"jXf ij 1\n"
"rMl er 1\n"
"zgV ng 1\n"
"nLp an 1\n"
"pFx pr 1\n"
"tvG th 1\n"
"zQl le 1\n"
"fdF de 1\n"
"bxK be 1\n"
"Bcx ch 1\n"
"rpY er 1\n"
"sJb st 1\n"
"Kvh th 1\n"
"kNq qu 1\n"
"zHd sz 1\n"
"dzF sz 1\n"
"tJq th 1\n"
"Hfv va 1\n"
"vQd de 1\n"
"pKj ij 1\n"
"fhV th 1\n"
"qZi qu 1\n"
"ohY th 1\n"
"vqq qu 1\n"
"tnQ th 1\n"
"Vqk qu 1\n"
"zJf sz 1\n"
"Jkz sz 1\n"
"Rwf wa 1\n"
"zvM va 1\n"
"bxY be 1\n"
"pXh th 1\n"
"fUy ny 1\n"
"pvE va 1\n"
"Lpk ka 1\n"
"dzV sz 1\n"
"xIf fo 1\n"
"wZw wa 1\n"
"npQ an 1\n"
"pWk ka 1\n"
"jgQ ng 1\n"
"Jqr qu 1\n"
"gmX ng 1\n"
"jfM ij 1\n"
"lWj le 1\n"
"pbN pr 1\n"
"fvF va 1\n"
"sDd st 1\n"
"qdB qu 1\n"
"frL er 1\n"
"uHn an 1\n"
"gwN ng 1\n"
"yBh th 1\n"
"Zzq qu 1\n"
"vDg ng 1\n"
"Qcz ch 1\n"
"qzf qu 1\n"
"wEc ch 1\n"
"pxH pr 1\n"
"fqO qu 1\n"
"Vqe qu 1\n"
"gkD ng 1\n"
"Xfq qu 1\n"
"uXg qu 1\n"
"jCw ij 1\n"
"Pzu qu 1\n"
"gRh th 1\n"
"vqH qu 1\n"
"vvW va 1\n"
"Rfb be 1\n"
"gqJ qu 1\n"
"tgO th 1\n"
"wUy wa 1\n"
"Jkw ka 1\n"
"hSs th 1\n"
"gkW ng 1\n"
"Qgy ng 1\n"
"dJb de 1\n"
"prF er 1\n"
"buX qu 1\n"
"cVg ch 1\n"
"jtU th 1\n"
"fDc ch 1\n"
"Ygc ch 1\n"
"Kqr qu 1\n"
"Uyp pr 1\n"
"lJk le 1\n"
"sxY st 1\n"
"xfY fo 1\n"
"Xkz sz 1\n"
"cgZ ch 1\n"
"cyX ch 1\n"
"gbF ng 1\n"
"zTk sz 1\n"
"hsU th 1\n"
"tlW th 1\n"
"Zzv sz 1\n"
"kqE qu 1\n"
"lpQ po 1\n"
"qJu un 1\n"
"hYi th 1\n"
"zlM le 1\n"
"vDt th 1\n"
"Hvn an 1\n"
"Nsf st 1\n"
"bJg ng 1\n"
"fNg ng 1\n"
"kQo on 1\n"
"Kqp qu 1\n"
"bKs st 1\n"
"mHp me 1\n"
"Uyj ij 1\n"
"cxY ch 1\n"
"yIe er 1\n"
"qTj qu 1\n"
"wfP wa 1\n"
"fxI fo 1\n"
"vQa an 1\n"
"fvN va 1\n"
"pwN pr 1\n"
"vaQ an 1\n"
"mxQ me 1\n"
"bdV de 1\n"
"Cgj ng 1\n"
"xjz sz 1\n"
"Wqw qu 1\n"
"wpO pr 1\n"
"woQ on 1\n"
"xYj ij 1\n"
"fpT pr 1\n"
"lNp le 1\n"
"pvX va 1\n"
"pLp pr 1\n"
"Ksg ng 1\n"
"rWg ng 1\n"
"iUy in 1\n"
"bfX be 1\n"
"xsV st 1\n"
"Xnj an 1\n"
"dmW de 1\n"
"oQw on 1\n"
"Zxy ny 1\n"
"Oay an 1\n"
"pjG ij 1\n"
"Zbt th 1\n"
"Hql qu 1\n"
"Zxq qu 1\n"
"jWd de 1\n"
"qUp qu 1\n"
"qxN qu 1\n"
"qCo qu 1\n"
"Yfd de 1\n"
"vvU va 1\n"
"vIk ka 1\n"
"Dfj ij 1\n"
"Zmh th 1\n"
"Cqt th 1\n"
"vQf va 1\n"
"Nbn an 1\n"
"tJs th 1\n"
"Fhx th 1\n"
"dzQ sz 1\n"
"zYj ij 1\n"
"qBw qu 1\n"
"vcV ch 1\n"
"gGt th 1\n"
"iVw in 1\n"
"Fzp sz 1\n"
"bjH ij 1\n"
"cuY ch 1\n"
"jwS ij 1\n"
"Cqp qu 1\n"
"yJv va 1\n"
"kdJ de 1\n"
"kdT de 1\n"
"nqB an 1\n"
"hWs th 1\n"
"qsj qu 1\n"
"hLw th 1\n"
"hdX th 1\n"
"cgV ch 1\n"
"tYc th 1\n"
"eZx er 1\n"
"hfN th 1\n"
"gvw ng 1\n"
"aVp an 1\n"
"gMs ng 1\n"
"Pbf be 1\n"
"mQf me 1\n"
"yUi in 1\n"
"vGf va 1\n"
"xgF ng 1\n"
"zvY sz 1\n"
"wrA er 1\n"
"yrM er 1\n"
"vMj ij 1\n"
"Uyv va 1\n"
"dLp de 1\n"
"Gjj ij 1\n"
"zEi in 1\n"
"Xdg ng 1\n"
"jHf ij 1\n"
"oPz on 1\n"
"xIz sz 1\n"
"bCb be 1\n"
"Dzq qu 1\n"
"Yjn an 1\n"
"gGz ng 1\n"
"mjU ij 1\n"
"Cjx ij 1\n"
"xKc ch 1\n"
"mvO va 1\n"
"Pzb sz 1\n"
"crK ch 1\n"
"xhO th 1\n"
"ylB le 1\n"
"lDk le 1\n"
"zlO le 1\n"
"pgH ng 1\n"
"vQb va 1\n"
"sdZ st 1\n"
"kQm ka 1\n"
"lRh th 1\n"
"oQy on 1\n"
"twC th 1\n"
"Bdj ij 1\n"
"Qjg ng 1\n"
"dnP an 1\n"
"Nnp an 1\n"
"qiP qu 1\n"
"Ccj ch 1\n"
"uHt th 1\n"
"qLx qu 1\n"
"Qsf st 1\n"
"fKx fo 1\n"
"fkE ka 1\n"
"jlX le 1\n"
"jZb ij 1\n"
"Vwj ij 1\n"
"zbA sz 1\n"
"Hhd th 1\n"
"cbY ch 1\n"
"Ikf ka 1\n"
"Grx er 1\n"
"jpP ij 1\n"
"Qfh th 1\n"
"xhW th 1\n"
"wmX me 1\n"
"aJb an 1\n"
"sfO st 1\n"
"qXq qu 1\n"
"mXg ng 1\n"
"bnV an 1\n"
"Ypw pr 1\n"
"zCy sz 1\n"
"lhN th 1\n"
"rXn an 1\n"
"fGh th 1\n"
"Wxq qu 1\n"
"cxT ch 1\n"
"Zsg ng 1\n"
"uGv qu 1\n"
"bzM sz 1\n"
"zjS sz 1\n"
"dfS de 1\n"
"gpH ng 1\n"
"qgO ng 1\n"
"kqF qu 1\n"
"qfU qu 1\n"
"qTp qu 1\n"
"vZb va 1\n"
"Ejw ij 1\n"
"zQn an 1\n"
"gYz ng 1\n"
"kjV ij 1\n"
"fWl le 1\n"
"fRk ka 1\n"
"uSj qu 1\n"
"Cxg ng 1\n"
"Lcv ch 1\n"
"bzK sz 1\n"
"wqF qu 1\n"
"qJp qu 1\n"
"rCj er 1\n"
"qvs qu 1\n"
"lwN le 1\n"
"xmR me 1\n"
"btC th 1\n"
"kTx ka 1\n"
"qkU qu 1\n"
"Lhj th 1\n"
"dIx de 1\n"
"vsQ st 1\n"
"gSd ng 1\n"
"wDl le 1\n"
"Vjm ij 1\n"
"pmI me 1\n"
"vWh th 1\n"
"fKv va 1\n"
"xPt th 1\n"
"uoQ qu 1\n"
"Kgh th 1\n"
"gwX ng 1\n"
"sgJ ng 1\n"
"pWj ij 1\n"
"Qff fo 1\n"
"hkJ th 1\n"
"Hqo qu 1\n"
"jwW ij 1\n"
"sQz st 1\n"
"wUw wa 1\n"
"mKx me 1\n"
"oQf on 1\n"
"jVk ij 1\n"
"xwT wa 1\n"
"sTq qu 1\n"
"uqV qu 1\n"
"Qlp le 1\n"
"pMb pr 1\n"
"xKj ij 1\n"
"bpX pr 1\n"
"vQe er 1\n"
"Jjq qu 1\n"
"qKh th 1\n"
"fkJ ka 1\n"
"jbQ ij 1\n"
"mZw me 1\n"
"Xgc ch 1\n"
"vzU sz 1\n"
"pTm me 1\n"
"pNq qu 1\n"
"rwD er 1\n"
"Qdg ng 1\n"
"wqC qu 1\n"
"Yrn an 1\n"
"qww qu 1\n"
"qwU qu 1\n"
"xzF sz 1\n"
"flW le 1\n"
"jzP sz 1\n"
"Wxp pr 1\n"
"rDq qu 1\n"
"dGp de 1\n"
"Ztj th 1\n"
"Uvp va 1\n"
"eGc ch 1\n"
"zZb sz 1\n"
"gQh th 1\n"
"tFd th 1\n"
"Mqg ng 1\n"
"dnD an 1\n"
"hvY th 1\n"
"Iyb be 1\n"
"fDz sz 1\n"
"Kbj ij 1\n"
"vYm va 1\n"
"Wxr er 1\n"
"Kwz sz 1\n"
"hrQ th 1\n"
"yCt th 1\n"
"Hxw wa 1\n"
"hEf th 1\n"
"bdU de 1\n"
"sGj st 1\n"
"Gwt th 1\n"
"bYh th 1\n"
"zmU sz 1\n"
"pDm po 1\n"
"qmC qu 1\n"
"dTd de 1\n"
"Qxq qu 1\n"
"uVf qu 1\n"
"qAl qu 1\n"
"jEa an 1\n"
"Kpy pr 1\n"
"Hqv qu 1\n"
"fCk ka 1\n"
"aqZ an 1\n"
"lUo on 1\n"
"Pvo on 1\n"
"Dqf qu 1\n"
"gdM ng 1\n"
"fzL sz 1\n"
"Bhh th 1\n"
"dGd de 1\n"
"wtY th 1\n"
"qTy qu 1\n"
"Uxr er 1\n"
"Vvm va 1\n"
"vHh th 1\n"
"qZc ch 1\n"
"fhC th 1\n"
"xdZ de 1\n"
"hZp th 1\n"
"Pmz sz 1\n"
"cfT ch 1\n"
"pjI ij 1\n"
"mdZ de 1\n"
"jkQ ij 1\n"
"Sdj de 1\n"
"hDf th 1\n"
"eJj er 1\n"
"wjY ij 1\n"
"zLm sz 1\n"
"eFs er 1\n"
"wgj ng 1\n"
"Zmk ka 1\n"
"lvJ le 1\n"
"xYm me 1\n"
"Nzf sz 1\n"
"wJi in 1\n"
"yQs st 1\n"
"pfM pr 1\n"
"dhR th 1\n"
"cmK ch 1\n"
"dhM th 1\n"
"qGb qu 1\n"
"wvQ va 1\n"
"Cgq ng 1\n"
"Jfc ch 1\n"
"bkD ka 1\n"
"fdS de 1\n"
"Ivp va 1\n"
"Gkj ij 1\n"
"zIv sz 1\n"
"Bzl le 1\n"
"gBb ng 1\n"
"Tpj ij 1\n"
"vyY va 1\n"
"Uxs st 1\n"
"kwW ka 1\n"
"gPf ng 1\n"
"pqC qu 1\n"
"cTj ch 1\n"
"yzI sz 1\n"
"Yph th 1\n"
"bvD va 1\n"
"xCc ch 1\n"
"pcQ ch 1\n"
"fZw wa 1\n"
"Zxf fo 1\n"
"wbA wa 1\n"
"bTf be 1\n"
"rxR er 1\n"
"qqE qu 1\n"
"yFp pr 1\n"
"pNf pr 1\n"
"kMv ka 1\n"
"vUq qu 1\n"
"wOh th 1\n"
"hxH th 1\n"
"Xqh th 1\n"
"uIu qu 1\n"
"Fzq qu 1\n"
"Ysd st 1\n"
"ojY on 1\n"
"cEo ch 1\n"
"lwR le 1\n"
"qjF qu 1\n"
"jTp ij 1\n"
"yzT sz 1\n"
"jfO ij 1\n"
"qSg ng 1\n"
"Nck ch 1\n"
"hwF th 1\n"
"Gmq qu 1\n"
"Iiq qu 1\n"
"zwE sz 1\n"
"qQv qu 1\n"
"xVd de 1\n"
"Ywq qu 1\n"
"sFx st 1\n"
"fvB va 1\n"
"qYe le 1\n"
"gwT ng 1\n"
"Wjx ij 1\n"
"bHn an 1\n"
"fMn an 1\n"
"gJg ng 1\n"
"Vkg ng 1\n"
"Fxv va 1\n"
"lHv le 1\n"
"Wpk ka 1\n"
"xAq qu 1\n"
"rxB pr 1\n"
"xuQ qu 1\n"
"pIb pr 1\n"
"bfE be 1\n"
"gRx ng 1\n"
"Bpb pr 1\n"
"bxN be 1\n"
"kgU ng 1\n"
"Pxc ch 1\n"
"cCq ch 1\n"
"Npb pr 1\n"
"lxE le 1\n"
"lCy le 1\n"
"dgX ng 1\n"
"xLf fo 1\n"
"bQt th 1\n"
"qgF ng 1\n"
"pxZ pr 1\n"
"pPx pr 1\n"
"iYz in 1\n"
"vJl le 1\n"
"kTf ka 1\n"
"qVm qu 1\n"
"gwS ng 1\n"
"zTd sz 1\n"
"pQk ka 1\n"
"xEg ng 1\n"
"fpP pr 1\n"
"qjw qu 1\n"
"Oyw wa 1\n"
"mcO ch 1\n"
"Vjd de 1\n"
"qdg ng 1\n"
"Lfp pr 1\n"
"vZc ch 1\n"
"nOq an 1\n"
"qjn an 1\n"
"sKc ch 1\n"
"wgU ng 1\n"
"hgX th 1\n"
"dMv de 1\n"
"Xcp ch 1\n"
"Fwz sz 1\n"
"pwA pr 1\n"
"Lpj ij 1\n"
"bkP ka 1\n"
"vHn an 1\n"
"Jjy ij 1\n"
"mCq qu 1\n"
"wvM va 1\n"
"Icb ch 1\n"
"kfJ ka 1\n"
"hsQ th 1\n"
"dWd de 1\n"
"fUs st 1\n"
"fLn an 1\n"
"pjN ij 1\n"
"zgQ ng 1\n"
"jLj ij 1\n"
"zqE qu 1\n"
"Qmv va 1\n"
"Zjr er 1\n"
"Zkp ka 1\n"
"iyH in 1\n"
"wuY qu 1\n"
"mzT sz 1\n"
"cwK ch 1\n"
"bCm me 1\n"
"ydG de 1\n"
"xdU de 1\n"
"wTf wa 1\n"
"lHh th 1\n"
"qyD qu 1\n"
"xlV le 1\n"
"qyT qu 1\n"
"tWn th 1\n"
"rMz er 1\n"
"pXv va 1\n"
"Xbz sz 1\n"
"kHm ka 1\n"
"cVd ch 1\n"
"qzH qu 1\n"
"ydN de 1\n"
"qMb qu 1\n"
"yjS ij 1\n"
"gmC ng 1\n"
"zIi in 1\n"
"fpM pr 1\n"
"lcZ ch 1\n"
"qHn an 1\n"
"Jjd de 1\n"
"jlG le 1\n"
"qcK ch 1\n"
"xQm me 1\n"
"vIi in 1\n"
"wBp pr 1\n"
"wcI ch 1\n"
"dJd de 1\n"
"Qbn an 1\n"
"Bjf ij 1\n"
"dpY de 1\n"
"dcF ch 1\n"
"xSj ij 1\n"
"iXj in 1\n"
"Qgb ng 1\n"
"gDt th 1\n"
"xxq qu 1\n"
"xcQ ch 1\n"
"Sqs qu 1\n"
"Qmg ng 1\n"
"gcU ch 1\n"
"Bvv va 1\n"
"pzE sz 1\n"
"wtT th 1\n"
"vbL va 1\n"
"bCt th 1\n"
"Qpo on 1\n"
"mXs me 1\n"
"Zqr qu 1\n"
"Gky ka 1\n"
"Xmr er 1\n"
"Lnz an 1\n"
"vYq qu 1\n"
"yRl le 1\n"
"gmK ng 1\n"
"vwP va 1\n"
"eFg ng 1\n"
"Njd de 1\n"
"klG le 1\n"
"hbE th 1\n"
"kWz sz 1\n"
"qpM qu 1\n"
"oZc ch 1\n"
"jRm ij 1\n"
"wXl le 1\n"
#ifndef _MSC_VER // TODO: Hack to avoid unsupported long string for MS VC.
"iyD in 1\n"
"fvL va 1\n"
"rPw er 1\n"
"fdR de 1\n"
"iSg ng 1\n"
"dbQ de 1\n"
"xxQ xe 1\n"
"Djc ch 1\n"
"ygK ng 1\n"
"Rhb th 1\n"
"zgG ng 1\n"
"Yky ka 1\n"
"Cxj ij 1\n"
"wWk ka 1\n"
"lmY le 1\n"
"qrB qu 1\n"
"ywK wa 1\n"
"xqI qu 1\n"
"Twj ij 1\n"
"Xgq ng 1\n"
"dwZ de 1\n"
"nQl an 1\n"
"Ghc th 1\n"
"pnH an 1\n"
"vmU va 1\n"
"qqK qu 1\n"
"cjB ch 1\n"
"gzS ng 1\n"
"Rwz sz 1\n"
"gYr ng 1\n"
"Fgx ng 1\n"
"wdK de 1\n"
"hxZ th 1\n"
"xUx xe 1\n"
"wmT me 1\n"
"yYk ka 1\n"
"fcD ch 1\n"
"hVv th 1\n"
"Sgv ng 1\n"
"zPn an 1\n"
"vYb va 1\n"
"bzE sz 1\n"
"whV th 1\n"
"qNz qu 1\n"
"wtS th 1\n"
"vhY th 1\n"
"nLf an 1\n"
"Lfw wa 1\n"
"gVc ch 1\n"
"gkS ng 1\n"
"Jqb qu 1\n"
"hWx th 1\n"
"zgO ng 1\n"
"tgX th 1\n"
"jPb ij 1\n"
"Wxb be 1\n"
"gqw ng 1\n"
"Cfw wa 1\n"
"woU on 1\n"
"ycJ ch 1\n"
"kwD ka 1\n"
"Sbp pr 1\n"
"qcw ch 1\n"
"Hwr er 1\n"
"bmL me 1\n"
"gwZ ng 1\n"
"yKj ij 1\n"
"fXv va 1\n"
"iKx in 1\n"
"lRz le 1\n"
"cHj ch 1\n"
"fFt th 1\n"
"sJv sz 1\n"
"xmI me 1\n"
"cCd ch 1\n"
"iYd in 1\n"
"yfY ny 1\n"
"xbY be 1\n"
"bmE me 1\n"
"fBv va 1\n"
"dHw de 1\n"
"ycR ch 1\n"
"wvL va 1\n"
"rjL er 1\n"
"sYv sz 1\n"
"Wpn an 1\n"
"zxB sz 1\n"
"yBq qu 1\n"
"gdJ ng 1\n"
"Yjo on 1\n"
"fpQ pr 1\n"
"qOq qu 1\n"
"Wjf ij 1\n"
"qcT ch 1\n"
"Lfh th 1\n"
"cFj ch 1\n"
"lMq qu 1\n"
"wSf wa 1\n"
"wQc ch 1\n"
"zDy sz 1\n"
"qrl qu 1\n"
"pYw pr 1\n"
"Vnf an 1\n"
"Hcj ch 1\n"
"zdU sz 1\n"
"bvP va 1\n"
"Yfj ij 1\n"
"Qkn an 1\n"
"wHm me 1\n"
"qVv qu 1\n"
"gkV ng 1\n"
"vpq qu 1\n"
"hFk th 1\n"
"fWf fo 1\n"
"pYq qu 1\n"
"dNv de 1\n"
"Wwj ij 1\n"
"Fmx me 1\n"
"mDl le 1\n"
"jMg ng 1\n"
"fZk ka 1\n"
"jNp ij 1\n"
"qhf th 1\n"
"Vbg ng 1\n"
"lKx le 1\n"
"iZx in 1\n"
"sjT sz 1\n"
"ijY in 1\n"
"qtV th 1\n"
"yTk ka 1\n"
"Hpz sz 1\n"
"iGq qu 1\n"
"yqW qu 1\n"
"hgF th 1\n"
"mFk ka 1\n"
"Oqw qu 1\n"
"dXa an 1\n"
"Zbq qu 1\n"
"lKm le 1\n"
"Svz sz 1\n"
"zKc ch 1\n"
"Vmz sz 1\n"
"mIx me 1\n"
"gKj ng 1\n"
"gTt th 1\n"
"vfC fo 1\n"
"hKg th 1\n"
"hSx th 1\n"
"oKg ng 1\n"
"nQs an 1\n"
"yiG in 1\n"
"qgM ng 1\n"
"kQg ng 1\n"
"Cjd de 1\n"
"jPy ij 1\n"
"Xqe qu 1\n"
"Pzy sz 1\n"
"Ftq th 1\n"
"fcE ch 1\n"
"mkL ka 1\n"
"Hzj sz 1\n"
"bTn an 1\n"
"qXy qu 1\n"
"dmM de 1\n"
"dVx de 1\n"
"Tqn an 1\n"
"xWj ij 1\n"
"qxQ qu 1\n"
"fQx fo 1\n"
"vLl le 1\n"
"Pgk ng 1\n"
"gHk ng 1\n"
"hxV th 1\n"
"tJz th 1\n"
"fMz sz 1\n"
"Ixb be 1\n"
"Cyy ny 1\n"
"pXf pr 1\n"
"pLl le 1\n"
"Twq qu 1\n"
"Dtw th 1\n"
"wRn an 1\n"
"uXl qu 1\n"
"zhq th 1\n"
"wIv va 1\n"
"cjL ch 1\n"
"qxH qu 1\n"
"lDm le 1\n"
"tXv th 1\n"
"gjC ng 1\n"
"Zzd sz 1\n"
"tgT th 1\n"
"hnP th 1\n"
"Kjc ch 1\n"
"gVw ng 1\n"
"xbI be 1\n"
"Zpc ch 1\n"
"bfO be 1\n"
"mSx me 1\n"
"qaF an 1\n"
"aQh th 1\n"
"Hjd de 1\n"
"qXj qu 1\n"
"fqA qu 1\n"
"bvR va 1\n"
"qSn an 1\n"
"cdV ch 1\n"
"pTf pr 1\n"
"Kzc ch 1\n"
"qtI th 1\n"
"egY ng 1\n"
"Rxt th 1\n"
"bhY th 1\n"
"pGh th 1\n"
"jDg ng 1\n"
"foY on 1\n"
"dKs sz 1\n"
"qJt th 1\n"
"Xwz sz 1\n"
"Ixg ng 1\n"
"rMt th 1\n"
"zXu qu 1\n"
"sQy sz 1\n"
"Npz sz 1\n"
"Qfz sz 1\n"
"rLm er 1\n"
"zGm sz 1\n"
"wHz sz 1\n"
"vcY ch 1\n"
"kqZ qu 1\n"
"jDh th 1\n"
"qgG ng 1\n"
"Dqq qu 1\n"
"fmO me 1\n"
"qdW qu 1\n"
"dNw de 1\n"
"rXj er 1\n"
"Jwc ch 1\n"
"mDb me 1\n"
"wMw wa 1\n"
"Yjg ng 1\n"
"fjY ij 1\n"
"iJb in 1\n"
"cdC ch 1\n"
"Yxq qu 1\n"
"Vbk ka 1\n"
"Fpx pr 1\n"
"zhD th 1\n"
"hCs th 1\n"
"dXw de 1\n"
"kDd de 1\n"
"uqT un 1\n"
"Bxw wa 1\n"
"Bjq qu 1\n"
"jGx ij 1\n"
"fXb be 1\n"
"ybF be 1\n"
"dtA th 1\n"
"cVv ch 1\n"
"Cbd de 1\n"
"wtH th 1\n"
"Kdj de 1\n"
"kPs sz 1\n"
"Zvk ka 1\n"
"xPv va 1\n"
"woH on 1\n"
"Xpz sz 1\n"
"qXe qu 1\n"
"pTj ij 1\n"
"kwQ ka 1\n"
"kZf ka 1\n"
"Uqj qu 1\n"
"yJh th 1\n"
"hCq th 1\n"
"jMj ij 1\n"
"phY th 1\n"
"kbB ka 1\n"
"Gpz sz 1\n"
"sGz st 1\n"
"fwE wa 1\n"
"Ttf th 1\n"
"Gqm qu 1\n"
"bzN sz 1\n"
"fkO ka 1\n"
"uzW qu 1\n"
"oxQ on 1\n"
"Vgm ng 1\n"
"qmD qu 1\n"
"xqn an 1\n"
"vRl le 1\n"
"Tnr an 1\n"
"zjW sz 1\n"
"vwq qu 1\n"
"jtW th 1\n"
"qnL an 1\n"
"yDx ny 1\n"
"xfQ fo 1\n"
"wxJ wa 1\n"
"nxE an 1\n"
"vQn in 1\n"
"Wkh th 1\n"
"ywD wa 1\n"
"pFf pr 1\n"
"lbK le 1\n"
"vHy va 1\n"
"gVj ng 1\n"
"Oqh th 1\n"
"bcN ch 1\n"
"tWm th 1\n"
"wMc ch 1\n"
"nwQ an 1\n"
"qnM an 1\n"
"Ztx th 1\n"
"nQj an 1\n"
"Vxt th 1\n"
"Uxc ch 1\n"
"pWv va 1\n"
"yRx ny 1\n"
"qKu un 1\n"
"jXg ng 1\n"
"jpX ij 1\n"
"dkG de 1\n"
"Bnf an 1\n"
"Ykf ka 1\n"
"gbW ng 1\n"
"klX le 1\n"
"vkH ka 1\n"
"dKd de 1\n"
"Kpq qu 1\n"
"gqM ng 1\n"
"yBz sz 1\n"
"rPj er 1\n"
"Hzv sz 1\n"
"wYz sz 1\n"
"qGa an 1\n"
"jIs sz 1\n"
"bUj ij 1\n"
"rTt th 1\n"
"nqI an 1\n"
"jfP ij 1\n"
"hRt th 1\n"
"yRr er 1\n"
"jjK ij 1\n"
"tfE th 1\n"
"Qsw st 1\n"
"Fcm ch 1\n"
"bJm me 1\n"
"tXq th 1\n"
"fRl le 1\n"
"gqE ng 1\n"
"wGg ng 1\n"
"gKc ch 1\n"
"yXc ch 1\n"
"zBy sz 1\n"
"lTd le 1\n"
"Wqc ch 1\n"
"Ftf th 1\n"
"wdB de 1\n"
"xnX an 1\n"
"Bqc ch 1\n"
"zqO qu 1\n"
"Qdl le 1\n"
"ojJ on 1\n"
"qZn an 1\n"
"hzW th 1\n"
"ylQ le 1\n"
"Zbw wa 1\n"
"mvL va 1\n"
"Ljb ij 1\n"
"Gqe qu 1\n"
"mfE me 1\n"
"xQq qu 1\n"
"fLv va 1\n"
"xLt th 1\n"
"wBj ij 1\n"
"jUm ij 1\n"
"pdL de 1\n"
"mJv va 1\n"
"dxU de 1\n"
"xqN qu 1\n"
"fpG pr 1\n"
"tlO th 1\n"
"whL th 1\n"
"kDx ka 1\n"
"Rqb qu 1\n"
"uvX qu 1\n"
"vjY ij 1\n"
"crQ ch 1\n"
"xyY ny 1\n"
"yhQ th 1\n"
"yYc ch 1\n"
"Lmg ng 1\n"
"Jsq qu 1\n"
"Gbj ij 1\n"
"aPb an 1\n"
"dwJ de 1\n"
"Xyv va 1\n"
"ucJ ch 1\n"
"dTf de 1\n"
"lBb le 1\n"
"hKz th 1\n"
"jcR ch 1\n"
"eQc ch 1\n"
"qYi in 1\n"
"Vtb th 1\n"
"Ccg ch 1\n"
"zAe er 1\n"
"gxJ ng 1\n"
"uvC qu 1\n"
"Bhm ma 1\n"
"Zgx ng 1\n"
"yzJ sz 1\n"
"cvJ ch 1\n"
"xTk ka 1\n"
"qdK qu 1\n"
"vwG va 1\n"
"Ymx me 1\n"
"oYw on 1\n"
"jXx ij 1\n"
"ywf wa 1\n"
"vVx vi 1\n"
"Rwm me 1\n"
"Dvk ka 1\n"
"xKt th 1\n"
"qLp qu 1\n"
"Yyv vi 1\n"
"Cqa an 1\n"
"xRf fo 1\n"
"Qqk qu 1\n"
"Jqe qu 1\n"
"yZg ng 1\n"
"vqG qu 1\n"
"hbO th 1\n"
"uVq qu 1\n"
"Rlm le 1\n"
"uZc ch 1\n"
"Ppv va 1\n"
"pVd de 1\n"
"yVd de 1\n"
"zJl le 1\n"
"Yzg ng 1\n"
"Cvq qu 1\n"
"pwS pr 1\n"
"Kkw ka 1\n"
"Wvv va 1\n"
"Fdy de 1\n"
"ppX pr 1\n"
"hvC th 1\n"
"iwG in 1\n"
"rBg ng 1\n"
"hBq th 1\n"
"nYs an 1\n"
"kcO ch 1\n"
"qEe qu 1\n"
"Ybv va 1\n"
"Qsn an 1\n"
"svC st 1\n"
"qkD qu 1\n"
"Qiw in 1\n"
"Gtj th 1\n"
"qAh th 1\n"
"wVy wa 1\n"
"bxT be 1\n"
"Qhs th 1\n"
"tlX th 1\n"
"hbA th 1\n"
"Qfb be 1\n"
"xWl le 1\n"
"xeV er 1\n"
"rqG qu 1\n"
"vqZ qu 1\n"
"jKv ij 1\n"
"iTf in 1\n"
"kwU ka 1\n"
"iFq in 1\n"
"mjZ ij 1\n"
"xgJ ng 1\n"
"zLp sz 1\n"
"qsR qu 1\n"
"zDj sz 1\n"
"pdF de 1\n"
"wxN wa 1\n"
"wGk ka 1\n"
"dUq qu 1\n"
"dJw de 1\n"
"fCb be 1\n"
"Dhz th 1\n"
"yIq qu 1\n"
"aQm an 1\n"
"Yzs st 1\n"
"vHf va 1\n"
"bjV ij 1\n"
"zSq qu 1\n"
"Wqs qu 1\n"
"jrW er 1\n"
"Hzq qu 1\n"
"wWs st 1\n"
"Mkg ng 1\n"
"zgF ng 1\n"
"Cnk an 1\n"
"rDg ng 1\n"
"fzB sz 1\n"
"fOm me 1\n"
"uVt th 1\n"
"Qfi in 1\n"
"Mhj th 1\n"
"uYj qu 1\n"
"Rqx qu 1\n"
"hkY th 1\n"
"wYb wa 1\n"
"tqP th 1\n"
"Jpb pr 1\n"
"bGw wa 1\n"
"xFh th 1\n"
"Xwb wa 1\n"
"Kgt th 1\n"
"Iqc ch 1\n"
"pJm me 1\n"
"Qkq qu 1\n"
"bVh th 1\n"
"yTq qu 1\n"
"zZg ng 1\n"
"cDz ch 1\n"
"qfm qu 1\n"
"afQ an 1\n"
"Qwc ch 1\n"
"bdJ de 1\n"
"qTu un 1\n"
"Ucx ch 1\n"
"Hnx an 1\n"
"Hbh th 1\n"
"gyH ng 1\n"
"tTz th 1\n"
"txV th 1\n"
"bdS de 1\n"
"Wgg ng 1\n"
"oqP qu 1\n"
"Rrf er 1\n"
"gYy ng 1\n"
"fMs st 1\n"
"fKd de 1\n"
"Hyx ny 1\n"
"Mxz sz 1\n"
"qHk qu 1\n"
"tfM th 1\n"
"hgQ th 1\n"
"zmO sz 1\n"
"wzS sz 1\n"
"jwQ ij 1\n"
"Fhc ic 1\n"
"xIy ny 1\n"
"fHg ng 1\n"
"wqY qu 1\n"
"bFp pr 1\n"
"Qdq qu 1\n"
"bhV th 1\n"
"bCg ng 1\n"
"Hgr ng 1\n"
"xqL qu 1\n"
"qgS ng 1\n"
"Nqg ng 1\n"
"fQv va 1\n"
"Qzw sz 1\n"
"Ixd de 1\n"
"Cxm me 1\n"
"mxN me 1\n"
"vQi in 1\n"
"cAq ch 1\n"
"eCx er 1\n"
"mqX qu 1\n"
"rqY qu 1\n"
"fVp pr 1\n"
"qoP qu 1\n"
"Gxc ch 1\n"
"vzX sz 1\n"
"fXf fo 1\n"
"Qtc th 1\n"
"ohQ th 1\n"
"Ygy ng 1\n"
"Xnb an 1\n"
"cWm ch 1\n"
"jXw ij 1\n"
"gWj ng 1\n"
"Kmg ng 1\n"
"vvH va 1\n"
"Uew er 1\n"
"qJk qu 1\n"
"Hkd de 1\n"
"xmP me 1\n"
"slR is 1\n"
"Uaq an 1\n"
"zbG sz 1\n"
"vNv va 1\n"
"cVb ch 1\n"
"bGg ng 1\n"
"iwU in 1\n"
"Cnw an 1\n"
"rXd er 1\n"
"vWz sz 1\n"
"tGf th 1\n"
"fbY be 1\n"
"hzp th 1\n"
"uWz qu 1\n"
"bMb be 1\n"
"jzW sz 1\n"
"gLh th 1\n"
"kZc ch 1\n"
"kHg ng 1\n"
"Vwf wa 1\n"
"vtY th 1\n"
"qeA qu 1\n"
"cxG ch 1\n"
"uQz qu 1\n"
"jGc ch 1\n"
"cvA ch 1\n"
"oTm on 1\n"
"pjY ij 1\n"
"bUo on 1\n"
"jwU ij 1\n"
"Jgm ng 1\n"
"tfZ th 1\n"
"xeO er 1\n"
"qBp qu 1\n"
"pBz sz 1\n"
"qSb qu 1\n"
"jyP ij 1\n"
"Fkq qu 1\n"
"njS an 1\n"
"jtA th 1\n"
"Zmf me 1\n"
"Ytm th 1\n"
"Pqc ch 1\n"
"bwJ wa 1\n"
"oWf on 1\n"
"kxJ ka 1\n"
"jHx ij 1\n"
"gcP ch 1\n"
"gBs ng 1\n"
"bkK ka 1\n"
"vdQ de 1\n"
"pjZ ij 1\n"
"Vgf ng 1\n"
"svG st 1\n"
"kGj ij 1\n"
"Wjg ng 1\n"
"Qmk ka 1\n"
"Glv le 1\n"
"tmY th 1\n"
"klY le 1\n"
"Pcj ch 1\n"
"fQw wi 1\n"
"xaO an 1\n"
"jfN ij 1\n"
"qGx qu 1\n"
"qvB qu 1\n"
"hwA th 1\n"
"Xmq qu 1\n"
"Xvt th 1\n"
"Bpq qu 1\n"
"oJq qu 1\n"
"vmZ va 1\n"
"nJp an 1\n"
"zqJ qu 1\n"
"qHf qu 1\n"
"mQg ng 1\n"
"yGz sz 1\n"
"hQm th 1\n"
"mBp me 1\n"
"tpJ th 1\n"
"Qkj ij 1\n"
"uUg ng 1\n"
"tdJ th 1\n"
"Jfn an 1\n"
"Lvj ij 1\n"
"iXc ch 1\n"
"pOq qu 1\n"
"bhK th 1\n"
"bMk ka 1\n"
"Fsw st 1\n"
"qAt th 1\n"
"xwJ wa 1\n"
"fPm me 1\n"
"Dfy ny 1\n"
"Zbp pr 1\n"
"Bgw ng 1\n"
"pQp pr 1\n"
"kQp ka 1\n"
"qoV qu 1\n"
"Uqd qu 1\n"
"jYo on 1\n"
"sDf st 1\n"
"xuJ qu 1\n"
"vRk ka 1\n"
"Qsg ng 1\n"
"yTd de 1\n"
"Qxr er 1\n"
"Hvc ch 1\n"
"hZt th 1\n"
"qDu un 1\n"
"fxA fo 1\n"
"xPf fo 1\n"
"wXc ch 1\n"
"jJb ij 1\n"
"pdK de 1\n"
"gpW ng 1\n"
"Qgx ng 1\n"
"kxG ka 1\n"
"dLx de 1\n"
"Bwz sz 1\n"
"Vdx de 1\n"
"yQh th 1\n"
"Wsx st 1\n"
"fSb be 1\n"
"Ukg ng 1\n"
"Pjz sz 1\n"
"rFg ng 1\n"
"fjP ij 1\n"
"kWv ka 1\n"
"Khf th 1\n"
"yGv va 1\n"
"pnD an 1\n"
"jYf ij 1\n"
"mgR ng 1\n"
"rjC er 1\n"
"Xjl le 1\n"
"kzE sz 1\n"
"Qgq ng 1\n"
"zgb ng 1\n"
"mhD th 1\n"
"vkO ka 1\n"
"uwV qu 1\n"
"rPp er 1\n"
"wXd de 1\n"
"gAo ng 1\n"
"kvG ka 1\n"
"vcX ch 1\n"
"xOz sz 1\n"
"Xzq qu 1\n"
"Fmu qu 1\n"
"xGg ng 1\n"
"jjR ij 1\n"
"qkI ku 1\n"
"pqH qu 1\n"
"cnH an 1\n"
"dhT th 1\n"
"mdR de 1\n"
"dDf de 1\n"
"qIq qu 1\n"
"xCj ij 1\n"
"qRk qu 1\n"
"kKc ch 1\n"
"Iuu qu 1\n"
"jqR qu 1\n"
"qEk qu 1\n"
"hfO th 1\n"
"quJ un 1\n"
"nRp an 1\n"
"txI th 1\n"
"yfZ ny 1\n"
"oqT ho 1\n"
"cgX ch 1\n"
"pbL pr 1\n"
"Xmx me 1\n"
"Vjr er 1\n"
"ylY le 1\n"
"dfK de 1\n"
"xgD ng 1\n"
"uwL qu 1\n"
"bPm me 1\n"
"qCy qu 1\n"
"Rpq qu 1\n"
"yqh th 1\n"
"xJt th 1\n"
"lzQ le 1\n"
"fgM ng 1\n"
"Ylc ch 1\n"
"fTz sz 1\n"
"Rjf ij 1\n"
"Rgj jo 1\n"
"Gkt th 1\n"
"fxG fo 1\n"
"mtG th 1\n"
"lgJ ng 1\n"
"tdR th 1\n"
"iHk in 1\n"
"Gqv qu 1\n"
"lDj le 1\n"
"wzZ sz 1\n"
"dFp de 1\n"
"qTt th 1\n"
"Wtg th 1\n"
"cbT ch 1\n"
"dvK de 1\n"
"Ctw th 1\n"
"mdG de 1\n"
"vKj ij 1\n"
"Clf le 1\n"
"wrU er 1\n"
"gmT ng 1\n"
"bXx be 1\n"
"zOx sz 1\n"
"Xnf an 1\n"
"rzQ er 1\n"
"vQj ij 1\n"
"kpT ka 1\n"
"fYh th 1\n"
"zLr er 1\n"
"Xgd ng 1\n"
"cZl ch 1\n"
"lFy le 1\n"
"Zng an 1\n"
"aXg an 1\n"
"qbE qu 1\n"
"zcY ch 1\n"
"sqK qu 1\n"
"Blx le 1\n"
"oqJ qu 1\n"
"jPv ij 1\n"
"qZd qu 1\n"
"fdZ de 1\n"
"Bqm qu 1\n"
"cpG ch 1\n"
"xdP de 1\n"
"fuF qu 1\n"
"vbq qu 1\n"
"dhH th 1\n"
"Jwm me 1\n"
"qkO ko 1\n"
"gsY ng 1\n"
"qGh th 1\n"
"Jkv ka 1\n"
"zpg ng 1\n"
"rwK er 1\n"
"Lhq th 1\n"
"zuV qu 1\n"
"bqV qu 1\n"
"Qcv ch 1\n"
"mWd de 1\n"
"cnF an 1\n"
"lWw le 1\n"
"txS th 1\n"
"znE an 1\n"
"fTj ij 1\n"
"lFq qu 1\n"
"wdJ de 1\n"
"eVk er 1\n"
"zjZ sz 1\n"
"fPq qu 1\n"
"cqQ ch 1\n"
"Pcg ch 1\n"
"Ydk de 1\n"
"svE st 1\n"
"Wqb qu 1\n"
"bcV ch 1\n"
"nHx on 1\n"
"wAx wa 1\n"
"hfB th 1\n"
"aMv an 1\n"
"pwO pr 1\n"
"Ywx wa 1\n"
"cbH ch 1\n"
"ojZ on 1\n"
"suU qu 1\n"
"jcU ch 1\n"
"sqY qu 1\n"
"jMr er 1\n"
"pxG pr 1\n"
"rBq qu 1\n"
"vlY le 1\n"
"hyY th 1\n"
"Cvw va 1\n"
"Tqe qu 1\n"
"fSj ij 1\n"
"fVs st 1\n"
"Eqc ch 1\n"
"xnD an 1\n"
"Owp pr 1\n"
"xTb be 1\n"
"wjL ij 1\n"
"Rxv va 1\n"
"nWf an 1\n"
"vHp va 1\n"
"vBk ka 1\n"
"Nqv qu 1\n"
"Lzf sz 1\n"
"bwS wa 1\n"
"Cby be 1\n"
"zRr er 1\n"
"qwJ qu 1\n"
"xnB an 1\n"
"qIc ch 1\n"
"cGk ch 1\n"
"Yji in 1\n"
"gVh th 1\n"
"lDc ch 1\n"
"Qyr er 1\n"
"fcH ch 1\n"
"nxB an 1\n"
"dvw de 1\n"
"gQc ch 1\n"
"mrR er 1\n"
"fnK an 1\n"
"Hlr le 1\n"
"Dnq an 1\n"
"bnU an 1\n"
"qCe qu 1\n"
"Tjv ij 1\n"
"Epq qu 1\n"
"wLf wa 1\n"
"pZj ij 1\n"
"gvR ng 1\n"
"kqK qu 1\n"
"vlG le 1\n"
"vvN va 1\n"
"gbM ng 1\n"
"bNk ka 1\n"
"jzL sz 1\n"
"Wlq qu 1\n"
"aYq an 1\n"
"zdY de 1\n"
"sfG st 1\n"
"qfW qu 1\n"
"kBv ka 1\n"
"btG th 1\n"
"Mqb qu 1\n"
"lrC er 1\n"
"vuE qu 1\n"
"fyJ ny 1\n"
"qmZ qu 1\n"
"Jkq qu 1\n"
"Cmj ij 1\n"
"bXy be 1\n"
"Ymy me 1\n"
"qxY qu 1\n"
"cNl ch 1\n"
"fzU fo 1\n"
"Rvt th 1\n"
"ylI le 1\n"
"xMs st 1\n"
"Qhm th 1\n"
"dHq qu 1\n"
"dwL de 1\n"
"vYr er 1\n"
"Qxu qu 1\n"
"dNh th 1\n"
"zNc ch 1\n"
"jmP ij 1\n"
"Pbq qu 1\n"
"fqj qu 1\n"
"fUw wa 1\n"
"Hyq qu 1\n"
"Qdx de 1\n"
"zSl le 1\n"
"cWt th 1\n"
"Fke er 1\n"
"Ztz th 1\n"
"uUq qu 1\n"
"nBm an 1\n"
"zJy sz 1\n"
"pdI de 1\n"
"nTd an 1\n"
"Yjb ij 1\n"
"Qjn an 1\n"
"yXj ij 1\n"
"xwB ow 1\n"
"klq qu 1\n"
"hfY th 1\n"
"pDg ng 1\n"
"zZd de 1\n"
"mqO qu 1\n"
"hZr th 1\n"
"cmY ch 1\n"
"gLk ng 1\n"
"Qcj ch 1\n"
"uKj qu 1\n"
"nqD an 1\n"
"yKw wa 1\n"
"bfR be 1\n"
"Rqz qu 1\n"
"jhQ th 1\n"
"vNj ij 1\n"
"Tcf ch 1\n"
"Hbn an 1\n"
"Lwv va 1\n"
"wcZ ch 1\n"
"cdK ch 1\n"
"bpR pr 1\n"
"lWm le 1\n"
"wNq qu 1\n"
"pAj ij 1\n"
"grV ng 1\n"
"qmk qu 1\n"
"cLf ch 1\n"
"iwB in 1\n"
"eqV qu 1\n"
"Wqz qu 1\n"
"Qnj an 1\n"
"uoJ qu 1\n"
"fVj ij 1\n"
"cbU ch 1\n"
"qpT qu 1\n"
"pdZ de 1\n"
"dzW de 1\n"
"Wfw wa 1\n"
"Zqm qu 1\n"
"kJd de 1\n"
"zWf sz 1\n"
"bYg ng 1\n"
"rjQ er 1\n"
"dwB de 1\n"
"Vlx le 1\n"
"zKd de 1\n"
"Lxw wa 1\n"
"Hpw pr 1\n"
"mvR va 1\n"
"qMt th 1\n"
"pWb pr 1\n"
"dcW ch 1\n"
"zEh th 1\n"
"Xrs er 1\n"
"Ftz th 1\n"
"qyL qu 1\n"
"jSn an 1\n"
"Wzh th 1\n"
"Pzf sz 1\n"
"zkW sz 1\n"
"ywY wa 1\n"
"oGb on 1\n"
"jBw ij 1\n"
"Qpz sz 1\n"
"rWm er 1\n"
"smQ st 1\n"
"uGk qu 1\n"
"xkV ka 1\n"
"wJf wa 1\n"
"cjW ch 1\n"
"wNx wa 1\n"
"wjR ij 1\n"
"wDd wa 1\n"
"lrB er 1\n"
"qhJ th 1\n"
"jKp ij 1\n"
"kNn an 1\n"
"tqU th 1\n"
"Jmj ij 1\n"
"bJv va 1\n"
"frN er 1\n"
"uBj qu 1\n"
"Uuv qu 1\n"
"Mzv sz 1\n"
"Djq qu 1\n"
"Qgl le 1\n"
"hdC th 1\n"
"mFh th 1\n"
"vjU ij 1\n"
"prX er 1\n"
"Kvc ch 1\n"
"ryY er 1\n"
"vzQ sz 1\n"
"Ojh th 1\n"
"Qfn an 1\n"
"Vqg ng 1\n"
"aQv an 1\n"
"hHx th 1\n"
"uIg ng 1\n"
"Kpv va 1\n"
"dQk ko 1\n"
"Ghq th 1\n"
"cZs ch 1\n"
"nvH an 1\n"
"jwJ ij 1\n"
"dMm de 1\n"
"gjI ng 1\n"
"lPg ng 1\n"
"qBs qu 1\n"
"Vhq th 1\n"
"qLt th 1\n"
"hBd th 1\n"
"Vcu ch 1\n"
"cQd ch 1\n"
"ypX pr 1\n"
"mQv va 1\n"
"vmR va 1\n"
"xfH fo 1\n"
"pqY qu 1\n"
"Xtb th 1\n"
"Vcx ch 1\n"
"tWb th 1\n"
"Pxa an 1\n"
"Qmr er 1\n"
"mdX de 1\n"
"Bxt th 1\n"
"jZv ij 1\n"
"hNp th 1\n"
"ybN be 1\n"
"bkZ ka 1\n"
"nVf an 1\n"
"lKq qu 1\n"
"oJj on 1\n"
"pBv va 1\n"
"hgA th 1\n"
"qxE qu 1\n"
"nvJ an 1\n"
"Xcf ch 1\n"
"Fdb de 1\n"
"zAo on 1\n"
"wQk ka 1\n"
"tmX th 1\n"
"pvZ va 1\n"
"fNw wa 1\n"
"zKk sz 1\n"
"hRx th 1\n"
"Tlj le 1\n"
"iQj in 1\n"
"jmU ij 1\n"
"tbW th 1\n"
"wVh th 1\n"
"Tvh th 1\n"
"nVg an 1\n"
"Lxp pr 1\n"
"vgO ng 1\n"
"dfE de 1\n"
"nVm an 1\n"
"qKy qu 1\n"
"eqZ qu 1\n"
"Tcc ch 1\n"
"cTk ch 1\n"
"fKz sz 1\n"
"Wkz sz 1\n"
"lvZ le 1\n"
"rGp er 1\n"
"kKz sz 1\n"
"Cbf be 1\n"
"jQd de 1\n"
"Zfc ch 1\n"
"hvX th 1\n"
"xgN ng 1\n"
"Kpe er 1\n"
"hzM th 1\n"
"jxZ ij 1\n"
"yqL qu 1\n"
"pgC ng 1\n"
"Fqd qu 1\n"
"tMb th 1\n"
"njQ an 1\n"
"tfB th 1\n"
"gjN ng 1\n"
"wNc ch 1\n"
"Pzj sz 1\n"
"mhO th 1\n"
"qUm qu 1\n"
"Fhh th 1\n"
"Sjd de 1\n"
"hWj th 1\n"
"yhL th 1\n"
"lGp le 1\n"
"dtX th 1\n"
"hwX th 1\n"
"srK er 1\n"
"vqE qu 1\n"
"bcO ch 1\n"
"xQl le 1\n"
"Qqf qu 1\n"
"kJg ng 1\n"
"pXz sz 1\n"
"yuJ qu 1\n"
"Gnp an 1\n"
"Dlc ch 1\n"
"Mxf fo 1\n"
"yNr er 1\n"
"bmV me 1\n"
"fXo on 1\n"
"mwW me 1\n"
"lIj le 1\n"
"Fvq qu 1\n"
"Utq th 1\n"
"jGk ij 1\n"
"wYw wa 1\n"
"wVm me 1\n"
"bTq qu 1\n"
"Ijp ij 1\n"
"znM an 1\n"
"xmO me 1\n"
"gQx ng 1\n"
"dKw de 1\n"
"dUf de 1\n"
"cSb ch 1\n"
"zVb sz 1\n"
"ccY ch 1\n"
"xjE ij 1\n"
"pYt th 1\n"
"Vrq qu 1\n"
"kzK sz 1\n"
"zfC sz 1\n"
"Ybh th 1\n"
"dgS ng 1\n"
"xcV ch 1\n"
"xNm me 1\n"
"Xkw ka 1\n"
"Tpw pr 1\n"
"Bwd de 1\n"
"hwT th 1\n"
"gQl ng 1\n"
"cDs ch 1\n"
"zYr er 1\n"
"xTp pr 1\n"
"qWm qu 1\n"
"xjT ij 1\n"
"hjK th 1\n"
"uDc ch 1\n"
"xhS th 1\n"
"bWd de 1\n"
"vCw va 1\n"
"jyB ij 1\n"
"uWd qu 1\n"
"Nnq qu 1\n"
"Qvb va 1\n"
"jzV sz 1\n"
"zBx sz 1\n"
"wIj ij 1\n"
"qRt th 1\n"
"qrJ qu 1\n"
"zZj sz 1\n"
"kRr er 1\n"
"Nzv sz 1\n"
"Qfw wa 1\n"
"Njt th 1\n"
"bFy be 1\n"
"lhY th 1\n"
"eWj er 1\n"
"jbM ij 1\n"
"Xsg ng 1\n"
"Rsd de 1\n"
"flF le 1\n"
"Phz th 1\n"
"xWs st 1\n"
"bCw wa 1\n"
"gfJ ng 1\n"
"qVo qu 1\n"
"eQh th 1\n"
"vcP ch 1\n"
"mDj ij 1\n"
"qTs qu 1\n"
"Xgs ng 1\n"
"Vuq qu 1\n"
"ufN qu 1\n"
"xBs st 1\n"
"pTk ka 1\n"
"fSq qu 1\n"
"mbD me 1\n"
"Vwz sz 1\n"
"hhQ th 1\n"
"kfP ka 1\n"
"Pwq qu 1\n"
"dhG th 1\n"
"qZj qu 1\n"
"yRj ij 1\n"
"yCs st 1\n"
"fjN ij 1\n"
"Rqg ng 1\n"
"jJh th 1\n"
"dlR le 1\n"
"Xmb me 1\n"
"Jjt th 1\n"
"gqI ng 1\n"
"fqM qu 1\n"
"iVg ng 1\n"
"Hgu ng 1\n"
"iHw in 1\n"
"eQv er 1\n"
"mzE sz 1\n"
"fjZ ij 1\n"
"qNn an 1\n"
"wlE le 1\n"
"kGp ka 1\n"
"Iqv qu 1\n"
"kBn an 1\n"
"xZd de 1\n"
"Dkc ch 1\n"
"zlH le 1\n"
"txB th 1\n"
"tQr th 1\n"
"uOx qu 1\n"
"pJi in 1\n"
"zbL sz 1\n"
"xkD ka 1\n"
"scV ch 1\n"
"qXh th 1\n"
"kIq qu 1\n"
"xNn an 1\n"
"gJf ng 1\n"
"tmB th 1\n"
"tcK th 1\n"
"kwZ ka 1\n"
"uZj qu 1\n"
"snQ an 1\n"
"uKq qu 1\n"
"crX ch 1\n"
"hXy th 1\n"
"Zcc ch 1\n"
"Pfz sz 1\n"
"dwM de 1\n"
"qIy qu 1\n"
"xuP qu 1\n"
"wDw wa 1\n"
"Hjr er 1\n"
"dQf de 1\n"
"wvJ wa 1\n"
"tHm th 1\n"
"Ydw de 1\n"
"wxI wa 1\n"
"pOv va 1\n"
"Wmq qu 1\n"
"dhD th 1\n"
"qpw qu 1\n"
"bmC me 1\n"
"wcX ch 1\n"
"wjH ij 1\n"
"bWf be 1\n"
"Gdp de 1\n"
"Ldw de 1\n"
"Sbq qu 1\n"
"vZv va 1\n"
"Kwb wa 1\n"
"qhT th 1\n"
"yRf ny 1\n"
"hwC th 1\n"
"npJ an 1\n"
"jmV ij 1\n"
"vGg ng 1\n"
"xqF qu 1\n"
"Phm th 1\n"
"pWc ch 1\n"
"Vxk ka 1\n"
"sHz st 1\n"
"Wbx be 1\n"
"bfK be 1\n"
"Jgl ng 1\n"
"kTb ka 1\n"
"Kbf be 1\n"
"kzC sz 1\n"
"pKq qu 1\n"
"zwB sz 1\n"
"uZg ng 1\n"
"btI th 1\n"
"zXj sz 1\n"
"uzS qu 1\n"
"vWk ka 1\n"
"xrH er 1\n"
"oQc ch 1\n"
"zlT le 1\n"
"dfI de 1\n"
"Qmf me 1\n"
"sgE ng 1\n"
"Ysx st 1\n"
"Rzd de 1\n"
"xLd de 1\n"
"qsX qu 1\n"
"kqJ qu 1\n"
"kCm ka 1\n"
"bFm me 1\n"
"igQ ng 1\n"
"sRq qu 1\n"
"jGm ij 1\n"
"Szs st 1\n"
"Yvz sz 1\n"
"kXz sz 1\n"
"Gnz an 1\n"
"mWc ch 1\n"
"tDq th 1\n"
"gqz ng 1\n"
"nHb ng 1\n"
"tdM th 1\n"
"Ovx va 1\n"
"Znl an 1\n"
"wuE qu 1\n"
"zLt th 1\n"
"ofQ on 1\n"
"vYj ij 1\n"
"jyH ij 1\n"
"zqA qu 1\n"
"cJy ch 1\n"
"Wbf be 1\n"
"lTt th 1\n"
"klW le 1\n"
"Xxa an 1\n"
"fCz sz 1\n"
"lKf le 1\n"
"qwT qu 1\n"
"rHk er 1\n"
"dbN de 1\n"
"uUy qu 1\n"
"zgN ng 1\n"
"Pxg ng 1\n"
"pNc ch 1\n"
"cyJ ch 1\n"
"jpH ij 1\n"
"Vtf th 1\n"
"sjJ st 1\n"
"Qlh th 1\n"
"twV th 1\n"
"yGq qu 1\n"
"tVp th 1\n"
"ksQ st 1\n"
"xnT an 1\n"
"rpJ er 1\n"
"wzI sz 1\n"
"Zhp th 1\n"
"aDf an 1\n"
"Uxj ij 1\n"
"cPg ch 1\n"
"qSq qu 1\n"
"mKq qu 1\n"
"vBz sz 1\n"
"yPj ij 1\n"
"Vkz sz 1\n"
"qiB qu 1\n"
"tkJ th 1\n"
"Ouq qu 1\n"
"zoH on 1\n"
"qVt th 1\n"
"Gxs st 1\n"
"jzF sz 1\n"
"swH st 1\n"
"nBb an 1\n"
"zhQ th 1\n"
"yRn an 1\n"
"fnX an 1\n"
"qoQ qu 1\n"
"mxP me 1\n"
"bwR wa 1\n"
"gJj ng 1\n"
"qnk an 1\n"
"tMk th 1\n"
"dxO de 1\n"
"rzV er 1\n"
"vpP va 1\n"
"Nvz sz 1\n"
"Nfp pr 1\n"
"Cnz an 1\n"
"oTd on 1\n"
"dqG qu 1\n"
"Hmx me 1\n"
"psX st 1\n"
"swM st 1\n"
"dqC qu 1\n"
"Vwx wa 1\n"
"nXf an 1\n"
"wkY ka 1\n"
"wfC wa 1\n"
"qSr qu 1\n"
"qVc ch 1\n"
"kDn an 1\n"
"Yvb va 1\n"
"zqH qu 1\n"
"qxJ qu 1\n"
"zKj sz 1\n"
"jcN ch 1\n"
"tWk th 1\n"
"Rrz er 1\n"
"bmG me 1\n"
"srZ er 1\n"
"wWq qu 1\n"
"Cfh th 1\n"
"lNt th 1\n"
"hcV th 1\n"
"Znf an 1\n"
"Jhv th 1\n"
"qIp qu 1\n"
"vSz sz 1\n"
"feU er 1\n"
"xIi in 1\n"
"Zmq qu 1\n"
"eGf er 1\n"
"bQk ka 1\n"
"Xcb ch 1\n"
"nlK an 1\n"
"tmJ th 1\n"
"jlL le 1\n"
"mwC me 1\n"
"qjr qu 1\n"
"zBb sz 1\n"
"fhU th 1\n"
"sPq qu 1\n"
"sBf st 1\n"
"uXy qu 1\n"
"Lkx ka 1\n"
"rGz er 1\n"
"hXz th 1\n"
"zuW qu 1\n"
"Rvx va 1\n"
"bcJ ch 1\n"
"Eoj on 1\n"
"iVt in 1\n"
"yhH th 1\n"
"xVv va 1\n"
"pMr er 1\n"
"vZd de 1\n"
"Vvn an 1\n"
"iCv in 1\n"
"vQp va 1\n"
"vlB le 1\n"
"wVt th 1\n"
"Ugk ng 1\n"
"ktQ th 1\n"
"jCr er 1\n"
"qvz qu 1\n"
"bVf be 1\n"
"rPv er 1\n"
"wfH wa 1\n"
"hbU th 1\n"
"pjF ij 1\n"
"oXg ng 1\n"
"zSr er 1\n"
"wRb wa 1\n"
"Hcu ch 1\n"
"yxJ ny 1\n"
"lTc ch 1\n"
"bYb be 1\n"
"Wxz sz 1\n"
"vrE er 1\n"
"zGy sz 1\n"
"Jqm qu 1\n"
"rzI er 1\n"
"xgV gi 1\n"
"Rvw va 1\n"
"Vnx an 1\n"
"uJg ng 1\n"
"hFq th 1\n"
"Tgz ng 1\n"
"aQc an 1\n"
"xzJ sz 1\n"
"tNc th 1\n"
"jfA ij 1\n"
"ycO ch 1\n"
"Wkj ij 1\n"
"yBp pr 1\n"
"hgD th 1\n"
"iSx in 1\n"
"xCm me 1\n"
"yjX ij 1\n"
"uIh th 1\n"
"qgq ng 1\n"
"Tzj sz 1\n"
"yjO ij 1\n"
"yrY er 1\n"
"bmZ me 1\n"
"zqT qu 1\n"
"mBd de 1\n"
"qvK qu 1\n"
"zcA ch 1\n"
"xrX er 1\n"
"mJm me 1\n"
"Xqf qu 1\n"
"Pxk ka 1\n"
"aDb an 1\n"
"qXg ng 1\n"
"eGw er 1\n"
"hjD th 1\n"
"tTx th 1\n"
"oMd on 1\n"
"fKg ng 1\n"
"Npn an 1\n"
"kqU qu 1\n"
"lbF le 1\n"
"Hvj ij 1\n"
"qZe qu 1\n"
"lQj le 1\n"
"dkY de 1\n"
"dZl le 1\n"
"zZh th 1\n"
"qyM qu 1\n"
"dmJ de 1\n"
"kfK ka 1\n"
"iPq qu 1\n"
"zwU sz 1\n"
"pvS va 1\n"
"ihJ th 1\n"
"ucW ch 1\n"
"Jjz sz 1\n"
"mMd de 1\n"
"vpw va 1\n"
"xCg ng 1\n"
"hKs th 1\n"
"vlI le 1\n"
"Nmc ch 1\n"
"xzV sz 1\n"
"gZs ng 1\n"
"rRp er 1\n"
"Ufd de 1\n"
"fpF pr 1\n"
"fwY wa 1\n"
"Gxr er 1\n"
"xLr er 1\n"
"vzE sz 1\n"
"jRf ij 1\n"
"brR er 1\n"
"gkZ ng 1\n"
"dUy de 1\n"
"Xji in 1\n"
"Kdb de 1\n"
"jpC ij 1\n"
"oUj on 1\n"
"qmh th 1\n"
"qjL qu 1\n"
"wRs sz 1\n"
"jhM th 1\n"
"Rhr th 1\n"
"btN th 1\n"
"Pjq ij 1\n"
"xwU wa 1\n"
"qyE qu 1\n"
"Jxd de 1\n"
"Pqr qu 1\n"
"lRd le 1\n"
"jqI qu 1\n"
"qFs qu 1\n"
"Mwk ka 1\n"
"jEb ij 1\n"
"Nxy ny 1\n"
"Pzm sz 1\n"
"tfL th 1\n"
"vFc ch 1\n"
"jQg ng 1\n"
"Bnx an 1\n"
"lMv le 1\n"
"tKq th 1\n"
"eVq qu 1\n"
"Tyq qu 1\n"
"drJ er 1\n"
"oHw on 1\n"
"lFk le 1\n"
"jpW ij 1\n"
"Qjw ij 1\n"
"cNx ch 1\n"
"Bhz th 1\n"
"bhB th 1\n"
"pDx pr 1\n"
"xpY pr 1\n"
"tnH th 1\n"
"dfL de 1\n"
"hzL th 1\n"
"zNk sz 1\n"
"lBm le 1\n"
"lXl le 1\n"
"yPv va 1\n"
"Zcl ch 1\n"
"hMq th 1\n"
"rJj ri 1\n"
"aXw an 1\n"
"zsQ sz 1\n"
"cQm ch 1\n"
"Sqc ch 1\n"
"tKm th 1\n"
"hvO th 1\n"
"hGd th 1\n"
"Wbn an 1\n"
"vCf va 1\n"
"lGg ng 1\n"
"vDh th 1\n"
"wDq qu 1\n"
"xRy ny 1\n"
"vXi in 1\n"
"qiQ qu 1\n"
"cFs ch 1\n"
"Lhp th 1\n"
"xEp pr 1\n"
"fQt th 1\n"
"cJv ch 1\n"
"lzO le 1\n"
"Fxk ka 1\n"
"tDd th 1\n"
"Xnx an 1\n"
"txC th 1\n"
"tGb th 1\n"
"zvG sz 1\n"
"gpC ng 1\n"
"pxD pr 1\n"
"Zfp pr 1\n"
"oWt th 1\n"
"vvV va 1\n"
"Gwf wa 1\n"
"Ycv ch 1\n"
"gcZ ch 1\n"
"mMw me 1\n"
"yQl le 1\n"
"uGp qu 1\n"
"lNj le 1\n"
"Ycm ch 1\n"
"vIx va 1\n"
"yLp pr 1\n"
"mRx me 1\n"
"nrK an 1\n"
"Zyh th 1\n"
"Nct th 1\n"
"Qml le 1\n"
"zPd de 1\n"
"dWq qu 1\n"
"Egx ng 1\n"
"vNs st 1\n"
"sNl le 1\n"
"pdW de 1\n"
"Snh th 1\n"
"yrP er 1\n"
"fJl le 1\n"
"tVg th 1\n"
"jvC ij 1\n"
"yhN th 1\n"
"qdC qu 1\n"
"pmT me 1\n"
"Lbg ng 1\n"
"xpJ pr 1\n"
"mYt th 1\n"
"bwV wa 1\n"
"wjD ij 1\n"
"fqC qu 1\n"
"xUf fo 1\n"
"dhU th 1\n"
"bZb be 1\n"
"twD th 1\n"
"bbM be 1\n"
"hgC th 1\n"
"dKb de 1\n"
"vJm va 1\n"
"wEq qu 1\n"
"Ofq qu 1\n"
"cXl ch 1\n"
"wpV pr 1\n"
"tqM th 1\n"
"pUf pr 1\n"
"Twx wa 1\n"
"Mgq ng 1\n"
"vQo on 1\n"
"yjT ij 1\n"
"aVd an 1\n"
"eHp er 1\n"
"vGv va 1\n"
"srG er 1\n"
"qVb qu 1\n"
"tlM th 1\n"
"nrT an 1\n"
"zRh th 1\n"
"cLr ch 1\n"
"lrH er 1\n"
"wTl le 1\n"
"cvI ch 1\n"
"kqN qu 1\n"
"Ixp pr 1\n"
"xeQ er 1\n"
"cNy ch 1\n"
"kRh th 1\n"
"ruY qu 1\n"
"Xcq ch 1\n"
"Kzb bi 1\n"
"Wxh th 1\n"
"pjM ij 1\n"
"jdO de 1\n"
"Jfy ny 1\n"
"bVz sz 1\n"
"dQo on 1\n"
"ncQ an 1\n"
"pVw pr 1\n"
"Sxj ij 1\n"
"Ubp pr 1\n"
"wvC va 1\n"
"khG th 1\n"
"cqF ch 1\n"
"Nxj ij 1\n"
"wDm me 1\n"
"yDd de 1\n"
"iyI in 1\n"
"eXq qu 1\n"
"hqP th 1\n"
"Kxr er 1\n"
"vsY st 1\n"
"Twb wa 1\n"
"fqw qu 1\n"
"wmC me 1\n"
"vFx va 1\n"
"vnC an 1\n"
"nWq an 1\n"
"hzB th 1\n"
"Kfk ka 1\n"
"tQe th 1\n"
"juW qu 1\n"
"qlX qu 1\n"
"hGw th 1\n"
"Oqd qu 1\n"
"Npw pr 1\n"
"hgW th 1\n"
"fxM fo 1\n"
"jSy ij 1\n"
"fJt th 1\n"
"mjG ij 1\n"
"tgV th 1\n"
"Ogx ng 1\n"
"Hbx be 1\n"
"Ljl le 1\n"
"ivZ in 1\n"
"bmY me 1\n"
"Qfp pr 1\n"
"wfQ wa 1\n"
"hCg th 1\n"
"vuU qu 1\n"
"ydZ de 1\n"
"vVk ka 1\n"
"mZf me 1\n"
"lOq qu 1\n"
"qIv qu 1\n"
"xZb be 1\n"
"xqk qu 1\n"
"Wmy me 1\n"
"Jqi qu 1\n"
"cxL ch 1\n"
"Ztq th 1\n"
"tdT th 1\n"
"uWt th 1\n"
"xGz sz 1\n"
"Wwk ka 1\n"
"pBk ka 1\n"
"yqg ng 1\n"
"cYl ch 1\n"
"ynW an 1\n"
"wyJ wa 1\n"
"qGy qu 1\n"
"fNp pr 1\n"
"hFs th 1\n"
"Yxu qu 1\n"
"kvJ ka 1\n"
"Fxz sz 1\n"
"twG th 1\n"
"qvG qu 1\n"
"vRp va 1\n"
"Qqi qu 1\n"
"gzE ng 1\n"
"pNl le 1\n"
"zpW sz 1\n"
"dcP ch 1\n"
"cPx ch 1\n"
"wcQ ch 1\n"
"pQc ch 1\n"
"qyF qu 1\n"
"zcX ch 1\n"
"wqk qu 1\n"
"kmY ka 1\n"
"qlG qu 1\n"
"xEz sz 1\n"
"pqV qu 1\n"
"Ohp th 1\n"
"xdM de 1\n"
"fLp pr 1\n"
"qAe qu 1\n"
"Xwv va 1\n"
"Lzi in 1\n"
"qOk qu 1\n"
"cXn an 1\n"
"Kds de 1\n"
"gvU ng 1\n"
"fPk ka 1\n"
"nZr an 1\n"
"Hxq qu 1\n"
"fCm me 1\n"
"qfD qu 1\n"
"Wfv va 1\n"
"qfb qu 1\n"
"jqC qu 1\n"
"fuX qu 1\n"
"qfA qu 1\n"
"Rlt th 1\n"
"xjD ij 1\n"
"wtF th 1\n"
"Xmz sz 1\n"
"pWp pr 1\n"
"Qxv va 1\n"
"zVf sz 1\n"
"gmZ ng 1\n"
"qdU qu 1\n"
"jqV qu 1\n"
"gXc ch 1\n"
"qmK qu 1\n"
"Gfj ij 1\n"
"cQr ch 1\n"
"Yhr th 1\n"
"vvS va 1\n"
"uDb qu 1\n"
"cdB ch 1\n"
"bvE va 1\n"
"xvS va 1\n"
"jRq qu 1\n"
"rvD er 1\n"
"Xyy ny 1\n"
"Jfi in 1\n"
"aBw an 1\n"
"nWc an 1\n"
"xBq qu 1\n"
"kgY ng 1\n"
"bGb bi 1\n"
"gjE ng 1\n"
"Rlw le 1\n"
"wrT er 1\n"
"bQr er 1\n"
"ljY le 1\n"
"qvU qu 1\n"
"fKm me 1\n"
"pTt th 1\n"
"zTw sz 1\n"
"qnV an 1\n"
"rWx er 1\n"
"nWd an 1\n"
"nKf an 1\n"
"kMf ka 1\n"
"fkG ka 1\n"
"bwX wa 1\n"
"cwV ch 1\n"
"uwK qu 1\n"
"rLv er 1\n"
"zMb sz 1\n"
"zpZ sz 1\n"
"rMq qu 1\n"
"Ttj th 1\n"
"gvO ng 1\n"
"Jcz ch 1\n"
"Cyx ny 1\n"
"njX an 1\n"
"aVx an 1\n"
"qXn an 1\n"
"Uqs qu 1\n"
"dVz de 1\n"
"Rcp ch 1\n"
"eKg ng 1\n"
"Xzn in 1\n"
"vyF va 1\n"
"Klc ch 1\n"
"xdI de 1\n"
"Hqb qu 1\n"
"xEe er 1\n"
"qpI qu 1\n"
"gDx ng 1\n"
"Jhf th 1\n"
"quK un 1\n"
"vgU ng 1\n"
"rWv er 1\n"
"Pnm an 1\n"
"nLm an 1\n"
"Bhj th 1\n"
"bPt th 1\n"
"jpI ij 1\n"
"tLz th 1\n"
"vpS va 1\n"
"Fxj ij 1\n"
"qDs qu 1\n"
"wzM sz 1\n"
"gwJ ng 1\n"
"zBw sz 1\n"
"qGv qu 1\n"
"rLh th 1\n"
"Bjl le 1\n"
"hfH th 1\n"
"clW ch 1\n"
"Rgk ng 1\n"
"Gsg ng 1\n"
"Uvx va 1\n"
"Qgv ng 1\n"
"gfX ng 1\n"
"rQv er 1\n"
"xvG va 1\n"
"kjx ij 1\n"
"dGf de 1\n"
"fcA ch 1\n"
"Ehq th 1\n"
"zBz sz 1\n"
"Gpk ka 1\n"
"tBv th 1\n"
"Xfg ng 1\n"
"yJm me 1\n"
"sqT qu 1\n"
"prY er 1\n"
"Dqo qu 1\n"
"Jzg ng 1\n"
"qMp qu 1\n"
"yfM ny 1\n"
"Gxf fo 1\n"
"wzP sz 1\n"
"zNm sz 1\n"
"wKg ng 1\n"
"Rrd er 1\n"
"Hvw va 1\n"
"gfD ng 1\n"
"Wmz sz 1\n"
"cJn an 1\n"
"nTf an 1\n"
"uvW qu 1\n"
"uPf qu 1\n"
"vwR va 1\n"
"bMf be 1\n"
"wIu qu 1\n"
"kxY ka 1\n"
"gZk ng 1\n"
"qFd qu 1\n"
"bMl le 1\n"
"wHl le 1\n"
"wVg ng 1\n"
"wlX le 1\n"
"fsL st 1\n"
"pRf pr 1\n"
"zsX st 1\n"
"qBk qu 1\n"
"Xzp sz 1\n"
"jdR de 1\n"
"Zlz le 1\n"
"Wfc ch 1\n"
"Rjv ij 1\n"
"vFz sz 1\n"
"tkV th 1\n"
"Xbw wa 1\n"
"xQc ch 1\n"
"Kxy ny 1\n"
"xCv va 1\n"
"nqV an 1\n"
"Wwx wa 1\n"
"kdW de 1\n"
"pkI ka 1\n"
"ohS th 1\n"
"Zdc ch 1\n"
"mCg ng 1\n"
"sxL st 1\n"
"Qrx er 1\n"
"qXw qu 1\n"
"wqQ qu 1\n"
"ijK in 1\n"
"sFz st 1\n"
"Hlw le 1\n"
"Gqn an 1\n"
"xPk ka 1\n"
"wZq qu 1\n"
"jqm qu 1\n"
"Lzp sz 1\n"
"Bdz de 1\n"
"wQl le 1\n"
"wtJ th 1\n"
"Uyi in 1\n"
"Wcy ch 1\n"
"wqH qu 1\n"
"Bns an 1\n"
"cDt th 1\n"
"xJv va 1\n"
"Wfz sz 1\n"
"xhP th 1\n"
"cWp ch 1\n"
"rqZ qu 1\n"
"bkB ka 1\n"
"Wtl th 1\n"
"gzf ng 1\n"
"bMr er 1\n"
"pxN pr 1\n"
"vhV th 1\n"
"kqX qu 1\n"
"Kdq qu 1\n"
"vQl le 1\n"
"ykC ka 1\n"
"zMh th 1\n"
"Eqz qu 1\n"
"lXq qu 1\n"
"zmZ sz 1\n"
"qpB qu 1\n"
"vGj ij 1\n"
"Tjx zj 1\n"
"tvK th 1\n"
"gYc ch 1\n"
"lFc ch 1\n"
"iJt th 1\n"
"Pkx ka 1\n"
"cDv ch 1\n"
"Yyd de 1\n"
"Vcq ch 1\n"
"Xhq th 1\n"
"zNf sz 1\n"
"vcD ch 1\n"
"bnW an 1\n"
"uvQ qu 1\n"
"Zzj sz 1\n"
"gPj ng 1\n"
"jwD ij 1\n"
"jpO ij 1\n"
"bDx be 1\n"
"vEi in 1\n"
"Zct th 1\n"
"wrX er 1\n"
"dhS th 1\n"
"zjJ sz 1\n"
"dDk de 1\n"
"srJ er 1\n"
"aWg an 1\n"
"mvJ va 1\n"
"Ytc th 1\n"
"jiQ in 1\n"
"tFz th 1\n"
"sJl le 1\n"
"vZq qu 1\n"
"xUd de 1\n"
"oqB qu 1\n"
"xDh th 1\n"
"hfE th 1\n"
"mSb me 1\n"
"jmR ij 1\n"
"rFp er 1\n"
"Xjy ij 1\n"
"bPp pr 1\n"
"iqQ ti 1\n"
"mfq qu 1\n"
"txL th 1\n"
"jBd de 1\n"
"Xvq qu 1\n"
"dvY de 1\n"
"sdM de 1\n"
"xgY ng 1\n"
"rYh th 1\n"
"vlA le 1\n"
"pFb pr 1\n"
"yFz sz 1\n"
"gcK ch 1\n"
"xfZ fo 1\n"
"jDc ch 1\n"
"yNv va 1\n"
"tKt th 1\n"
"wtU th 1\n"
"bHk ka 1\n"
"qCw qu 1\n"
"Zca an 1\n"
"kDw ka 1\n"
"Ywc ch 1\n"
"pXs st 1\n"
"yMm me 1\n"
"Gwq qu 1\n"
"mYv va 1\n"
"wCx wa 1\n"
"jZx ij 1\n"
"oQd on 1\n"
"Fzk sz 1\n"
"lwF le 1\n"
"Xzk sz 1\n"
"Njx ij 1\n"
"yoI on 1\n"
"sJm st 1\n"
"wKk ka 1\n"
"Qth ch 1\n"
"Llz le 1\n"
"gVf gi 1\n"
"pPq qu 1\n"
"lGy le 1\n"
"gzR ng 1\n"
"rXg ng 1\n"
"Npf pr 1\n"
"wvR va 1\n"
"yXs st 1\n"
"mMl li 1\n"
"bYx be 1\n"
"fzZ sz 1\n"
"vrG er 1\n"
"Kdk de 1\n"
"yqw qu 1\n"
"Lkq qu 1\n"
"jKs st 1\n"
"Zqx qu 1\n"
"Pfm me 1\n"
"rlW er 1\n"
"hPv th 1\n"
"Ojx ij 1\n"
"Gtq th 1\n"
"vtJ th 1\n"
"Wly le 1\n"
"yHd de 1\n"
"kQb ka 1\n"
"Ldc de 1\n"
"sUx st 1\n"
"cJg ch 1\n"
"fLd de 1\n"
"Mjq qu 1\n"
"Cjm ij 1\n"
"awX an 1\n"
"Gtl th 1\n"
"wzN sz 1\n"
"bqx qu 1\n"
"fAq qu 1\n"
"ezX er 1\n"
"cBx ch 1\n"
"csX ch 1\n"
"cUf ch 1\n"
"qsJ qu 1\n"
"hsZ th 1\n"
"qzg ng 1\n"
"Qgk ng 1\n"
"Nxg ng 1\n"
"Hqa an 1\n"
"rXl er 1\n"
"nlP an 1\n"
"aVg an 1\n"
"yhG th 1\n"
"kfA ka 1\n"
"Vmk mG 1\n"
"jKm ij 1\n"
"hPd th 1\n"
"aPd an 1\n"
"bYy be 1\n"
"bnZ an 1\n"
"Gsj st 1\n"
"kxQ ka 1\n"
"vkF ka 1\n"
"jzS sz 1\n"
"fWm me 1\n"
"Qcu ch 1\n"
"rZf er 1\n"
"jbZ ij 1\n"
"aQj an 1\n"
"bzO sz 1\n"
"fZq qu 1\n"
"lrN er 1\n"
"fkL ka 1\n"
"Dqv qu 1\n"
"zkC sz 1\n"
"sLw st 1\n"
"Nvr er 1\n"
"Nby be 1\n"
"eMh th 1\n"
"wFc ch 1\n"
"Cxz sz 1\n"
"iZp in 1\n"
"dvZ de 1\n"
"vIh th 1\n"
"qCl qu 1\n"
"Pzo on 1\n"
"vNq qu 1\n"
"zqK qu 1\n"
"Lmx me 1\n"
"xVt th 1\n"
"glD ng 1\n"
"Gbf be 1\n"
"Jvq qu 1\n"
"zFw sz 1\n"
"tMq th 1\n"
"vkJ ka 1\n"
"Sxu qu 1\n"
"afU an 1\n"
"mHb me 1\n"
"jxU ij 1\n"
"cJl ch 1\n"
"uqE qu 1\n"
"Nqq qu 1\n"
"xGt th 1\n"
"czG ch 1\n"
"Kfg ng 1\n"
"zWh th 1\n"
"yXm me 1\n"
"fnD an 1\n"
"Jrd er 1\n"
"oxZ on 1\n"
"hXn th 1\n"
"fqI qu 1\n"
"wAo on 1\n"
"iGk in 1\n"
"xEw wa 1\n"
"fVq qu 1\n"
"ytU th 1\n"
"bhG th 1\n"
"oQz on 1\n"
"pgO ng 1\n"
"Yqm qu 1\n"
"bJi in 1\n"
"kcV ch 1\n"
"knM an 1\n"
"Cwr er 1\n"
"Wgd ng 1\n"
"bpT pr 1\n"
"Jdj de 1\n"
"Nbq qu 1\n"
"twJ th 1\n"
"Qep er 1\n"
"Kdc ch 1\n"
"kQq qu 1\n"
"rPq qu 1\n"
"lWp le 1\n"
"Fbq qu 1\n"
"bVk ka 1\n"
"zlI le 1\n"
"Bzp sz 1\n"
"jfK ij 1\n"
"Yvm va 1\n"
"Ftm th 1\n"
"aMj an 1\n"
"zzV sz 1\n"
"zOa an 1\n"
"mHc ch 1\n"
"xWn an 1\n"
"fFh th 1\n"
"sDv st 1\n"
"vmD va 1\n"
"xjL ij 1\n"
"iBq qu 1\n"
"jqT qu 1\n"
"hsR th 1\n"
"Qxo on 1\n"
"jsG st 1\n"
"cXb ch 1\n"
"Ybj ij 1\n"
"xeJ er 1\n"
"oPq qu 1\n"
"yXt th 1\n"
"xvL va 1\n"
"jcF ch 1\n"
"kFb ka 1\n"
"jXv ij 1\n"
"Aox on 1\n"
"zkQ sz 1\n"
"fPd de 1\n"
"Fvx va 1\n"
"fbX be 1\n"
"oCf on 1\n"
"Yjd de 1\n"
"Ppf pr 1\n"
"Njs st 1\n"
"cZh th 1\n"
"vnG an 1\n"
"cwJ cm 1\n"
"qJl qu 1\n"
"gNf ng 1\n"
"Tfv va 1\n"
"vwK va 1\n"
"Zcs ch 1\n"
"eBv er 1\n"
"qLf qu 1\n"
"Yqt th 1\n"
"crD ch 1\n"
"Icj ch 1\n"
"qBl qu 1\n"
"gzX ng 1\n"
"ujF qu 1\n"
"vxU va 1\n"
"kZt th 1\n"
"Ldh th 1\n"
"bfM be 1\n"
"mQm QO 1\n"
"zlQ le 1\n"
"jbU ij 1\n"
"Kvz sz 1\n"
"Uxw wa 1\n"
"pjS ij 1\n"
"Xvv va 1\n"
"kjI ij 1\n"
"cYi ch 1\n"
"nJn an 1\n"
"Qxz sz 1\n"
"aNw an 1\n"
"Jfp pr 1\n"
"bNz sz 1\n"
"xdQ de 1\n"
"Bzk sz 1\n"
"qZz qu 1\n"
"Ycp ch 1\n"
"pGs st 1\n"
"kCf ka 1\n"
"gwP ng 1\n"
"wbV wa 1\n"
"Eqt eq 1\n"
"Xhn th 1\n"
"oUf on 1\n"
"dKc ch 1\n"
"sxN st 1\n"
"Ofz sz 1\n"
"gCp ng 1\n"
"bhI th 1\n"
"hgU th 1\n"
"knU an 1\n"
"kjT ij 1\n"
"fsZ st 1\n"
"lGv le 1\n"
"wMd de 1\n"
"ukQ qu 1\n"
"Ghk th 1\n"
"kRw ka 1\n"
"zRc ch 1\n"
"gwK ng 1\n"
"vJp va 1\n"
"tVc th 1\n"
"pqT qu 1\n"
"iYl in 1\n"
"xLv va 1\n"
"Xdq qu 1\n"
"zcO ch 1\n"
"plM le 1\n"
"bDz sz 1\n"
"Nmx me 1\n"
"dKv de 1\n"
"hPk th 1\n"
"Tjy ij 1\n"
"wYs st 1\n"
"nfJ an 1\n"
"tfC th 1\n"
"zJt th 1\n"
"lKp le 1\n"
"Iyc ch 1\n"
"xuB qu 1\n"
"eKx er 1\n"
"sZf st 1\n"
"zpQ sz 1\n"
"sfL st 1\n"
"mjT ij 1\n"
"zXw sz 1\n"
"yKt th 1\n"
"rwV er 1\n"
"pjB ij 1\n"
"qYb qu 1\n"
"bYz sz 1\n"
"qqY eq 1\n"
"uIf qu 1\n"
"jTc ch 1\n"
"sqC qu 1\n"
"uJc ch 1\n"
"dGx de 1\n"
"swF st 1\n"
"Hfn an 1\n"
"Htb th 1\n"
"pfW hW 1\n"
"iyG in 1\n"
"zPc ch 1\n"
"yzV sz 1\n"
"pVz sz 1\n"
"sPg ng 1\n"
"fKj ij 1\n"
"eFb er 1\n"
"Qji jS 1\n"
"mtH th 1\n"
"wgZ ng 1\n"
"hHd th 1\n"
"fTt th 1\n"
"gxZ ng 1\n"
"Ktg th 1\n"
"hWd th 1\n"
"fWq qu 1\n"
"wSv va 1\n"
"Fzn an 1\n"
"ghH th 1\n"
"npW an 1\n"
"jvP ij 1\n"
"uYk qu 1\n"
"Uxn an 1\n"
"Sqg ng 1\n"
"zcJ ch 1\n"
"dMr er 1\n"
"Zgc ch 1\n"
"qGp qu 1\n"
"oVq qu 1\n"
"oUa an 1\n"
"oqV qu 1\n"
"jGs st 1\n"
"Ybq qu 1\n"
"qRf qu 1\n"
"brZ er 1\n"
"qTv qu 1\n"
"wZf wa 1\n"
"gOj ng 1\n"
"Jji in 1\n"
"Ppx pr 1\n"
"qwB qu 1\n"
"qcJ ch 1\n"
"fFz sz 1\n"
"wwY wa 1\n"
"kTc ch 1\n"
"uGn an 1\n"
"eQq qu 1\n"
"qGk qu 1\n"
"dpV de 1\n"
"vTm va 1\n"
"Ojq qu 1\n"
"dpX de 1\n"
"bYf be 1\n"
"tjV th 1\n"
"Lzn LG 1\n"
"Yjm ij 1\n"
"uYw qu 1\n"
"Zdg ng 1\n"
"hXs th 1\n"
"Iwp pr 1\n"
"hJw th 1\n"
"Tfd de 1\n"
"cxO ch 1\n"
"Qqy qu 1\n"
"lDv le 1\n"
"zsO st 1\n"
"mrG er 1\n"
"cjJ ch 1\n"
"dgD ng 1\n"
"cUw ch 1\n"
"zdB de 1\n"
"jlU le 1\n"
"bBf be 1\n"
"qbJ qu 1\n"
"qlR qu 1\n"
"cWc ch 1\n"
"Xgb ng 1\n"
"zrU er 1\n"
"bgI ng 1\n"
"wjJ ij 1\n"
"mvU va 1\n"
"rCp GC 1\n"
"nVx an 1\n"
"xbG be 1\n"
"tdN th 1\n"
"yjR ij 1\n"
"wQj ij 1\n"
"xzZ sz 1\n"
"qUk qu 1\n"
"xjY ij 1\n"
"Jxz sz 1\n"
"xZs st 1\n"
"vZx va 1\n"
"lRs le 1\n"
"vwp va 1\n"
"wpj ij 1\n"
"swS st 1\n"
"Eqx qu 1\n"
"vEw va 1\n"
"tkQ th 1\n"
"vgX ng 1\n"
"Rwb wa 1\n"
"sjW st 1\n"
"dXm de 1\n"
"fvY vK 1\n"
"lrO er 1\n"
"Ldx de 1\n"
"cxV ch 1\n"
"qFh th 1\n"
"qVw qu 1\n"
"Pyf ny 1\n"
"Kxz sz 1\n"
"hwJ th 1\n"
"cpL ch 1\n"
"Hge ng 1\n"
"Wbh th 1\n"
"lQq qu 1\n"
"hDl th 1\n"
"Zph th 1\n"
"wZj ij 1\n"
"Zqt th 1\n"
"xmU me 1\n"
"tUf th 1\n"
"qWo qu 1\n"
"Lrd er 1\n"
"pQs st 1\n"
"rZv er 1\n"
"mjI ij 1\n"
"xQy ny 1\n"
"vGy va 1\n"
"jwY ij 1\n"
"cNn an 1\n"
"zpP sz 1\n"
"vKd de 1\n"
"wVk ka 1\n"
"tMh ch 1\n"
"Ktd th 1\n"
"tpG th 1\n"
"iDf in 1\n"
"qKl qu 1\n"
"jLc ch 1\n"
"Jjl le 1\n"
"hcQ th 1\n"
"Tqg qu 1\n"
"bGk ka 1\n"
"jxV ij 1\n"
"fcC ch 1\n"
"Fwx wa 1\n"
"qPy qu 1\n"
"jmE ij 1\n"
"xmT me 1\n"
"lxC GC 1\n"
"lRr er 1\n"
"Qkl le 1\n"
"ihF th 1\n"
"Llt th 1\n"
"Kqe qu 1\n"
"Hhf th 1\n"
"nPq an 1\n"
"zvQ QO 1\n"
"jGy ij 1\n"
"lMk le 1\n"
"uOj qu 1\n"
"fdT de 1\n"
"qvH qu 1\n"
"pcZ ch 1\n"
"qkc ch 1\n"
"cbJ ch 1\n"
"gfK ng 1\n"
"pMt th 1\n"
"vpF va 1\n"
"dgP ng 1\n"
"mxF me 1\n"
"rZp er 1\n"
"cGd ch 1\n"
"sPx st 1\n"
"rGd er 1\n"
"gbQ ng 1\n"
"Dfz sz 1\n"
"sjC st 1\n"
"zSx sz 1\n"
"qIo qu 1\n"
"dIw de 1\n"
"kpF ka 1\n"
"eUw er 1\n"
"Hxc ch 1\n"
"yvG va 1\n"
"vUf va 1\n"
"fjF ij 1\n"
"kLq qu 1\n"
"Zjt th 1\n"
"fLq qu 1\n"
"ydS de 1\n"
"zwK sz 1\n"
"hHy th 1\n"
"Ssw st 1\n"
"hjG th 1\n"
"Ddp de 1\n"
"bPs st 1\n"
"Wpq qu 1\n"
"crW ch 1\n"
"Xpj ij 1\n"
"oXr er 1\n"
"vjK ij 1\n"
"Vzf sz 1\n"
"lYd le 1\n"
"Odx de 1\n"
"hVt th 1\n"
"gRc ch 1\n"
"Ztf th 1\n"
"hVj th 1\n"
"Jjf ij 1\n"
"jFb ij 1\n"
"Lhf th 1\n"
"jlO le 1\n"
"jvB ij 1\n"
"gbN ng 1\n"
"vPm va 1\n"
"tQd th 1\n"
"Vvj ij 1\n"
"rqX qu 1\n"
"zEo on 1\n"
"jsB st 1\n"
"qmH qu 1\n"
"btE th 1\n"
"Wdd de 1\n"
"Dmj ij 1\n"
"ywI wa 1\n"
"jpQ ij 1\n"
"uXs qu 1\n"
"bYm me 1\n"
"oFz on 1\n"
"tBg th 1\n"
"cCn ch 1\n"
"dZg ng 1\n"
"wrL er 1\n"
"Jry er 1\n"
"iKd in 1\n"
"vcN ch 1\n"
"zNp sz 1\n"
"nRf an 1\n"
"dcH ch 1\n"
"qaO an 1\n"
"uaQ an 1\n"
"jxL ij 1\n"
"mUf me 1\n"
"vOk ka 1\n"
"Pxt th 1\n"
"fuQ qu 1\n"
"sfN st 1\n"
"Qlv le 1\n"
"bZy be 1\n"
"vEq vK 1\n"
"Xvg ng 1\n"
"Jxb be 1\n"
"zGz sz 1\n"
"Cqf qu 1\n"
"sPp st 1\n"
"vAq qu 1\n"
"kWd de 1\n"
"rcZ cm 1\n"
"lDs le 1\n"
"xDd de 1\n"
"pSj ij 1\n"
"vwS va 1\n"
"kgQ ng 1\n"
"crT ch 1\n"
"fKs st 1\n"
"qhc th 1\n"
"gMl ng 1\n"
"zKt th 1\n"
"jdF de 1\n"
"cfN ch 1\n"
"sdO st 1\n"
"kHh th 1\n"
"xvE va 1\n"
"bPf be 1\n"
"rzX er 1\n"
"vSj ij 1\n"
"dFf de 1\n"
"vXl le 1\n"
"bRv va 1\n"
"Zxw wa 1\n"
"Xzw sz 1\n"
"vrR er 1\n"
"xHb be 1\n"
"qeE qu 1\n"
"jrQ er 1\n"
"vkI ka 1\n"
"frY er 1\n"
"jqL qu 1\n"
"cZj ch 1\n"
"Tmg ng 1\n"
"mHw me 1\n"
"dqS qu 1\n"
"qlI qu 1\n"
"Zvb va 1\n"
"Klx le 1\n"
"gbS ng 1\n"
"sbQ st 1\n"
"quF un 1\n"
"qzT qu 1\n"
"qaI an 1\n"
"Vmd de 1\n"
"qaQ an 1\n"
"Qkb ka 1\n"
"Xjb ij 1\n"
"oCq GC 1\n"
"qQh QO 1\n"
"cwO ch 1\n"
"tMf th 1\n"
"zrK er 1\n"
"wKy wa 1\n"
"wKb wa 1\n"
"cqS ch 1\n"
"iGv in 1\n"
"xXw wa 1\n"
"fMx fo 1\n"
"Zmv va 1\n"
"Yqq qu 1\n"
"kDh th 1\n"
"Jxy ny 1\n"
"yyE ny 1\n"
"sUv st 1\n"
"cVr ch 1\n"
"bqH qu 1\n"
"Wgq qu 1\n"
"uqQ qu 1\n"
"bTg ng 1\n"
"iMv in 1\n"
"qWk qu 1\n"
"fdV de 1\n"
"oQq qu 1\n"
"nZp an 1\n"
"zoY on 1\n"
"jRk ij 1\n"
"qPj qu 1\n"
"uqL qu 1\n"
"cqX ch 1\n"
"lBq qu 1\n"
"fpX pr 1\n"
"bYw wa 1\n"
"Yeq qu 1\n"
"hjN th 1\n"
"tqW th 1\n"
"jhT th 1\n"
"cvF ch 1\n"
"Ycx ch 1\n"
"jFs st 1\n"
"Hdy de 1\n"
"lrZ er 1\n"
"fZv va 1\n"
"Tfw wa 1\n"
"zrI er 1\n"
"dDv de 1\n"
"xeH er 1\n"
"lzH le 1\n"
"sLr er 1\n"
"iKq qu 1\n"
"Fzc cm 1\n"
"xRd de 1\n"
"fSd de 1\n"
"qwF qu 1\n"
"wxY wa 1\n"
"Ykw ka 1\n"
"oVp on 1\n"
"cgB ch 1\n"
"bFh th 1\n"
"njT an 1\n"
"dZz de 1\n"
"bhS th 1\n"
"Fzu qu 1\n"
"fHm me 1\n"
"vNz sz 1\n"
"qlF qu 1\n"
"Lvf va 1\n"
"zpU sz 1\n"
"jtL th 1\n"
"cQq ch 1\n"
"mKm me 1\n"
"Rwc ch 1\n"
"jrO er 1\n"
"npB an 1\n"
"Qtx th 1\n"
"Mqj qu 1\n"
"Oqx qu 1\n"
"Dzp sz 1\n"
"hVg th 1\n"
"pTn an 1\n"
"gQj ng 1\n"
"mTn an 1\n"
"tQv th 1\n"
"lZh th 1\n"
"kJj ij 1\n"
"crP ch 1\n"
"mqC qu 1\n"
"Dwl le 1\n"
"vVj ij 1\n"
"hqT th 1\n"
"mJw me 1\n"
"txT th 1\n"
"wZm me 1\n"
"Xnq an 1\n"
"hfU th 1\n"
"kVr er 1\n"
"gVp ng 1\n"
"nBp an 1\n"
"xnZ an 1\n"
"jqA qu 1\n"
"Pzk sz 1\n"
"fJq qu 1\n"
"Gnf an 1\n"
"Kxp pr 1\n"
"dXl Xm 1\n"
"hwL th 1\n"
"Rrn an 1\n"
"klL le 1\n"
"fOg ng 1\n"
"Qwx wa 1\n"
"Cmx me 1\n"
"Fbf be 1\n"
"hWq th 1\n"
"bSw wa 1\n"
"Bxr er 1\n"
"zcB ch 1\n"
"lvX le 1\n"
"Kkx ka 1\n"
"qfI qu 1\n"
"uKg qu 1\n"
"Yku qu 1\n"
"jJz sz 1\n"
"uIp qu 1\n"
"qAd qu 1\n"
"pfH pr 1\n"
"Qwf wa 1\n"
"wbU wa 1\n"
"vDv va 1\n"
"gJn an 1\n"
"zlR le 1\n"
"mXr er 1\n"
"rHx er 1\n"
"oVz on 1\n"
"gtG th 1\n"
"lrK HK 1\n"
"Wxe er 1\n"
"pnJ an 1\n"
"Fqy qu 1\n"
"jVl le 1\n"
"cbP ch 1\n"
"Gjc jS 1\n"
"jQs st 1\n"
"tvV th 1\n"
"Hzk sz 1\n"
"jyW ij 1\n"
"Xbf be 1\n"
"qfS qu 1\n"
"Wvp va 1\n"
"wbL wa 1\n"
"mkO ka 1\n"
"eqB qu 1\n"
"dvS de 1\n"
"zGh th 1\n"
"vWu qu 1\n"
"flX le 1\n"
"xJq qu 1\n"
"qLk qu 1\n"
"vNl le 1\n"
"kzQ sz 1\n"
"Czv sz 1\n"
"knV an 1\n"
"Rjb ij 1\n"
"bNq qu 1\n"
"zPm sz 1\n"
"qxB qu 1\n"
"Lhh th 1\n"
"Uvt th 1\n"
"xfU fo 1\n"
"iNp in 1\n"
"yYg ng 1\n"
"oPb on 1\n"
"qiW qu 1\n"
"ycD ch 1\n"
"wVz sz 1\n"
"wGq qu 1\n"
"hRb th 1\n"
"xbB be 1\n"
"sZl le 1\n"
"gxO ng 1\n"
"wFk ka 1\n"
"Mxd de 1\n"
"dxP de 1\n"
"lRq qu 1\n"
"hbZ th 1\n"
"Eao an 1\n"
"zgA ng 1\n"
"qcW ch 1\n"
"vmQ va 1\n"
"Yqf qu 1\n"
"wiO in 1\n"
"xOe er 1\n"
"Hfy ny 1\n"
"bfS be 1\n"
"Qhn th 1\n"
"Cmk ka 1\n"
"lYs le 1\n"
"Nqt th 1\n"
"qeJ qu 1\n"
"ztJ th 1\n"
"pMv va 1\n"
"uhW th 1\n"
"jSb ij 1\n"
"dYh th 1\n"
"cfW ch 1\n"
"gSx ng 1\n"
"qSv qu 1\n"
"jCs st 1\n"
"pwC pr 1\n"
"Gxq qu 1\n"
"fMq qu 1\n"
"kkC ka 1\n"
"uqI qu 1\n"
"zBk sz 1\n"
"zsW st 1\n"
"fZb be 1\n"
"xjb ij 1\n"
"vHq qu 1\n"
"fwN wa 1\n"
"vMw va 1\n"
"Hhq th 1\n"
"csJ ch 1\n"
"brJ er 1\n"
"xvM va 1\n"
"mXn an 1\n"
"qWw wa 1\n"
"dxZ de 1\n"
"sVj st 1\n"
"xrF er 1\n"
"pbU pr 1\n"
"Tfz sz 1\n"
"wqT qu 1\n"
"vcF ch 1\n"
"nrS an 1\n"
"Whz th 1\n"
"kgX ng 1\n"
"yXk ka 1\n"
"kJb ka 1\n"
"rZk er 1\n"
"pBc ch 1\n"
"gUv ng 1\n"
"Hqe qu 1\n"
"Kqj qu 1\n"
"oFj on 1\n"
"xbN be 1\n"
"pnK an 1\n"
"Lbw wa 1\n"
"dMb de 1\n"
"qSp qu 1\n"
"Zsv st 1\n"
"wrV er 1\n"
"uKf qu 1\n"
"mlY le 1\n"
"gxF ng 1\n"
"tjL th 1\n"
"Xrc ch 1\n"
"rvF er 1\n"
"mLq qu 1\n"
"jrK er 1\n"
"Qlz le 1\n"
"zxD sz 1\n"
"fdY de 1\n"
"jvD ij 1\n"
"xQg ng 1\n"
"qFu un 1\n"
"sfJ st 1\n"
"pIf pr 1\n"
"hxJ th 1\n"
"cNc ch 1\n"
"Idq qu 1\n"
"yHf ny 1\n"
"qXm qu 1\n"
"ylD le 1\n"
"zFq qu 1\n"
"jWp ij 1\n"
"eKp er 1\n"
"xhf th 1\n"
"ybV be 1\n"
"xXs st 1\n"
"Yhk th 1\n"
"fwX wa 1\n"
"bqK qu 1\n"
"nvY an 1\n"
"xvk ka 1\n"
"rbP er 1\n"
"sXl le 1\n"
"Uwt th 1\n"
"wmW me 1\n"
"pxV pr 1\n"
"njZ an 1\n"
"Tqk qu 1\n"
"zmE sz 1\n"
"Rqu un 1\n"
"qqM qu 1\n"
"dhQ th 1\n"
"uJz qu 1\n"
"Vqd qu 1\n"
"yCk ka 1\n"
"pWu qu 1\n"
"Vdy de 1\n"
"iRx in 1\n"
"Vcm ch 1\n"
"wIg ng 1\n"
"Xbh th 1\n"
"vcG ch 1\n"
"jjX ij 1\n"
"nmO an 1\n"
"dQj de 1\n"
"dfV de 1\n"
"dbK de 1\n"
"gqk qu 1\n"
"nFd an 1\n"
"oWv on 1\n"
"nHp an 1\n"
"knK an 1\n"
"bxZ be 1\n"
"wmH me 1\n"
"fgX ng 1\n"
"gzH ng 1\n"
"Zbv va 1\n"
"vgM ng 1\n"
"dmK de 1\n"
"cvB ch 1\n"
"eQs er 1\n"
"cHm ch 1\n"
"sBt th 1\n"
"bHx be 1\n"
"vqd qu 1\n"
"Npy pr 1\n"
"xzL sz 1\n"
"gMx ng 1\n"
"vwU va 1\n"
"pfX pr 1\n"
"nFg an 1\n"
"sFs st 1\n"
"Vqh th 1\n"
"Emq qu 1\n"
"tXy th 1\n"
"uVd qu 1\n"
"Yvj ij 1\n"
"qHo qu 1\n"
"pWm me 1\n"
"xcK ch 1\n"
"pUv va 1\n"
"pLn an 1\n"
"uVn an 1\n"
"Fsq qu 1\n"
"cGj ch 1\n"
"Xwy wa 1\n"
"gzT ng 1\n"
"dNq qu 1\n"
"jrU er 1\n"
"qtA th 1\n"
"gqT qu 1\n"
"pwM pr 1\n"
"lrP er 1\n"
"jmC ij 1\n"
"pmP me 1\n"
"yiY in 1\n"
"pTs st 1\n"
"Zwj ij 1\n"
"qpF qu 1\n"
"fhJ ch 1\n"
"fOv va 1\n"
"wcK ch 1\n"
"kqk qu 1\n"
"Ugz ng 1\n"
"xfF fo 1\n"
"cTv ch 1\n"
"gpX ng 1\n"
"Lfx fo 1\n"
"gwU ng 1\n"
"Dzx sz 1\n"
"kDc ch 1\n"
"Pvh th 1\n"
"kdY de 1\n"
"wWv va 1\n"
"sQq qu 1\n"
"mjY ij 1\n"
"yCb be 1\n"
"rSq qu 1\n"
"Sfv va 1\n"
"fZh th 1\n"
"dMd de 1\n"
"dNs st 1\n"
"jTv ij 1\n"
"tmW th 1\n"
"cxJ ch 1\n"
"uAo qu 1\n"
"mHx me 1\n"
"fgA ng 1\n"
"Rhx th 1\n"
"wWt th 1\n"
"pfU pr 1\n"
"oIj on 1\n"
"lhQ th 1\n"
"vDk ka 1\n"
"vJd de 1\n"
"sDp st 1\n"
"qiU qu 1\n"
"Yfs st 1\n"
"qxW qu 1\n"
"sFh th 1\n"
"vhP th 1\n"
"Vjj ij 1\n"
"tmQ th 1\n"
"wmM me 1\n"
"cVy ch 1\n"
"Kzw sz 1\n"
"tfA th 1\n"
"gjR ij 1\n"
"xyQ ny 1\n"
"mBv va 1\n"
"fQy ny 1\n"
"dZc ch 1\n"
"eVh th 1\n"
"Nvc ch 1\n"
"qFb qu 1\n"
"qhl th 1\n"
"Zcn ch 1\n"
"qwW qu 1\n"
"xZq qu 1\n"
"jhL th 1\n"
"lWf le 1\n"
"jJx ij 1\n"
"Yzt th 1\n"
"Eoq qu 1\n"
"Njm ij 1\n"
"Zgd ng 1\n"
"pGq qu 1\n"
"sgY ng 1\n"
"jyE ij 1\n"
"jzE sz 1\n"
"ujK qu 1\n"
"qbm qu 1\n"
"Wsf st 1\n"
"mQn an 1\n"
"sQs st 1\n"
"yXg ng 1\n"
"vYe er 1\n"
"ePv er 1\n"
"aCv an 1\n"
"pVm me 1\n"
"zxO sz 1\n"
"jjW ij 1\n"
"vgI ng 1\n"
"tZc th 1\n"
"Qtg th 1\n"
"vMt th 1\n"
"kTt th 1\n"
"Mxj ij 1\n"
"fbI be 1\n"
"qAu un 1\n"
"wfT wa 1\n"
"fcF ch 1\n"
"pfK pr 1\n"
"bOq qu 1\n"
"huX th 1\n"
"cJm ch 1\n"
"Xpg ng 1\n"
"tqJ th 1\n"
"Ovf va 1\n"
"Xlj le 1\n"
"Nrl er 1\n"
"fxW fo 1\n"
"Swq qu 1\n"
"qvE qu 1\n"
"qpY qu 1\n"
"oNw on 1\n"
"kYc ch 1\n"
"jXb ij 1\n"
"Qfk ka 1\n"
"eDp er 1\n"
"Vqb qu 1\n"
"sKz us 1\n"
"qjp qu 1\n"
"Uxl le 1\n"
"Lky ka 1\n"
"zFy sz 1\n"
"nMl an 1\n"
"yYi in 1\n"
"cQe ch 1\n"
"oYj on 1\n"
"tbB th 1\n"
"Ybg ng 1\n"
"nVk nd 1\n"
"bXc ch 1\n"
"Lqn an 1\n"
"mdK de 1\n"
"pdP de 1\n"
"tqS th 1\n"
"Zjf ij 1\n"
"kcC ch 1\n"
"qZq qu 1\n"
"aSd an 1\n"
"Cmh th 1\n"
"hzG th 1\n"
"wQm me 1\n"
"Gqg qu 1\n"
"yWp pr 1\n"
"Xrw er 1\n"
"yJy ny 1\n"
"sqD qu 1\n"
"dWb de 1\n"
"nbQ an 1\n"
"iwP in 1\n"
"lWs le 1\n"
"Tsg ng 1\n"
"dHz de 1\n"
"tcF th 1\n"
"Qkt th 1\n"
"Bdd de 1\n"
"Mxq qu 1\n"
"pjV ij 1\n"
"kQr er 1\n"
"dnI an 1\n"
"fyY ny 1\n"
"aFq an 1\n"
"Ylx le 1\n"
"Yym me 1\n"
"jbV ij 1\n"
"qcV ch 1\n"
"pzX sz 1\n"
"qRh th 1\n"
"djA de 1\n"
"bnI an 1\n"
"Llv le 1\n"
"tmZ th 1\n"
"hQo th 1\n"
"ztW th 1\n"
"Rxz sz 1\n"
"dxW de 1\n"
"qtW th 1\n"
"kqO qu 1\n"
"lHc ch 1\n"
"lRj le 1\n"
"hNf th 1\n"
"Giq qu 1\n"
"cYq ch 1\n"
"Ydp de 1\n"
"qWn an 1\n"
"xkB ka 1\n"
"kxC ka 1\n"
"ljA le 1\n"
"Qwp pr 1\n"
"mCp me 1\n"
"fJd de 1\n"
"vCt th 1\n"
"Vcz ch 1\n"
"vBf va 1\n"
"cYx ch 1\n"
"fHw wa 1\n"
"kvW ka 1\n"
"Jmz sz 1\n"
"hQj th 1\n"
"rbQ er 1\n"
"vxX va 1\n"
"wFh th 1\n"
"Tjz sz 1\n"
"hxR th 1\n"
"vdY de 1\n"
"pmF me 1\n"
"sDl le 1\n"
"rVh th 1\n"
"wDc ch 1\n"
"gBw ng 1\n"
"cHf ch 1\n"
"pzQ sz 1\n"
"lVp le 1\n"
"gfH ng 1\n"
"oGc ch 1\n"
"tvJ th 1\n"
"cMv ch 1\n"
"xnS an 1\n"
"vQx va 1\n"
"uoM qu 1\n"
"zkX sz 1\n"
"zHp sz 1\n"
"yuW qu 1\n"
"Qbv va 1\n"
"zwG sz 1\n"
"cpX ch 1\n"
"Rpv va 1\n"
"zKq qu 1\n"
"wUb wa 1\n"
"qnJ an 1\n"
"Rpy pr 1\n"
"bcS ch 1\n"
"qxK qu 1\n"
"qjD qu 1\n"
"lQg ng 1\n"
"krX er 1\n"
"Fcg ch 1\n"
"oVx on 1\n"
"vJf va 1\n"
"Bvk ka 1\n"
"dmX de 1\n"
"Wdj de 1\n"
"Yzp sz 1\n"
"Ycd ch 1\n"
"jKx ij 1\n"
"krH er 1\n"
"Lnm an 1\n"
"zCm sz 1\n"
"Uwj ij 1\n"
"Uvk ka 1\n"
"Mfj ij 1\n"
"yqJ qu 1\n"
"Lfq qu 1\n"
"yHz sz 1\n"
"kgJ ng 1\n"
"aGq an 1\n"
"tjH th 1\n"
"Zkc ch 1\n"
"wHv va 1\n"
"Nzp sz 1\n"
"cZx ch 1\n"
"jvK ij 1\n"
"clF ch 1\n"
"xmD me 1\n"
"Ypz sz 1\n"
"pFy pr 1\n"
"hvF th 1\n"
"mtW th 1\n"
"hqG th 1\n"
"kvN ka 1\n"
"tcZ th 1\n"
"tkR th 1\n"
"pdH de 1\n"
"qEs qu 1\n"
"Zcw ch 1\n"
"Vwu un 1\n"
"gXz ng 1\n"
"mWj ij 1\n"
"mWv va 1\n"
"Jqx qu 1\n"
"oSj on 1\n"
"lwY le 1\n"
"Tkf ka 1\n"
"pcC ch 1\n"
"ohG th 1\n"
"dzG de 1\n"
"fdN de 1\n"
"xrS er 1\n"
"hHk th 1\n"
"Fjz sz 1\n"
"vbZ va 1\n"
"Udx de 1\n"
"wzX sz 1\n"
"uNq qu 1\n"
"wfZ wa 1\n"
"swB st 1\n"
"dmQ de 1\n"
"dcA ch 1\n"
"qzP qu 1\n"
"jJj ij 1\n"
"qWq qu 1\n"
"tVk th 1\n"
"gwB ng 1\n"
"bIw wa 1\n"
"bpU pr 1\n"
"bwM wa 1\n"
"fkA ka 1\n"
"xUc ch 1\n"
"xTd de 1\n"
"fKl le 1\n"
"lxS le 1\n"
"xaS an 1\n"
"yvQ va 1\n"
"dhV th 1\n"
"mdW de 1\n"
"wfJ wa 1\n"
"Wqq qu 1\n"
"sZj st 1\n"
"Lxy ny 1\n"
"xXy ny 1\n"
"qDm qu 1\n"
"gKq qu 1\n"
"Qvj ij 1\n"
"kfH ka 1\n"
"aQp an 1\n"
"xFz sz 1\n"
"njW an 1\n"
"Rpn an 1\n"
"Mmn an 1\n"
"fhD th 1\n"
"jKk ij 1\n"
"zAq qu 1\n"
"qfL qu 1\n"
"ywN wa 1\n"
"qpz qu 1\n"
"hxP th 1\n"
"Gdq qu 1\n"
"tMx th 1\n"
"jwL ij 1\n"
"kBb ka 1\n"
"fAw wa 1\n"
"Sdx de 1\n"
"Jmv va 1\n"
"bgX ng 1\n"
"xWp pr 1\n"
"hHt th 1\n"
"Gww wa 1\n"
"Fbb be 1\n"
"zoT on 1\n"
"yjG ij 1\n"
"Rlg ng 1\n"
"vFn an 1\n"
"zcK ch 1\n"
"xdC de 1\n"
"wvO va 1\n"
"oQl le 1\n"
"nIw an 1\n"
"wzA sz 1\n"
"Rzj sz 1\n"
"Qzn an 1\n"
"Yjt th 1\n"
"xkQ ku 1\n"
"lrq qu 1\n"
"nwZ an 1\n"
"pGk ka 1\n"
"mnL an 1\n"
"Rlq qu 1\n"
"ccD ch 1\n"
"rRd er 1\n"
"Ofj ij 1\n"
"Fjh th 1\n"
"uuO qu 1\n"
"zZx sz 1\n"
"Nbj ij 1\n"
"znW an 1\n"
"jbH ij 1\n"
"rDx er 1\n"
"Qmc ch 1\n"
"dwV de 1\n"
"Oqv qu 1\n"
"Zqe qu 1\n"
"fwI wa 1\n"
"njP an 1\n"
"Oqq qu 1\n"
"pVv va 1\n"
"fqx qu 1\n"
"gfO ng 1\n"
"hqU th 1\n"
"gDj ng 1\n"
"Tmj ij 1\n"
"vcK ch 1\n"
"qmV qu 1\n"
"sVx st 1\n"
"Wfh th 1\n"
"mJk ka 1\n"
"fuK qu 1\n"
"bfN be 1\n"
"qfT qu 1\n"
"Fmj ij 1\n"
"tbN th 1\n"
"kjN ij 1\n"
"yhZ th 1\n"
"Nxk ka 1\n"
"wxU wa 1\n"
"zXb sz 1\n"
"Nzd de 1\n"
"ohL th 1\n"
"pVt th 1\n"
"Zsx st 1\n"
"Zqj qu 1\n"
"wUj ij 1\n"
"yjC ij 1\n"
"kTn an 1\n"
"vqV qu 1\n"
"Fyc ch 1\n"
"Icd ch 1\n"
"svN st 1\n"
"Jjv ij 1\n"
"bVp pr 1\n"
"fdI de 1\n"
"nbX an 1\n"
"cfU ch 1\n"
"lGm le 1\n"
"Ovg ng 1\n"
"zDc ch 1\n"
"jgq qu 1\n"
"lYr er 1\n"
"hjR th 1\n"
"qPm qu 1\n"
"iRq qu 1\n"
"Zrx er 1\n"
"wpT pr 1\n"
"xsB st 1\n"
"qxT qu 1\n"
"gFx ng 1\n"
"qoJ qu 1\n"
"smD st 1\n"
"lbM le 1\n"
"wCc ch 1\n"
"wFm me 1\n"
"Xlv le 1\n"
"zyU sz 1\n"
"vFk ka 1\n"
"tjR th 1\n"
"iYx in 1\n"
"uJk qu 1\n"
"Qeh th 1\n"
"Xrv er 1\n"
"Bqq qu 1\n"
"Vdb de 1\n"
"znR an 1\n"
"pmL me 1\n"
"tvH th 1\n"
"Tmd de 1\n"
"Dgb ng 1\n"
"ozO on 1\n"
"fQb be 1\n"
"Pqb qu 1\n"
"qYn an 1\n"
"xPm me 1\n"
"gWf ng 1\n"
"cCv ch 1\n"
"qeP qu 1\n"
"qZm qu 1\n"
"dgZ ng 1\n"
"mjO ij 1\n"
"gCw ng 1\n"
"svQ st 1\n"
"Rqq qu 1\n"
"Qbt th 1\n"
"Lkj ij 1\n"
"Fza an 1\n"
"jlB le 1\n"
"iWj in 1\n"
"Zxi in 1\n"
"Kxw wa 1\n"
"jcJ ij 1\n"
"uCf qu 1\n"
"cAx ch 1\n"
"Vjw ij 1\n"
"vUs st 1\n"
"Mnq an 1\n"
"jjM ij 1\n"
"vUx va 1\n"
"uZr qu 1\n"
"twU th 1\n"
"Ytv th 1\n"
"hRp th 1\n"
"kzV sz 1\n"
"mvY va 1\n"
"jFj ij 1\n"
"jBp ij 1\n"
"kGz sz 1\n"
"qUq qu 1\n"
"qgR qu 1\n"
"lWb le 1\n"
"wwP wa 1\n"
"wvE va 1\n"
"Fsx st 1\n"
"Izx sz 1\n"
"bwC wa 1\n"
"Fmq qu 1\n"
"cLd ch 1\n"
"bRl le 1\n"
"iXf in 1\n"
"yMq qu 1\n"
"cqP ch 1\n"
"jsL st 1\n"
"jIq qu 1\n"
"wuG qu 1\n"
"Lbv va 1\n"
"Eqf qu 1\n"
"Ogf ng 1\n"
"kGv ka 1\n"
"pjK ij 1\n"
"vcQ ch 1\n"
"Xzh th 1\n"
"jUv ij 1\n"
"wGd de 1\n"
"hmX th 1\n"
"yqm qu 1\n"
"qkE qu 1\n"
"zgX ng 1\n"
"vwO va 1\n"
"wmS me 1\n"
"vhT th 1\n"
"syX st 1\n"
"nbC an 1\n"
"zgW ng 1\n"
"vqM qu 1\n"
"dWf de 1\n"
"cwF ch 1\n"
"dnF an 1\n"
"qDi qu 1\n"
"qSw qu 1\n"
"jQf ij 1\n"
"crZ ch 1\n"
"qGl qu 1\n"
"Wxu qu 1\n"
"grW ng 1\n"
"glX ng 1\n"
"vFd de 1\n"
"pbF pr 1\n"
"bNf be 1\n"
"Qcf ch 1\n"
"fVx fo 1\n"
"pPf pr 1\n"
"pVq qu 1\n"
"xlG le 1\n"
"Dwj ij 1\n"
"xQj ij 1\n"
"lkQ le 1\n"
"sqH qu 1\n"
"Yyx ny 1\n"
"vFm va 1\n"
"tQo th 1\n"
"zlU le 1\n"
"vlW le 1\n"
"glW ng 1\n"
"qmW qu 1\n"
"aWl an 1\n"
"zmV sz 1\n"
"gLm ng 1\n"
"glB ng 1\n"
"tqA th 1\n"
"hgJ th 1\n"
"cGb ch 1\n"
"qwE qu 1\n"
"Ffy ny 1\n"
"wmL me 1\n"
"xLh th 1\n"
"sbE st 1\n"
"bQl le 1\n"
"xkR ka 1\n"
"yFd de 1\n"
"Omq qu 1\n"
"Xfj ij 1\n"
"wJj ij 1\n"
"Lws st 1\n"
"wfU wa 1\n"
"zfk sz 1\n"
"lNv le 1\n"
"ykQ ka 1\n"
"xDt th 1\n"
"jDw ij 1\n"
"zbx sz 1\n"
"vQs st 1\n"
"vvM va 1\n"
"Xqq qu 1\n"
"jLq qu 1\n"
"zkZ sz 1\n"
"qAg qu 1\n"
"Xjw ij 1\n"
"cFw ch 1\n"
"rwQ er 1\n"
"mWk ka 1\n"
"Yrx er 1\n"
"eUo er 1\n"
"uDm qu 1\n"
"Mhw th 1\n"
"fGp pr 1\n"
"Rpz sz 1\n"
"sbF st 1\n"
"nfX an 1\n"
"Wfu qu 1\n"
"Mwq qu 1\n"
"qDj qu 1\n"
"Wpw pr 1\n"
"zFv sz 1\n"
"qXc ch 1\n"
"qsT qu 1\n"
"pZh th 1\n"
"lLc ch 1\n"
"pqB qu 1\n"
"Xjo on 1\n"
"kDk ka 1\n"
"Jxf fo 1\n"
"Vqz qu 1\n"
"Hvq qu 1\n"
"Zqw qu 1\n"
"kRc ch 1\n"
"tvR th 1\n"
"dNx de 1\n"
"jWq qu 1\n"
"nRw an 1\n"
"rGb er 1\n"
"vZz sz 1\n"
"Xtz th 1\n"
"kZn an 1\n"
"Vmj ij 1\n"
"dMp de 1\n"
"cPy ch 1\n"
"uzR qu 1\n"
"yjE ij 1\n"
"gzF ng 1\n"
"tCp th 1\n"
"qfC qu 1\n"
"vcq ch 1\n"
"Zfg ng 1\n"
"kwC ka 1\n"
"fkM ko 1\n"
"vJh th 1\n"
"eCq qu 1\n"
"wPp pr 1\n"
"qJy qu 1\n"
"dmY de 1\n"
"uMj qu 1\n"
"fKh th 1\n"
"sqU qu 1\n"
"vNp va 1\n"
"Crj er 1\n"
"hsH th 1\n"
"Vwn an 1\n"
"Sdy de 1\n"
"Fpw pr 1\n"
"Wcq ch 1\n"
"pjW ij 1\n"
"dwW de 1\n"
"gjX ng 1\n"
"yZk ka 1\n"
"cKg ch 1\n"
"xdR de 1\n"
"wqW qu 1\n"
"khD th 1\n"
"vgG ng 1\n"
"vMl le 1\n"
"qnQ an 1\n"
"hJt th 1\n"
"fvC va 1\n"
"cpR ch 1\n"
"Wtt th 1\n"
"uyX qu 1\n"
"cXf ch 1\n"
"uKv qu 1\n"
"gVv ng 1\n"
"xzg ng 1\n"
"cPq ch 1\n"
"fTn an 1\n"
"sFj st 1\n"
"mzX sz 1\n"
"gMq qu 1\n"
"rxI er 1\n"
"eYf er 1\n"
"kwB ka 1\n"
"eQk er 1\n"
"jBq qu 1\n"
"lbH le 1\n"
"qCt th 1\n"
"Wnv an 1\n"
"gYd ng 1\n"
"Zxe er 1\n"
"fZj ij 1\n"
"Hgj ng 1\n"
"bRj ij 1\n"
"fpR pr 1\n"
"cbR ch 1\n"
"lqT qu 1\n"
"cMt th 1\n"
"tQy to 1\n"
"vxG va 1\n"
"gpB ng 1\n"
"Gkw ka 1\n"
"zqX qu 1\n"
"tPw th 1\n"
"fnN an 1\n"
"Gkp ka 1\n"
"mvQ va 1\n"
"hHf th 1\n"
"wfS wa 1\n"
"qCx qu 1\n"
"mqH qu 1\n"
"hgR th 1\n"
"Mwg ng 1\n"
"bqQ qu 1\n"
"Fkz sz 1\n"
"oFv on 1\n"
"Ddq qu 1\n"
"uIo qu 1\n"
"Yfh th 1\n"
"ygQ ng 1\n"
"fxh th 1\n"
"Zqd qu 1\n"
"Htn th 1\n"
"Gvz sz 1\n"
"zRw sz 1\n"
"vCb va 1\n"
"rjT ro 1\n"
"rjD er 1\n"
"Qpm me 1\n"
"Xdb de 1\n"
"Lkf ka 1\n"
"Ajx ij 1\n"
"Ylz le 1\n"
"Qtb th 1\n"
"bHz sz 1\n"
"bDg ng 1\n"
"Lqx qu 1\n"
"yhW th 1\n"
"zLv sz 1\n"
"xgK ng 1\n"
"eWq qu 1\n"
"sjS st 1\n"
"qVe qu 1\n"
"Okq qu 1\n"
"Ewj ij 1\n"
"Dsv st 1\n"
"jhI th 1\n"
"xGf fo 1\n"
"Okx ka 1\n"
"Fqx qu 1\n"
"dPv de 1\n"
"zsK st 1\n"
"qLn an 1\n"
"fkB ka 1\n"
"cCb ch 1\n"
"gNp ng 1\n"
"Qwd de 1\n"
"zTf sz 1\n"
"Pqq qu 1\n"
"rFv ro 1\n"
"Rwt th 1\n"
"uKc ch 1\n"
"hqN th 1\n"
"kmK ka 1\n"
"wuC qu 1\n"
"pnZ an 1\n"
"tgM th 1\n"
"Qds st 1\n"
"Axq qu 1\n"
"xwO wa 1\n"
"eQg ng 1\n"
"mFj ij 1\n"
"Dpm me 1\n"
"pQm me 1\n"
"aFp an 1\n"
"mfB me 1\n"
"fpA pr 1\n"
"jgZ ng 1\n"
"lGk le 1\n"
"xcA ch 1\n"
"gWw ng 1\n"
"lzF le 1\n"
"xsQ st 1\n"
"bQx be 1\n"
"wjc ch 1\n"
"bDc ch 1\n"
"Wpz sz 1\n"
"rfV er 1\n"
"Zbs st 1\n"
"hKq th 1\n"
"qXa ar 1\n"
"wjA ij 1\n"
"vzS sz 1\n"
"cWy ch 1\n"
"gjK ng 1\n"
"yRb be 1\n"
"qgU qu 1\n"
"pqF qu 1\n"
"qnU an 1\n"
"Zqc ch 1\n"
"Xqg qu 1\n"
"zLq qu 1\n"
"gzV ng 1\n"
"Kqs qu 1\n"
"zgZ ng 1\n"
"jqG qu 1\n"
"pqJ qu 1\n"
"Ieq qu 1\n"
"hjH th 1\n"
"vmN va 1\n"
"iuF qu 1\n"
"wGy wa 1\n"
"Kdh th 1\n"
"hQb th 1\n"
"jWr er 1\n"
"Cxy ny 1\n"
"Kqz qu 1\n"
"wXr er 1\n"
"xoQ on 1\n"
"wBh th 1\n"
"qyI qu 1\n"
"qhC th 1\n"
"Vpy pr 1\n"
"nJb an 1\n"
"uGw qu 1\n"
"hhX th 1\n"
"mjS ij 1\n"
"Scv ch 1\n"
"hFw th 1\n"
"bKg ng 1\n"
"Xmn an 1\n"
"bdT de 1\n"
"sJq qu 1\n"
"xTm me 1\n"
"qjz qu 1\n"
"Mqp qu 1\n"
"dHp de 1\n"
"rRn ar 1\n"
"Xlf le 1\n"
"cNs ch 1\n"
"Xql qu 1\n"
"iFz in 1\n"
"Nlk le 1\n"
"sPw st 1\n"
"vWq qu 1\n"
"wXt th 1\n"
"Fnq an 1\n"
"ozJ on 1\n"
"zIg ng 1\n"
"lSf le 1\n"
"wRc ch 1\n"
"Bvp va 1\n"
"Wwr er 1\n"
"pWg pr 1\n"
"pLk ka 1\n"
"krJ er 1\n"
"Zfv va 1\n"
"yIx ny 1\n"
"oKx on 1\n"
"qLb qu 1\n"
"dHj de 1\n"
"oqK qu 1\n"
"cxC ch 1\n"
"wJh th 1\n"
"wZd de 1\n"
"cWz ch 1\n"
"yqS qu 1\n"
"kXq qu 1\n"
"fYd de 1\n"
"dGy de 1\n"
"dDt th 1\n"
"pKg ng 1\n"
"Xjd de 1\n"
"sjM st 1\n"
"sfC st 1\n"
"dMh th 1\n"
"dZp de 1\n"
"wcD ch 1\n"
"Qoj on 1\n"
"gxC ng 1\n"
"Zfn an 1\n"
"hYv th 1\n"
"xWq qu 1\n"
"gZw ng 1\n"
"pQi in 1\n"
"Xlb le 1\n"
"gQz ng 1\n"
"nbZ an 1\n"
"Ezx sz 1\n"
"wNg ng 1\n"
"Xrj er 1\n"
"cxX ch 1\n"
"dQp de 1\n"
"Ypn an 1\n"
"pNp pr 1\n"
"pbQ pr 1\n"
"gMv ng 1\n"
"qeF qu 1\n"
"uVv qu 1\n"
"dVk de 1\n"
"uMv qu 1\n"
"jQn an 1\n"
"mhP th 1\n"
"iTb in 1\n"
"Pvw va 1\n"
"zCw sz 1\n"
"wcR ch 1\n"
"svU st 1\n"
"nMz an 1\n"
"cjE ch 1\n"
"jmH ij 1\n"
"Qzc ch 1\n"
"mqc ch 1\n"
"qlU qu 1\n"
"Zvp va 1\n"
"xHl le 1\n"
"gqB qu 1\n"
"xsN st 1\n"
"kCj ij 1\n"
"Olx le 1\n"
"Gxw wa 1\n"
"xwV wa 1\n"
"fPb be 1\n"
"Rhv th 1\n"
"pgV ng 1\n"
"Qdp de 1\n"
"zFs st 1\n"
"klQ le 1\n"
"yJd de 1\n"
"rxE er 1\n"
"uHv qu 1\n"
"wKl le 1\n"
"wpJ pr 1\n"
"Cjr er 1\n"
"tYg th 1\n"
"Vpz sz 1\n"
"Zxh th 1\n"
"pQl le 1\n"
"Fxe er 1\n"
"Qok on 1\n"
"plK le 1\n"
"lpX le 1\n"
"jdP de 1\n"
"Zqy qu 1\n"
"yRz sz 1\n"
"nDg an 1\n"
"kqL qu 1\n"
"ugW qu 1\n"
"Mbf be 1\n"
"Kql qu 1\n"
"Nqw qu 1\n"
"Jzw sz 1\n"
"sGn an 1\n"
"wDv va 1\n"
"Jjk ij 1\n"
"ztQ th 1\n"
"hwP th 1\n"
"wDp pr 1\n"
"gfG ng 1\n"
"qhL th 1\n"
"cUv ch 1\n"
"Wbk ka 1\n"
"fkF ko 1\n"
"Pqv qu 1\n"
"nbK an 1\n"
"qSz qu 1\n"
"vwI va 1\n"
"cFc ch 1\n"
"qfG qu 1\n"
"rhF th 1\n"
"xzl le 1\n"
"dNc ch 1\n"
"zwR sz 1\n"
"wzK sz 1\n"
"bQa an 1\n"
"hLq th 1\n"
"fUv va 1\n"
"rHg ng 1\n"
"uJj qu 1\n"
"Fhz th 1\n"
"Nzm sz 1\n"
"gRz ng 1\n"
"qXf qu 1\n"
"Tzm sz 1\n"
"Zkx ka 1\n"
"hLx th 1\n"
"Ukd de 1\n"
"fMf fo 1\n"
"vGp va 1\n"
"jtI th 1\n"
"hxE th 1\n"
"jrH er 1\n"
"Fgh th 1\n"
"dlF le 1\n"
"jcO ja 1\n"
"sCw st 1\n"
"Bqh th 1\n"
"kZy ka 1\n"
"fOh th 1\n"
"rJb er 1\n"
"rjV er 1\n"
"Kwq qu 1\n"
"Hcw ch 1\n"
"mCw ma 1\n"
"hxM th 1\n"
"jTb ij 1\n"
"mmQ me 1\n"
"pjR ij 1\n"
"cdP ch 1\n"
"Zjs st 1\n"
"jqF qu 1\n"
"vMn an 1\n"
"Mqs qu 1\n"
"svX st 1\n"
"iXn an 1\n"
"nwR an 1\n"
"ytR th 1\n"
"Vjb ij 1\n"
"Cjl le 1\n"
"pXd de 1\n"
"Gwu qu 1\n"
"qIj qu 1\n"
"kQn an 1\n"
"fYm me 1\n"
"vtZ th 1\n"
"Usx st 1\n"
"nfP an 1\n"
"dQx de 1\n"
"oXf on 1\n"
"fEw wa 1\n"
"sgX ng 1\n"
"cPp ch 1\n"
"ybW be 1\n"
"kcW ch 1\n"
"kHf ka 1\n"
"vcU ch 1\n"
"tXo th 1\n"
"Kzh th 1\n"
"Cfq qu 1\n"
"Ujy ij 1\n"
"Fxa an 1\n"
"hxS th 1\n"
"tWx th 1\n"
"mlK le 1\n"
"nZj an 1\n"
"qOv qu 1\n"
"Xkt th 1\n"
"Fzf sz 1\n"
"uTd qu 1\n"
"qrS qu 1\n"
"Ptw th 1\n"
"dDs st 1\n"
"rNm er 1\n"
"Ewf wa 1\n"
"hJk th 1\n"
"Hdq qu 1\n"
"Jtw th 1\n"
"kqc ch 1\n"
"nHq an 1\n"
"rhH th 1\n"
"oqH qu 1\n"
"vpZ va 1\n"
"Dgd ng 1\n"
"qxV qu 1\n"
"Cxv va 1\n"
"plV pr 1\n"
"kIi in 1\n"
"Khc th 1\n"
"jsY st 1\n"
"fLh th 1\n"
"Ykq qu 1\n"
"Qmx me 1\n"
"zvI sz 1\n"
"yhS th 1\n"
"qfg qu 1\n"
"wxZ wa 1\n"
"jVy ij 1\n"
"kQw ka 1\n"
"zXv sz 1\n"
"Lhs th 1\n"
"Mkq qu 1\n"
"jkU ij 1\n"
"Yhq th 1\n"
"zrH er 1\n"
"vhG va 1\n"
"drD er 1\n"
"Psj st 1\n"
"gDf ng 1\n"
"Xjj ij 1\n"
"pLm me 1\n"
"klC le 1\n"
"hTx th 1\n"
"zrJ er 1\n"
"Xgk ng 1\n"
"Wxf fo 1\n"
"fdD de 1\n"
"jHp ij 1\n"
"yDw wa 1\n"
"kPv ka 1\n"
"Rkm ka 1\n"
"mzg ng 1\n"
"lHz le 1\n"
"vpR va 1\n"
"wZt th 1\n"
"pBd de 1\n"
"qPf qu 1\n"
"hNw th 1\n"
"Nvj ij 1\n"
"pyU pr 1\n"
"Sjh th 1\n"
"Kzx sz 1\n"
"oQp on 1\n"
"xdL de 1\n"
"dnZ an 1\n"
"qfB qu 1\n"
"kJc ch 1\n"
"fWn an 1\n"
"Xmc ch 1\n"
"rGx er 1\n"
"sFf st 1\n"
"Vwv va 1\n"
"tKd th 1\n"
"sQx st 1\n"
"oNm on 1\n"
"uXj qu 1\n"
"Xsq qu 1\n"
"yWc ch 1\n"
"hfC th 1\n"
"Ijd de 1\n"
"dkW de 1\n"
"Nxn an 1\n"
"juC qu 1\n"
"bPy be 1\n"
"lKs le 1\n"
"aLq an 1\n"
"jPp ij 1\n"
"wpZ pr 1\n"
"fjE ij 1\n"
"zNt th 1\n"
"mhN th 1\n"
"bQn an 1\n"
"bxB be 1\n"
"fdX de 1\n"
"Jcv va 1\n"
"Fdp de 1\n"
"wVx wa 1\n"
"tmU th 1\n"
"njJ an 1\n"
"qzK qu 1\n"
"jtD th 1\n"
"bcX ch 1\n"
"Ghx th 1\n"
"xZj ij 1\n"
"vKw va 1\n"
"pvO va 1\n"
"gXs ng 1\n"
"wRv va 1\n"
"hgN th 1\n"
"gpO ng 1\n"
"hWc th 1\n"
"Upq qu 1\n"
"vwD va 1\n"
"mxE me 1\n"
"Zvm va 1\n"
"ozM on 1\n"
"fbJ be 1\n"
"tpQ th 1\n"
"yeV er 1\n"
"Znb an 1\n"
"wXv va 1\n"
"bcY ch 1\n"
"sgZ ng 1\n"
"qfM qu 1\n"
"fcL ch 1\n"
"mXl le 1\n"
"uBq qu 1\n"
"jxW ij 1\n"
"mtU th 1\n"
"qgJ qu 1\n"
"dAq qu 1\n"
"jBv ij 1\n"
"Gty th 1\n"
"Jfm me 1\n"
"xqQ qu 1\n"
"cBp ch 1\n"
"Xqd qu 1\n"
"fvM va 1\n"
"uWm qu 1\n"
"rSb er 1\n"
"Xqj qu 1\n"
"qTd qu 1\n"
"lLg ng 1\n"
"Jrp er 1\n"
"oJb on 1\n"
"pXy pr 1\n"
"zrQ er 1\n"
"cnT ch 1\n"
"qsE qu 1\n"
"pZc ch 1\n"
"bVy be 1\n"
"qIz qu 1\n"
"dgR ng 1\n"
"mLv va 1\n"
"hVl th 1\n"
"qRj qu 1\n"
"fhA th 1\n"
"zLc ch 1\n"
"Sgq qu 1\n"
"pLc ch 1\n"
"Txq qu 1\n"
"ypY pr 1\n"
"tXz th 1\n"
"dcC ch 1\n"
"iYf in 1\n"
"Wwm me 1\n"
"kZk ka 1\n"
"Ywr er 1\n"
"gFv ng 1\n"
"Fmz sz 1\n"
"uQq qu 1\n"
"xwR wa 1\n"
"Yfc ch 1\n"
"aIo an 1\n"
"sBq qu 1\n"
"Gzb sz 1\n"
"jwI ij 1\n"
"cFf ch 1\n"
"aWv an 1\n"
"Eaw an 1\n"
"vkW ka 1\n"
"Nfh th 1\n"
"flN le 1\n"
"Lpm me 1\n"
"ylK le 1\n"
"Znr an 1\n"
"mcQ ch 1\n"
"kfE ka 1\n"
"Iyf ny 1\n"
"qrV qu 1\n"
"fPx fo 1\n"
"fgJ ng 1\n"
"jIi in 1\n"
"bPw wa 1\n"
"Qyx ny 1\n"
"Qnb an 1\n"
"Wdm de 1\n"
"nJt th 1\n"
"qCd qu 1\n"
"gZl ng 1\n"
"Nlz le 1\n"
"Zwh th 1\n"
"iWl in 1\n"
"bUu qu 1\n"
"lbJ le 1\n"
"sNq qu 1\n"
"qjU qu 1\n"
"wbT wa 1\n"
"yNc ch 1\n"
"mxM me 1\n"
"pHk ka 1\n"
"Rdq qu 1\n"
"gkE ng 1\n"
"hbN th 1\n"
"Tgq qu 1\n"
"gjV ng 1\n"
"Gjw ij 1\n"
"gqX qu 1\n"
"qXx qu 1\n"
"vQq qu 1\n"
"pNb pr 1\n"
"fJy ny 1\n"
"yvZ va 1\n"
"zNl le 1\n"
"zDb sz 1\n"
"lUz le 1\n"
"Dxy ny 1\n"
"Wwn an 1\n"
"hPn th 1\n"
"kNb ko 1\n"
"Wdb de 1\n"
"zXt th 1\n"
"pjL ij 1\n"
"tJg th 1\n"
"jmM ij 1\n"
"bXg ng 1\n"
"hTv th 1\n"
"Ysf st 1\n"
"hmQ th 1\n"
"Vyq qu 1\n"
"Fpd de 1\n"
"yQw wa 1\n"
"Pbn an 1\n"
"xVj ij 1\n"
"whP th 1\n"
"fSg ng 1\n"
"Gxz ze 1\n"
"Dfw wa 1\n"
"rMx er 1\n"
"zMf sz 1\n"
"vJw va 1\n"
"xJl le 1\n"
"xfN fo 1\n"
"dQw de 1\n"
"fuD qu 1\n"
"xjB ij 1\n"
"lPj le 1\n"
"mqA qu 1\n"
"mfM me 1\n"
"kwG ka 1\n"
"eaY an 1\n"
"Vmm me 1\n"
"zfS sz 1\n"
"Fmy me 1\n"
"sqP qu 1\n"
"fKk ka 1\n"
"Qdv de 1\n"
"djZ de 1\n"
"qrR qu 1\n"
"txK th 1\n"
"bxH be 1\n"
"jRb ij 1\n"
"cjD ch 1\n"
"Sxw wa 1\n"
"Sxh th 1\n"
"vrZ er 1\n"
"xmH me 1\n"
"dfH de 1\n"
"fJw wa 1\n"
"mwZ me 1\n"
"vRm va 1\n"
"xwj ij 1\n"
"Xqr er 1\n"
"Gvj ij 1\n"
"hzF th 1\n"
"xnK an 1\n"
"xhU th 1\n"
"Nls le 1\n"
"zbV sz 1\n"
"fTq qu 1\n"
"Wxv va 1\n"
"upG qu 1\n"
"qAo qu 1\n"
"kKx ka 1\n"
"zlD le 1\n"
"hTl th 1\n"
"Gqr qu 1\n"
"Gxm me 1\n"
"zPj sz 1\n"
"bvZ va 1\n"
"jHc ch 1\n"
"iXg ng 1\n"
"Kgz ng 1\n"
"Jyi in 1\n"
"vFh th 1\n"
"ytW th 1\n"
"qBd qu 1\n"
"Xjq qu 1\n"
"dgO ng 1\n"
"mjN ij 1\n"
"Djg ng 1\n"
"zIj sz 1\n"
"uDx qu 1\n"
"qJf qu 1\n"
"fAx fo 1\n"
"Fsj st 1\n"
"yDf ny 1\n"
"xjV ij 1\n"
"hdB th 1\n"
"dwG de 1\n"
"slW le 1\n"
"zYb sz 1\n"
"vzO sz 1\n"
"vqO qu 1\n"
"Jzv sz 1\n"
"xmG me 1\n"
"Kdw de 1\n"
"xVq qu 1\n"
"jtE th 1\n"
"kJy ka 1\n"
"xjW ij 1\n"
"mwR me 1\n"
"zVx sz 1\n"
"tMj th 1\n"
"qqb qu 1\n"
"nlQ le 1\n"
"bxQ be 1\n"
"hJv th 1\n"
"jnY an 1\n"
"yfS ny 1\n"
"Mdw de 1\n"
"zZc ch 1\n"
"ysJ st 1\n"
"Qqv qu 1\n"
"zxl le 1\n"
"jAq qu 1\n"
"lJw le 1\n"
"kwJ ka 1\n"
"sxC st 1\n"
"hJr th 1\n"
"xGp pr 1\n"
"ccF ch 1\n"
"vGq qu 1\n"
"qSc ch 1\n"
"fqq qu 1\n"
"kkV ka 1\n"
"gVq qu 1\n"
"Wqg qu 1\n"
"kJp ka 1\n"
"Wlr er 1\n"
"Jwz sz 1\n"
"qEa an 1\n"
"krL er 1\n"
"tqE th 1\n"
"eJz er 1\n"
"Whx th 1\n"
"vWw va 1\n"
"Qzh th 1\n"
"pcF ch 1\n"
"Vmx me 1\n"
"dvC de 1\n"
"qjZ qu 1\n"
"pkF ka 1\n"
"cvO ch 1\n"
"Qyv va 1\n"
"hNs th 1\n"
"snJ an 1\n"
"yjU ij 1\n"
"Yfq qu 1\n"
"xLw wa 1\n"
"rVz er 1\n"
"gOw ng 1\n"
"fxL fo 1\n"
"snW an 1\n"
"yWk ka 1\n"
"wgK ng 1\n"
"aTf an 1\n"
"eVf er 1\n"
"vZp va 1\n"
"uVp qu 1\n"
"Vjh th 1\n"
"zwT sz 1\n"
"wSn an 1\n"
"nNp an 1\n"
"gfF ng 1\n"
"hcW th 1\n"
"gTf ng 1\n"
"qaJ an 1\n"
"kzY sz 1\n"
"ljX le 1\n"
"wMm me 1\n"
"btB st 1\n"
"zfE sz 1\n"
"bxO be 1\n"
"wPc ch 1\n"
"fgK ng 1\n"
"fzW sz 1\n"
"dcX ch 1\n"
"qqR qu 1\n"
"kjq qu 1\n"
"vMh th 1\n"
"gZj ng 1\n"
"qtw th 1\n"
"vkY ka 1\n"
"lCb le 1\n"
"dpO de 1\n"
"mXm me 1\n"
"vWc ch 1\n"
"fOq qu 1\n"
"Vgy ng 1\n"
"dkD de 1\n"
"fQh th 1\n"
"vIq qu 1\n"
"lZr er 1\n"
"zKn an 1\n"
"Vpt th 1\n"
"Dmw me 1\n"
"Nwf wa 1\n"
"kYl le 1\n"
"jpJ ij 1\n"
"qXi qu 1\n"
"Bnj an 1\n"
"xfK fo 1\n"
"fCc ch 1\n"
"vPd de 1\n"
"Qnp an 1\n"
"ypW pr 1\n"
"uwJ qu 1\n"
"Pvb va 1\n"
"cnC ch 1\n"
"hvA th 1\n"
"hGz th 1\n"
"nZx an 1\n"
"kbS ka 1\n"
"Swx wa 1\n"
"hvP th 1\n"
"kqG qu 1\n"
"bLq qu 1\n"
"qjP qu 1\n"
"sUo on 1\n"
"lDq qu 1\n"
"Zlp le 1\n"
"dwQ de 1\n"
"dlN le 1\n"
"fTl le 1\n"
"Npv va 1\n"
"bMn an 1\n"
"dNz sz 1\n"
"efV er 1\n"
"aCw an 1\n"
"aWf an 1\n"
"Lqo qu 1\n"
"fzT sz 1\n"
"Jjr er 1\n"
"zvK sz 1\n"
"nwT an 1\n"
"fXr er 1\n"
"cGm ch 1\n"
"lvS le 1\n"
"qDq qu 1\n"
"qRm qu 1\n"
"vYt th 1\n"
"iQv in 1\n"
"fkH ka 1\n"
"fcO ch 1\n"
"rNn an 1\n"
"qmS qu 1\n"
"kzR sz 1\n"
"Dfc ch 1\n"
"qUs qu 1\n"
"xqP qu 1\n"
"sXk st 1\n"
"Xyt th 1\n"
"pWt th 1\n"
"jbL ij 1\n"
"jYd ij 1\n"
"kqV qu 1\n"
"Fqm qu 1\n"
"xoX on 1\n"
"zuX qu 1\n"
"xUq qu 1\n"
"cgC ch 1\n"
"wBq qu 1\n"
"gQp ng 1\n"
"jnE an 1\n"
"yZs st 1\n"
"fkD ka 1\n"
"sVk st 1\n"
"qyX qu 1\n"
"cBf ch 1\n"
"Cjy ij 1\n"
"dPq qu 1\n"
"wDg ng 1\n"
"dxB de 1\n"
"Dkm ka 1\n"
"kPp ka 1\n"
"hWz th 1\n"
"Bjv ij 1\n"
"Izf sz 1\n"
"Hnk an 1\n"
"rQc ch 1\n"
"Jwu qu 1\n"
"fbP be 1\n"
"frQ er 1\n"
"Aov on 1\n"
"yqQ qu 1\n"
"jfY ij 1\n"
"xsH st 1\n"
"zxh th 1\n"
"Jbj ij 1\n"
"Mjz sz 1\n"
"gRp ng 1\n"
"Gvw va 1\n"
"mzF sz 1\n"
"oqF qu 1\n"
"ejU er 1\n"
"xmQ me 1\n"
"hOq th 1\n"
"pwX pr 1\n"
"zgK ng 1\n"
"wLk ka 1\n"
"fqc ch 1\n"
"dPm de 1\n"
"tCg th 1\n"
"qrF qu 1\n"
"pWl le 1\n"
"rDf er 1\n"
"Ynw an 1\n"
"jnQ an 1\n"
"tFb th 1\n"
"rpU er 1\n"
"pPj ij 1\n"
"yjM ij 1\n"
"jmY ij 1\n"
"Cpz sz 1\n"
"uDn an 1\n"
"uqY qu 1\n"
"Pjx ij 1\n"
"qFv qu 1\n"
"Ktf th 1\n"
"Jcj ch 1\n"
"kpO pr 1\n"
"pgZ ng 1\n"
"kfO ka 1\n"
"tZv th 1\n"
"jHq qu 1\n"
"cRq ch 1\n"
"zDm sz 1\n"
"lPm le 1\n"
"svP st 1\n"
"qkx qu 1\n"
"bNp pr 1\n"
"Kjq qu 1\n"
"vqS qu 1\n"
"fQp pr 1\n"
"txR th 1\n"
"Hpf pr 1\n"
"iQg ng 1\n"
"vvP va 1\n"
"iGf in 1\n"
"tjI th 1\n"
"pWn an 1\n"
"Qqg qu 1\n"
"qiF ti 1\n"
"Zzr er 1\n"
"aYf an 1\n"
"zjA sz 1\n"
"kwR ka 1\n"
"gkM ng 1\n"
"Cjf ij 1\n"
"zgM ng 1\n"
"Rxk ka 1\n"
"bCd de 1\n"
"Ypv va 1\n"
"wyE wa 1\n"
"iyB in 1\n"
"hQp th 1\n"
"ipQ in 1\n"
"Ucj ch 1\n"
"qkW qu 1\n"
"krK er 1\n"
"Hpp pr 1\n"
"xnN an 1\n"
"jwB ij 1\n"
"Zdm de 1\n"
"mYj ij 1\n"
"tQx th 1\n"
"qwS qu 1\n"
"Hxo on 1\n"
"qDx qu 1\n"
"cXd ch 1\n"
"gdO ng 1\n"
"aEo an 1\n"
"Twd de 1\n"
"avQ an 1\n"
"lhZ th 1\n"
"lzV le 1\n"
"bHf be 1\n"
"bJn an 1\n"
"Uqz qu 1\n"
"uFy qu 1\n"
"jNl le 1\n"
"xBp pr 1\n"
"dRb de 1\n"
"nlT an 1\n"
"wrO er 1\n"
"lzW le 1\n"
"fYf fo 1\n"
"mRw me 1\n"
"rXy er 1\n"
"qyR qu 1\n"
"fGv va 1\n"
"Uwk ka 1\n"
"kXm ka 1\n"
"hJy th 1\n"
"Xgv ng 1\n"
"xYv va 1\n"
"yYd de 1\n"
"xzC sz 1\n"
"gjB ng 1\n"
"jzI sz 1\n"
"zrO er 1\n"
"tqF th 1\n"
"vwM va 1\n"
"zCq qu 1\n"
"ljL le 1\n"
"vnZ an 1\n"
"eDq qu 1\n"
"Qvq qu 1\n"
"pfL pr 1\n"
"iRb in 1\n"
"gdR ng 1\n"
"qAv qu 1\n"
"vnL an 1\n"
"mkT ka 1\n"
"pVk ka 1\n"
"xKh th 1\n"
"jNk ij 1\n"
"jLt th 1\n"
"cNp ch 1\n"
"tmP th 1\n"
"vVt th 1\n"
"qfP qu 1\n"
"Uqo qu 1\n"
"Dnp an 1\n"
"yGb be 1\n"
"sHd st 1\n"
"pwF pr 1\n"
"fPy ny 1\n"
"Drq qu 1\n"
"bJh th 1\n"
"sQp st 1\n"
"Iws st 1\n"
"uCw qu 1\n"
"Lwj ij 1\n"
"rFw er 1\n"
"sJp st 1\n"
"xiI in 1\n"
"Rqv qu 1\n"
"bkQ ka 1\n"
"qNp qu 1\n"
"dYl le 1\n"
"Vmf me 1\n"
"lYc ch 1\n"
"oPw on 1\n"
"kjO ij 1\n"
"mKb me 1\n"
"fDf fo 1\n"
"fFb be 1\n"
"Vhv th 1\n"
"Hjq qu 1\n"
"qfK qu 1\n"
"Kjp ij 1\n"
"vTg ng 1\n"
"pBq qu 1\n"
"Htd th 1\n"
"pNd de 1\n"
"bQv va 1\n"
"aSx an 1\n"
"jwx ij 1\n"
"Uyx ny 1\n"
"wVj ij 1\n"
"Ioq qu 1\n"
"Nhm th 1\n"
"Hqh th 1\n"
"rUq qu 1\n"
"bBx be 1\n"
"Gqb qu 1\n"
"Ccw ch 1\n"
"hZw th 1\n"
"Qbl le 1\n"
"xFv va 1\n"
"sZv st 1\n"
"qzY qu 1\n"
"pDb pr 1\n"
"cfR ch 1\n"
"rqk qu 1\n"
"fzP sz 1\n"
"hqO th 1\n"
"pzH sz 1\n"
"qSj qu 1\n"
"pxJ pr 1\n"
"xbq qu 1\n"
"sXf st 1\n"
"ybT be 1\n"
"sHn an 1\n"
"vTz sz 1\n"
"Pgf ng 1\n"
"hKw th 1\n"
"jPj ij 1\n"
"wTx wa 1\n"
"jSj ij 1\n"
"Fgz ng 1\n"
"bKk ka 1\n"
"eUj er 1\n"
"cDf ch 1\n"
"xFg ng 1\n"
"cnW an 1\n"
"tUy th 1\n"
"Jgx ng 1\n"
"yuF qu 1\n"
"vyQ va 1\n"
"xCz sz 1\n"
"jRh th 1\n"
"cXx ch 1\n"
"kGk ka 1\n"
"Xnh th 1\n"
"qPh th 1\n"
"lfZ le 1\n"
"qVa an 1\n"
"xws st 1\n"
"Dzt th 1\n"
"xfG fo 1\n"
"fXh th 1\n"
"jgV ng 1\n"
"vJj ij 1\n"
"bXj ij 1\n"
"cgG ch 1\n"
"vuW qu 1\n"
"txG th 1\n"
"Zxz sz 1\n"
"fNc ch 1\n"
"oBq qu 1\n"
"Wgv ng 1\n"
"Hwz sz 1\n"
"oaW an 1\n"
"vRg ng 1\n"
"uXz qu 1\n"
"fzQ sz 1\n"
"bcB ch 1\n"
"Bnw an 1\n"
"gvB ng 1\n"
"rQm er 1\n"
"cvU ch 1\n"
"xhR th 1\n"
"zxR sz 1\n"
"btZ th 1\n"
"Kkf ka 1\n"
"zJw sz 1\n"
"uwq qu 1\n"
"pSx pr 1\n"
"yRv va 1\n"
"nCq an 1\n"
"tGv th 1\n"
"wgT ng 1\n"
"kNz sz 1\n"
"oHk on 1\n"
"Wzw sz 1\n"
"hvU th 1\n"
"skX st 1\n"
"vYz sz 1\n"
"joZ on 1\n"
"nGq an 1\n"
"qmM qu 1\n"
"Bmr er 1\n"
"sVg ng 1\n"
"uCv qu 1\n"
"iXz in 1\n"
"vKp va 1\n"
"lEw le 1\n"
"hhF th 1\n"
"iwS in 1\n"
"qyU qu 1\n"
"jjY ij 1\n"
"Ygm ng 1\n"
"wJd de 1\n"
"eQp er 1\n"
"Yfb be 1\n"
"Wpg ng 1\n"
"jdS de 1\n"
"vmG va 1\n"
"mdT de 1\n"
"grZ ng 1\n"
"yqN qu 1\n"
"pBp po 1\n"
"fkZ ka 1\n"
"qeB qu 1\n"
"cGs ch 1\n"
"Eqg qu 1\n"
"cfO ch 1\n"
"uSx qu 1\n"
"Dhf th 1\n"
"Qjr er 1\n"
"xqZ qu 1\n"
"yQf ny 1\n"
"npY an 1\n"
"xDc ch 1\n"
"bmQ me 1\n"
"kMb ka 1\n"
"aqC an 1\n"
"jYl le 1\n"
"wkD ka 1\n"
"cWs ch 1\n"
"yyJ ny 1\n"
"wvV va 1\n"
"lYb le 1\n"
"qrW qu 1\n"
"bqz qu 1\n"
"wjC ij 1\n"
"vKy va 1\n"
"vjD ij 1\n"
"sDs st 1\n"
"fKf fo 1\n"
"zsT st 1\n"
"jYc ch 1\n"
"Ywt th 1\n"
"Hjw ij 1\n"
"wIy wa 1\n"
"ffU fo 1\n"
"Wnx an 1\n"
"eHq qu 1\n"
"fWy ny 1\n"
"Nwv va 1\n"
"ySj ij 1\n"
"jfC ij 1\n"
"xXq qu 1\n"
"grI ng 1\n"
"oVf on 1\n"
"Vfy ny 1\n"
"jgY ng 1\n"
"Hjp ij 1\n"
"zqC qu 1\n"
"qyH qu 1\n"
"kcQ ch 1\n"
"zsE st 1\n"
"pCx pr 1\n"
"kwP ka 1\n"
"jfQ ij 1\n"
"wZg ng 1\n"
"Vxm me 1\n"
"Jvb va 1\n"
"sEw sz 1\n"
"jLl le 1\n"
"dOx de 1\n"
"wpS pr 1\n"
"yIo on 1\n"
"tGt th 1\n"
"vHz sz 1\n"
"xGj ij 1\n"
"gvQ ng 1\n"
"pNr er 1\n"
"gqY qu 1\n"
"sfK st 1\n"
"dYd de 1\n"
"sMm st 1\n"
"oBx on 1\n"
"qsF qu 1\n"
"bmI me 1\n"
"tmC th 1\n"
"wlW le 1\n"
"Twg ng 1\n"
"srV er 1\n"
"rNz er 1\n"
"Uuc ch 1\n"
"Gjg ng 1\n"
"njY an 1\n"
"vOh th 1\n"
"Qmh th 1\n"
"Fnf an 1\n"
"yvY va 1\n"
"pGf pr 1\n"
"lHp al 1\n"
"qgZ qu 1\n"
"jbS ij 1\n"
"xQi in 1\n"
"tqG th 1\n"
"nwI an 1\n"
"qkY qu 1\n"
"Wxy ny 1\n"
"hDm th 1\n"
"qQe qu 1\n"
"iJp in 1\n"
"xrN er 1\n"
"dGg ng 1\n"
"kQx ka 1\n"
"Jqg qu 1\n"
"hMk th 1\n"
"ljT le 1\n"
"Xkn an 1\n"
"ztq th 1\n"
"qNd qu 1\n"
"suY qu 1\n"
"Uoa an 1\n"
"djR de 1\n"
"mFf me 1\n"
"jzq qu 1\n"
"zjR sz 1\n"
"Nnl an 1\n"
"tJp th 1\n"
"gZr ng 1\n"
"Bwx wa 1\n"
"dWz sz 1\n"
"lwM le 1\n"
"Iqk qu 1\n"
"twZ th 1\n"
"Mwt th 1\n"
"kjY ij 1\n"
"zBv sz 1\n"
"iwF in 1\n"
"rHz er 1\n"
"Sqh th 1\n"
"oKq qu 1\n"
"qjO qu 1\n"
"htQ th 1\n"
"cKx ch 1\n"
"bqW qu 1\n"
"kYh th 1\n"
"tBq th 1\n"
"gmJ ng 1\n"
"eYx er 1\n"
"hGv th 1\n"
"hQd th 1\n"
"pnX an 1\n"
"bvJ va 1\n"
"sxM st 1\n"
"qNt th 1\n"
"Wlj le 1\n"
"kqD qu 1\n"
"qdZ qu 1\n"
"mhY th 1\n"
"tlC th 1\n"
"pqI qu 1\n"
"ybD be 1\n"
"xAe er 1\n"
"pLt th 1\n"
"lHb le 1\n"
"xVc ch 1\n"
"dhN th 1\n"
"qxU qu 1\n"
"dVf de 1\n"
"Zkm ka 1\n"
"kpD ka 1\n"
"pjH ij 1\n"
"yGm me 1\n"
"iyP in 1\n"
"wmK me 1\n"
"mJz sz 1\n"
"fmL me 1\n"
"cBv ch 1\n"
"Vvf va 1\n"
"Eql qu 1\n"
"ohV th 1\n"
"lCx le 1\n"
"oWc ch 1\n"
"nzX an 1\n"
"fIj ij 1\n"
"kPt th 1\n"
"pYm me 1\n"
"zhG th 1\n"
"cqN ch 1\n"
"umQ qu 1\n"
"wXs st 1\n"
"lZj le 1\n"
"Sxs st 1\n"
"Kqd qu 1\n"
"tWc th 1\n"
"Kcc ch 1\n"
"pvB po 1\n"
"tgR th 1\n"
"yrN er 1\n"
"xQr er 1\n"
"Xvz sz 1\n"
"lJh th 1\n"
"Xfk ka 1\n"
"Fvr er 1\n"
"fUb be 1\n"
"lZb le 1\n"
"gdI ng 1\n"
"joI on 1\n"
"yKq qu 1\n"
"twz th 1\n"
"qJj qu 1\n"
"vxM va 1\n"
"Vzs st 1\n"
"fjR ij 1\n"
"Kmz sz 1\n"
"qIw qu 1\n"
"jyD ij 1\n"
"qbU qu 1\n"
"qkZ qu 1\n"
"jVg ng 1\n"
"Fhj th 1\n"
"qJq qu 1\n"
"wPq qu 1\n"
"Ueo er 1\n"
"zXd sz 1\n"
"gFb ng 1\n"
"jJy ij 1\n"
"Nsj st 1\n"
"lMb le 1\n"
"yQn an 1\n"
"dnM an 1\n"
"yRg ng 1\n"
"Fjc ch 1\n"
"dKg ng 1\n"
"gqV ng 1\n"
"gCk ng 1\n"
"sOz st 1\n"
"hlO th 1\n"
"qbN qu 1\n"
"sjN st 1\n"
"Ujz sz 1\n"
"rVm er 1\n"
"Wjs st 1\n"
"bmM me 1\n"
"Vzx sz 1\n"
"hZg th 1\n"
"zFt th 1\n"
"yhJ th 1\n"
"vNk ka 1\n"
"zbT sz 1\n"
"xmJ me 1\n"
"Fcs ch 1\n"
"yTc ch 1\n"
"cSg ch 1\n"
"qmP qu 1\n"
"mFz sz 1\n"
"bdI de 1\n"
"jlK le 1\n"
"bnB an 1\n"
"qyQ qu 1\n"
"Vjk ij 1\n"
"hzU th 1\n"
"qgp qu 1\n"
"lqW qu 1\n"
"fNn an 1\n"
"Tjp ij 1\n"
"vlV le 1\n"
"rVp er 1\n"
"bLd de 1\n"
"ydQ de 1\n"
"gYg ng 1\n"
"qhE th 1\n"
"Gsq qu 1\n"
"gWz ng 1\n"
"Qtk th 1\n"
"Hzw sz 1\n"
"kIo ho 1\n"
"kfC ka 1\n"
"zBg ng 1\n"
"jJp ij 1\n"
"eIq qu 1\n"
"vuB qu 1\n"
"Wbg ng 1\n"
"Jjp ij 1\n"
"lXk le 1\n"
"Tfx fo 1\n"
"zLl le 1\n"
"dqT qu 1\n"
"oZq qu 1\n"
"Jfu qu 1\n"
"Qhh th 1\n"
"qkK qu 1\n"
"Ejc ch 1\n"
"zwN sz 1\n"
"yQq qu 1\n"
"dDp de 1\n"
"Pww wa 1\n"
"ztC th 1\n"
"jtH th 1\n"
"yrX er 1\n"
"vwT va 1\n"
"yRh th 1\n"
"wQt th 1\n"
"lXz le 1\n"
"cfL ch 1\n"
"Fwl le 1\n"
"rNw er 1\n"
"Bhx th 1\n"
"glZ ng 1\n"
"gcD ch 1\n"
"Sfs st 1\n"
"Uzf sz 1\n"
"Tdl le 1\n"
"dRn an 1\n"
"vYw va 1\n"
"xcD ch 1\n"
"xcC ch 1\n"
"lBx le 1\n"
"gHq qu 1\n"
"wJy wa 1\n"
"yrO er 1\n"
"vqF qu 1\n"
"tYb th 1\n"
"Zjw ij 1\n"
"jLk ij 1\n"
"Hvf va 1\n"
"pnS an 1\n"
"pcT ch 1\n"
"sFk st 1\n"
"dcO ch 1\n"
"zPw sz 1\n"
"vNf va 1\n"
"Gdx de 1\n"
"dlP le 1\n"
"jLx jo 1\n"
"jZj ij 1\n"
"wwT wa 1\n"
"tGx th 1\n"
"fhS th 1\n"
"Xtk th 1\n"
"xnW on 1\n"
"pkJ ka 1\n"
"xIo on 1\n"
"Zxb be 1\n"
"nOj an 1\n"
"wHj ij 1\n"
"fjS ij 1\n"
"wdL de 1\n"
"jbN ij 1\n"
"ykO ka 1\n"
"xqB qu 1\n"
"qzN qu 1\n"
"Qbq qu 1\n"
"Fqw qu 1\n"
"jWw ij 1\n"
"nxM an 1\n"
"tpX th 1\n"
"Ttz th 1\n"
"zsH st 1\n"
"fjz sz 1\n"
"xIg ng 1\n"
"xkY ka 1\n"
"Fqa an 1\n"
"oGk on 1\n"
"Hnc an 1\n"
"jPq qu 1\n"
"zlW le 1\n"
"uRx qu 1\n"
"uGx qu 1\n"
"jYv ij 1\n"
"Kpz sz 1\n"
"gQo ng 1\n"
"Kwx wa 1\n"
"jNw ij 1\n"
"tdD th 1\n"
"yGj ij 1\n"
"Lbq qu 1\n"
"Rrc ch 1\n"
"qvX qu 1\n"
"hhK th 1\n"
"kZx ka 1\n"
"xDf fo 1\n"
"Pjf ij 1\n"
"cgF ch 1\n"
"vCk ka 1\n"
"fWw ow 1\n"
"mJp me 1\n"
"fXe er 1\n"
"uYp qu 1\n"
"jHk ij 1\n"
"wdP de 1\n"
"qFk qu 1\n"
"jrG er 1\n"
"fgD ng 1\n"
"fsG st 1\n"
"Vgb ng 1\n"
"xAa an 1\n"
"gtZ th 1\n"
"tlq th 1\n"
"Tmw me 1\n"
"gyY ng 1\n"
"Qxt th 1\n"
"Uxz sz 1\n"
"iVr in 1\n"
"zqI qu 1\n"
"Nbw wa 1\n"
"Dhd th 1\n"
"mOq qu 1\n"
"iBd in 1\n"
"cqB ch 1\n"
"zQq qu 1\n"
"Wbv va 1\n"
"Qks ka 1\n"
"qPa an 1\n"
"tfI th 1\n"
"mZs st 1\n"
"pDs st 1\n"
"nJj an 1\n"
"zcp ch 1\n"
"tWj th 1\n"
"Zxp pr 1\n"
"vPy va 1\n"
"dxK de 1\n"
"oPv on 1\n"
"rjN er 1\n"
"oQh th 1\n"
"vwH va 1\n"
"Qhp th 1\n"
"xsU st 1\n"
"kGq qu 1\n"
"wjW ij 1\n"
"Pwx wa 1\n"
"Bbn an 1\n"
"xOq qu 1\n"
"qpN qu 1\n"
"nbq an 1\n"
"zpM sz 1\n"
"jmB ij 1\n"
"Nqj qu 1\n"
"zYd sz 1\n"
"Ybc ch 1\n"
"xcW ch 1\n"
"gPg ng 1\n"
"Qys st 1\n"
"Bhq th 1\n"
"yGx ny 1\n"
"qxL qu 1\n"
"Jfd de 1\n"
"mbV me 1\n"
"pkY ka 1\n"
"cWl ch 1\n"
"wBg ng 1\n"
"vOw va 1\n"
"Gpb pr 1\n"
"Ppq qu 1\n"
"fsX st 1\n"
"vtQ th 1\n"
"yCj ij 1\n"
"yoY on 1\n"
"pwQ pr 1\n"
"yGd de 1\n"
"qtJ th 1\n"
"nrZ an 1\n"
"eVx er 1\n"
"Nrq qu 1\n"
"wtA th 1\n"
"fHf fo 1\n"
"gsQ ng 1\n"
"hlC th 1\n"
"dLc ch 1\n"
"zjC sz 1\n"
"jvY ij 1\n"
"tIj th 1\n"
"pvL va 1\n"
"Hhg th 1\n"
"yMv va 1\n"
"xMn an 1\n"
"tYx th 1\n"
"vVp va 1\n"
"Ynb an 1\n"
"vmX va 1\n"
"qjQ qu 1\n"
"vQr er 1\n"
"hQz th 1\n"
"mNf me 1\n"
"zfY sz 1\n"
"xjS ij 1\n"
"jBm ij 1\n"
"jpq qu 1\n"
"nJq an 1\n"
"Knz an 1\n"
"gGf ng 1\n"
"pZx pr 1\n"
"Gql qu 1\n"
"Uqm qu 1\n"
"eWv er 1\n"
"fGg ng 1\n"
"qsA qu 1\n"
"uhY th 1\n"
"xhH th 1\n"
"yxS ny 1\n"
"rxK er 1\n"
"hNc th 1\n"
"Vwh th 1\n"
"aNv an 1\n"
"Qzv sz 1\n"
"fQn an 1\n"
"jzH sz 1\n"
"Rvh th 1\n"
"Qpt th 1\n"
"qXv qu 1\n"
"phQ th 1\n"
"Qlb le 1\n"
"bnQ an 1\n"
"njK an 1\n"
"Jjs st 1\n"
"tJx th 1\n"
"iwX in 1\n"
"nVd an 1\n"
"kzA sz 1\n"
"uwE qu 1\n"
"Tsq qu 1\n"
"hqM th 1\n"
"Rnq an 1\n"
"rDn an 1\n"
"yNb be 1\n"
"uqN qu 1\n"
"fKw wa 1\n"
"Iqn an 1\n"
"xHc ch 1\n"
"Wwq qu 1\n"
"gMw ng 1\n"
"yWf ny 1\n"
"vcO ch 1\n"
"Gkm ka 1\n"
"fRh th 1\n"
"dMc nd 1\n"
"Zhx th 1\n"
"qlH qu 1\n"
"qUl qu 1\n"
"zHf sz 1\n"
"wCk ka 1\n"
"Qfj ij 1\n"
"Qkw ka 1\n"
"mYh th 1\n"
"dcU ch 1\n"
"jTf ij 1\n"
"rjF er 1\n"
"hxQ th 1\n"
"wNf wa 1\n"
"Lgg ng 1\n"
"Fdu qu 1\n"
"tJw th 1\n"
"ycQ ch 1\n"
"xXf fo 1\n"
"wwQ wa 1\n"
"evQ er 1\n"
"Fcj ch 1\n"
"Cyq qu 1\n"
"tpF th 1\n"
"Axj ij 1\n"
"zGg ng 1\n"
"Qbb be 1\n"
"vfY va 1\n"
"oXd on 1\n"
"wAq qu 1\n"
"Xbk ka 1\n"
"wmR me 1\n"
"rzN er 1\n"
"fcB ch 1\n"
"Bwc ch 1\n"
"xgS ng 1\n"
"dQr er 1\n"
"kJw ka 1\n"
"bgx ng 1\n"
"pZs sz 1\n"
"wfA wa 1\n"
"jmX ij 1\n"
"dNp de 1\n"
"Vxr er 1\n"
"Rvb va 1\n"
"wZl le 1\n"
"wgA ng 1\n"
"Wrq qu 1\n"
"Jcq ch 1\n"
"ljW le 1\n"
"qPt th 1\n"
"gjY ng 1\n"
"jUo on 1\n"
"mIj ij 1\n"
"Hpy pr 1\n"
"Mpj ij 1\n"
"bkO ka 1\n"
"Avz sz 1\n"
"vKk ka 1\n"
"Bfz sz 1\n"
"yYj ij 1\n"
"Egq qu 1\n"
"wxH wa 1\n"
"zHh th 1\n"
"svA st 1\n"
"zcP ch 1\n"
"Bxo on 1\n"
"hSv th 1\n"
"Lxt th 1\n"
"hBz th 1\n"
"cWk ch 1\n"
"xBv va 1\n"
"hwN th 1\n"
"mkJ ka 1\n"
"oNj on 1\n"
"Ugq qu 1\n"
"jZq qu 1\n"
"xfP fo 1\n"
"bYv va 1\n"
"qxF qu 1\n"
"dcI ch 1\n"
"dhY th 1\n"
"cvP ch 1\n"
"qUy qu 1\n"
"mxC me 1\n"
"zPx sz 1\n"
"Nql qu 1\n"
"Yfw wa 1\n"
"Wgp ng 1\n"
"jgD ng 1\n"
"Qfq qu 1\n"
"lcW ch 1\n"
"qxy qu 1\n"
"Xpq qu 1\n"
"wrD er 1\n"
"bEo on 1\n"
"bzV sz 1\n"
"fwS wa 1\n"
"mLj ij 1\n"
"wMr er 1\n"
"vFb va 1\n"
"zfT sz 1\n"
"nRk an 1\n"
"kJh th 1\n"
"Rmw me 1\n"
"nqR an 1\n"
"qpO qu 1\n"
"bHb be 1\n"
"Tkq qu 1\n"
"sjG st 1\n"
"qaT an 1\n"
"Pql qu 1\n"
"hlQ th 1\n"
"kzW sz 1\n"
"yFc ch 1\n"
"uBv qu 1\n"
"vxO va 1\n"
"qvC qu 1\n"
"Yqx qu 1\n"
"jCb ij 1\n"
"Qjk ij 1\n"
"fBh th 1\n"
"vKq qu 1\n"
"rMg ng 1\n"
"hRw th 1\n"
"ykU ka 1\n"
"bUq qu 1\n"
"vYv va 1\n"
"Pdx de 1\n"
"oGv on 1\n"
"jLy ij 1\n"
"duY qu 1\n"
"Wcp ch 1\n"
"oGx on 1\n"
"vGl le 1\n"
"Jdz sz 1\n"
"ijH in 1\n"
"mlX le 1\n"
"jNr er 1\n"
"kCq qu 1\n"
"Ghh th 1\n"
"rMv er 1\n"
"Bgp ng 1\n"
"bFt th 1\n"
"uWl qu 1\n"
"dXg ng 1\n"
"Wcf ch 1\n"
"dbI de 1\n"
"bGx be 1\n"
"exQ er 1\n"
"jWj jo 1\n"
"pQb pr 1\n"
"jcH ch 1\n"
"qOl qu 1\n"
"mtL th 1\n"
"crC ch 1\n"
"pBh th 1\n"
"Wlz le 1\n"
"nHn an 1\n"
"Hfp pr 1\n"
"Xpc ch 1\n"
"Uxp pr 1\n"
"Ksq qu 1\n"
"xWk ka 1\n"
"nqZ an 1\n"
"Cxd de 1\n"
"zJx sz 1\n"
"rWq qu 1\n"
"Cbq qu 1\n"
"qqP qu 1\n"
"lhU th 1\n"
"Ufv va 1\n"
"Uxg ng 1\n"
"hJf th 1\n"
"nvQ an 1\n"
"dhF th 1\n"
"Cvb va 1\n"
"aPf an 1\n"
"Jxj ij 1\n"
"Dwp pr 1\n"
"Ixw wa 1\n"
"kfS ka 1\n"
"rZm er 1\n"
"fmE me 1\n"
"sLq qu 1\n"
"bmR me 1\n"
"uCs qu 1\n"
"kFm ka 1\n"
"Kqk qu 1\n"
"xQk ka 1\n"
"Sfn an 1\n"
"fgU ng 1\n"
"vvT va 1\n"
"mQe er 1\n"
"Gbt th 1\n"
"tbY th 1\n"
"lQk le 1\n"
"cIh th 1\n"
"Tjq qu 1\n"
"nQg an 1\n"
"yYp pr 1\n"
"qPw qu 1\n"
"xOa an 1\n"
"pNw pr 1\n"
"fJz sz 1\n"
"zHb sz 1\n"
"kBh th 1\n"
"fdE de 1\n"
"wPg ng 1\n"
"lVv le 1\n"
"mPw me 1\n"
"Rmg ng 1\n"
"xoE on 1\n"
"hnJ th 1\n"
"uvE qu 1\n"
"Woq qu 1\n"
"ucX ch 1\n"
"nmD an 1\n"
"pcX ch 1\n"
"hDw th 1\n"
"dgI ng 1\n"
"vVd de 1\n"
"tDh ch 1\n"
"jHn an 1\n"
"hkX th 1\n"
"pxT pr 1\n"
"xYz sz 1\n"
"rTp er 1\n"
"Ubz sz 1\n"
"Llm le 1\n"
"yjZ ij 1\n"
"Qss st 1\n"
"cfM ch 1\n"
"jbG be 1\n"
"Jfz sz 1\n"
"mWb me 1\n"
"jDp ij 1\n"
"lWz le 1\n"
"cXy ch 1\n"
"oQr er 1\n"
"ucZ ch 1\n"
"cvN ch 1\n"
"cvK ch 1\n"
"zDk sz 1\n"
"bLr er 1\n"
"dDl le 1\n"
"hhD th 1\n"
"vmK va 1\n"
"hLt th 1\n"
"mqW qu 1\n"
"Bfs st 1\n"
"Acj ch 1\n"
"dcG ch 1\n"
"yJc ch 1\n"
"mfS me 1\n"
"drL er 1\n"
"qyK qu 1\n"
"tQz th 1\n"
"jrL er 1\n"
"ccJ ch 1\n"
"wpX pr 1\n"
"Zzf sz 1\n"
"snU an 1\n"
"qEw qu 1\n"
"tQb th 1\n"
"mPd de 1\n"
"vJq qu 1\n"
"vpU va 1\n"
"vzM sz 1\n"
"uZb qu 1\n"
"ywU wa 1\n"
"Rjs st 1\n"
"hKt th 1\n"
"Bfb be 1\n"
"wuQ qu 1\n"
"bvM va 1\n"
"yiW in 1\n"
"hqC th 1\n"
"iUq qu 1\n"
"lBd le 1\n"
"Zxj ij 1\n"
"wpW pr 1\n"
"rHm er 1\n"
"mhQ th 1\n"
"fMb be 1\n"
"vWf va 1\n"
"Fdq qu 1\n"
"jGb ij 1\n"
"Dhw th 1\n"
"cjR ch 1\n"
"kvD ka 1\n"
"qvD qu 1\n"
"Xmk ka 1\n"
"Cjj ij 1\n"
"kkX ka 1\n"
"qkF qu 1\n"
"vWg ng 1\n"
"Msq qu 1\n"
"nNv an 1\n"
"Hzu qu 1\n"
"zrY er 1\n"
"hgB th 1\n"
"pwB pr 1\n"
"Jxc ch 1\n"
"vcJ ch 1\n"
"sYw st 1\n"
"Tqx qu 1\n"
"eJf le 1\n"
"czJ ch 1\n"
"Qyh th 1\n"
"bvV va 1\n"
"Xyh th 1\n"
"fjq qu 1\n"
"dYc ch 1\n"
"pBx pr 1\n"
"jvR ij 1\n"
"gbH ng 1\n"
"ygH ng 1\n"
"hbV th 1\n"
"lwU le 1\n"
"tJk th 1\n"
"pIw pr 1\n"
"Vjl le 1\n"
"Dgm ng 1\n"
"nvR an 1\n"
"yRp pr 1\n"
"fOj ij 1\n"
"Ecf ch 1\n"
"Zrf er 1\n"
"mxD me 1\n"
"Iqf qu 1\n"
"zBj sz 1\n"
"tTs th 1\n"
"lqB qu 1\n"
"kCv ka 1\n"
"nVh th 1\n"
"jGq qu 1\n"
"cgQ ch 1\n"
"Ppd de 1\n"
"Jcd ch 1\n"
"hhP th 1\n"
"sLg ng 1\n"
"xYt th 1\n"
"Qps st 1\n"
"sfE st 1\n"
"wxR wa 1\n"
"pFp pr 1\n"
"Ymf me 1\n"
"Jgy ng 1\n"
"yvI va 1\n"
"Ncz ch 1\n"
"wBf wa 1\n"
"rVx er 1\n"
"jvX ij 1\n"
"nYp an 1\n"
"nNb an 1\n"
"cQi ch 1\n"
"Qwy wa 1\n"
"vPf va 1\n"
"qvd qu 1\n"
"hkD th 1\n"
"Wmr er 1\n"
"gdY ng 1\n"
"Kjj ij 1\n"
"qsN qu 1\n"
"vJg ng 1\n"
"mDc ch 1\n"
"kvF ka 1\n"
"kWx ka 1\n"
"xYu qu 1\n"
"eMq qu 1\n"
"mYy me 1\n"
"Hxt th 1\n"
"pbM pr 1\n"
"Hwd de 1\n"
"mWu qu 1\n"
"zNs st 1\n"
"Qjh th 1\n"
"aqD an 1\n"
"Gcd ch 1\n"
"btX th 1\n"
"Zql qu 1\n"
"Ujw ij 1\n"
"yvM va 1\n"
"Hhw th 1\n"
"zWd sz 1\n"
"pYj ij 1\n"
"xWt th 1\n"
"ylO le 1\n"
"cnX ch 1\n"
"cMf ch 1\n"
"pKb pr 1\n"
"woV on 1\n"
"fzG sz 1\n"
"Lqb qu 1\n"
"eOj er 1\n"
"Gtb th 1\n"
"clX ch 1\n"
"kdC de 1\n"
"cfq ch 1\n"
"hKk th 1\n"
"cJi ch 1\n"
"uSb qu 1\n"
"jgT ng 1\n"
"tcG th 1\n"
"qNv qu 1\n"
"fpB pr 1\n"
"vPw va 1\n"
"jmA ij 1\n"
"dxI de 1\n"
"jGg ng 1\n"
"Bvg ng 1\n"
"qrC qu 1\n"
"nPx an 1\n"
"Qmn an 1\n"
"cqC ch 1\n"
"kFh th 1\n"
"Jtf th 1\n"
"Cqz qu 1\n"
"rCd er 1\n"
"Zms st 1\n"
"dVq qu 1\n"
"Gwg ng 1\n"
"cwP ch 1\n"
"wVu qu 1\n"
"dNg ng 1\n"
"jXc ch 1\n"
"Mbz sz 1\n"
"wvG ve 1\n"
"Vpw pr 1\n"
"yXq qu 1\n"
"hlK th 1\n"
"pYv va 1\n"
"Fbd de 1\n"
"zcV ch 1\n"
"rQk er 1\n"
"wtN th 1\n"
"qeI qu 1\n"
"eGt th 1\n"
"kMq qu 1\n"
"kqS qu 1\n"
"cqd ch 1\n"
"pLf po 1\n"
"xvO va 1\n"
"rfH er 1\n"
"gIq qu 1\n"
"Pqk qu 1\n"
"xCn an 1\n"
"dVs st 1\n"
"iqY qu 1\n"
"bsJ st 1\n"
"Vww wa 1\n"
"Znm an 1\n"
"Yrz er 1\n"
"Rvz sz 1\n"
"dzK de 1\n"
"zbW sz 1\n"
"tkx th 1\n"
"xkP ka 1\n"
"kzS sz 1\n"
"gXq qu 1\n"
"Lxf fo 1\n"
"Fwr er 1\n"
"lHs le 1\n"
"zrB er 1\n"
"jNb ij 1\n"
"Hxy ny 1\n"
"Gfw wa 1\n"
"Egw ng 1\n"
"Jxw wa 1\n"
"tVm th 1\n"
"bwQ wa 1\n"
"gIx ng 1\n"
"Wqu un 1\n"
"jvI ij 1\n"
"cGc ch 1\n"
"kSb ka 1\n"
"hxG th 1\n"
"zHm sz 1\n"
"Jpk ka 1\n"
"fVb be 1\n"
"Ukf ka 1\n"
"rxF er 1\n"
"dVu qu 1\n"
"sdX st 1\n"
"mjM ij 1\n"
"xwq qu 1\n"
"Ogk ng 1\n"
"qhr th 1\n"
"vfA va 1\n"
"qbA qu 1\n"
"Lfu qu 1\n"
"hzY th 1\n"
"iHf in 1\n"
"jxb ij 1\n"
"vmP va 1\n"
"bvI va 1\n"
"fmH me 1\n"
"qtx th 1\n"
"bvQ va 1\n"
"qzX qu 1\n"
"bVn an 1\n"
"Xmt th 1\n"
"qXo qu 1\n"
"pfD pr 1\n"
"fCd de 1\n"
"vbx va 1\n"
"Zhz th 1\n"
"Kwg ng 1\n"
"rcJ ch 1\n"
"jlT le 1\n"
"jzM sz 1\n"
"rpP er 1\n"
"tmA th 1\n"
"aYw an 1\n"
"zBq qu 1\n"
"xhT th 1\n"
"yLq qu 1\n"
"cKf ch 1\n"
"qdP qu 1\n"
"Ybx be 1\n"
"dHs st 1\n"
"jhH th 1\n"
"Bsv st 1\n"
"rZt th 1\n"
"mhJ th 1\n"
"Zwq qu 1\n"
"kXf ka 1\n"
"zvT sz 1\n"
"yiC in 1\n"
"gkT ng 1\n"
"nJw an 1\n"
"zpV sz 1\n"
"tPq th 1\n"
"cVt th 1\n"
"dBg ng 1\n"
"cRf ch 1\n"
"vRq qu 1\n"
"jgA ng 1\n"
"bMz sz 1\n"
"hJh th 1\n"
"mHd de 1\n"
"Ckq qu 1\n"
"qcj ch 1\n"
"yIb be 1\n"
"wqE qu 1\n"
"pMh th 1\n"
"Hqj qu 1\n"
"jZu qu 1\n"
"iqO qu 1\n"
"tqC th 1\n"
"qoK qu 1\n"
"Knq an 1\n"
"bQm me 1\n"
"uuX qu 1\n"
"Wzc ch 1\n"
"Pxy ny 1\n"
"Qgf ng 1\n"
"sFw st 1\n"
"gHf ng 1\n"
"kgN ng 1\n"
"rCw er 1\n"
"Yjy ij 1\n"
"pnV an 1\n"
"fbS be 1\n"
"iHz in 1\n"
"kGx ka 1\n"
"kwS ka 1\n"
"sDm st 1\n"
"Vhk th 1\n"
"phN th 1\n"
"Jbf be 1\n"
"pWz sz 1\n"
"vvQ va 1\n"
"vNm va 1\n"
"lYw le 1\n"
"zHx sz 1\n"
"Zzc ch 1\n"
"bDt th 1\n"
"Fcv ch 1\n"
"dJg ng 1\n"
"Qwb wa 1\n"
"qFw qu 1\n"
"wmO me 1\n"
"Bvy va 1\n"
"qgY qu 1\n"
"vYs st 1\n"
"xwF wa 1\n"
"qwP qu 1\n"
"uEc ch 1\n"
"mWq qu 1\n"
"fzO sz 1\n"
"bPg ng 1\n"
"pnW an 1\n"
"hGx th 1\n"
"Vkk ka 1\n"
"Xrx er 1\n"
"gJd ng 1\n"
"Llq qu 1\n"
"Vqu un 1\n"
"fgH ng 1\n"
"Vcy ch 1\n"
"hVc th 1\n"
"rwZ er 1\n"
"Xlc ch 1\n"
"xJd de 1\n"
"Fnn an 1\n"
"Ypj ij 1\n"
"lhJ th 1\n"
"aUj an 1\n"
"lBp pr 1\n"
"dlW le 1\n"
"pvV va 1\n"
"Mwr er 1\n"
"Zwc ch 1\n"
"wcU ch 1\n"
"cVq ch 1\n"
"ycU ch 1\n"
"Lcq ch 1\n"
"rvQ er 1\n"
"eYm er 1\n"
"qCn an 1\n"
"dBx de 1\n"
"Iwq qu 1\n"
"gMt th 1\n"
"bhC th 1\n"
"bDs st 1\n"
"Vhz th 1\n"
"kJz sz 1\n"
"Ohz th 1\n"
"kDz sz 1\n"
"hTn th 1\n"
"eqG qu 1\n"
"gJr ng 1\n"
"Zpz sz 1\n"
"hwQ th 1\n"
"fgY ng 1\n"
"sdV st 1\n"
"ljV le 1\n"
"yGg ng 1\n"
"uWg qu 1\n"
"sbO st 1\n"
"qdD qu 1\n"
"yJj ij 1\n"
"nwq an 1\n"
"Apq qu 1\n"
"ccK ch 1\n"
"Qwl le 1\n"
"oyQ on 1\n"
"lPw le 1\n"
"cYt th 1\n"
"brG er 1\n"
"xkT ka 1\n"
"dUj de 1\n"
"rhR th 1\n"
"xPw wa 1\n"
"xoF on 1\n"
"hYj th 1\n"
"hYw th 1\n"
"lPn an 1\n"
"zCg ng 1\n"
"sJt th 1\n"
"wDs st 1\n"
"fVh th 1\n"
"zwW sz 1\n"
"yLj ij 1\n"
"aBx an 1\n"
"Dvv va 1\n"
"tKb th 1\n"
"jfG ij 1\n"
"xMm me 1\n"
"bLp pr 1\n"
"xwW wa 1\n"
"bzH sz 1\n"
"cIw ch 1\n"
"zdN sz 1\n"
"Ggv va 1\n"
"lwV le 1\n"
"qyV qu 1\n"
"vBv va 1\n"
"Owm me 1\n"
"Ltx th 1\n"
"mqE qu 1\n"
"Xjc ch 1\n"
"pzY sz 1\n"
"Jds st 1\n"
"kMl le 1\n"
"Ddj de 1\n"
"tfX th 1\n"
"cqT ch 1\n"
"buG qu 1\n"
"oHb po 1\n"
"vRx va 1\n"
"qyq qu 1\n"
"kpY ka 1\n"
"vqN qu 1\n"
"jNq qu 1\n"
"cWb ch 1\n"
"gbJ ng 1\n"
"oZw on 1\n"
"cBz ch 1\n"
"Pvv va 1\n"
"ljI le 1\n"
"hvQ th 1\n"
"kwY ka 1\n"
"hBg th 1\n"
"kdN de 1\n"
"yxH ny 1\n"
"fxH fo 1\n"
"tXj th 1\n"
"uBx qu 1\n"
"uJm qu 1\n"
"Gxh th 1\n"
"fjK ij 1\n"
"gqO qu 1\n"
"dMt th 1\n"
"lVx le 1\n"
"Rhp th 1\n"
"cDn ch 1\n"
"Xkv ka 1\n"
"zmB sz 1\n"
"qaY an 1\n"
"Ivq qu 1\n"
"wmP me 1\n"
"bjq qu 1\n"
"cmU ch 1\n"
"slC le 1\n"
"Krx er 1\n"
"iVv in 1\n"
"Zwz sz 1\n"
"yPd de 1\n"
"qUv qu 1\n"
"Pdz sz 1\n"
"Qzk sz 1\n"
"zoU on 1\n"
"xJf fo 1\n"
"Udq qu 1\n"
"Qwj ij 1\n"
"Kvd de 1\n"
"vQw va 1\n"
"Rdk de 1\n"
"sIj st 1\n"
"Ggt th 1\n"
"lNw le 1\n"
"qvr qu 1\n"
"yqD qu 1\n"
"fXl le 1\n"
"jqg qu 1\n"
"qmA qu 1\n"
"Tgd ng 1\n"
"zpO po 1\n"
"tEz th 1\n"
"Bqz qu 1\n"
"wfL wa 1\n"
"vYu qu 1\n"
"Dxw wa 1\n"
"qWl qu 1\n"
"Rzc ch 1\n"
"mQo on 1\n"
"Ttc th 1\n"
"tVv th 1\n"
"Rqn an 1\n"
"Wcn ch 1\n"
"Nwu qu 1\n"
"xoJ on 1\n"
"vDf va 1\n"
"phH th 1\n"
"fJs st 1\n"
"Pxm me 1\n"
"rFb er 1\n"
"hlM th 1\n"
"mkX ka 1\n"
"nnQ an 1\n"
"Xfn an 1\n"
"sbZ st 1\n"
"Yyf ny 1\n"
"Bjw ij 1\n"
"Ilx le 1\n"
"qpA qu 1\n"
"Mqc ch 1\n"
"gqZ qu 1\n"
"sNv st 1\n"
"Zvq qu 1\n"
"kSx ka 1\n"
"vBd de 1\n"
"wvZ va 1\n"
"Uoe er 1\n"
"Fjy ij 1\n"
"zKb sz 1\n"
"pvI va 1\n"
"Zll le 1\n"
"hdE th 1\n"
"Fpv va 1\n"
"lhV th 1\n"
"rqQ qu 1\n"
"wjG ij 1\n"
"pLq qu 1\n"
"bpJ pr 1\n"
"wzV sz 1\n"
"Hgq ng 1\n"
"zhW th 1\n"
"Lvq qu 1\n"
"Xhr th 1\n"
"quY un 1\n"
"jqZ qu 1\n"
"vuH qu 1\n"
"Fzj sz 1\n"
"gzG ng 1\n"
"tFc th 1\n"
"vfE va 1\n"
"Igx ng 1\n"
"fqY qu 1\n"
"gYb ng 1\n"
"lJg ng 1\n"
"wcO ch 1\n"
"Qvk ka 1\n"
"Tqq qu 1\n"
"bdY de 1\n"
"wuT qu 1\n"
"lHw le 1\n"
"zRm sz 1\n"
"Hgw ng 1\n"
"tPk th 1\n"
"Jqv qu 1\n"
"tKx th 1\n"
"xpA pr 1\n"
"bkI ka 1\n"
"bSj ij 1\n"
"mxW me 1\n"
"mjR ij 1\n"
"Oip in 1\n"
"wyY wa 1\n"
"dFc ch 1\n"
"qDg qu 1\n"
"wXp pr 1\n"
"Vbp pr 1\n"
"jyN ij 1\n"
"yvP va 1\n"
"yVr er 1\n"
"aWm an 1\n"
"Gjk ij 1\n"
"Apw pr 1\n"
"Zsw st 1\n"
"jQv ij 1\n"
"jbT ij 1\n"
"bdB de 1\n"
"kcY ch 1\n"
"rqC qu 1\n"
"bxD be 1\n"
"vlx le 1\n"
"kjJ ij 1\n"
"xqW qu 1\n"
"zxE sz 1\n"
"sHf st 1\n"
"juF qu 1\n"
"kwX ka 1\n"
"oqW qu 1\n"
"qWt th 1\n"
"fHc ch 1\n"
"cHc ch 1\n"
"Jjm ij 1\n"
"xbA be 1\n"
"Rqj qu 1\n"
"Ijy ij 1\n"
"vSx va 1\n"
"pVj ij 1\n"
"rQx er 1\n"
"fmK me 1\n"
"fnA an 1\n"
"Phv th 1\n"
"bhN th 1\n"
"Hxp pr 1\n"
"Vjq qu 1\n"
"lqC qu 1\n"
"Whd th 1\n"
"zsF st 1\n"
"tYt th 1\n"
"Jzq qu 1\n"
"Nff fo 1\n"
"qXs qu 1\n"
"xJj ij 1\n"
"lXn an 1\n"
"Zpv va 1\n"
"qTh th 1\n"
"npH an 1\n"
"kYx ka 1\n"
"bBs st 1\n"
"vEa an 1\n"
"pjq qu 1\n"
"qIi qu 1\n"
"Fdk de 1\n"
"fNx fo 1\n"
"Ofh th 1\n"
"wXe er 1\n"
"mvZ va 1\n"
"Cjs st 1\n"
"Fmm me 1\n"
"pkR ka 1\n"
"zfZ sz 1\n"
"Zpm me 1\n"
"cbA ch 1\n"
"tvY th 1\n"
"Lmp me 1\n"
"gFd ng 1\n"
"bFx be 1\n"
"Fjm ij 1\n"
"wjF ij 1\n"
"bjv ij 1\n"
"dbT de 1\n"
"jmQ ij 1\n"
"xFw wa 1\n"
"cDk ch 1\n"
"hFz th 1\n"
"uGm qu 1\n"
"Yhx th 1\n"
"Vtl th 1\n"
"azV an 1\n"
"xJs st 1\n"
"Mxw wa 1\n"
"vgK ng 1\n"
"cwQ ch 1\n"
"Gnx an 1\n"
"lbP le 1\n"
"kdS de 1\n"
"kDt th 1\n"
"Pvq qu 1\n"
"yHs st 1\n"
"Lgq qu 1\n"
"Xmj ij 1\n"
"pvA va 1\n"
"vUu qu 1\n"
"Qju qu 1\n"
"qDf qu 1\n"
"Gxj ij 1\n"
"Gfz sz 1\n"
"gbY ng 1\n"
"Sjf ij 1\n"
"Ogw ng 1\n"
"hGt th 1\n"
"btT th 1\n"
"gwH ng 1\n"
"Mwj ij 1\n"
"fvU va 1\n"
"frG er 1\n"
"cMx ch 1\n"
"Ydv de 1\n"
"xkZ ka 1\n"
"fjL ij 1\n"
"yPx ny 1\n"
"drX er 1\n"
"jxR ij 1\n"
"hYq th 1\n"
"xHn an 1\n"
"jrP er 1\n"
"tcJ th 1\n"
"qJz qu 1\n"
"zUd sz 1\n"
"jXj ij 1\n"
"qDd qu 1\n"
"Bjh th 1\n"
"qFz sz 1\n"
"mxG me 1\n"
"xOd de 1\n"
"hgL th 1\n"
"cpD ch 1\n"
"jhS th 1\n"
"Zqp qu 1\n"
"yNq qu 1\n"
"pHq qu 1\n"
"rZq qu 1\n"
"Wjy ij 1\n"
"Tfb be 1\n"
"Nwb wa 1\n"
"zQk sz 1\n"
"Rkc ch 1\n"
"Qvw va 1\n"
"wlJ le 1\n"
"cFp ch 1\n"
"oDb on 1\n"
"lsY le 1\n"
"Zbn an 1\n"
"wCd de 1\n"
"zxN sz 1\n"
"bQf be 1\n"
"Kjy ij 1\n"
"Ovk ka 1\n"
"cxA ch 1\n"
"Hqw qu 1\n"
"hwY th 1\n"
"sGv st 1\n"
"Rwn an 1\n"
"zvH sz 1\n"
"yVw wa 1\n"
"zmX sz 1\n"
"qdM qu 1\n"
"dJv de 1\n"
"wDj ij 1\n"
"Vhm th 1\n"
"fLt th 1\n"
"bvC va 1\n"
"xVn an 1\n"
"Hfx fo 1\n"
"tQl th 1\n"
"lhW th 1\n"
"oqS qu 1\n"
"Qya an 1\n"
"gZf ng 1\n"
"bKy be 1\n"
"tjX th 1\n"
"Vkc ch 1\n"
"yjv ij 1\n"
"bgN ng 1\n"
"lNm le 1\n"
"Jzl le 1\n"
"Lwx wa 1\n"
"vcL ch 1\n"
"yXh th 1\n"
"ztZ th 1\n"
"yJx ny 1\n"
"npV an 1\n"
"swG st 1\n"
"sXn an 1\n"
"eJb er 1\n"
"dcR ch 1\n"
"Zrg ng 1\n"
"Pgv ng 1\n"
"xYr er 1\n"
"jlI le 1\n"
"Fmf me 1\n"
"Gqk qu 1\n"
"vlZ le 1\n"
"Csq qu 1\n"
"uQj qu 1\n"
"lLm le 1\n"
"hwK th 1\n"
"cQv ch 1\n"
"qfH qu 1\n"
"rRw er 1\n"
"aUo an 1\n"
"qpE qu 1\n"
"lPc ch 1\n"
"dHd de 1\n"
"gqL qu 1\n"
"zWp sz 1\n"
"bBq be 1\n"
"wWp pr 1\n"
"cfK ch 1\n"
"fWx fo 1\n"
"rvV er 1\n"
"zhR th 1\n"
"Klh th 1\n"
"cbQ ch 1\n"
"Jmg ng 1\n"
"fPg ng 1\n"
"Qnn an 1\n"
"sMq qu 1\n"
"aFz an 1\n"
"sJs st 1\n"
"Pwj ij 1\n"
"jcL ch 1\n"
"gmQ ng 1\n"
"Yqr qu 1\n"
"Cgz ng 1\n"
"wqz qu 1\n"
"fnI nt 1\n"
"qOt th 1\n"
"vyU va 1\n"
"wQz sz 1\n"
"vUa an 1\n"
"xBt th 1\n"
"dNm de 1\n"
"Ewx wa 1\n"
"ypD pr 1\n"
"wxL wa 1\n"
"qeN qu 1\n"
"vkB ka 1\n"
"jBj ij 1\n"
"gUj ng 1\n"
"kQk ka 1\n"
"fwO wa 1\n"
"qQt th 1\n"
"Qrl er 1\n"
"dTx de 1\n"
"fWd de 1\n"
"jxK ij 1\n"
"fHl le 1\n"
"jcY ch 1\n"
"oJs on 1\n"
"sRx st 1\n"
"uQg qu 1\n"
"hhY th 1\n"
"sdN st 1\n"
"mxR me 1\n"
"Xsv st 1\n"
"Pcq ch 1\n"
"pkZ ka 1\n"
"zDl le 1\n"
"rIh th 1\n"
"Hnv an 1\n"
"jpA ij 1\n"
"hZj th 1\n"
"Znd an 1\n"
"hZd th 1\n"
"qrO qu 1\n"
"Sbx be 1\n"
"tWp th 1\n"
"Hpd de 1\n"
"Hjz sz 1\n"
"zcS ch 1\n"
"kPz sz 1\n"
"Htq th 1\n"
"gcG ch 1\n"
"Xqx qu 1\n"
"mZc ch 1\n"
"Xzv sz 1\n"
"Kgw ng 1\n"
"aUf an 1\n"
"Ymq qu 1\n"
"wcY ch 1\n"
"oVh th 1\n"
"pdM de 1\n"
"vzK sz 1\n"
"lrX er 1\n"
"ydV de 1\n"
"uqP qu 1\n"
"fmN me 1\n"
"Ocg ch 1\n"
"fLk ka 1\n"
"cJs ch 1\n"
"uGf qu 1\n"
"cMk ch 1\n"
"gTx ng 1\n"
"xNc ch 1\n"
"bHl le 1\n"
"uWp qu 1\n"
"dxL de 1\n"
"zxG sz 1\n"
"dVn an 1\n"
"Nbh th 1\n"
"Cxs st 1\n"
"cvG ch 1\n"
"wCf wa 1\n"
"kjC ij 1\n"
"cfY ch 1\n"
"zcf ch 1\n"
"dpW de 1\n"
"Pqy qu 1\n"
"tlN th 1\n"
"sIi in 1\n"
"qxC qu 1\n"
"Kjm ij 1\n"
"zZk sz 1\n"
"Fks st 1\n"
"gWb ng 1\n"
"tqK th 1\n"
"Jlv le 1\n"
"kCk ka 1\n"
"whT th 1\n"
"Owv va 1\n"
"zKm sz 1\n"
"jql qu 1\n"
"tGz th 1\n"
"dCw de 1\n"
"ymQ me 1\n"
"xnF an 1\n"
"wuF qu 1\n"
"pFq qu 1\n"
"jyS ij 1\n"
"pjX ij 1\n"
"lOj le 1\n"
"Jmd de 1\n"
"Zvz sz 1\n"
"jqM qu 1\n"
"jTd de 1\n"
"qOi qu 1\n"
"oJg ng 1\n"
"Mjx ij 1\n"
"Tpb pr 1\n"
"Wtv th 1\n"
"jxO ij 1\n"
"dBs st 1\n"
"tNv th 1\n"
"qTb qu 1\n"
"vnU an 1\n"
"zDx sz 1\n"
"pSq qu 1\n"
"xRm me 1\n"
"qUf qu 1\n"
"mBb me 1\n"
"qjI qu 1\n"
"sIy st 1\n"
"dCg ng 1\n"
"qIx qu 1\n"
"pZp pr 1\n"
"qDt th 1\n"
"xrM er 1\n"
"uOe qu 1\n"
"xgO ng 1\n"
"grX ng 1\n"
"Pgg ng 1\n"
"yVq qu 1\n"
"qEu un 1\n"
"kBc ch 1\n"
"Sgz ng 1\n"
"hjX th 1\n"
"gOq qu 1\n"
"pmW me 1\n"
"Gnw an 1\n"
"xZl le 1\n"
"hTd th 1\n"
"Gfq qu 1\n"
"sLf st 1\n"
"Pgj ng 1\n"
"twF th 1\n"
"mDk ka 1\n"
"qdY qu 1\n"
"vsZ st 1\n"
"vcC ch 1\n"
"Dcj ch 1\n"
"wUh th 1\n"
"qId qu 1\n"
"qrZ qu 1\n"
"cbS ch 1\n"
"Xzc ch 1\n"
"vWj ij 1\n"
"pvC va 1\n"
"Jrw er 1\n"
"yxI ny 1\n"
"dqI qu 1\n"
"uCm qu 1\n"
"vXd de 1\n"
"Wdp de 1\n"
"Dzc ch 1\n"
"hdV th 1\n"
"qbO qu 1\n"
"Jwk ka 1\n"
"Wqm qu 1\n"
"iXw in 1\n"
"fYl le 1\n"
"quQ un 1\n"
"kjD ij 1\n"
"mIh th 1\n"
"xWw wa 1\n"
"oCw on 1\n"
"Zcv ch 1\n"
"jdN de 1\n"
"uYb qu 1\n"
"Srx er 1\n"
"pgU ng 1\n"
"rQg ng 1\n"
"mHf me 1\n"
"fBt th 1\n"
"jVx ij 1\n"
"vYc ch 1\n"
"Vgj ng 1\n"
"qaS an 1\n"
"pxW pr 1\n"
"mnJ an 1\n"
"Bww wa 1\n"
"Tqz qu 1\n"
"jFv ij 1\n"
"xwM wa 1\n"
"Dqw qu 1\n"
"mwI me 1\n"
"vhW th 1\n"
"sqX qu 1\n"
"tlR th 1\n"
"aBh th 1\n"
"qnZ an 1\n"
"gXg ng 1\n"
"sCj st 1\n"
"grN ng 1\n"
"tYv th 1\n"
"Wwg ng 1\n"
"fYi in 1\n"
"btF th 1\n"
"wQn an 1\n"
"Zlt th 1\n"
"cJz ch 1\n"
"Xbn an 1\n"
"tLm th 1\n"
"Zlx le 1\n"
"Nmj ij 1\n"
"hcG th 1\n"
"Wrk er 1\n"
"Nhc th 1\n"
"vqD qu 1\n"
"ujY qu 1\n"
"iJd in 1\n"
"dLf de 1\n"
"cQn ch 1\n"
"Wfx fo 1\n"
"hkZ th 1\n"
"mhC th 1\n"
"zMq qu 1\n"
"zLz sz 1\n"
"Xgt th 1\n"
"qKr qu 1\n"
"yjJ ij 1\n"
"rJm er 1\n"
"Vxc ch 1\n"
"Bxn an 1\n"
"cnQ ch 1\n"
"qkQ qu 1\n"
"Nlw le 1\n"
"hWv th 1\n"
"wdU de 1\n"
"qtB th 1\n"
"qIe qu 1\n"
"qeY qu 1\n"
"Zrp er 1\n"
"Nhd th 1\n"
"fDp po 1\n"
"Cnj an 1\n"
"kxU ka 1\n"
"Bqv qu 1\n"
"vXr er 1\n"
"kBx ka 1\n"
"fBn an 1\n"
"pMx pr 1\n"
"kxR ka 1\n"
"Lzg ng 1\n"
"jBh th 1\n"
"Fjn an 1\n"
"wpC pr 1\n"
"fKy ny 1\n"
"hwD th 1\n"
"fqf qu 1\n"
"qBy qu 1\n"
"Ycq ch 1\n"
"Nns an 1\n"
"jmZ ij 1\n"
"gKw ng 1\n"
"dqA qu 1\n"
"Bjg ng 1\n"
"fGx fo 1\n"
"Lnp an 1\n"
"whU th 1\n"
"qPd qu 1\n"
"yMx ny 1\n"
"wEj ij 1\n"
"kmJ ka 1\n"
"Qsx st 1\n"
"lCw le 1\n"
"Qqb qu 1\n"
"hvJ th 1\n"
"xkN ka 1\n"
"uVg qu 1\n"
"sQm st 1\n"
"uJp qu 1\n"
"Yzn an 1\n"
"cXh th 1\n"
"srI er 1\n"
"tBz th 1\n"
"cRj ch 1\n"
"yIw wa 1\n"
"jHg ng 1\n"
"xFp pr 1\n"
"wJq qu 1\n"
"qdF qu 1\n"
"vKv va 1\n"
"sHc ch 1\n"
"hBf th 1\n"
"jDy ij 1\n"
"Gjx ij 1\n"
"Fkd de 1\n"
"Hhz th 1\n"
"xSg ng 1\n"
"jFf ij 1\n"
"qvM qu 1\n"
"oRw on 1\n"
"xgX ng 1\n"
"gjF ng 1\n"
"qDz qu 1\n"
"Ycf ch 1\n"
"Xcw ch 1\n"
"nfQ an 1\n"
"qGs qu 1\n"
"kGs st 1\n"
"fxV fo 1\n"
"iPj in 1\n"
"qgP qu 1\n"
"jIv ij 1\n"
"Vhu th 1\n"
"Bzj sz 1\n"
"Jvg ng 1\n"
"Vjf ij 1\n"
"wTq qu 1\n"
"pDw pr 1\n"
"Ysv st 1\n"
"ztV th 1\n"
"mtZ th 1\n"
"jFy ij 1\n"
"gqC qu 1\n"
"Vsg ng 1\n"
"gjS ng 1\n"
"vXz sz 1\n"
"bpK pr 1\n"
"nDq an 1\n"
"sKx st 1\n"
"xYg ng 1\n"
"fZd de 1\n"
"pxf pr 1\n"
"jqS qu 1\n"
"hTb th 1\n"
"Nkq qu 1\n"
"qpH qu 1\n"
"vEz sz 1\n"
"vqP qu 1\n"
"vHw va 1\n"
"Dkp ka 1\n"
"cqY ch 1\n"
"mqS qu 1\n"
"sVt th 1\n"
"Pxh th 1\n"
"hxN th 1\n"
"yTf ny 1\n"
"wCj ij 1\n"
"qQw qu 1\n"
"Vfv va 1\n"
"yQd de 1\n"
"gUc ch 1\n"
"wsQ st 1\n"
"fGw wa 1\n"
"wKf wa 1\n"
"wwB wa 1\n"
"vFt th 1\n"
"twQ th 1\n"
"nrB an 1\n"
"lpY le 1\n"
"xlR le 1\n"
"fdK de 1\n"
"eFz er 1\n"
"jyQ ij 1\n"
"lwT le 1\n"
"xCw wa 1\n"
"cgM ch 1\n"
"wtV th 1\n"
"aqJ an 1\n"
"bXu qu 1\n"
"qdQ qu 1\n"
"Yxd de 1\n"
"xcS ch 1\n"
"nmV an 1\n"
"rQd er 1\n"
"Glk le 1\n"
"qEm qu 1\n"
"uvO qu 1\n"
"svF st 1\n"
"sJx st 1\n"
"Qyg ng 1\n"
"mXh th 1\n"
"btD th 1\n"
"wGc ch 1\n"
"fZo on 1\n"
"Evx va 1\n"
"vzD sz 1\n"
"ufC qu 1\n"
"Pxq qu 1\n"
"qdt th 1\n"
"rKz er 1\n"
"Jhh th 1\n"
"Cxk ka 1\n"
"qxR qu 1\n"
"gTl ng 1\n"
"qGf qu 1\n"
"wYh th 1\n"
"cEh th 1\n"
"bzU sz 1\n"
"zWq qu 1\n"
"rWb er 1\n"
"Wrp er 1\n"
"sLc ch 1\n"
"Jpu qu 1\n"
"Jkf ka 1\n"
"vgE ng 1\n"
"Bqk qu 1\n"
"oQs on 1\n"
"kbZ ka 1\n"
"rVf er 1\n"
"qLw qu 1\n"
"Lrc ch 1\n"
"xsR st 1\n"
"hwB th 1\n"
"Qnk an 1\n"
"cPz ch 1\n"
"Ucq ch 1\n"
"egJ ng 1\n"
"Qyq qu 1\n"
"Xwr pr 1\n"
"xfD fo 1\n"
"wyH wa 1\n"
"lBw le 1\n"
"Mdx de 1\n"
"Qsy st 1\n"
"zqV qu 1\n"
"vpY va 1\n"
"slY le 1\n"
"wgL ng 1\n"
"snN an 1\n"
"hVd th 1\n"
"yKx ny 1\n"
"bdW de 1\n"
"lqL qu 1\n"
"yhD th 1\n"
"tNz th 1\n"
"zJg ng 1\n"
"kIx ka 1\n"
"fHp pr 1\n"
"yrJ er 1\n"
"lrR er 1\n"
"wzY sz 1\n"
"pgB pr 1\n"
"mfC me 1\n"
"qkL qu 1\n"
"jUu qu 1\n"
"qCh th 1\n"
"zlN le 1\n"
"Bgj ng 1\n"
"gcE ch 1\n"
"zRx sz 1\n"
"jhN th 1\n"
"eGz er 1\n"
"Fpq qu 1\n"
"Wvi in 1\n"
"mBf me 1\n"
"hhW th 1\n"
"oUq qu 1\n"
"dxQ de 1\n"
"Whq th 1\n"
"rMk er 1\n"
"lWd le 1\n"
"xWz sz 1\n"
"oQn an 1\n"
"mWx me 1\n"
"nuV an 1\n"
"wWz sz 1\n"
"hvR th 1\n"
"Zwd de 1\n"
"smJ st 1\n"
"Hlh th 1\n"
"sJh th 1\n"
"zmY sz 1\n"
"hZn th 1\n"
"Vjg ng 1\n"
"Jhz th 1\n"
"mqR qu 1\n"
"hcO th 1\n"
"dqL qu 1\n"
"Bfh th 1\n"
"pkV ka 1\n"
"tBx th 1\n"
"Hkc ch 1\n"
"Kqm qu 1\n"
"qWv qu 1\n"
"lXy le 1\n"
"yRd de 1\n"
"mjH ij 1\n"
"qzA qu 1\n"
"qxm qu 1\n"
"Qvm va 1\n"
"gcM ch 1\n"
"xqx qu 1\n"
"kKv ka 1\n"
"yoX po 1\n"
"xrT er 1\n"
"cWq ch 1\n"
"jqW qu 1\n"
"sWj st 1\n"
"Sdw de 1\n"
"dfR de 1\n"
"Kqn an 1\n"
"Gjd do 1\n"
"Qbd de 1\n"
"yyK ny 1\n"
"xmX me 1\n"
"xuF qu 1\n"
"yVg ng 1\n"
"qoO qu 1\n"
"Glq qu 1\n"
"Mkx ka 1\n"
"xLb be 1\n"
"gMr ng 1\n"
"sCp st 1\n"
"bGh th 1\n"
"cXo ch 1\n"
"zTz sz 1\n"
"qkC qu 1\n"
"hTp th 1\n"
"qNf qu 1\n"
"mXk ka 1\n"
"xcZ ch 1\n"
"jVm ij 1\n"
"bIi in 1\n"
"qnH an 1\n"
"nwC an 1\n"
"dSg ng 1\n"
"qoD qu 1\n"
"tDx th 1\n"
"jdU de 1\n"
"Xmw me 1\n"
"kNh th 1\n"
"jYr er 1\n"
"Ygp ng 1\n"
"blJ le 1\n"
"mFv va 1\n"
"Sxr er 1\n"
"Fzl le 1\n"
"jTq qu 1\n"
"cIp pr 1\n"
"ajY an 1\n"
"yYb be 1\n"
"rKb er 1\n"
"pzB sz 1\n"
"eIy er 1\n"
"wfK wa 1\n"
"Fmh th 1\n"
"ufL qu 1\n"
"Xlm le 1\n"
"Czg ng 1\n"
"lPq qu 1\n"
"tqV th 1\n"
"wFy wa 1\n"
"bQc ch 1\n"
"kVw ka 1\n"
"nMh th 1\n"
"cCj ch 1\n"
"oeE er 1\n"
"wHf wa 1\n"
"fNf fo 1\n"
"mXv va 1\n"
"Nkg ng 1\n"
"jWc ch 1\n"
"zFj sz 1\n"
"Kfx fo 1\n"
"bgY ng 1\n"
"lYz le 1\n"
"cgD ch 1\n"
"pgM ng 1\n"
"fhH th 1\n"
"jrD er 1\n"
"jwA ij 1\n"
"jyM ij 1\n"
"vzC sz 1\n"
"lQd le 1\n"
"zcH ch 1\n"
"lbX le 1\n"
"vzG sz 1\n"
"mSr er 1\n"
"xYf fo 1\n"
"qgB qu 1\n"
"jYk ij 1\n"
"dIq qu 1\n"
"wpG pr 1\n"
"hVk th 1\n"
"Tjb ij 1\n"
"zvP sz 1\n"
"bZg ng 1\n"
"bFg ng 1\n"
"kfU ka 1\n"
"Sxz sz 1\n"
"fwF wa 1\n"
"Qwg ng 1\n"
"fWb be 1\n"
"jqQ ij 1\n"
"Vfx fo 1\n"
"cJj ch 1\n"
"zwJ sz 1\n"
"xBg ng 1\n"
"Ddm de 1\n"
"bWv va 1\n"
"zpG sz 1\n"
"xrQ er 1\n"
"hcS th 1\n"
"wHn an 1\n"
"hIy th 1\n"
"Yxj ij 1\n"
"sdC st 1\n"
"yVu qu 1\n"
"qjf qu 1\n"
"Tzy sz 1\n"
"Ffn an 1\n"
"zzX sz 1\n"
"Hdx de 1\n"
"gLg ng 1\n"
"Yqg qu 1\n"
"fLb be 1\n"
"lQc ch 1\n"
"vjG ij 1\n"
"wpL pr 1\n"
"cJr ch 1\n"
"aJq an 1\n"
"Ynq an 1\n"
"Wvc ch 1\n"
"lKy le 1\n"
"eYq qu 1\n"
"kxL ka 1\n"
"gCb ng 1\n"
"sRd st 1\n"
"rMd er 1\n"
"Bvh th 1\n"
"kKg ng 1\n"
"wlK le 1\n"
"mDd de 1\n"
"zkJ sz 1\n"
"vRc ch 1\n"
"Xlh th 1\n"
"pRk ka 1\n"
"xvN va 1\n"
"nxI an 1\n"
"fCx fo 1\n"
"Ybt th 1\n"
"Ebq qu 1\n"
"bkN ka 1\n"
"bQy be 1\n"
"rDw er 1\n"
"djJ de 1\n"
"tmM th 1\n"
"nwH an 1\n"
"hJz th 1\n"
"lcM ch 1\n"
"ozV on 1\n"
"mLd de 1\n"
"bKc ch 1\n"
"eZf er 1\n"
"Fhg th 1\n"
"Zcj ch 1\n"
"pLr er 1\n"
"wqs qu 1\n"
"bXi in 1\n"
"tgD th 1\n"
"hQc th 1\n"
"zDp sz 1\n"
"oDg ng 1\n"
"sgM ng 1\n"
"bnD an 1\n"
"gHp ng 1\n"
"Wkf ka 1\n"
"qIs qu 1\n"
"wLd de 1\n"
"ztN th 1\n"
"gdQ ng 1\n"
"wCm ow 1\n"
"vVf va 1\n"
"Jmw me 1\n"
"hbC th 1\n"
"srW er 1\n"
"nxN an 1\n"
"pVs st 1\n"
"uWq qu 1\n"
"hgM th 1\n"
"lBc ch 1\n"
"wUo on 1\n"
"flH le 1\n"
"yWg ng 1\n"
"jjN ij 1\n"
"Uwn an 1\n"
"nYj an 1\n"
"mtN th 1\n"
"Pgp ng 1\n"
"zFc ch 1\n"
"oXz on 1\n"
"iCg ng 1\n"
"Lpc ch 1\n"
"Gqd qu 1\n"
"rYc ch 1\n"
"vqA qu 1\n"
"Vhc th 1\n"
"zmF sz 1\n"
"Bpc ch 1\n"
"Jfq qu 1\n"
"oXv on 1\n"
"lgX ng 1\n"
"Jfx fo 1\n"
"zpS sz 1\n"
"gcO ch 1\n"
"xwQ wa 1\n"
"pkQ ka 1\n"
"wOc ch 1\n"
"Wgm ng 1\n"
"cOj ch 1\n"
"Nft th 1\n"
"pqN qu 1\n"
"qsB qu 1\n"
"ydH de 1\n"
"qRs qu 1\n"
"ykX ka 1\n"
"cDq ch 1\n"
"mfU me 1\n"
"xzM sz 1\n"
"vGt th 1\n"
"fuW qu 1\n"
"lqG qu 1\n"
"Tqp qu 1\n"
"zvD sz 1\n"
"wWb wa 1\n"
"Fzi in 1\n"
"qpK qu 1\n"
"oyq qu 1\n"
"gQe ng 1\n"
"Zmw me 1\n"
"qYp qu 1\n"
"Wvf va 1\n"
"aQl an 1\n"
"oqO qu 1\n"
"eqJ qu 1\n"
"nvT an 1\n"
"fUk ka 1\n"
"ibH in 1\n"
"jvZ ij 1\n"
"Wwz sz 1\n"
"lgY ng 1\n"
"eFp er 1\n"
"Xgx ng 1\n"
"fYs st 1\n"
"kZs st 1\n"
"vpD va 1\n"
"qcZ ch 1\n"
"Bqo qu 1\n"
"jLb ij 1\n"
"rwX er 1\n"
"fyK ny 1\n"
"Sxv va 1\n"
"sxZ st 1\n"
"wkK ka 1\n"
"yJp pr 1\n"
"tjT th 1\n"
"qPv qu 1\n"
"yZj ij 1\n"
"Rrm er 1\n"
"nhJ th 1\n"
"vqJ qu 1\n"
"yxY ny 1\n"
"vsE st 1\n"
"fkK ka 1\n"
"fuY qu 1\n"
"zQo on 1\n"
"Xvr er 1\n"
"mMq qu 1\n"
"Oqm qu 1\n"
"Dxs st 1\n"
"Lqa an 1\n"
"Wnh th 1\n"
"jmG ij 1\n"
"Wqa an 1\n"
"mhT th 1\n"
"bgZ ng 1\n"
"vmO va 1\n"
"zFm sz 1\n"
"Khk th 1\n"
"yqB qu 1\n"
"nVv an 1\n"
"Rft th 1\n"
"zmL sz 1\n"
"hdD th 1\n"
"nWp an 1\n"
"vvO va 1\n"
"dYp de 1\n"
"ohX th 1\n"
"qoU qu 1\n"
"rjB er 1\n"
"Dwc ch 1\n"
"aWq an 1\n"
"clD ch 1\n"
"Vdk de 1\n"
"twM th 1\n"
"fZz sz 1\n"
"wQp pr 1\n"
"dwD de 1\n"
"iYv in 1\n"
"Awv va 1\n"
"pgG ng 1\n"
"Xoq qu 1\n"
"krQ er 1\n"
"Vxg ng 1\n"
"lwB le 1\n"
"Pxw wa 1\n"
"Jwf wa 1\n"
"zLh th 1\n"
"btH th 1\n"
"pwY pr 1\n"
"Mjd de 1\n"
"Xrh th 1\n"
"qXu un 1\n"
"Eqy qu 1\n"
"Bpy pr 1\n"
"znY an 1\n"
"Rqd qu 1\n"
"nQf an 1\n"
"Zvw va 1\n"
"zjO sz 1\n"
"wNd de 1\n"
"lIq qu 1\n"
"vMq qu 1\n"
"Gqt th 1\n"
"lMf le 1\n"
"Jqn an 1\n"
"fVw wa 1\n"
"qvQ qu 1\n"
"eHk er 1\n"
"jbK ij 1\n"
"fWs st 1\n"
"qTk qu 1\n"
"znF an 1\n"
"yxO ny 1\n"
"Fqr qu 1\n"
"nFb an 1\n"
"oDp on 1\n"
"jUc ch 1\n"
"qHg qu 1\n"
"gGq qu 1\n"
"qPs qu 1\n"
"jHv ij 1\n"
"Iwj ij 1\n"
"vzV sz 1\n"
"yUq qu 1\n"
"jQt th 1\n"
"sFb st 1\n"
"Lvg ng 1\n"
"zTt th 1\n"
"bvK va 1\n"
"Ccx ch 1\n"
"jyA ij 1\n"
"yEj ij 1\n"
"zdG sz 1\n"
"tqT th 1\n"
"qbH qu 1\n"
"nHd an 1\n"
"Hhj th 1\n"
"jVb ij 1\n"
"uHw un 1\n"
"Zck ch 1\n"
"gPq qu 1\n"
"mxq qu 1\n"
"wHs st 1\n"
"fDy ny 1\n"
"tlV th 1\n"
"Lsv st 1\n"
"zvF va 1\n"
"mqx qu 1\n"
"nqF an 1\n"
"xgM ng 1\n"
"gyq qu 1\n"
"grJ ng 1\n"
"jSq qu 1\n"
"Mmw me 1\n"
"Cgx ng 1\n"
"Rlr er 1\n"
"mvG va 1\n"
"fuA qu 1\n"
"uVh th 1\n"
"sMz st 1\n"
"wWr er 1\n"
"qpD qu 1\n"
"hQw th 1\n"
"xBc ch 1\n"
"fcW ch 1\n"
"hxL th 1\n"
"rfK er 1\n"
"mFn an 1\n"
"Qnw an 1\n"
"tjB th 1\n"
"Rkx ka 1\n"
"srE er 1\n"
"drG er 1\n"
"Cfy ny 1\n"
"yZw wa 1\n"
"Wxw wa 1\n"
"zCp sz 1\n"
"jZt th 1\n"
"Nqf qu 1\n"
"jgO ng 1\n"
"fWc ch 1\n"
"qrN qu 1\n"
"Nzj sz 1\n"
"Hjy ij 1\n"
"Uxy ny 1\n"
"oIy on 1\n"
"rfX er 1\n"
"oBw on 1\n"
"yyV ny 1\n"
"Qiv in 1\n"
"dKh th 1\n"
"qDk qu 1\n"
"tgQ th 1\n"
"xNw wa 1\n"
"qdL qu 1\n"
"ovY on 1\n"
"fbZ be 1\n"
"qiI qu 1\n"
"bvT va 1\n"
"jYq qu 1\n"
"kbK ka 1\n"
"Mfn an 1\n"
"Rpd de 1\n"
"pHb pr 1\n"
"qqO qu 1\n"
"vkV ka 1\n"
"sWp st 1\n"
"kPf ka 1\n"
"qLy qu 1\n"
"qoE qu 1\n"
"wLh th 1\n"
"zhV th 1\n"
"bpL pr 1\n"
"Tqf qu 1\n"
"pzG sz 1\n"
"kcT ch 1\n"
"wjX ij 1\n"
"kPy ku 1\n"
"fdB de 1\n"
"Qxs st 1\n"
"gYf ng 1\n"
"Ypx pr 1\n"
"zSk sz 1\n"
"tDg th 1\n"
"xbJ be 1\n"
"yfO ny 1\n"
"uQf qu 1\n"
"bpQ pr 1\n"
"dXc ch 1\n"
"lwP le 1\n"
"vTs st 1\n"
"Jlq qu 1\n"
"Cqw qu 1\n"
"bWy be 1\n"
"cUq ch 1\n"
"Ybk ka 1\n"
"wyq qu 1\n"
"jhq th 1\n"
"xUy ny 1\n"
"Ncj ch 1\n"
"kMh th 1\n"
"vZy va 1\n"
"zcq ch 1\n"
"Qsr er 1\n"
"Lhx th 1\n"
"Gcj ch 1\n"
"uQt th 1\n"
"wYn an 1\n"
"dYm de 1\n"
"Qvx va 1\n"
"Rcg ch 1\n"
"qGz qu 1\n"
"bxJ be 1\n"
"jFg ng 1\n"
"xLp pr 1\n"
"lDn an 1\n"
"wqS qu 1\n"
"bIq qu 1\n"
"tBm th 1\n"
"bQs st 1\n"
"zJb sz 1\n"
"jfJ ij 1\n"
"qTc ch 1\n"
"kbX ka 1\n"
"Hlz le 1\n"
"puQ qu 1\n"
"hKb th 1\n"
"rBb er 1\n"
"vpW va 1\n"
"Yjk ij 1\n"
"Wnm an 1\n"
"pZr er 1\n"
"ldZ le 1\n"
"gMm ng 1\n"
"pZf pi 1\n"
"eYp er 1\n"
"vTp va 1\n"
"Gkc ch 1\n"
"Cgy ng 1\n"
"qDw qu 1\n"
"gxW ng 1\n"
"Cwz sz 1\n"
"jhY th 1\n"
"Fvk ka 1\n"
"nfH an 1\n"
"zcW ch 1\n"
"zgC ng 1\n"
"Dfk ka 1\n"
"vpJ va 1\n"
"Wpj ij 1\n"
"sCb st 1\n"
"fgF ng 1\n"
"tPx th 1\n"
"oCp on 1\n"
"Nrx er 1\n"
"Hwm me 1\n"
"fRp pr 1\n"
"aeX an 1\n"
"jdI de 1\n"
"sBv st 1\n"
"vOv va 1\n"
"gQt th 1\n"
"Wmk ka 1\n"
"Pqj qu 1\n"
"khV th 1\n"
"Hkj ij 1\n"
"hbB th 1\n"
"vzF sz 1\n"
"Ybz sz 1\n"
"sXb st 1\n"
"yQr er 1\n"
"hhV th 1\n"
"tgW th 1\n"
"bXo on 1\n"
"Nxp pr 1\n"
"aOx an 1\n"
"zfb sz 1\n"
"Qxp pr 1\n"
"qwQ qu 1\n"
"fjV ij 1\n"
"hjY ij 1\n"
"wtX th 1\n"
"jgU ng 1\n"
"nMq an 1\n"
"Nwx wa 1\n"
"vPg ng 1\n"
"Xfh th 1\n"
"yFf ny 1\n"
"fHz sz 1\n"
"nZf an 1\n"
"jPt th 1\n"
"Jgb ng 1\n"
"xBb bi 1\n"
"sjO st 1\n"
"wDx wa 1\n"
"njN an 1\n"
"ohF th 1\n"
"pqR qu 1\n"
"Fzw sz 1\n"
"qrU qu 1\n"
"cjG ch 1\n"
"kFv ka 1\n"
"zQd sz 1\n"
"vbE vi 1\n"
"Ujt th 1\n"
"qIb qu 1\n"
"cFt th 1\n"
"bvY va 1\n"
"Szq qu 1\n"
"wlH le 1\n"
"qcY ch 1\n"
"gEw ng 1\n"
"xhL th 1\n"
"kVg ng 1\n"
"bfH be 1\n"
"Nrz er 1\n"
"sJn an 1\n"
"bWn an 1\n"
"nvK an 1\n"
"qiH qu 1\n"
"qbS qu 1\n"
"vxB va 1\n"
"tvT th 1\n"
"Nrh th 1\n"
"lYx le 1\n"
"tkX th 1\n"
"Gzx sz 1\n"
"vCx vi 1\n"
"Zbj ij 1\n"
"mWp me 1\n"
"Dqx qu 1\n"
"pfE pr 1\n"
"hvW th 1\n"
"Eox on 1\n"
"dbZ de 1\n"
"lNb le 1\n"
"rTd er 1\n"
"ljQ le 1\n"
"Vvp va 1\n"
"gJw ng 1\n"
"uqW qu 1\n"
"Gjf ij 1\n"
"pDd de 1\n"
"sgQ ng 1\n"
"hkQ th 1\n"
"fJc ch 1\n"
"mdI de 1\n"
"Gcp ch 1\n"
"pXa an 1\n"
"pQj ij 1\n"
"bgE ng 1\n"
"Kzv sz 1\n"
"cPb ch 1\n"
"Hcz ch 1\n"
"djQ de 1\n"
"pGd de 1\n"
"fyE ny 1\n"
"dBb de 1\n"
"ePj er 1\n"
"fgO ng 1\n"
"xRq qu 1\n"
"xqK qu 1\n"
"pKp pr 1\n"
"xmY me 1\n"
"hgO th 1\n"
"wdG de 1\n"
"hvZ th 1\n"
"srF er 1\n"
"Bvf vi 1\n"
"yvD va 1\n"
"xVg ng 1\n"
"fYg ng 1\n"
"bqd qu 1\n"
"eFq qu 1\n"
"cwZ ch 1\n"
"cqG ch 1\n"
"sKp st 1\n"
"hJq th 1\n"
"vLd de 1\n"
"hdK th 1\n"
"pcN ch 1\n"
"tNf th 1\n"
"xlK le 1\n"
"rJx er 1\n"
"qaN an 1\n"
"zKf sz 1\n"
"sNf st 1\n"
"qPz qu 1\n"
"bzL sz 1\n"
"Jdw de 1\n"
"nRb an 1\n"
"jNs st 1\n"
"tnV th 1\n"
"ynI an 1\n"
"tZp th 1\n"
"fZp pr 1\n"
"wMq qu 1\n"
"Onq an 1\n"
"zIh th 1\n"
"bvH va 1\n"
"Uvc ch 1\n"
"zxJ sz 1\n"
"Vmq qu 1\n"
"uPm qu 1\n"
"mwD me 1\n"
"jQc ch 1\n"
"gPk ng 1\n"
"vfV va 1\n"
"Tql qu 1\n"
"bJl le 1\n"
"lwO le 1\n"
"wbG wa 1\n"
"fTd de 1\n"
"Xtq th 1\n"
"hzX th 1\n"
"Pzv sz 1\n"
"Pmx me 1\n"
"xZm me 1\n"
"jCp ij 1\n"
"bKm me 1\n"
"Tmq qu 1\n"
"Hnf an 1\n"
"kjX ij 1\n"
"vgH ng 1\n"
"fSm me 1\n"
"ylN le 1\n"
"gvq qu 1\n"
"jTz sz 1\n"
"tWw th 1\n"
"ywB wa 1\n"
"bCq qu 1\n"
"dNk de 1\n"
"yCq qu 1\n"
"Rxj ij 1\n"
"nTq an 1\n"
"gFs ng 1\n"
"Xwq qu 1\n"
"gJl ng 1\n"
"vcR ch 1\n"
"fbT be 1\n"
"Fcd ch 1\n"
"Wxm me 1\n"
"qwv qu 1\n"
"Sfh th 1\n"
"lcK ch 1\n"
"sbV st 1\n"
"fSf fo 1\n"
"lbB le 1\n"
"Ocw ch 1\n"
"jgM ng 1\n"
"nbI an 1\n"
"qsK qu 1\n"
"Xyf ny 1\n"
"pxv va 1\n"
"mRc ch 1\n"
"Ogq qu 1\n"
"zuY qu 1\n"
"fXu qu 1\n"
"Wbj ij 1\n"
"Tbw wa 1\n"
"zrR er 1\n"
"gmP ng 1\n"
"cCm ch 1\n"
"gtQ th 1\n"
"phG th 1\n"
"qjV qu 1\n"
"ygG ng 1\n"
"wFb wa 1\n"
"rqL qu 1\n"
"qSx qu 1\n"
"ybK be 1\n"
"mqJ qu 1\n"
"Qrq qu 1\n"
"qdI qu 1\n"
"bcG ch 1\n"
"iFb in 1\n"
"mcZ ch 1\n"
"vCz sz 1\n"
"xHz tz 1\n"
"hjM th 1\n"
"qtL th 1\n"
"tmH th 1\n"
"slD le 1\n"
"vRz sz 1\n"
"gCd ng 1\n"
"Xxc ch 1\n"
"qKc ch 1\n"
"sIw st 1\n"
"fsY st 1\n"
"xrJ er 1\n"
"tNs th 1\n"
"gbD ng 1\n"
"wLl le 1\n"
"hFf th 1\n"
"Nxi in 1\n"
"fRb be 1\n"
"Jrb er 1\n"
"jEq qu 1\n"
"hwM th 1\n"
"uVw qu 1\n"
"fgN ng 1\n"
"mAo on 1\n"
"Pjb ij 1\n"
"npP in 1\n"
"Jcy ch 1\n"
"yJb bi 1\n"
"jxI ij 1\n"
"Kkc ch 1\n"
"kwV ka 1\n"
"gRf ng 1\n"
"Wfm me 1\n"
"Tdp po 1\n"
"wEz sz 1\n"
"Lvk ka 1\n"
"Dqn an 1\n"
"tqL th 1\n"
"jJq qu 1\n"
"vdC de 1\n"
"hxU th 1\n"
"xUe er 1\n"
"tQc th 1\n"
"Lzk sz 1\n"
"dTj de 1\n"
"Tlz le 1\n"
"xQw wa 1\n"
"Fcq ch 1\n"
"wgE ng 1\n"
"Ckd de 1\n"
"yKs st 1\n"
"xwS wa 1\n"
"wRt th 1\n"
"gkK ng 1\n"
"hQv th 1\n"
"sLp st 1\n"
"jAi in 1\n"
"dmG de 1\n"
"jKn an 1\n"
"qUb qu 1\n"
"wXy wa 1\n"
"bzJ sz 1\n"
"gzJ ng 1\n"
"hNz th 1\n"
"ygY ng 1\n"
"qhU th 1\n"
"afX an 1\n"
"jZw ij 1\n"
"Xdx de 1\n"
"Tdx de 1\n"
"jNn an 1\n"
"vXf va 1\n"
"qcE ch 1\n"
"Mnw an 1\n"
"qDh th 1\n"
"Tdj de 1\n"
"dgJ ng 1\n"
"sdR st 1\n"
"qGn an 1\n"
"Mjj ij 1\n"
"sxH st 1\n"
"Ppz sz 1\n"
"gfV ng 1\n"
"fOy ny 1\n"
"Nvx vi 1\n"
"qaV an 1\n"
"xjl le 1\n"
"xgZ ng 1\n"
"cGv ch 1\n"
"Zxu qu 1\n"
"Mfp pr 1\n"
"zFp sz 1\n"
"jgJ ng 1\n"
"bpG pr 1\n"
"vKz sz 1\n"
"hqI th 1\n"
"Qgw ng 1\n"
"Qyy ny 1\n"
"jmI ij 1\n"
"Vgd ng 1\n"
"xCt th 1\n"
"yVs st 1\n"
"uEq qu 1\n"
"dcN ch 1\n"
"Bzb sz 1\n"
"gVl ng 1\n"
"sXg ng 1\n"
"kQf ka 1\n"
"lrY er 1\n"
"Vtd th 1\n"
"nHs an 1\n"
"wjN ij 1\n"
"rzJ er 1\n"
"sYy st 1\n"
"wxQ wa 1\n"
"Ztb th 1\n"
"tWf th 1\n"
"tCx th 1\n"
"aFb an 1\n"
"lqf qu 1\n"
"feZ er 1\n"
"fPz sz 1\n"
"cjY ch 1\n"
"wKh th 1\n"
"Qhy th 1\n"
"dCj de 1\n"
"bkH ka 1\n"
"yjD ij 1\n"
"jTs st 1\n"
"hxI th 1\n"
"lvK vi 1\n"
"Lwz sz 1\n"
"swQ st 1\n"
"dTk di 1\n"
"fsO st 1\n"
"ljE le 1\n"
"wjM ij 1\n"
"uQk qu 1\n"
"xPg ng 1\n"
"vmC va 1\n"
"qsD qu 1\n"
"gDw ng 1\n"
"wJk ka 1\n"
"Zpq qu 1\n"
"Yhg th 1\n"
"kNc ch 1\n"
"bWl le 1\n"
"Fwh th 1\n"
"fHx fo 1\n"
"Fnv an 1\n"
"fdL de 1\n"
"oqD qu 1\n"
"aYx an 1\n"
"Vqx qu 1\n"
"vKf va 1\n"
"Cbw wa 1\n"
"vyq qu 1\n"
"cqZ ch 1\n"
"Rfh th 1\n"
"Swc ch 1\n"
"qNi qu 1\n"
"qoW qu 1\n"
"jhD th 1\n"
"kJq qu 1\n"
"gdF ng 1\n"
"pvF va 1\n"
"cpV ch 1\n"
"qtC th 1\n"
"gWm ng 1\n"
"gPc ch 1\n"
"jBs st 1\n"
"rlV er 1\n"
"gZc ch 1\n"
"kTk ka 1\n"
"hfJ th 1\n"
"Svv va 1\n"
"kmG ka 1\n"
"sDq qu 1\n"
"hGb th 1\n"
"Blq qu 1\n"
"Qry er 1\n"
"hHz th 1\n"
"yLx ny 1\n"
"lqF qu 1\n"
"wbB bi 1\n"
"iYr in 1\n"
"wDz tz 1\n"
"xsJ st 1\n"
"bzY sz 1\n"
"pMw pr 1\n"
"Uuj qu 1\n"
"hxK th 1\n"
"Xvf va 1\n"
"krZ er 1\n"
"fwV wa 1\n"
"gPw ng 1\n"
"qVn an 1\n"
"Qnq an 1\n"
"gDb ng 1\n"
"hVr th 1\n"
"zKh th 1\n"
"Fxy ny 1\n"
"oZj on 1\n"
"zAy sz 1\n"
"jMm ij 1\n"
"mvI va 1\n"
"Fwm me 1\n"
"zql qu 1\n"
"eVv er 1\n"
"yWq qu 1\n"
"Lwk ka 1\n"
"Lmw me 1\n"
"vXb va 1\n"
"Xhs th 1\n"
"hlR th 1\n"
"Qqw qu 1\n"
"zbK sz 1\n"
"Pxl le 1\n"
"nPm an 1\n"
"wQo on 1\n"
"Dcb ch 1\n"
"hjT th 1\n"
"rjJ er 1\n"
"bMc ch 1\n"
"iYb in 1\n"
"Fqj qu 1\n"
"Uoq qu 1\n"
"Xvp va 1\n"
"Lwb wa 1\n"
"Jpd de 1\n"
"qUg qu 1\n"
"lJx le 1\n"
"Xwd de 1\n"
"xKf fo 1\n"
"Znq an 1\n"
"qCb qu 1\n"
"Zbz sz 1\n"
"Qux qu 1\n"
"qNq qu 1\n"
"fvV va 1\n"
"Qqz qu 1\n"
"Hdf de 1\n"
"ySx ny 1\n"
"qSm qu 1\n"
"Lhb th 1\n"
"Mvf va 1\n"
"cDp ch 1\n"
"bHq qu 1\n"
"Wmg ng 1\n"
"ytG th 1\n"
"dbJ de 1\n"
"Ffg ng 1\n"
"hvM th 1\n"
"Wqy qu 1\n"
"gXd ng 1\n"
"uFg qu 1\n"
"jpR ij 1\n"
"Xcc ch 1\n"
"Tbp pr 1\n"
"Qwq qu 1\n"
"tPp th 1\n"
"fMh th 1\n"
"qiV qu 1\n"
"dcB ch 1\n"
"dFx de 1\n"
"Ymj ij 1\n"
"Ldq qu 1\n"
"lxV le 1\n"
"cCk ch 1\n"
"hVx th 1\n"
"dlT le 1\n"
"khP th 1\n"
"qVg qu 1\n"
"Ljj ij 1\n"
"zCv sz 1\n"
"ywV wa 1\n"
"ybZ be 1\n"
"vGh th 1\n"
"Bvj ij 1\n"
"Zqq qu 1\n"
"Gwk ka 1\n"
"qLq qu 1\n"
"fkX ka 1\n"
"Nbz sz 1\n"
"bXm me 1\n"
"dQh th 1\n"
"uYd qu 1\n"
"xYs st 1\n"
"zSs st 1\n"
"ycZ ch 1\n"
"lnU an 1\n"
"tCj th 1\n"
"xnY an 1\n"
"ptQ th 1\n"
"swO st 1\n"
"hXu th 1\n"
"mBw mb 1\n"
"wmF me 1\n"
"xJx xe 1\n"
"dXj de 1\n"
"eqg qu 1\n"
"nBf an 1\n"
"Xbd de 1\n"
"fcQ ch 1\n"
"xkS ka 1\n"
"tOq th 1\n"
"uQb qu 1\n"
"cvV ch 1\n"
"sBh th 1\n"
"dCk de 1\n"
"cKv ch 1\n"
"cVf ch 1\n"
"wZx wa 1\n"
"Bvm va 1\n"
"lqJ qu 1\n"
"fxR fo 1\n"
"vmF va 1\n"
"xnq an 1\n"
"bBg ng 1\n"
"tPd th 1\n"
"fNs st 1\n"
"Fkp ka 1\n"
"Yye er 1\n"
"Ubq qu 1\n"
"xzP sz 1\n"
"fmQ me 1\n"
"qcA ch 1\n"
"yKc ch 1\n"
"xvZ va 1\n"
"cbN ch 1\n"
"yYl le 1\n"
"Pmw me 1\n"
"wFx wa 1\n"
"hRh th 1\n"
"qpS qu 1\n"
"Vqf qu 1\n"
"Ghg th 1\n"
"Wvq qu 1\n"
"xkC ka 1\n"
"ytM th 1\n"
"Lnh th 1\n"
"dxD de 1\n"
"bMw wa 1\n"
"xvU va 1\n"
"Qzx sz 1\n"
"srM er 1\n"
"vLg ng 1\n"
"cGq ch 1\n"
"Vmy me 1\n"
"hcL th 1\n"
"pKx pr 1\n"
"Jxs st 1\n"
"blW le 1\n"
"pQo on 1\n"
"bEq qu 1\n"
"fWt th 1\n"
"sYm st 1\n"
"nKw an 1\n"
"dtF th 1\n"
"kTz sz 1\n"
"epX er 1\n"
"fCp pr 1\n"
"bFk ka 1\n"
"Rzb sz 1\n"
"vqI qu 1\n"
"Zhc th 1\n"
"Hvv va 1\n"
"mVt th 1\n"
"Iwx wa 1\n"
"phR th 1\n"
"wNb wa 1\n"
"fRc ch 1\n"
"ljq qu 1\n"
"lvY le 1\n"
"jcA ch 1\n"
"dGw de 1\n"
"Cqn an 1\n"
"mBx me 1\n"
"Mmx me 1\n"
"Vxa an 1\n"
"Xhw th 1\n"
"eqK qu 1\n"
"tCw th 1\n"
"zvU sz 1\n"
"lxQ le 1\n"
"vMv va 1\n"
"gqA qu 1\n"
"Jbn an 1\n"
"gCj ng 1\n"
"oTf on 1\n"
"kbW ka 1\n"
"qjY qu 1\n"
"Rqf qu 1\n"
"hYh th 1\n"
"yhE th 1\n"
"gYj ng 1\n"
"jcI ch 1\n"
"qvJ qu 1\n"
"qoC qu 1\n"
"qFc ch 1\n"
"qqH qu 1\n"
"Nxq qu 1\n"
"wVo on 1\n"
"zHv sz 1\n"
"ybS be 1\n"
"Hwc ch 1\n"
"Mxa an 1\n"
"xkL ka 1\n"
"qmO qu 1\n"
"qbR qu 1\n"
"Zfy ny 1\n"
"Rkf ka 1\n"
"vgV ng 1\n"
"hBw th 1\n"
"pXx pr 1\n"
"brQ er 1\n"
"fvO va 1\n"
"hDc th 1\n"
"xQa an 1\n"
"wfF wa 1\n"
"hZx th 1\n"
"Jgz ng 1\n"
"qnY an 1\n"
"qXl le 1\n"
"eNb er 1\n"
"fxS fo 1\n"
"sNk st 1\n"
"mFc ch 1\n"
"Uux qu 1\n"
"Ydg ng 1\n"
"ozW on 1\n"
"Xzd de 1\n"
"Jfe er 1\n"
"Ftx th 1\n"
"vzR sz 1\n"
"wZk ka 1\n"
"oHz on 1\n"
"qvT qu 1\n"
"qoA qu 1\n"
"Sdq qu 1\n"
"txW th 1\n"
"Egf ng 1\n"
"dMf de 1\n"
"Rhh th 1\n"
"vRn an 1\n"
"ujX qu 1\n"
"fRj ij 1\n"
"gjA ng 1\n"
"gDg ng 1\n"
"smZ st 1\n"
"jId de 1\n"
"qkM qu 1\n"
"bKz sz 1\n"
"sCg ng 1\n"
"uTp qu 1\n"
"lVs le 1\n"
"uQo qu 1\n"
"Jfs st 1\n"
"vKm va 1\n"
"jQh th 1\n"
"fUf fo 1\n"
"uTf qu 1\n"
"Bnv an 1\n"
"tdU th 1\n"
"dxY de 1\n"
"hgV th 1\n"
"Zdf de 1\n"
"hqS th 1\n"
"eJg ng 1\n"
"qGu un 1\n"
"vmE va 1\n"
"gKz ng 1\n"
"mUg ng 1\n"
"Vjy ij 1\n"
"uvJ qu 1\n"
"mHr er 1\n"
"Mhv th 1\n"
"zsZ st 1\n"
"Vzy sz 1\n"
"jKb ij 1\n"
"zPp sz 1\n"
"qgD qu 1\n"
"Xhf th 1\n"
"Ogp ng 1\n"
"jwX ij 1\n"
"lYy le 1\n"
"qzD qu 1\n"
"wXj jo 1\n"
"Kpx pr 1\n"
"ydY de 1\n"
"vBq qu 1\n"
"Zpp pr 1\n"
"bDd de 1\n"
"Fjk ij 1\n"
"kdA de 1\n"
"zWt th 1\n"
"wSd de 1\n"
"kFd de 1\n"
"Sxl le 1\n"
"Fvh th 1\n"
"pbR pr 1\n"
"qrD qu 1\n"
"vZs st 1\n"
"vUm va 1\n"
"wEy wa 1\n"
"jjH jo 1\n"
"sDg ng 1\n"
"Ujc ch 1\n"
"knI an 1\n"
"fOa an 1\n"
"Cjg ng 1\n"
"tbV th 1\n"
"gqd qu 1\n"
"ePx er 1\n"
"wRm me 1\n"
"pvG va 1\n"
"Qyl le 1\n"
"cwG ch 1\n"
"Dtq th 1\n"
"Pbz sz 1\n"
"Rgq qu 1\n"
"fjU ij 1\n"
"jJf ij 1\n"
"Rxq qu 1\n"
"Jtx th 1\n"
"qvZ qu 1\n"
"kKm ka 1\n"
"hFm th 1\n"
"kcX ch 1\n"
"fNm me 1\n"
"bpB pr 1\n"
"xqY qu 1\n"
"hYy th 1\n"
"gGp ng 1\n"
"Vfs st 1\n"
"wDt th 1\n"
"bTs st 1\n"
"hfV th 1\n"
"qzp qu 1\n"
"yUv va 1\n"
"qGc ch 1\n"
"Vdl le 1\n"
"Xjt th 1\n"
"kMj ij 1\n"
"hTg th 1\n"
"Hlc ch 1\n"
"tKz th 1\n"
"Wvt th 1\n"
"lMz le 1\n"
"Mwx wa 1\n"
"Wlv le 1\n"
"xzG sz 1\n"
"gmD ng 1\n"
"zOi in 1\n"
"bbI be 1\n"
"bpI pr 1\n"
"fQg ng 1\n"
"pQv va 1\n"
"vEb va 1\n"
"jFz sz 1\n"
"Whf th 1\n"
"jvQ ij 1\n"
"qYx qu 1\n"
"rxM er 1\n"
"vPp va 1\n"
"fjD ij 1\n"
"Vwy wa 1\n"
"Yqc ch 1\n"
"tcW th 1\n"
"jYg ng 1\n"
"gJb ng 1\n"
"Tkc ch 1\n"
"qhj th 1\n"
"jxF ij 1\n"
"Fpz sz 1\n"
"kXh th 1\n"
"lgZ ng 1\n"
"znI an 1\n"
"qyN qu 1\n"
"vBj ij 1\n"
"jSx ij 1\n"
"cqI ch 1\n"
"qYv qu 1\n"
"Zrr er 1\n"
"sHr er 1\n"
"vrK er 1\n"
"pbH pr 1\n"
"zVh th 1\n"
"dQb de 1\n"
"lxF le 1\n"
"sgW ng 1\n"
"Ghf th 1\n"
"xpq qu 1\n"
"qhN th 1\n"
"Fsf st 1\n"
"Qga an 1\n"
"Rdp de 1\n"
"fvK va 1\n"
"Ydz de 1\n"
"wvW va 1\n"
"cPm ch 1\n"
"cQy ch 1\n"
"ywF wa 1\n"
"Ypq qu 1\n"
"Rsj st 1\n"
"Ygw ng 1\n"
"xVp pr 1\n"
"yxL ny 1\n"
"Ywl le 1\n"
"jMc ch 1\n"
"zTl le 1\n"
"aIq an 1\n"
"qQi qu 1\n"
"tqI th 1\n"
"Hvp va 1\n"
"wQd de 1\n"
"hfG th 1\n"
"cTd ch 1\n"
"bfQ be 1\n"
"Kfd de 1\n"
"cXs ch 1\n"
"vYx va 1\n"
"Qoc ro 1\n"
"vrL er 1\n"
"pZk ka 1\n"
"cdX ch 1\n"
"Ygn an 1\n"
"lnO an 1\n"
"mfY me 1\n"
"fnV an 1\n"
"mbZ me 1\n"
"gbE ng 1\n"
"xjZ ij 1\n"
"Fpy pr 1\n"
"npE an 1\n"
"Rxy ny 1\n"
"oWp on 1\n"
"hVh th 1\n"
"yJf ny 1\n"
"sQd st 1\n"
"Zvg ng 1\n"
"bDm me 1\n"
"pLv va 1\n"
"wwF wa 1\n"
"xBh th 1\n"
"qKm qu 1\n"
"wXx wa 1\n"
"Iux qu 1\n"
"dgB ng 1\n"
"gJp ng 1\n"
"qgx qu 1\n"
"fNh ho 1\n"
"cvE ch 1\n"
"cgH ch 1\n"
"lNs le 1\n"
"vDj ij 1\n"
"zcG ch 1\n"
"fZn on 1\n"
"uUx qu 1\n"
"clQ le 1\n"
"fdH de 1\n"
"eZj er 1\n"
"Vqc ch 1\n"
"Rcx ch 1\n"
"jGh th 1\n"
"qzM sz 1\n"
"Qpw pr 1\n"
"Spx pr 1\n"
"cGx ch 1\n"
"cqA ch 1\n"
"vbK va 1\n"
"xeW er 1\n"
"vkC ka 1\n"
"xzB sz 1\n"
"xuR qu 1\n"
"Oyq qu 1\n"
"Mqx qu 1\n"
"qqj qu 1\n"
"yqY qu 1\n"
"cwL ch 1\n"
"pPt th 1\n"
"dSx de 1\n"
"dPk de 1\n"
"uzH qu 1\n"
"fvH va 1\n"
"pcH ch 1\n"
"hlY le 1\n"
"qtX th 1\n"
"Nvs st 1\n"
"hvL th 1\n"
"zRk sz 1\n"
"tNj th 1\n"
"Dbv va 1\n"
"jKc ch 1\n"
"dKy de 1\n"
"yVz sz 1\n"
"iqJ qu 1\n"
"zgJ ng 1\n"
"eJs er 1\n"
"wOx wa 1\n"
"rXh th 1\n"
"Hqp qu 1\n"
"vWx va 1\n"
"bTt th 1\n"
"fCy ny 1\n"
"aOq an 1\n"
"oCg ng 1\n"
"pnE an 1\n"
"Fwc ch 1\n"
"zrT er 1\n"
"xHs st 1\n"
"ydX de 1\n"
"dkV de 1\n"
"Rqy qu 1\n"
"Zyq qu 1\n"
"kXl le 1\n"
"oJt th 1\n"
"sxI st 1\n"
"qZw qu 1\n"
"zqx qu 1\n"
"clZ ch 1\n"
"swX sz 1\n"
"aHw an 1\n"
"rWc ch 1\n"
"cQp ch 1\n"
"Jwj ij 1\n"
"qeV qu 1\n"
"sQj st 1\n"
"Rpb pr 1\n"
"mZq qu 1\n"
"rBx er 1\n"
"mxV me 1\n"
"Mvy ny 1\n"
"cRl ch 1\n"
"Fzv sz 1\n"
"pBs sz 1\n"
"jWs st 1\n"
"vqK qu 1\n"
"Ixl le 1\n"
"yhw th 1\n"
"wyQ wa 1\n"
"uCb qu 1\n"
"zrF sz 1\n"
"iyQ in 1\n"
"qsP qu 1\n"
"hLr er 1\n"
"cvX ch 1\n"
"Scq ch 1\n"
"zrL er 1\n"
"ecU ch 1\n"
"Vxz sz 1\n"
"fCq qu 1\n"
"ovX on 1\n"
"Uqn an 1\n"
"sVw st 1\n"
"spX st 1\n"
"Qkv ka 1\n"
"fyW ny 1\n"
"rBc ch 1\n"
"mdC de 1\n"
"Wjk ij 1\n"
"jYh th 1\n"
"hXq th 1\n"
"xkm ka 1\n"
"hhU th 1\n"
"Dvz sz 1\n"
"tcq th 1\n"
"wZy wa 1\n"
"jtC th 1\n"
"qnD an 1\n"
"vmB va 1\n"
"kjB ij 1\n"
"cdG ch 1\n"
"Vkt th 1\n"
"hNq th 1\n"
"Jft th 1\n"
"iWv in 1\n"
"Wtn th 1\n"
"lfE le 1\n"
"dZb de 1\n"
"eqQ qu 1\n"
"gUq qu 1\n"
"qwL qu 1\n"
"hUq th 1\n"
"hGc th 1\n"
"nwX an 1\n"
"Nbt th 1\n"
"jjP ij 1\n"
"sqJ qu 1\n"
"lQf le 1\n"
"jZz sz 1\n"
"wWn an 1\n"
"Mxu qu 1\n"
"qFi qu 1\n"
"mjX ij 1\n"
"vDx va 1\n"
"vDn an 1\n"
"wUc ch 1\n"
"zhU th 1\n"
"zHw sz 1\n"
"Tjl le 1\n"
"xuX qu 1\n"
"jZp ij 1\n"
"wVc ch 1\n"
"gFp ng 1\n"
"Gyq qu 1\n"
"Jlh th 1\n"
"Bkf ka 1\n"
"hhJ th 1\n"
"tvW th 1\n"
"bIy ny 1\n"
"Llg ng 1\n"
"zJz sz 1\n"
"qeQ qu 1\n"
"nlX an 1\n"
"tcQ th 1\n"
"qtU th 1\n"
"fkW ka 1\n"
"gJk ng 1\n"
"gQy ng 1\n"
"sPz st 1\n"
"bmO me 1\n"
"Ytx th 1\n"
"yqF qu 1\n"
"iBk in 1\n"
"uzV qu 1\n"
"xNp pr 1\n"
"zRz sz 1\n"
"qHq qu 1\n"
"yuY qu 1\n"
"jqh th 1\n"
"xBd de 1\n"
"vvA va 1\n"
"eVj er 1\n"
"zGp sz 1\n"
"vcB ch 1\n"
"kpH ka 1\n"
"mDw me 1\n"
"vuG qu 1\n"
"vVy ny 1\n"
"mzS sz 1\n"
"jvM ij 1\n"
"sfV st 1\n"
"hQq th 1\n"
"wTm me 1\n"
"Plq qu 1\n"
"fxJ fo 1\n"
"qQq qu 1\n"
"Fnw an 1\n"
"qJo qu 1\n"
"Nsg ng 1\n"
"Ljx ij 1\n"
"sRb st 1\n"
"pcY ch 1\n"
"vVm va 1\n"
"sQg ng 1\n"
"Ywz sz 1\n"
"hqJ th 1\n"
"sjK st 1\n"
"Zks st 1\n"
"Mjt th 1\n"
"Dwh th 1\n"
"wbN wa 1\n"
"mvK va 1\n"
"rLp er 1\n"
"Lbm me 1\n"
"wjO ij 1\n"
"lQz le 1\n"
"Kwf wa 1\n"
"qmB qu 1\n"
"Xbv va 1\n"
"cKq ch 1\n"
"hqR th 1\n"
"yVb be 1\n"
"xcF ch 1\n"
"Ewv va 1\n"
"Gpq qu 1\n"
"Gbh th 1\n"
"yHj ij 1\n"
"gXk ng 1\n"
"qOx qu 1\n"
"Kbw wa 1\n"
"qHx qu 1\n"
"wjP ij 1\n"
"jQl le 1\n"
"Ffq qu 1\n"
"oYb on 1\n"
"Fqo qu 1\n"
"wXz sz 1\n"
"fIp pr 1\n"
"pMf pr 1\n"
"nqP an 1\n"
"bbZ be 1\n"
"hsX th 1\n"
"Wjr er 1\n"
"Zqn an 1\n"
"Pxb be 1\n"
"Bzs st 1\n"
"pbI pr 1\n"
"Yvp va 1\n"
"jxM ij 1\n"
"jyZ ij 1\n"
"mzJ sz 1\n"
"vYg ng 1\n"
"qMm qu 1\n"
"fhL th 1\n"
"qOg qu 1\n"
"Mnp an 1\n"
"Ifv va 1\n"
"qYm qu 1\n"
"gxv ng 1\n"
"zfG sz 1\n"
"fqG qu 1\n"
"lLq qu 1\n"
"hkK th 1\n"
"oYk on 1\n"
"lRg le 1\n"
"lOx le 1\n"
"Vxv va 1\n"
"qAs qu 1\n"
"tKk th 1\n"
"lhF th 1\n"
"dCv de 1\n"
"wvY va 1\n"
"wiV in 1\n"
"crF ch 1\n"
"fEp pr 1\n"
"Rrl er 1\n"
"Zjy ij 1\n"
"qbY qu 1\n"
"kMw ka 1\n"
"vZi in 1\n"
"Fxi in 1\n"
"zkS sz 1\n"
"vKb va 1\n"
"zbI sz 1\n"
"uHg qu 1\n"
"qzG qu 1\n"
"jMk ij 1\n"
"Fkc ch 1\n"
"dKm de 1\n"
"nHh th 1\n"
"xGc ch 1\n"
"qpU qu 1\n"
"rcU ch 1\n"
"aWx an 1\n"
"xdS de 1\n"
"qhV th 1\n"
"aHc ch 1\n"
"vmI va 1\n"
"Wcc ch 1\n"
"zBn an 1\n"
"kQe er 1\n"
"awJ an 1\n"
"xdD de 1\n"
"yZx ny 1\n"
"Kkd de 1\n"
"wBz sz 1\n"
"lzA le 1\n"
"yyT ny 1\n"
"qeK qu 1\n"
"zpE sz 1\n"
"zFn an 1\n"
"yyG ny 1\n"
"lLw le 1\n"
"bvS va 1\n"
"mvX va 1\n"
"hlW th 1\n"
"pgX ng 1\n"
"lQt th 1\n"
"ymY me 1\n"
"mjJ ij 1\n"
"mVc ch 1\n"
"Xqs qu 1\n"
"bKr er 1\n"
"bHt th 1\n"
"jRv ij 1\n"
"Lpw pr 1\n"
"zPb sz 1\n"
"wkR ka 1\n"
"kxS ka 1\n"
"jWf ij 1\n"
"Nkx ka 1\n"
"Kcj ch 1\n"
"bJb be 1\n"
"xwZ wa 1\n"
"Rqc ch 1\n"
"Qzg ng 1\n"
"jwH ij 1\n"
"Dqd qu 1\n"
"vLf va 1\n"
"hXd th 1\n"
"cfD ch 1\n"
"sjX st 1\n"
"hzI th 1\n"
"qUd qu 1\n"
"tSx th 1\n"
"hxA th 1\n"
"gxK ng 1\n"
"hVm th 1\n"
"yzX sz 1\n"
"Ucs ch 1\n"
"qaH an 1\n"
"Yfy ny 1\n"
"sJg ng 1\n"
"iHp in 1\n"
"iyC in 1\n"
"Tjf ij 1\n"
"dJp de 1\n"
"Jgv ng 1\n"
"uJf qu 1\n"
"nNl an 1\n"
"zdA sz 1\n"
"xIq qu 1\n"
"qjK qu 1\n"
"vzY sz 1\n"
"wqv qu 1\n"
"Xvx va 1\n"
"fJr er 1\n"
"nqH an 1\n"
"qGd qu 1\n"
"vQg ng 1\n"
"iQz in 1\n"
"tLn th 1\n"
"lVj le 1\n"
"vqW qu 1\n"
"zrN er 1\n"
"xKz sz 1\n"
"waV an 1\n"
"Ydq qu 1\n"
"dkq qu 1\n"
"fCn an 1\n"
"Xcy ch 1\n"
"pIl le 1\n"
"hXl th 1\n"
"aFs an 1\n"
"iwM in 1\n"
"Gwx wa 1\n"
"Xlp le 1\n"
"Qfu qu 1\n"
"jqE qu 1\n"
"lqP qu 1\n"
"kVq qu 1\n"
"xqJ qu 1\n"
"Mzf sz 1\n"
"mNw me 1\n"
"Wsv st 1\n"
"fnM an 1\n"
"uSf qu 1\n"
"hCf th 1\n"
"zjH sz 1\n"
"mTs st 1\n"
"jWz sz 1\n"
"Dxk ka 1\n"
"Ztd th 1\n"
"Rvv va 1\n"
"gBx ng 1\n"
"Lzx sz 1\n"
"ezU er 1\n"
"jqH qu 1\n"
"Rjh th 1\n"
"Dcg ch 1\n"
"bBh th 1\n"
"fhO th 1\n"
"hpH th 1\n"
"Zqa an 1\n"
"kCx ka 1\n"
"rRv er 1\n"
"dkZ de 1\n"
"Ggx ng 1\n"
"pQh th 1\n"
"Gcv ch 1\n"
"Scg ch 1\n"
"vDb va 1\n"
"pbD pr 1\n"
"vEh th 1\n"
"vlE le 1\n"
"Rjl le 1\n"
"lFw le 1\n"
"zqN qu 1\n"
"aPq an 1\n"
"gjD ng 1\n"
"jcE ch 1\n"
"wSw wa 1\n"
"Dgj ng 1\n"
"huZ th 1\n"
"gPv ng 1\n"
"pJj ij 1\n"
"cQh th 1\n"
"mwq qu 1\n"
"vpA va 1\n"
"hGf th 1\n"
"cXz ch 1\n"
"Lcb ch 1\n"
"fJm me 1\n"
"Qzy sz 1\n"
"zQm sz 1\n"
"Hhn th 1\n"
"xdY de 1\n"
"uYl qu 1\n"
"Xkj ij 1\n"
"jvA ij 1\n"
"Jvp va 1\n"
"iwZ in 1\n"
"zkq qu 1\n"
"Nhb th 1\n"
"kmV ka 1\n"
"qKd qu 1\n"
"Bcq ch 1\n"
"pfY pr 1\n"
"qUj qu 1\n"
"gqR qu 1\n"
"gwO ng 1\n"
"gXm ng 1\n"
"jHh th 1\n"
"rBn an 1\n"
"uPw qu 1\n"
"pJk ka 1\n"
"Ipj ij 1\n"
"yqM qu 1\n"
"Yqn an 1\n"
"Kbz sz 1\n"
"vfL va 1\n"
"npZ an 1\n"
"oqY qu 1\n"
"Zqf qu 1\n"
"jzU sz 1\n"
"vNx va 1\n"
"hXf th 1\n"
"fCg ng 1\n"
"nzJ an 1\n"
"mKj ij 1\n"
"wmB me 1\n"
"Wjq qu 1\n"
"Dbq qu 1\n"
"zXy sz 1\n"
"xYw wa 1\n"
"fQf fo 1\n"
"dqP qu 1\n"
"Kxq qu 1\n"
"jdZ de 1\n"
"qrX qu 1\n"
"Lxb be 1\n"
"yfL ny 1\n"
"yYm me 1\n"
"sbH st 1\n"
"wlV le 1\n"
"uKp qu 1\n"
"hhN th 1\n"
"Xxq qu 1\n"
"jLg ng 1\n"
"nQh th 1\n"
"Wqp qu 1\n"
"Nqd qu 1\n"
"jfD ij 1\n"
"Jnq an 1\n"
"Bzn an 1\n"
"mJr er 1\n"
"qaX an 1\n"
"pJw pr 1\n"
"jHz sz 1\n"
"yaX an 1\n"
"Whs th 1\n"
"hYr th 1\n"
"tmS th 1\n"
"Fhy th 1\n"
"Ggd ng 1\n"
"Xmy me 1\n"
"Rqh th 1\n"
"Fsn an 1\n"
"qhA th 1\n"
"fhX th 1\n"
"Hqx qu 1\n"
"wIo on 1\n"
"Ibx be 1\n"
"cFx ch 1\n"
"dRg ng 1\n"
"snV an 1\n"
"kqz qu 1\n"
"eqO er 1\n"
"Gkz sz 1\n"
"Nnz an 1\n"
"yqE qu 1\n"
"cJh th 1\n"
"xvA va 1\n"
"qMx qu 1\n"
"dwS de 1\n"
"yAj ij 1\n"
"xCq qu 1\n"
"gmE ng 1\n"
"bhP th 1\n"
"rwE er 1\n"
"Xnz an 1\n"
"Uhw th 1\n"
"xnR an 1\n"
"nfZ an 1\n"
"Qpx pr 1\n"
"qxO qu 1\n"
"lGt th 1\n"
"qRc ch 1\n"
"Rwx wa 1\n"
"tcM th 1\n"
"fBd de 1\n"
"Rjc ch 1\n"
"dfY de 1\n"
"hhR th 1\n"
"bCj ij 1\n"
"fqL qu 1\n"
"lzS le 1\n"
"Lrm er 1\n"
"eqE qu 1\n"
"vgL ng 1\n"
"wQr er 1\n"
"bwB wa 1\n"
"lGf le 1\n"
"Nwq qu 1\n"
"sdU st 1\n"
"Zxv va 1\n"
"yDm me 1\n"
"Lsw st 1\n"
"cNq ch 1\n"
"Dqc ch 1\n"
"vLz sz 1\n"
"dWv de 1\n"
"fkQ ka 1\n"
"zjD sz 1\n"
"yYv va 1\n"
"qeT qu 1\n"
"cvL ch 1\n"
"wkA ka 1\n"
"Nvb va 1\n"
"djM de 1\n"
"hgK th 1\n"
"pXb pr 1\n"
"Tlw le 1\n"
"Rhz ha 1\n"
"wkP ka 1\n"
"wDk ka 1\n"
"eFc ch 1\n"
"ehU th 1\n"
"Xly le 1\n"
"wxK wa 1\n"
"dPw de 1\n"
"sFd st 1\n"
"vcI ch 1\n"
"Fxd de 1\n"
"fvR va 1\n"
"jqs qu 1\n"
"rMj er 1\n"
"qbW qu 1\n"
"kpP ka 1\n"
"Bvw va 1\n"
"Tmk ka 1\n"
"hbP th 1\n"
"hMx th 1\n"
"jgL ng 1\n"
"efU er 1\n"
"cQb ch 1\n"
"mcA ch 1\n"
"Ewq qu 1\n"
"xmV me 1\n"
"Qcq ch 1\n"
"mzG sz 1\n"
"pKm me 1\n"
"Fwq qu 1\n"
"lRn an 1\n"
"jPk ij 1\n"
"jMb ij 1\n"
"mzO sz 1\n"
"oFw on 1\n"
"hJb th 1\n"
"sVq qu 1\n"
"iVz in 1\n"
"oqU qu 1\n"
"bhW th 1\n"
"Oxq qu 1\n"
"mQk ka 1\n"
"Xfb be 1\n"
"cNw ch 1\n"
"fgZ ng 1\n"
"Tvf va 1\n"
"sIx st 1\n"
"uZs qu 1\n"
"xzX sz 1\n"
"Ylq qu 1\n"
"oHf on 1\n"
"csU ch 1\n"
"Qzs st 1\n"
"Bfq qu 1\n"
"yJn an 1\n"
"pgQ ng 1\n"
"wxk ka 1\n"
"Tnw an 1\n"
"bKx be 1\n"
"bqX qu 1\n"
"Qjs st 1\n"
"pFh th 1\n"
"Xvl le 1\n"
"kfB ka 1\n"
"mZl le 1\n"
"Csg ng 1\n"
"vrJ er 1\n"
"Gfy ny 1\n"
"jbP ij 1\n"
"Yvl le 1\n"
"Hxb be 1\n"
"lrD er 1\n"
"qTl qu 1\n"
"aBc ch 1\n"
"fGb be 1\n"
"mhS th 1\n"
"zTp sz 1\n"
"kRd de 1\n"
"Wph th 1\n"
"Npj ij 1\n"
"lwS le 1\n"
"mGm me 1\n"
"nqT an 1\n"
"Ujn an 1\n"
"xjO ij 1\n"
"dMz sz 1\n"
"wKj ij 1\n"
"yZr er 1\n"
"Njb ij 1\n"
"Ylr er 1\n"
"mVf me 1\n"
"gZg ng 1\n"
"Hcb ch 1\n"
"xcB ch 1\n"
"kMm ka 1\n"
"lwC le 1\n"
"Dnf an 1\n"
"hjW th 1\n"
"rTk er 1\n"
"Vzj sz 1\n"
"Vxy ny 1\n"
"wlQ le 1\n"
"Nrv er 1\n"
"pjP ij 1\n"
"fwZ wa 1\n"
"tnW th 1\n"
"oJw on 1\n"
"kJx ka 1\n"
"Vpj ij 1\n"
"qAw qu 1\n"
"Qht th 1\n"
"bCn an 1\n"
"vrU er 1\n"
"hRc th 1\n"
"clC ch 1\n"
"rFd er 1\n"
"twH th 1\n"
"kCw ka 1\n"
"mSd de 1\n"
"Xnw an 1\n"
"fXm me 1\n"
"Twf wa 1\n"
"Fwj ij 1\n"
"bjJ ij 1\n"
"lbQ le 1\n"
"kvS ka 1\n"
"Smz sz 1\n"
"fBp pr 1\n"
"Nzz sz 1\n"
"bQp pr 1\n"
"vLx va 1\n"
"hVf th 1\n"
"yUj ij 1\n"
"cZd ch 1\n"
"gIy eg 1\n"
"hVq th 1\n"
"aQx an 1\n"
"Qfv va 1\n"
"lKb le 1\n"
"zhN th 1\n"
"Zbm me 1\n"
"Gcq ch 1\n"
"gbT ng 1\n"
"pYk ka 1\n"
"Xvd de 1\n"
"xMl le 1\n"
"uHb qu 1\n"
"bXf be 1\n"
"sNc ch 1\n"
"qVy qu 1\n"
"cpO ch 1\n"
"Fgb ng 1\n"
"eWl er 1\n"
"kKd de 1\n"
"Cbj ij 1\n"
"mfH me 1\n"
"qIa an 1\n"
"sfX st 1\n"
"snH an 1\n"
"Hjg ng 1\n"
"Lmf me 1\n"
"xgf ng 1\n"
"Evw va 1\n"
"wOk ka 1\n"
"Hjf ij 1\n"
"zuJ qu 1\n"
"fZm me 1\n"
"lNq qu 1\n"
"xUg ng 1\n"
"nLs an 1\n"
"jkS ij 1\n"
"Gvp va 1\n"
"jPd de 1\n"
"ywQ wa 1\n"
"qrG qu 1\n"
"bbH be 1\n"
"ghJ th 1\n"
"mMh th 1\n"
"Yvt th 1\n"
"xLq qu 1\n"
"Bdq qu 1\n"
"zJd sz 1\n"
"xRs st 1\n"
"vgP ng 1\n"
"Hhb th 1\n"
"npL an 1\n"
"vFp va 1\n"
"hSj th 1\n"
"bdC de 1\n"
"kGg ng 1\n"
"kVf ka 1\n"
"qvP qu 1\n"
"kwO ka 1\n"
"Jqt th 1\n"
"zWx sz 1\n"
"sQk st 1\n"
"hnV th 1\n"
"rrD er 1\n"
"jVh th 1\n"
"vvY va 1\n"
"bfI be 1\n"
"fSz sz 1\n"
"Czf sz 1\n"
"kWl le 1\n"
"jJc ch 1\n"
"Gwj ij 1\n"
"lFh th 1\n"
"Vpf fo 1\n"
"fkV ka 1\n"
"cYj ch 1\n"
"mrW er 1\n"
"hBb th 1\n"
"hJx th 1\n"
"wIq qu 1\n"
"cdA ch 1\n"
"wQy wa 1\n"
"wCq qu 1\n"
"wqZ qu 1\n"
"jfX ij 1\n"
"jtG th 1\n"
"xkJ ka 1\n"
"Qzf sz 1\n"
"gKs ng 1\n"
"Qzo on 1\n"
"bwI wa 1\n"
"Tsb st 1\n"
"vvX va 1\n"
"jlR le 1\n"
"qlQ qu 1\n"
"dbX de 1\n"
"Hfc ch 1\n"
"Bsj st 1\n"
"Yqk qu 1\n"
"Xnc ch 1\n"
"bzZ sz 1\n"
"dGt th 1\n"
"Xgg ng 1\n"
"jwE wa 1\n"
"Oyc ch 1\n"
"pQd de 1\n"
"jRy ij 1\n"
"pmX me 1\n"
"lZx le 1\n"
"gFq qu 1\n"
"mJd de 1\n"
"sKq qu 1\n"
"Ikj ij 1\n"
"zkG sz 1\n"
"wGf wa 1\n"
"qRp qu 1\n"
"xDn an 1\n"
"gvL ng 1\n"
"mGx me 1\n"
"iIj in 1\n"
"Gzd sz 1\n"
"bLx be 1\n"
"jUl le 1\n"
"Qvc ch 1\n"
"mVh th 1\n"
"uhF th 1\n"
"fVk ka 1\n"
"cnM ch 1\n"
"uFh th 1\n"
"mXf me 1\n"
"rCb er 1\n"
"nLw an 1\n"
"vfH fo 1\n"
"iqV qu 1\n"
"qhD th 1\n"
"sHx st 1\n"
"Ywy wa 1\n"
"mDx me 1\n"
"cBt th 1\n"
"Bmq qu 1\n"
"xRc ch 1\n"
"bSz sz 1\n"
"vCj ij 1\n"
"Tcv ch 1\n"
"aZq an 1\n"
"Jcx ch 1\n"
"nbF an 1\n"
"Qzb sz 1\n"
"vkQ ka 1\n"
"hzD th 1\n"
"xHp pr 1\n"
"hqX th 1\n"
"fEv va 1\n"
"yjF ij 1\n"
"Pjk ij 1\n"
"sfU st 1\n"
"bGc ch 1\n"
"mcX ch 1\n"
"pXc ch 1\n"
"yvS va 1\n"
"pMl le 1\n"
"wJs st 1\n"
"Vwq qu 1\n"
"yCw wa 1\n"
"qds qu 1\n"
"rRj er 1\n"
"Qhv th 1\n"
"ucG ch 1\n"
"oEh th 1\n"
"wQi in 1\n"
"lSg ng 1\n"
"Lqt th 1\n"
"nlH an 1\n"
"uqG qu 1\n"
"Oao an 1\n"
"hlX th 1\n"
"fPw wa 1\n"
"tIb th 1\n"
"zIq qu 1\n"
"qmG qu 1\n"
"xJm me 1\n"
"Vgw ng 1\n"
"Ukx ka 1\n"
"ztH th 1\n"
"lhP th 1\n"
"Jtk th 1\n"
"Hzd sz 1\n"
"yxQ ny 1\n"
"nrP an 1\n"
"fHh th 1\n"
"Yct th 1\n"
"Gqa an 1\n"
"Fgy ng 1\n"
"oBn an 1\n"
"vuC qu 1\n"
"Bnz an 1\n"
"vPu qu 1\n"
"xFf fo 1\n"
"jdJ de 1\n"
"fGf fo 1\n"
"Yjq qu 1\n"
"Qjp ij 1\n"
"xTj ij 1\n"
"vOq qu 1\n"
"vLw va 1\n"
"sMf st 1\n"
"oVl on 1\n"
"cwN ch 1\n"
"sgR ng 1\n"
"jjQ ij 1\n"
"wzR sz 1\n"
"zhY th 1\n"
"vbR va 1\n"
"wgW ng 1\n"
"qwX qu 1\n"
"Nxw wa 1\n"
"eQo er 1\n"
"mQp me 1\n"
"Kqh th 1\n"
"tvA th 1\n"
"dlJ le 1\n"
"yVx ny 1\n"
"sPf st 1\n"
"dQz sz 1\n"
"sZb st 1\n"
"zhS th 1\n"
"kWb ka 1\n"
"mqP qu 1\n"
"Ffk ka 1\n"
"xql qu 1\n"
"gqH qu 1\n"
"Tly le 1\n"
"kpL ka 1\n"
"qEg qu 1\n"
"bMg ng 1\n"
"xRj ij 1\n"
"xsC st 1\n"
"jlS le 1\n"
"lzM le 1\n"
"Pfb be 1\n"
"uJv qu 1\n"
"yVf ny 1\n"
"Zgq qu 1\n"
"xbS be 1\n"
"oFh th 1\n"
"xvb va 1\n"
"hcU th 1\n"
"wwU wa 1\n"
"yCg ng 1\n"
"mPz sz 1\n"
"sJd st 1\n"
"bmN me 1\n"
"uVc ch 1\n"
"qdS qu 1\n"
"Vwp pr 1\n"
"Vml le 1\n"
"Hqy qu 1\n"
"Lfz sz 1\n"
"Ayj ij 1\n"
"yxK ny 1\n"
"Hwv va 1\n"
"gIp ng 1\n"
"Zgt th 1\n"
"Xtw th 1\n"
"hLf th 1\n"
"Nkd de 1\n"
"jMs st 1\n"
"xFt th 1\n"
"xBw wa 1\n"
"wHd de 1\n"
"Qzz sz 1\n"
"gYt th 1\n"
"Pvk ka 1\n"
"pvY va 1\n"
"Jxt th 1\n"
"ugQ qu 1\n"
"Lqq qu 1\n"
"xlL le 1\n"
"wMb wa 1\n"
"Sbz sz 1\n"
"vEv va 1\n"
"qfz qu 1\n"
"gcS ch 1\n"
"tCq th 1\n"
"yHp pr 1\n"
"zkF sz 1\n"
"xuK qu 1\n"
"Tbf be 1\n"
"Ipg ng 1\n"
"Yzk sz 1\n"
"Qwz sz 1\n"
"pFj ij 1\n"
"jPm ij 1\n"
"Dpq qu 1\n"
"pJz sz 1\n"
"wpN pr 1\n"
"wzE sz 1\n"
"gqD qu 1\n"
"Xwm me 1\n"
"oQx on 1\n"
"lCp le 1\n"
"Mhk th 1\n"
"dTq qu 1\n"
"xUw wa 1\n"
"hgE th 1\n"
"gcB ch 1\n"
"hpJ th 1\n"
"mqK qu 1\n"
"gBn an 1\n"
"hIv th 1\n"
"lqD qu 1\n"
"wPx wa 1\n"
"sMt th 1\n"
"yXw wa 1\n"
"jKq qu 1\n"
"Lrz er 1\n"
"Hwj ij 1\n"
"yfW ny 1\n"
"Yyu qu 1\n"
"qYs qu 1\n"
"yvR va 1\n"
"sRz st 1\n"
"Kyx ny 1\n"
"nxR an 1\n"
"cdJ ch 1\n"
"Nwc ch 1\n"
"tbE th 1\n"
"oeZ er 1\n"
"bcQ ch 1\n"
"Swb wa 1\n"
"Ikq qu 1\n"
"Bvz sz 1\n"
"zhF th 1\n"
"Xqy qu 1\n"
"kKb ka 1\n"
"Wdk de 1\n"
"wpP pr 1\n"
"kQy ka 1\n"
"Bqe qu 1\n"
"qfZ qu 1\n"
"pPw pr 1\n"
"Aoh th 1\n"
"plJ le 1\n"
"Ynv an 1\n"
"jMh th 1\n"
"bQg ng 1\n"
"afM an 1\n"
"jvO ij 1\n"
"eHf er 1\n"
"hQg th 1\n"
"kqY qu 1\n"
"zJq qu 1\n"
"pYh th 1\n"
"qeM qu 1\n"
"Kpk ka 1\n"
"kfW ka 1\n"
"Wds st 1\n"
"bNc ch 1\n"
"vBx va 1\n"
"suJ qu 1\n"
"qEx qu 1\n"
"rfZ er 1\n"
"oHg ng 1\n"
"eFw er 1\n"
"fPp pr 1\n"
"kDb ka 1\n"
"tZn th 1\n"
"dcK ch 1\n"
"yWv va 1\n"
"Uxv va 1\n"
"yQe er 1\n"
"Zjq qu 1\n"
"Wjv ij 1\n"
"ygO ng 1\n"
"ojQ on 1\n"
"Kwc ch 1\n"
"pFg ng 1\n"
"sMd st 1\n"
"Mfq qu 1\n"
"Mzy sz 1\n"
"Nwp pr 1\n"
"ywT wa 1\n"
"wLq qu 1\n"
"Hqm qu 1\n"
"qsC qu 1\n"
"bNn an 1\n"
"bUv va 1\n"
"nRc ch 1\n"
"Rlk le 1\n"
"Bqp qu 1\n"
"cfI ch 1\n"
"mVq qu 1\n"
"qGj qu 1\n"
"vlX le 1\n"
"kfG ka 1\n"
"wVd de 1\n"
"cdE ch 1\n"
"hzE th 1\n"
"Dhv th 1\n"
"bzj sz 1\n"
"vvL va 1\n"
"bzQ sz 1\n"
"wVb wa 1\n"
"Zxl le 1\n"
"zLw sz 1\n"
"hTq th 1\n"
"Vqp qu 1\n"
"hmW th 1\n"
"flD le 1\n"
"Kcd ch 1\n"
"pDq qu 1\n"
"kvY ka 1\n"
"cQl ch 1\n"
"Ixk ka 1\n"
"sGf st 1\n"
"gFh th 1\n"
"Rkd de 1\n"
"qHl qu 1\n"
"rCg ng 1\n"
"qBn an 1\n"
"sJw st 1\n"
"cWj ch 1\n"
"zXp sz 1\n"
"Hhl th 1\n"
"hjP th 1\n"
"qlZ qu 1\n"
"Hxr er 1\n"
"zrE er 1\n"
"gkH ng 1\n"
"uHk qu 1\n"
"Gzm sz 1\n"
"cBc ch 1\n"
"zff sz 1\n"
"zLs st 1\n"
"Uqy qu 1\n"
"vkD ka 1\n"
"fqX qu 1\n"
"hLj th 1\n"
"fYu qu 1\n"
"jKw ij 1\n"
"jIb ij 1\n"
"nrU an 1\n"
"fFp pr 1\n"
"sbC st 1\n"
"mGv va 1\n"
"fXp pr 1\n"
"Pkv ka 1\n"
"Cqe qu 1\n"
"cCx ch 1\n"
"rNq qu 1\n"
"Zwf wa 1\n"
"Jgc ch 1\n"
"xlQ le 1\n"
"gBz ng 1\n"
"cIx ch 1\n"
"odQ on 1\n"
"Qnz an 1\n"
"Uzx sz 1\n"
"Jpt th 1\n"
"gxX ng 1\n"
"Zkd de 1\n"
"Xkk ka 1\n"
"hRv th 1\n"
"ycV ch 1\n"
"zMm sz 1\n"
"eBq qu 1\n"
"gHd ng 1\n"
"bxU be 1\n"
"xdK de 1\n"
"mQc ch 1\n"
"tYj th 1\n"
"hlF th 1\n"
"cRz ch 1\n"
"lGz le 1\n"
"zFz ze 1\n"
"qOp qu 1\n"
"Ggc ch 1\n"
"oGm on 1\n"
"Xnp an 1\n"
"wYg ng 1\n"
"wuJ qu 1\n"
"sNs st 1\n"
"zqU qu 1\n"
"kCp ka 1\n"
"Whw th 1\n"
"nQx an 1\n"
"vwA va 1\n"
"Vcg ch 1\n"
"kWj ij 1\n"
"Hqd qu 1\n"
"Cpy pr 1\n"
"zcL ch 1\n"
"cfF ch 1\n"
"kXn an 1\n"
"aXj an 1\n"
"Swk ka 1\n"
"fhq th 1\n"
"Vxi in 1\n"
"Gqu un 1\n"
"Uxd de 1\n"
"zdK sz 1\n"
"hZq th 1\n"
"mwJ me 1\n"
"cvD ch 1\n"
"lbZ le 1\n"
"Pzl le 1\n"
"hdO th 1\n"
"hJn th 1\n"
"qWp qu 1\n"
"dXy de 1\n"
"fuU qu 1\n"
"fXy ny 1\n"
"xnL an 1\n"
"gMf ng 1\n"
"rNf er 1\n"
"xQh th 1\n"
"kqH qu 1\n"
"rFz er 1\n"
"vpT va 1\n"
"Nwy wa 1\n"
"yqA qu 1\n"
"vhO th 1\n"
"kVh th 1\n"
"nYb an 1\n"
"jvN ij 1\n"
"bIf be 1\n"
"qqS qu 1\n"
"jbF ij 1\n"
"gMk ng 1\n"
"bTd de 1\n"
"Rhd th 1\n"
"tWq th 1\n"
"gLz ng 1\n"
"fsD st 1\n"
"uMt th 1\n"
"yHq qu 1\n"
"Xgj ng 1\n"
"Lmm me 1\n"
"vkU ka 1\n"
"lAx le 1\n"
"Kzd sz 1\n"
"hKm th 1\n"
"kQd de 1\n"
"gFc ch 1\n"
"wyX wa 1\n"
"zfU sz 1\n"
"xpU pr 1\n"
"ywJ wa 1\n"
"Ayq qu 1\n"
"gIu qu 1\n"
"zuQ qu 1\n"
"Vfn an 1\n"
"vBn an 1\n"
"Hty th 1\n"
"gRv ng 1\n"
"pTb pr 1\n"
"Uqx qu 1\n"
"vTn an 1\n"
"vJc ch 1\n"
"Uiw in 1\n"
"Jlp le 1\n"
"zPq qu 1\n"
"rCx er 1\n"
"lqS qu 1\n"
"zlZ le 1\n"
"zOw sz 1\n"
"klK le 1\n"
"kfQ ka 1\n"
"uJx qu 1\n"
"pkP ka 1\n"
"Gqz qu 1\n"
"Jlc ch 1\n"
"yyD ny 1\n"
"jhX th 1\n"
"crV ch 1\n"
"Dww wa 1\n"
"yjw ij 1\n"
"qpX qu 1\n"
"Qmd de 1\n"
"yWz sz 1\n"
"wPd de 1\n"
"Uqk qu 1\n"
"nbR an 1\n"
"Ydc ch 1\n"
"qQl qu 1\n"
"pmD me 1\n"
"Jkj ka 1\n"
"jTk ka 1\n"
"wYf wa 1\n"
"Zzx sz 1\n"
"rkQ er 1\n"
"bDp pr 1\n"
"qSs qu 1\n"
"gXr ng 1\n"
"cZb ch 1\n"
"Ngp ng 1\n"
"hqQ th 1\n"
"Wvw va 1\n"
"Wbw wa 1\n"
"wvK va 1\n"
"cJf ch 1\n"
"Mwd de 1\n"
"ddJ de 1\n"
"iwE in 1\n"
"bxX be 1\n"
"jxT ij 1\n"
"Ycn ch 1\n"
"wMf wa 1\n"
"bqD qu 1\n"
"yqI qu 1\n"
"dRj de 1\n"
"wYy wa 1\n"
"Txz sz 1\n"
"vrN er 1\n"
"qVu un 1\n"
"mRj ij 1\n"
"Fjx ij 1\n"
"fyQ ny 1\n"
"xeI er 1\n"
"Wqf qu 1\n"
"Jly le 1\n"
"jDb ij 1\n"
"Yzu qu 1\n"
"Bxm me 1\n"
"wLj ij 1\n"
"bqc ch 1\n"
"sgK ng 1\n"
"kqW qu 1\n"
"Zsn an 1\n"
"Fqq qu 1\n"
"rXz er 1\n"
"lJq qu 1\n"
"jEh th 1\n"
"nCb an 1\n"
"Xrd er 1\n"
"Rzh th 1\n"
"gfW ng 1\n"
"Xtl th 1\n"
"mTx me 1\n"
"ufA qu 1\n"
"wjQ ij 1\n"
"xlW le 1\n"
"dqH qu 1\n"
"xhM th 1\n"
"Xwt th 1\n"
"dnW an 1\n"
"Rfz sz 1\n"
"fKp pr 1\n"
"kFw ka 1\n"
"Quv qu 1\n"
"mXw me 1\n"
"Vkw ka 1\n"
"tFh ch 1\n"
"hIu th 1\n"
"lTf le 1\n"
"Mwv va 1\n"
"wvT va 1\n"
"kKp ka 1\n"
"tRv th 1\n"
"wXo on 1\n"
"vzL sz 1\n"
"Jcf ch 1\n"
"Tbq qu 1\n"
"jdQ de 1\n"
"Rbx be 1\n"
"Jrm er 1\n"
"sRj st 1\n"
"zWz sz 1\n"
"qnE an 1\n"
"Kcf ch 1\n"
"Qqm qu 1\n"
"fpI pr 1\n"
"iNw in 1\n"
"ujE qu 1\n"
"qHv qu 1\n"
"Jvx va 1\n"
"hHc th 1\n"
"fvJ va 1\n"
"nqY an 1\n"
"wpE wa 1\n"
"Hws st 1\n"
"xzI sz 1\n"
"Cgg ng 1\n"
"cWd ch 1\n"
"quV un 1\n"
"bjN ij 1\n"
"xQp pr 1\n"
"bxE be 1\n"
"uVk qu 1\n"
"Wrl er 1\n"
"Lrx er 1\n"
"Iwl le 1\n"
"aqB an 1\n"
"Vcp ch 1\n"
"Wwt th 1\n"
"aGx an 1\n"
"fPn an 1\n"
"mFq qu 1\n"
"qgd qu 1\n"
"Zsd st 1\n"
"Vxs sz 1\n"
"Khq th 1\n"
"wSs st 1\n"
"oGq qu 1\n"
"Yzv sz 1\n"
"dqX qu 1\n"
"mpQ me 1\n"
"Kcp ch 1\n"
"swD st 1\n"
"rZg ng 1\n"
"jYm ij 1\n"
"uJl qu 1\n"
"vWv va 1\n"
"svO st 1\n"
"pFd de 1\n"
"Yjx ij 1\n"
"tpI th 1\n"
"dVt th 1\n"
"sNm st 1\n"
"lKt th 1\n"
"nvU an 1\n"
"Hxf fo 1\n"
"puW qu 1\n"
"wJg ng 1\n"
"gxR ng 1\n"
"fAg ng 1\n"
"Yqe qu 1\n"
"Pwz sz 1\n"
"hmC th 1\n"
"ylJ le 1\n"
"mqT qu 1\n"
"cCf ch 1\n"
"pZg ng 1\n"
"aFx an 1\n"
"oYq qu 1\n"
"fPj ij 1\n"
"dJt th 1\n"
"xwn an 1\n"
"Ccb ch 1\n"
"wFn an 1\n"
"wrY er 1\n"
"Cdh th 1\n"
"hLc th 1\n"
"Zxg ng 1\n"
"Mxc ch 1\n"
"hcY th 1\n"
"zVw sz 1\n"
"hkV th 1\n"
"txE th 1\n"
"yvT va 1\n"
"Mlw le 1\n"
"ztF th 1\n"
"fGd de 1\n"
"zjE sz 1\n"
"gjM ng 1\n"
"jwP ij 1\n"
"Kxt th 1\n"
"yFg ng 1\n"
"Wcg ch 1\n"
"thZ ch 1\n"
"hzQ th 1\n"
"Jtg th 1\n"
"yvK va 1\n"
"zVz sz 1\n"
"Pwb wa 1\n"
"xqD qu 1\n"
"uyQ qu 1\n"
"gCm ng 1\n"
"zjU sz 1\n"
"xGq qu 1\n"
"Mqy qu 1\n"
"Ocx ch 1\n"
"sqM qu 1\n"
"lRb le 1\n"
"tfU th 1\n"
"vZg ng 1\n"
"fZc ch 1\n"
"gpZ ng 1\n"
"Fpf pr 1\n"
"qtQ th 1\n"
"mhZ th 1\n"
"bqF qu 1\n"
"fgG ng 1\n"
"woT on 1\n"
"zSb sz 1\n"
"wxS wa 1\n"
"Wrf er 1\n"
"Oqk qu 1\n"
"xLc ch 1\n"
"Qzj sz 1\n"
"wXk ka 1\n"
"tdX th 1\n"
"Jqc ch 1\n"
"fXk ka 1\n"
"kBd de 1\n"
"iqW qu 1\n"
"Ocb ch 1\n"
"fUo on 1\n"
"jXk ij 1\n"
"hbI th 1\n"
"Zcg ch 1\n"
"zwS wa 1\n"
"cVm ch 1\n"
"vwj ij 1\n"
"gwG ng 1\n"
"zsM st 1\n"
"Pqo qu 1\n"
"hPj th 1\n"
"fwG wa 1\n"
"Xwh th 1\n"
"Wwh th 1\n"
"Vqw qu 1\n"
"vmY va 1\n"
"uvF qu 1\n"
"tfK th 1\n"
"Xbg ng 1\n"
"Nfn an 1\n"
"wpH pr 1\n"
"yJq qu 1\n"
"wqO qu 1\n"
"ncV ch 1\n"
"wgM ng 1\n"
"fQk ka 1\n"
"hvK th 1\n"
"qLr qu 1\n"
"Wce ch 1\n"
"kFn an 1\n"
"rBm er 1\n"
"mdV de 1\n"
"jFc ch 1\n"
"knX an 1\n"
"nMf an 1\n"
"sCc ch 1\n"
"pCq qu 1\n"
"uJt th 1\n"
"Cfk ka 1\n"
"Cxb be 1\n"
"fOw wa 1\n"
"aJz an 1\n"
"gLt th 1\n"
"bmX me 1\n"
"Yfo on 1\n"
"dJf de 1\n"
"Eay an 1\n"
"qSd qu 1\n"
"mjQ ij 1\n"
"pNk ka 1\n"
"Nvh th 1\n"
"xkX ka 1\n"
"Jwx wa 1\n"
"jvL ij 1\n"
"fpH pr 1\n"
"pxO pr 1\n"
"vPx va 1\n"
"dWu qu 1\n"
"hbR th 1\n"
"woE on 1\n"
"gtX th 1\n"
"bfF be 1\n"
"mvW va 1\n"
"xsM st 1\n"
"wLv va 1\n"
"wHh th 1\n"
"sCn an 1\n"
"pLw pr 1\n"
"kXw ka 1\n"
"xVl le 1\n"
"hCc th 1\n"
"oUk on 1\n"
"zcF ch 1\n"
"sMv st 1\n"
"drZ er 1\n"
"wfO wa 1\n"
"yFv va 1\n"
"hXa th 1\n"
"qMu un 1\n"
"fCv va 1\n"
"fwC wa 1\n"
"oTg ng 1\n"
"Fkm ka 1\n"
"eQt th 1\n"
"Pxd de 1\n"
"kjG ij 1\n"
"tGs th 1\n"
"dqB qu 1\n"
"fmX me 1\n"
"xYi in 1\n"
"kIk ka 1\n"
"vDd de 1\n"
"kvC ka 1\n"
"qtZ th 1\n"
"fPc ch 1\n"
"dpN de 1\n"
"hNr th 1\n"
"Znj an 1\n"
"Hke er 1\n"
"Iqp qu 1\n"
"wfN wa 1\n"
"Vhx th 1\n"
"Dgk ng 1\n"
"mkQ ka 1\n"
"Wxd de 1\n"
"Icx ch 1\n"
"yYt th 1\n"
"tqx th 1\n"
"Zvf va 1\n"
"sxU st 1\n"
"Lqk qu 1\n"
"nfI an 1\n"
"jyq qu 1\n"
"Wvn an 1\n"
"Sdv de 1\n"
"uYc ch 1\n"
"Qgm ng 1\n"
"cXa ch 1\n"
"wBx wa 1\n"
"pYx pr 1\n"
"jWl le 1\n"
"Kfw wa 1\n"
"qjJ qu 1\n"
"Pjj ij 1\n"
"ajX an 1\n"
"sXd st 1\n"
"xHg ng 1\n"
"xhA th 1\n"
"rGm er 1\n"
"Qtm th 1\n"
"srY er 1\n"
"qPx qu 1\n"
"wRz sz 1\n"
"wOg wa 1\n"
"fLg ng 1\n"
"hQt th 1\n"
"jhW th 1\n"
"Cwk ka 1\n"
"zWl le 1\n"
"wJc ch 1\n"
"Pxv va 1\n"
"npI an 1\n"
"lnW an 1\n"
"kqy qu 1\n"
"ywg ng 1\n"
"sCd st 1\n"
"qfF qu 1\n"
"qpg qu 1\n"
"Mbx be 1\n"
"nwN an 1\n"
"wLs st 1\n"
"Wcv ch 1\n"
"Vvr er 1\n"
"Vkx ka 1\n"
"dmU de 1\n"
"fGs st 1\n"
"gJz ng 1\n"
"dFz sz 1\n"
"qCf qu 1\n"
"lvW le 1\n"
"Svb va 1\n"
"xJr er 1\n"
"uZf qu 1\n"
"Tjc ch 1\n"
"pIj ij 1\n"
"bVg ng 1\n"
"vdO de 1\n"
"lTq qu 1\n"
"bMh th 1\n"
"nDm an 1\n"
"Tzb sz 1\n"
"pCw pr 1\n"
"Qkg ng 1\n"
"fpY pr 1\n"
"yQj ij 1\n"
"qiC qu 1\n"
"mQi in 1\n"
"wUq qu 1\n"
"kVj ij 1\n"
"tjQ th 1\n"
"mXj ij 1\n"
"Xfd de 1\n"
"cgI ch 1\n"
"Pkj ij 1\n"
"jjF ij 1\n"
"jrJ er 1\n"
"qwZ qu 1\n"
"Rtz th 1\n"
"fHb be 1\n"
"Hgx ng 1\n"
"Dzf sz 1\n"
"cbE ch 1\n"
"Xfs st 1\n"
"Rjm ij 1\n"
"fmY me 1\n"
"wYj ij 1\n"
"uFp qu 1\n"
"vWm va 1\n"
"yVc ch 1\n"
"cgL ch 1\n"
"zmR sz 1\n"
"zfB sz 1\n"
"znH an 1\n"
"hgG th 1\n"
"xuE qu 1\n"
"Bsl le 1\n"
"oWx on 1\n"
"Pjl le 1\n"
"Jdf de 1\n"
"Xmp me 1\n"
"sgO ng 1\n"
"hCj th 1\n"
"wtR th 1\n"
"fDs st 1\n"
"bQb be 1\n"
"quM un 1\n"
"fLl le 1\n"
"Nhp th 1\n"
"znU an 1\n"
"sdS st 1\n"
"wWu qu 1\n"
"tFq th 1\n"
"cFq ch 1\n"
"Wwl le 1\n"
"Lqy qu 1\n"
"nqQ an 1\n"
"zmD sz 1\n"
"Gyx ny 1\n"
"bkR ka 1\n"
"lQw le 1\n"
"Pqm qu 1\n"
"Fwk ka 1\n"
"tHt th 1\n"
"jyL ij 1\n"
"qxA qu 1\n"
"mrC er 1\n"
"qzL qu 1\n"
"jJg ng 1\n"
"jfS ij 1\n"
"qMh th 1\n"
"mlV le 1\n"
"bkJ ka 1\n"
"knH an 1\n"
"Uqt th 1\n"
"cuF ch 1\n"
"iYq qu 1\n"
"fUe er 1\n"
"sBb st 1\n"
"Nhx th 1\n"
"rhP th 1\n"
"dWp de 1\n"
"Yvf va 1\n"
"Rxr er 1\n"
"kzG sz 1\n"
"xuZ qu 1\n"
"xvD va 1\n"
"fwq qu 1\n"
"hjJ th 1\n"
"kZr er 1\n"
"vJn an 1\n"
"xnO an 1\n"
"vcA ch 1\n"
"mfK me 1\n"
"vjS ij 1\n"
"Nvp va 1\n"
"dfB de 1\n"
"Qsb st 1\n"
"dXp pr 1\n"
"zRl le 1\n"
"Ejq qu 1\n"
"aGz an 1\n"
"nHg an 1\n"
"bvA va 1\n"
"Bfd de 1\n"
"zVg ng 1\n"
"zsY st 1\n"
"hVz th 1\n"
"Pjm ij 1\n"
"sXi in 1\n"
"iKj in 1\n"
"qaE an 1\n"
"Cfj ij 1\n"
"zMc ch 1\n"
"mgZ ng 1\n"
"vgA ng 1\n"
"iwJ in 1\n"
"vGx va 1\n"
"tfY th 1\n"
"ljH le 1\n"
"zGj sz 1\n"
"bmK me 1\n"
"nUq an 1\n"
"zRt th 1\n"
"tGj th 1\n"
"zVd sz 1\n"
"jSr er 1\n"
"fNq qu 1\n"
"xTg ng 1\n"
"nqE an 1\n"
"Wng an 1\n"
"zVv sz 1\n"
"gVs ng 1\n"
"fNd de 1\n"
"qNw qu 1\n"
"Znc ch 1\n"
"uJs qu 1\n"
"yvJ va 1\n"
"xlM le 1\n"
"Jzc ch 1\n"
"vRh th 1\n"
"fcK ch 1\n"
"wVn an 1\n"
"rWw er 1\n"
"cHk ch 1\n"
"vOx va 1\n"
"iUa an 1\n"
"nWn an 1\n"
"zqZ qu 1\n"
"xFj ij 1\n"
"nCg an 1\n"
"fYj ij 1\n"
"Vsx st 1\n"
"mtM th 1\n"
"mhG th 1\n"
"jtN th 1\n"
"hcC th 1\n"
"Nwk ka 1\n"
"dXu qu 1\n"
"mJq qu 1\n"
"xsO st 1\n"
"qRn an 1\n"
"Rnj an 1\n"
"kmP ka 1\n"
"Xtg th 1\n"
"Gvh th 1\n"
"jqv qu 1\n"
"cVl ch 1\n"
"cdI ch 1\n"
"zdE sz 1\n"
"hZk th 1\n"
"Bdx de 1\n"
"hHn th 1\n"
"hkG th 1\n"
"vxJ va 1\n"
"lrA er 1\n"
"lrT er 1\n"
"hjV th 1\n"
"qbI qu 1\n"
"mTg ng 1\n"
"fmV me 1\n"
"rDk er 1\n"
"dNd de 1\n"
"Gzj sz 1\n"
"aVj an 1\n"
"vNr er 1\n"
"kXa an 1\n"
"rGs er 1\n"
"xaX an 1\n"
"crG ch 1\n"
"qJa an 1\n"
"jDt th 1\n"
"Mfx fo 1\n"
"xEa an 1\n"
"Qvz sz 1\n"
"wRg ng 1\n"
"pFc ch 1\n"
"Cpv va 1\n"
"rJk er 1\n"
"fbQ be 1\n"
"Xzg ng 1\n"
"qFy qu 1\n"
"Zfj ij 1\n"
"twE th 1\n"
"Oaq an 1\n"
"ysY st 1\n"
"wdZ de 1\n"
"gmO ng 1\n"
"wGn an 1\n"
"wRk ka 1\n"
"gqS qu 1\n"
"Agq qu 1\n"
"Twv va 1\n"
"Qnv an 1\n"
"bVv va 1\n"
"cDw ch 1\n"
"tGq th 1\n"
"fbq qu 1\n"
"Tvw va 1\n"
"mNv va 1\n"
"dtE th 1\n"
"pzP sz 1\n"
"Vsw sz 1\n"
"qGq qu 1\n"
"qPc ch 1\n"
"qyC qu 1\n"
"nxF an 1\n"
"jDl le 1\n"
"jHt th 1\n"
"fxZ fo 1\n"
"sQc ch 1\n"
"nmH an 1\n"
"xrD er 1\n"
"hMh th 1\n"
"vHk ka 1\n"
"hmS th 1\n"
"Xdt th 1\n"
"Xwl le 1\n"
"uJr qu 1\n"
"sPk st 1\n"
"Xjp ij 1\n"
"Uqi qu 1\n"
"kgD ng 1\n"
"jgI ng 1\n"
"uFw qu 1\n"
"xNd de 1\n"
"dhI th 1\n"
"Lxo on 1\n"
"Sfq qu 1\n"
"zRp sz 1\n"
"xwK wa 1\n"
"fmB me 1\n"
"vrV er 1\n"
"qSf qu 1\n"
"jPn an 1\n"
"Hbp pr 1\n"
"bJt th 1\n"
"lqQ qu 1\n"
"xSd de 1\n"
"dMk de 1\n"
"vVz sz 1\n"
"vkK ka 1\n"
"Xds de 1\n"
"ybB be 1\n"
"gpE ng 1\n"
"qcC ch 1\n"
"pxL pr 1\n"
"gPm ng 1\n"
"Bpd de 1\n"
"dpB de 1\n"
"jlJ le 1\n"
"pkC ka 1\n"
"ypP pr 1\n"
"Nqm qu 1\n"
"tgZ th 1\n"
"Eqo qu 1\n"
"dRk de 1\n"
"Ubc ch 1\n"
"xhY th 1\n"
"lJd le 1\n"
"pvN va 1\n"
"Qfc ch 1\n"
"Dbw wa 1\n"
"sFc ch 1\n"
"wkX ka 1\n"
"xpR pr 1\n"
"pjJ ij 1\n"
"gkQ ng 1\n"
"rMf er 1\n"
"Jsn an 1\n"
"xOw wa 1\n"
"Dqu un 1\n"
"nbJ an 1\n"
"gvF ng 1\n"
"Fnp an 1\n"
"jpV ij 1\n"
"qtD th 1\n"
"uEj qu 1\n"
"yhY th 1\n"
"Ohq th 1\n"
"nXy an 1\n"
"pdU de 1\n"
"mDz sz 1\n"
"iVk in 1\n"
"Hqq qu 1\n"
"xpZ po 1\n"
"aeU an 1\n"
"sjZ st 1\n"
"sGp st 1\n"
"Wqn an 1\n"
"xqS qu 1\n"
"Jjc ch 1\n"
"qPp qu 1\n"
"sXz st 1\n"
"xvP va 1\n"
"Wbq qu 1\n"
"tjK th 1\n"
"lhH th 1\n"
"hqV th 1\n"
"dYf de 1\n"
"pFk ka 1\n"
"sFq qu 1\n"
"uHq qu 1\n"
"vhA th 1\n"
"jlE le 1\n"
"sqB qu 1\n"
"qnr an 1\n"
"Fxq qu 1\n"
"zHn an 1\n"
"pdB de 1\n"
"wHc ch 1\n"
"Pxj ij 1\n"
"gHx ng 1\n"
"nqJ an 1\n"
"oqX qu 1\n"
"Xby be 1\n"
"tbI th 1\n"
"kSf ka 1\n"
"vhD th 1\n"
"qHj qu 1\n"
"Npx pr 1\n"
"Qzp sz 1\n"
"xiU in 1\n"
"rjZ er 1\n"
"wjU ij 1\n"
"jtB th 1\n"
"Ygq qu 1\n"
"aQf an 1\n"
"xWu qu 1\n"
"aVf an 1\n"
"pQx pr 1\n"
"Lnw an 1\n"
"qWa an 1\n"
"uHp qu 1\n"
"Lvp va 1\n"
"Jxp pr 1\n"
"zHk sz 1\n"
"wvU va 1\n"
"Wqh th 1\n"
"hVs th 1\n"
"Xgy ng 1\n"
"dZj de 1\n"
"uCq qu 1\n"
"Gxl le 1\n"
"Hlg ng 1\n"
"Wqd qu 1\n"
"Dxz sz 1\n"
"hdN th 1\n"
"pvM va 1\n"
"Wxk ka 1\n"
"qWd qu 1\n"
"fiO in 1\n"
"fDw wa 1\n"
"bHj ij 1\n"
"iVh th 1\n"
"Pmg ng 1\n"
"fXc ch 1\n"
"xfL fo 1\n"
"yGc ch 1\n"
"yBn an 1\n"
"hCk th 1\n"
"Llk le 1\n"
"yMh th 1\n"
"qrY qu 1\n"
"gdX ng 1\n"
"qxG qu 1\n"
"Zmt th 1\n"
"Rzw sz 1\n"
"nBd an 1\n"
"mWl le 1\n"
"xuI qu 1\n"
"jyF ij 1\n"
"bVu qu 1\n"
"ygP ng 1\n"
"dFq qu 1\n"
"jFm ij 1\n"
"Rml le 1\n"
"klH le 1\n"
"Vff fo 1\n"
"Kzk sz 1\n"
"Lhv th 1\n"
"cSj ch 1\n"
"Qrh th 1\n"
"uBw qu 1\n"
"sCk ka 1\n"
"qyS qu 1\n"
"cXu ch 1\n"
"wfM wa 1\n"
"kdK de 1\n"
"cXj ch 1\n"
"ctZ th 1\n"
"fjI ij 1\n"
"cgS ch 1\n"
"mwL me 1\n"
"kzU sz 1\n"
"cZr ch 1\n"
"fqU qu 1\n"
"qJi qu 1\n"
"gDd ng 1\n"
"bKq qu 1\n"
"aUw an 1\n"
"sxE st 1\n"
"mxU me 1\n"
"cwY ch 1\n"
"fpC pr 1\n"
"sRw st 1\n"
"Kkq qu 1\n"
"wxA wa 1\n"
"gQf ng 1\n"
"pPb pr 1\n"
"Hwu ku 1\n"
"suX qu 1\n"
"lqY qu 1\n"
"sxW st 1\n"
"aFh th 1\n"
"lWq qu 1\n"
"pbZ pr 1\n"
"bqm qu 1\n"
"kJk ka 1\n"
"qtT th 1\n"
"zMd sz 1\n"
"hGs th 1\n"
"xlH le 1\n"
"dmq qu 1\n"
"Xrk er 1\n"
"Ocf ch 1\n"
"mKc ch 1\n"
"zrA er 1\n"
"gxE ng 1\n"
"qWu un 1\n"
"xQf fo 1\n"
"Xoz on 1\n"
"fmP me 1\n"
"kdD de 1\n"
"bBz sz 1\n"
"wpA pr 1\n"
"nMb an 1\n"
"tHq th 1\n"
"jMt th 1\n"
"Svq qu 1\n"
"jMl le 1\n"
"wBc ch 1\n"
"ymX me 1\n"
"hcB th 1\n"
"brU er 1\n"
"paX an 1\n"
"hdG th 1\n"
"Fwp pr 1\n"
"sbY st 1\n"
"mhB th 1\n"
"pfZ pr 1\n"
"Vmh th 1\n"
"sCq qu 1\n"
"Zfw wa 1\n"
"Ljm ij 1\n"
"pqG qu 1\n"
"dpK de 1\n"
"tfG th 1\n"
"ijR in 1\n"
"iJy in 1\n"
"qfN qu 1\n"
"crS ch 1\n"
"cgT ch 1\n"
"wOt th 1\n"
"fnE an 1\n"
"hWp th 1\n"
"Zpw pr 1\n"
"wdO de 1\n"
"vYy va 1\n"
"qrI qu 1\n"
"dmF de 1\n"
"jhJ th 1\n"
"wHr er 1\n"
"Jzb sz 1\n"
"fEy ny 1\n"
"hhZ th 1\n"
"wpQ pr 1\n"
"qYg qu 1\n"
"qtY th 1\n"
"Kdx de 1\n"
"qfj qu 1\n"
"Rbv va 1\n"
"bbO be 1\n"
"Xcn ch 1\n"
"kCd de 1\n"
"Gcx ch 1\n"
"zmC sz 1\n"
"wJl le 1\n"
"qDc ch 1\n"
"Jzr er 1\n"
"Yrw er 1\n"
"Ksx st 1\n"
"uKx qu 1\n"
"jSc ch 1\n"
"Ljz sz 1\n"
"xdB de 1\n"
"zWb sz 1\n"
"vwY va 1\n"
"vMd de 1\n"
"dbH de 1\n"
"Qsu qu 1\n"
"wHq qu 1\n"
"gJh th 1\n"
"wZp pr 1\n"
"btO th 1\n"
"Xmv va 1\n"
"qpd qu 1\n"
"Jnw an 1\n"
"vlD le 1\n"
"xcX ch 1\n"
"Yvv va 1\n"
"Zft th 1\n"
"Hqz qu 1\n"
"xqM qu 1\n"
"Hth ch 1\n"
"ztL th 1\n"
"iOj in 1\n"
"cIz ch 1\n"
"hhC th 1\n"
"tvX th 1\n"
"Fgk ng 1\n"
"mjC ij 1\n"
"Ojp ij 1\n"
"kvI ka 1\n"
"zqb qu 1\n"
"qqW qu 1\n"
"iHg ng 1\n"
"jxJ ij 1\n"
"Gbz sz 1\n"
"nQc ch 1\n"
"pXq qu 1\n"
"jDd de 1\n"
"qQr qu 1\n"
"vJx va 1\n"
"zbY sz 1\n"
"fRm me 1\n"
"qEl qu 1\n"
"oaZ an 1\n"
"vjF ij 1\n"
"lqX qu 1\n"
"pSd de 1\n"
"bXq qu 1\n"
"jJv ij 1\n"
"Wrv er 1\n"
"Kpw pr 1\n"
"xaY an 1\n"
"jCv ij 1\n"
"fbR be 1\n"
"pTp pr 1\n"
"wdI de 1\n"
"qfQ qu 1\n"
"Rrq qu 1\n"
"dbF de 1\n"
"bzF sz 1\n"
"qwO qu 1\n"
"vrY er 1\n"
"twI th 1\n"
"zLf sz 1\n"
"bVc ch 1\n"
"Xnl an 1\n"
"Wgb ng 1\n"
"fuS qu 1\n"
"vIf va 1\n"
"Twt th 1\n"
"nKd an 1\n"
"Dkh th 1\n"
"uBd qu 1\n"
"kOz ka 1\n"
"zOj sz 1\n"
"nzE an 1\n"
"Zbh th 1\n"
"qMg qu 1\n"
"gfC ng 1\n"
"vgD ng 1\n"
"ytC th 1\n"
"mqM qu 1\n"
"Kjn an 1\n"
"xbX be 1\n"
"zfH sz 1\n"
"mwH me 1\n"
"zQb sz 1\n"
"Gzk sz 1\n"
"qsW qu 1\n"
"kNs st 1\n"
"Lqz qu 1\n"
"nmW an 1\n"
"qNx qu 1\n"
"zcQ ch 1\n"
"qMz qu 1\n"
"wGz sz 1\n"
"uCd qu 1\n"
"Bpv pr 1\n"
"qNe qu 1\n"
"bpP pr 1\n"
"lXf le 1\n"
"cLq ch 1\n"
"pdX de 1\n"
"qzU qu 1\n"
"Kxd de 1\n"
"jvF ij 1\n"
"rFn an 1\n"
"Etq th 1\n"
"zYh th 1\n"
"Ksv st 1\n"
"fJk ka 1\n"
"fkC ka 1\n"
"mxK me 1\n"
"fbz sz 1\n"
"vrW er 1\n"
"mPq qu 1\n"
"yBt th 1\n"
"iCf in 1\n"
"srH er 1\n"
"hjB th 1\n"
"fcG ch 1\n"
"Ftg th 1\n"
"uBp qu 1\n"
"yqT qu 1\n"
"djF de 1\n"
"tgU th 1\n"
"Wrj er 1\n"
"xFc ch 1\n"
"ycC ch 1\n"
"eqA qu 1\n"
"pbG pr 1\n"
"Cwh th 1\n"
"fDk ka 1\n"
"wTz sz 1\n"
"xrW er 1\n"
"kQs st 1\n"
"wMl le 1\n"
"yCn nd 1\n"
"eGp er 1\n"
"uPv qu 1\n"
"Wqe qu 1\n"
"yiI in 1\n"
"rqF qu 1\n"
"Kjs st 1\n"
"lwK le 1\n"
"fjQ ij 1\n"
"uIq qu 1\n"
"dxR de 1\n"
"Gqj qu 1\n"
"nLb an 1\n"
"gRd ng 1\n"
"qyv qu 1\n"
"wtZ th 1\n"
"cRk ch 1\n"
"iKf in 1\n"
"hbK th 1\n"
"rqT qu 1\n"
"xmF me 1\n"
"vHt th 1\n"
"tqN th 1\n"
"vLv va 1\n"
"xvJ va 1\n"
"bgJ ng 1\n"
"Qjq qu 1\n"
"Lvb va 1\n"
"Hxg ng 1\n"
"tVq th 1\n"
"rhZ th 1\n"
"slL le 1\n"
"kdH de 1\n"
"Kfb be 1\n"
"Dfh th 1\n"
"Cqq qu 1\n"
"nQk an 1\n"
"Wnz an 1\n"
"Njj ij 1\n"
"bJf be 1\n"
"wRh th 1\n"
"Dpb pr 1\n"
"sPj st 1\n"
"Zpn an 1\n"
"mPj ij 1\n"
"Qcl ch 1\n"
"zCd sz 1\n"
"yrC er 1\n"
"hCb th 1\n"
"aBv an 1\n"
"yuG qu 1\n"
"fcN ch 1\n"
"bZp pr 1\n"
"Gtf th 1\n"
"wbW wa 1\n"
"vPq qu 1\n"
"Vtj th 1\n"
"kWq qu 1\n"
"Jbm me 1\n"
"Wmb me 1\n"
"pxY pr 1\n"
"hQx th 1\n"
"tNn th 1\n"
"qdx qu 1\n"
"cYv ch 1\n"
"zlX le 1\n"
"rwF er 1\n"
"cZm ch 1\n"
"ybJ be 1\n"
"qaB an 1\n"
"tVj th 1\n"
"zUg ng 1\n"
"cfC ch 1\n"
"hxB th 1\n"
"Tbz sz 1\n"
"oFn an 1\n"
"bTp pr 1\n"
"hBk th 1\n"
"hQe th 1\n"
"qBe de 1\n"
"dpC de 1\n"
"kpW ka 1\n"
"Zkj ij 1\n"
"Nwn an 1\n"
"grC ng 1\n"
"uXq qu 1\n"
"Uoy on 1\n"
"Zfu qu 1\n"
"xKb be 1\n"
"hSb th 1\n"
"bPc ch 1\n"
"qcg ch 1\n"
"xIu qu 1\n"
"gBv ng 1\n"
"gZm me 1\n"
"qPu un 1\n"
"Bfp pr 1\n"
"rxC er 1\n"
"sLk st 1\n"
"hGj th 1\n"
"qvR qu 1\n"
"qpR qu 1\n"
"vNn an 1\n"
"Dft th 1\n"
"nRq an 1\n"
"khR th 1\n"
"pqP qu 1\n"
"tNp th 1\n"
"Vwt th 1\n"
"xwA wa 1\n"
"wMn an 1\n"
"Snq an 1\n"
"dfD de 1\n"
"vGw va 1\n"
"Xqb qu 1\n"
"Kww wa 1\n"
"Qhx th 1\n"
"Oyx ny 1\n"
"dvB de 1\n"
"sVh th 1\n"
"Hcn ch 1\n"
"sbU st 1\n"
"fFw wa 1\n"
"kfT ka 1\n"
"rvW er 1\n"
"Yxw wa 1\n"
"nFk an 1\n"
"Lqd qu 1\n"
"hoQ th 1\n"
"Nfj ij 1\n"
"grH ng 1\n"
"cJk ch 1\n"
"Pnv an 1\n"
"Nqx qu 1\n"
"yfE ny 1\n"
"kmI ka 1\n"
"Gmz sz 1\n"
"bxS be 1\n"
"quU un 1\n"
"qYf qu 1\n"
"zKw sz 1\n"
"whK th 1\n"
"ofY on 1\n"
"prH er 1\n"
"jXz sz 1\n"
"vQm va 1\n"
"iWx in 1\n"
"bzC sz 1\n"
"nYx an 1\n"
"qaK an 1\n"
"Ggb ng 1\n"
"zSf sz 1\n"
"rQz er 1\n"
"hkW th 1\n"
"Vnl an 1\n"
"Gtd th 1\n"
"rMw er 1\n"
"wvX va 1\n"
"jyU ij 1\n"
"Qqp qu 1\n"
"Hnq an 1\n"
"bFb be 1\n"
"qkH qu 1\n"
"Wck ch 1\n"
"fMw wa 1\n"
"zgE ng 1\n"
"oJz on 1\n"
"xvH va 1\n"
"hQy th 1\n"
"cYf ch 1\n"
"cxD ch 1\n"
"yDs st 1\n"
"qBh th 1\n"
"cJx ch 1\n"
"dPj de 1\n"
"wWd de 1\n"
"rHn an 1\n"
"iyM in 1\n"
"yxD ny 1\n"
"kPc ch 1\n"
"cXv ch 1\n"
"Nmg ng 1\n"
"vkN ka 1\n"
"lFj le 1\n"
"ymU me 1\n"
"pZv va 1\n"
"gZt th 1\n"
"Jqy qu 1\n"
"qAz qu 1\n"
"Bcy ch 1\n"
"pqj qu 1\n"
"cqE ch 1\n"
"Rwv va 1\n"
"crM ch 1\n"
"Axz sz 1\n"
"Zjp ij 1\n"
"yxF ny 1\n"
"vZh th 1\n"
"sPb st 1\n"
"vCs st 1\n"
"fQq qu 1\n"
"qYq qu 1\n"
"hBp th 1\n"
"Jbk ka 1\n"
"gqK qu 1\n"
"krq qu 1\n"
"Cfz sz 1\n"
"mbJ me 1\n"
"fRq qu 1\n"
"Iwv va 1\n"
"uFn an 1\n"
"cYz ch 1\n"
"qDb qu 1\n"
"xHd de 1\n"
"qmI qu 1\n"
"ycE ch 1\n"
"Mhf th 1\n"
"iuE qu 1\n"
"gXf ng 1\n"
"lPy le 1\n"
"bPv va 1\n"
"jXh th 1\n"
"gOx ng 1\n"
"Nmv va 1\n"
"xDg ng 1\n"
"Cwd de 1\n"
"ljP le 1\n"
"wqV qu 1\n"
"nrE an 1\n"
"Kmw me 1\n"
"gJt th 1\n"
"tgB th 1\n"
"xzR sz 1\n"
"vJr er 1\n"
"aUi an 1\n"
"ynY an 1\n"
"bZv va 1\n"
"fFq qu 1\n"
"Sxg ng 1\n"
"qAc ch 1\n"
"iZv in 1\n"
"jXu qu 1\n"
"gpR ng 1\n"
"wVl le 1\n"
"dNj de 1\n"
"fBw wa 1\n"
"Mjy ij 1\n"
"kjZ ij 1\n"
"tLs th 1\n"
"iYj in 1\n"
"wbO wa 1\n"
"qXb qu 1\n"
"uJq qu 1\n"
"qKt th 1\n"
"vjO ij 1\n"
"wuD qu 1\n"
"blQ le 1\n"
"yfB ny 1\n"
"Qsk st 1\n"
"Uwm me 1\n"
"Zqg qu 1\n"
"nmY an 1\n"
"pXw pr 1\n"
"yVj ij 1\n"
"gIw ng 1\n"
"Hxk ka 1\n"
"Pgy ng 1\n"
"lQv le 1\n"
"bnK an 1\n"
"xtZ th 1\n"
"Qce ch 1\n"
"Njq qu 1\n"
"mvq qu 1\n"
"Mwz sz 1\n"
"Gtn th 1\n"
"fJh th 1\n"
"vJz sz 1\n"
"gDk ng 1\n"
"dLw de 1\n"
"oeU er 1\n"
"cvY ch 1\n"
"Gbb be 1\n"
"Tqd qu 1\n"
"aTp an 1\n"
"Ywg ng 1\n"
"jdT de 1\n"
"Wkm ka 1\n"
"pxA pr 1\n"
"vDl le 1\n"
"sfD st 1\n"
"rqV qu 1\n"
"cHb ch 1\n"
"iVc ch 1\n"
"Mfh th 1\n"
"sVm st 1\n"
"nzR an 1\n"
"Qvs st 1\n"
"kZg ng 1\n"
"Wnw an 1\n"
"qZb qu 1\n"
"Gvq qu 1\n"
"vPk ka 1\n"
"Sxq qu 1\n"
"vNg ng 1\n"
"qrH qu 1\n"
"fLc ch 1\n"
"wVs st 1\n"
"qEh th 1\n"
"uqC qu 1\n"
"tZx th 1\n"
"yhI th 1\n"
"wNh th 1\n"
"rFj er 1\n"
"xPq qu 1\n"
"pqW qu 1\n"
"Pjc ch 1\n"
"jYj ij 1\n"
"pFv va 1\n"
"vLr er 1\n"
"lqq qu 1\n"
"xJg ng 1\n"
"lVz le 1\n"
"cZc ch 1\n"
"hcF th 1\n"
"uhJ th 1\n"
"cLj ch 1\n"
"qyW qu 1\n"
"zhT th 1\n"
"mtK th 1\n"
"pRb pr 1\n"
"bCx be 1\n"
"nJf an 1\n"
"jwF ij 1\n"
"Pdj de 1\n"
"jxE ij 1\n"
"slZ le 1\n"
"Lxn an 1\n"
"znL an 1\n"
"mzV sz 1\n"
"lGq le 1\n"
"Qbw wa 1\n"
"jbY ij 1\n"
"zSm sz 1\n"
"Qqx qu 1\n"
"ypR pr 1\n"
"gCc ch 1\n"
"Yvx va 1\n"
"ihI th 1\n"
"Zfx fo 1\n"
"njI nd 1\n"
"Ypt th 1\n"
"lxT le 1\n"
"fVv va 1\n"
"Jzm sz 1\n"
"jxA ij 1\n"
"gDl ng 1\n"
"Eaq an 1\n"
"Qcn an 1\n"
"zGb sz 1\n"
"jLh th 1\n"
"qkX qu 1\n"
"wbK wa 1\n"
"nNx an 1\n"
"sqW qu 1\n"
"wRx wa 1\n"
"xrU er 1\n"
"fnQ an 1\n"
"kzB sz 1\n"
"Rcn ch 1\n"
"qbL qu 1\n"
"srD er 1\n"
"Vxu qu 1\n"
"qvF qu 1\n"
"wJr er 1\n"
"Yxg ng 1\n"
"qiY qu 1\n"
"fMc ch 1\n"
"hbY th 1\n"
"hgH th 1\n"
"dmS de 1\n"
"jTn an 1\n"
"Zjm ij 1\n"
"Njl le 1\n"
"dqV qu 1\n"
"Yjh th 1\n"
"rKw er 1\n"
"cxU ch 1\n"
"Ckj ij 1\n"
"zfJ sz 1\n"
"ytF th 1\n"
"xrP er 1\n"
"qEj qu 1\n"
"rxO er 1\n"
"rZn an 1\n"
"bZq qu 1\n"
"cXq ch 1\n"
"wvD va 1\n"
"hcX th 1\n"
"zkO sz 1\n"
"hNx th 1\n"
"wFg ng 1\n"
"kXu qu 1\n"
"Vkn an 1\n"
"Gjz sz 1\n"
"Qcd ch 1\n"
"yvF va 1\n"
"xFx xe 1\n"
"dSj de 1\n"
"xPb be 1\n"
"oFp on 1\n"
"qAk qu 1\n"
"rqU qu 1\n"
"pGv va 1\n"
"hzC th 1\n"
"qIk qu 1\n"
"Lhl th 1\n"
"Fwb wa 1\n"
"pgE ng 1\n"
"Awz sz 1\n"
"fBk ka 1\n"
"xKd de 1\n"
"Pfw wa 1\n"
"uqK qu 1\n"
"pJc ch 1\n"
"bTc ch 1\n"
"tWg th 1\n"
"gdN ng 1\n"
"jrN er 1\n"
"klS le 1\n"
"qEi qu 1\n"
"sFn an 1\n"
"tqR th 1\n"
"Fnm an 1\n"
"hXv th 1\n"
"fxN fo 1\n"
"bvL va 1\n"
"oGf on 1\n"
"hZm th 1\n"
"yfH ny 1\n"
"dcE ch 1\n"
"pgW ng 1\n"
"wrB er 1\n"
"kWm ka 1\n"
"Shx th 1\n"
"twP th 1\n"
"Qvd de 1\n"
"Qgu qu 1\n"
"pJt th 1\n"
"zNv sz 1\n"
"Hph th 1\n"
"klF le 1\n"
"vqz qu 1\n"
"sgG ng 1\n"
"kdZ de 1\n"
"ejX er 1\n"
"Pxu qu 1\n"
"pvT va 1\n"
"Kqx qu 1\n"
"Qmb me 1\n"
"xFk ka 1\n"
"wQb wa 1\n"
"Pgx ng 1\n"
"ypL pr 1\n"
"bwE wa 1\n"
"xHt th 1\n"
"kVz sz 1\n"
"jmF ij 1\n"
"Ixq qu 1\n"
"qyP qu 1\n"
"rVv er 1\n"
"Ytw th 1\n"
"qpZ qu 1\n"
"tpZ th 1\n"
"zjX sz 1\n"
"Khg th 1\n"
"qfV qu 1\n"
"Jzx sz 1\n"
"kTj ij 1\n"
"Bzq qu 1\n"
"njR an 1\n"
"cgW ch 1\n"
"cmI ch 1\n"
"kCb ka 1\n"
"pYp pr 1\n"
"vkZ ka 1\n"
"wvk ka 1\n"
"Vfq qu 1\n"
"nlZ an 1\n"
"qNj qu 1\n"
"rCq qu 1\n"
"kbV ka 1\n"
"Dqj qu 1\n"
"brD er 1\n"
"lbG le 1\n"
"xhF th 1\n"
"kxZ ka 1\n"
"Iuq qu 1\n"
"yFx ny 1\n"
"qVl qu 1\n"
"lcG ch 1\n"
"vWr er 1\n"
"aBq an 1\n"
"yJk ka 1\n"
"czL ch 1\n"
"jIu qu 1\n"
"vUl le 1\n"
"pZq qu 1\n"
"vtW th 1\n"
"Qxw wa 1\n"
"dYv de 1\n"
"iqH qu 1\n"
"Xws st 1\n"
"fDj ij 1\n"
"xVz sz 1\n"
"dKq qu 1\n"
"vfQ va 1\n"
"hvD th 1\n"
"wdY de 1\n"
"Hzz sz 1\n"
"cYs ch 1\n"
"Ftj th 1\n"
"dpU de 1\n"
"Lld le 1\n"
"Gqw qu 1\n"
"kdR de 1\n"
"vXg ng 1\n"
"qsY qu 1\n"
"jNf ij 1\n"
"Qjj ij 1\n"
"pVl le 1\n"
"Jmx me 1\n"
"pDj ij 1\n"
"iBc ch 1\n"
"kLj ij 1\n"
"xnG an 1\n"
"vTl le 1\n"
"Ndg ng 1\n"
"pqU qu 1\n"
"Uaw an 1\n"
"fzN sz 1\n"
"gNq qu 1\n"
"kjM ij 1\n"
"lnK an 1\n"
"zxb sz 1\n"
"kcS ch 1\n"
"njM an 1\n"
"Gdw de 1\n"
"lnZ an 1\n"
"Ygj ng 1\n"
"hKd th 1\n"
"gpT ng 1\n"
"yqP qu 1\n"
"ijX in 1\n"
"jGf ij 1\n"
"bxI be 1\n"
"vXx va 1\n"
"Vrw er 1\n"
"Cwx wa 1\n"
"nBh th 1\n"
"qvy qu 1\n"
"sxB st 1\n"
"mVk ka 1\n"
"Czx sz 1\n"
"fyV ny 1\n"
"cXw ch 1\n"
"Qnf an 1\n"
"Yqd qu 1\n"
"lqH qu 1\n"
"dbY de 1\n"
"Sqb qu 1\n"
"Kqw qu 1\n"
"zpJ sz 1\n"
"cbM ch 1\n"
"zFg ng 1\n"
"sKb st 1\n"
"qrK qu 1\n"
"zJc ch 1\n"
"nRn an 1\n"
"fqN qu 1\n"
"hfA th 1\n"
"qoG qu 1\n"
"Owz sz 1\n"
"nlG an 1\n"
"wIx wa 1\n"
"qrP qu 1\n"
"Nwg ng 1\n"
"qaW an 1\n"
"hcT th 1\n"
"wkB ka 1\n"
"Ndt th 1\n"
"Kzq qu 1\n"
"gxB ng 1\n"
"Bjz sz 1\n"
"vTf va 1\n"
"jFq qu 1\n"
"qMe qu 1\n"
"ufQ qu 1\n"
"npG an 1\n"
"uZk qu 1\n"
"qTw qu 1\n"
"Glw le 1\n"
"Kqq qu 1\n"
"Cxr er 1\n"
"jZs st 1\n"
"Sqv qu 1\n"
"yPm me 1\n"
"eQj er 1\n"
"aIh th 1\n"
"gDq qu 1\n"
"lIp le 1\n"
"jNj ij 1\n"
"qOd qu 1\n"
"vkM ka 1\n"
"vFy va 1\n"
"cfV ch 1\n"
"Kjh th 1\n"
"gkP ng 1\n"
"rJc ch 1\n"
"uPq qu 1\n"
"ozQ on 1\n"
"Dlk le 1\n"
"vXh th 1\n"
"ktY th 1\n"
"vWy va 1\n"
"gQv ng 1\n"
"Yww wa 1\n"
"Tpz sz 1\n"
"Qhc th 1\n"
"xuT qu 1\n"
"nbS an 1\n"
"zQg ng 1\n"
"vgZ ng 1\n"
"pUo on 1\n"
"uWb qu 1\n"
"mMf me 1\n"
"Zcd ch 1\n"
"iBp in 1\n"
"fwp pr 1\n"
"zYf sz 1\n"
"wCp pr 1\n"
"Cqy qu 1\n"
"cjF ch 1\n"
"Gfh th 1\n"
"mcW ch 1\n"
"cqV ch 1\n"
"uJd qu 1\n"
"iUj in 1\n"
"vkR ka 1\n"
"wgI ng 1\n"
"vUg ng 1\n"
"Wdn de 1\n"
"sjF st 1\n"
"tPv th 1\n"
"xRn an 1\n"
"klV le 1\n"
"sbM st 1\n"
"mfT me 1\n"
"dbV de 1\n"
"Fmn an 1\n"
"gfU ng 1\n"
"cbB ch 1\n"
"Yxz sz 1\n"
"Kxk ka 1\n"
"Dwq qu 1\n"
"wgX ng 1\n"
"sPv st 1\n"
"vHd de 1\n"
"nbH an 1\n"
"cFn an 1\n"
"qqX qu 1\n"
"jFe er 1\n"
"qEb qu 1\n"
"dFh th 1\n"
"uEo qu 1\n"
"lcI ch 1\n"
"bMm me 1\n"
"zZw sz 1\n"
"hjO th 1\n"
"hKx th 1\n"
"jgC ng 1\n"
"cnL an 1\n"
"Fdg ng 1\n"
"bGf be 1\n"
"Sjz sz 1\n"
"bMj ij 1\n"
"vXw va 1\n"
"Gff fo 1\n"
"Cww wa 1\n"
"jsQ st 1\n"
"Zgv ng 1\n"
"lPf le 1\n"
"nmQ an 1\n"
"Vdq qu 1\n"
"lcX ch 1\n"
"gjT ng 1\n"
"mwE me 1\n"
"qLm qu 1\n"
"cHq ch 1\n"
"Xtn th 1\n"
"Ntq th 1\n"
"gWk ng 1\n"
"Pqd qu 1\n"
"qpP qu 1\n"
"sRf st 1\n"
"qpL qu 1\n"
"cnD an 1\n"
"qpG qu 1\n"
"dzS sz 1\n"
"tZb th 1\n"
"ygM ng 1\n"
"bxC be 1\n"
"dfU de 1\n"
"bmB me 1\n"
"lBz le 1\n"
"gJx ng 1\n"
"Ykv ka 1\n"
"Zdk de 1\n"
"wnQ an 1\n"
"tZj th 1\n"
"Zzm sz 1\n"
"Vfh th 1\n"
"Mwc ch 1\n"
"rUo on 1\n"
"qwp qu 1\n"
"tcI th 1\n"
"tfD th 1\n"
"uoZ qu 1\n"
"fCw wa 1\n"
"iQq qu 1\n"
"qBg qu 1\n"
"sVb st 1\n"
"pjU ij 1\n"
"scQ ch 1\n"
"pqQ qu 1\n"
"svZ st 1\n"
"Zpj ij 1\n"
"piV in 1\n"
"kbP ka 1\n"
"wqM qu 1\n"
"rVb er 1\n"
"qZr qu 1\n"
"hxO th 1\n"
"wTn an 1\n"
"Jzf sz 1\n"
"Qjb ij 1\n"
"uYv qu 1\n"
"pwK pr 1\n"
"hvH th 1\n"
"Dqe qu 1\n"
"pfI pr 1\n"
"mhV th 1\n"
"jgE ng 1\n"
"rcQ ch 1\n"
"kmT ka 1\n"
"Wzj sz 1\n"
"xNs st 1\n"
"Pbj ij 1\n"
"zvB sz 1\n"
"xhJ th 1\n"
"svq qu 1\n"
"Nvn an 1\n"
"swZ st 1\n"
"jgF ng 1\n"
"mfL me 1\n"
"zkL sz 1\n"
"jVp ij 1\n"
"Dkj ij 1\n"
"xuY qu 1\n"
"hHq th 1\n"
"cSf ch 1\n"
"Jzd sz 1\n"
"lqU qu 1\n"
"qMd qu 1\n"
"Qgj ng 1\n"
"fxk ka 1\n"
"tRt th 1\n"
"zFk sz 1\n"
"qEo qu 1\n"
"voY on 1\n"
"Awj ij 1\n"
"Txj ij 1\n"
"cIg ch 1\n"
"xUu qu 1\n"
"sRr er 1\n"
"Jxn an 1\n"
"iPf in 1\n"
"ejY er 1\n"
"Xts th 1\n"
"pfT pr 1\n"
"Pqa an 1\n"
"zsV st 1\n"
"ypC pr 1\n"
"wMs st 1\n"
"qEc ch 1\n"
"vxY va 1\n"
"fUg ng 1\n"
"Dff fo 1\n"
"gqQ qu 1\n"
"zMv sz 1\n"
"vJi in 1\n"
"fPv va 1\n"
"dLz sz 1\n"
"cdM ch 1\n"
"gNx ng 1\n"
"aGv an 1\n"
"vvD va 1\n"
"dJh th 1\n"
"rxY er 1\n"
"rWj er 1\n"
"Pvx va 1\n"
"rhD th 1\n"
"zRd sz 1\n"
"Kgv ng 1\n"
"Xvy va 1\n"
"kZj ij 1\n"
"kpK ka 1\n"
"Pfn an 1\n"
"wUe er 1\n"
"wWx wa 1\n"
"jPw ij 1\n"
"gLq qu 1\n"
"iJq qu 1\n"
"gPx ng 1\n"
"jHd de 1\n"
"vJb va 1\n"
"xhB th 1\n"
"xQv va 1\n"
"Eoa an 1\n"
"pjO ij 1\n"
"yFj ij 1\n"
"sXo on 1\n"
"wbY wa 1\n"
"cjO ch 1\n"
"mlZ le 1\n"
"bNv va 1\n"
"kjP ij 1\n"
"yXn an 1\n"
"qVj qu 1\n"
"fNv va 1\n"
"gjW ng 1\n"
"nXj an 1\n"
"dqJ qu 1\n"
"Hnh th 1\n"
"Qyk ka 1\n"
"kvB ka 1\n"
"qyB qu 1\n"
"mDt th 1\n"
"zgP ng 1\n"
"Zzk sz 1\n"
"fMk ka 1\n"
"xzY sz 1\n"
"qbT qu 1\n"
"xOt th 1\n"
"xsA st 1\n"
"gLj ng 1\n"
"zxH sz 1\n"
"cLm ch 1\n"
"Dnk an 1\n"
"zIu qu 1\n"
"kpJ ka 1\n"
"xrK er 1\n"
"eIb er 1\n"
"Jbp pr 1\n"
"Bqg qu 1\n"
"tXg th 1\n"
"Zjk ij 1\n"
"dRd de 1\n"
"tjZ th 1\n"
"hQl th 1\n"
"iyW in 1\n"
"Jwd de 1\n"
"qZt th 1\n"
"cJp ch 1\n"
"jBg ng 1\n"
"zrG er 1\n"
"hWf th 1\n"
"Zds st 1\n"
"qsZ qu 1\n"
"cQx ch 1\n"
"ccN ch 1\n"
"ywM wa 1\n"
"gbX ng 1\n"
"tfT th 1\n"
"vwt th 1\n"
"Qbp pr 1\n"
"yeY er 1\n"
"aUb an 1\n"
"qHw qu 1\n"
"Fhq th 1\n"
"Fng an 1\n"
"lvI le 1\n"
"jCf ij 1\n"
"hqH th 1\n"
"tTq th 1\n"
"sfI st 1\n"
"vsM st 1\n"
"lDp le 1\n"
"wJb wa 1\n"
"bhX th 1\n"
"rRq qu 1\n"
"qtS th 1\n"
"Zwp pr 1\n"
"Jbh th 1\n"
"hHb th 1\n"
"pDy pr 1\n"
"sjD st 1\n"
"Oyp pr 1\n"
"qwD qu 1\n"
"jbD ij 1\n"
"vpG va 1\n"
"Wjb ij 1\n"
"vpB va 1\n"
"aXq an 1\n"
"mWz sz 1\n"
"qHi qu 1\n"
"fyN ny 1\n"
"mbQ me 1\n"
"ywC wa 1\n"
"oVg ng 1\n"
"xmZ me 1\n"
"slO le 1\n"
"fXn an 1\n"
"kYs st 1\n"
"pVu qu 1\n"
"bkU ka 1\n"
"Brq qu 1\n"
"qCq qu 1\n"
"Xcx ch 1\n"
"zMt th 1\n"
"cRw ch 1\n"
"gzQ ng 1\n"
"Qbg ng 1\n"
"juU qu 1\n"
"xSz sz 1\n"
"Vgz ng 1\n"
"oMw on 1\n"
"fpE pr 1\n"
"xjX ij 1\n"
"qCg qu 1\n"
"zwM sz 1\n"
"uQl qu 1\n"
"qPk qu 1\n"
"pjD ij 1\n"
"Qzm sz 1\n"
"sIp st 1\n"
"uoG qu 1\n"
"rVl er 1\n"
"cbK ch 1\n"
"hXm th 1\n"
"Ksf st 1\n"
"kbF ka 1\n"
"wBm me 1\n"
"iYt th 1\n"
"sgH ng 1\n"
"Gzv sz 1\n"
"yvE va 1\n"
"xKq qu 1\n"
"sWf st 1\n"
"zBc ch 1\n"
"ykH ka 1\n"
"vjH ij 1\n"
"whI th 1\n"
"vPj ij 1\n"
"Zht th 1\n"
"iJx in 1\n"
"cZt th 1\n"
"dqU qu 1\n"
"hMd th 1\n"
"cUj ch 1\n"
"vMg ng 1\n"
"pcJ ch 1\n"
"Bcm ch 1\n"
"jXi in 1\n"
"xoI on 1\n"
"Zkq qu 1\n"
"Xzr er 1\n"
"yzM sz 1\n"
"qjX qu 1\n"
"mNq qu 1\n"
"hpX th 1\n"
"fBq qu 1\n"
"tXd th 1\n"
"Xki in 1\n"
"Hsq qu 1\n"
"bqU qu 1\n"
"sgF ng 1\n"
"dPc ch 1\n"
"Jxi in 1\n"
"Ugp ng 1\n"
"Rxi in 1\n"
"Kwm me 1\n"
"zkD sz 1\n"
"Rql qu 1\n"
"pJb pr 1\n"
"fcV ch 1\n"
"iVd in 1\n"
"bBp be 1\n"
"Ojw ij 1\n"
"vZl le 1\n"
"Iyj ij 1\n"
"fkU ka 1\n"
"Kcq ch 1\n"
"dBq qu 1\n"
"Mqq qu 1\n"
"iMg ng 1\n"
"Wws st 1\n"
"tqX th 1\n"
"xhD th 1\n"
"rNl er 1\n"
"pWd de 1\n"
"jrV er 1\n"
"Bmj ij 1\n"
"Hmq qu 1\n"
"vlH le 1\n"
"Mxb be 1\n"
"yyS ny 1\n"
"qvW qu 1\n"
"fvX va 1\n"
"Vfe er 1\n"
"Cdw de 1\n"
"Kge ng 1\n"
"Qej er 1\n"
"rvZ er 1\n"
"vzI sz 1\n"
"dDn an 1\n"
"nwS an 1\n"
"Qcb ch 1\n"
"wkV ka 1\n"
"uCx qu 1\n"
"Igk ng 1\n"
"Vpm me 1\n"
"hBm th 1\n"
"pdQ de 1\n"
"fgQ ng 1\n"
"yQm me 1\n"
"gxH ng 1\n"
"pqK qu 1\n"
"lRc ch 1\n"
"Xdv de 1\n"
"hDz th 1\n"
"dFw de 1\n"
"qQu un 1\n"
"xbD be 1\n"
"qmE qu 1\n"
"mWm me 1\n"
"jBb ij 1\n"
"jXt th 1\n"
"fxU fo 1\n"
"Xwc ch 1\n"
"Lqf qu 1\n"
"hcP th 1\n"
"pfB pr 1\n"
"vSg ng 1\n"
"xJw wa 1\n"
"mRf me 1\n"
"hqW th 1\n"
"nVb an 1\n"
"cEu ch 1\n"
"nfN an 1\n"
"nVj an 1\n"
"Rwk ka 1\n"
"nmG an 1\n"
"oDt th 1\n"
"kPb ka 1\n"
"gqW qu 1\n"
"Qhf th 1\n"
"qZl qu 1\n"
"zHq qu 1\n"
"iXl in 1\n"
#endif
};
#pragma GCC diagnostic pop
inline const int ksizeofUniversalAmbigsFile = sizeof(kUniversalAmbigsFile);
} // namespace tesseract
#endif // TESSERACT_CCUTIL_UNIVERSALAMBIGS_H_
|
2301_81045437/tesseract
|
src/ccutil/universalambigs.h
|
C++
|
apache-2.0
| 324,389
|
/******************************************************************************
** Filename: adaptive.c
** Purpose: Adaptive matcher.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "adaptive.h"
#include "classify.h"
#include <cassert>
#include <cstdio>
namespace tesseract {
/*----------------------------------------------------------------------------
Public Code
----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
* This routine adds a new adapted class to an existing
* set of adapted templates.
*
* @param Templates set of templates to add new class to
* @param Class new class to add to templates
* @param ClassId class id to associate with new class
*
* @note Globals: none
*/
void AddAdaptedClass(ADAPT_TEMPLATES_STRUCT *Templates, ADAPT_CLASS_STRUCT *Class, CLASS_ID ClassId) {
  // Preconditions: valid arguments, an unused slot for ClassId, and a class
  // that has not yet accumulated any permanent configs.
  assert(Templates != nullptr);
  assert(Class != nullptr);
  assert(Class->NumPermConfigs == 0);
  assert(LegalClassId(ClassId));
  assert(UnusedClassIdIn(Templates->Templates, ClassId));

  // Create the matching (initially minimal) integer class in the underlying
  // integer templates, then record the adapted class in the parallel array.
  auto *int_class = new INT_CLASS_STRUCT(1, 1);
  AddIntClass(Templates->Templates, ClassId, int_class);

  assert(Templates->Class[ClassId] == nullptr);
  Templates->Class[ClassId] = Class;
} /* AddAdaptedClass */
/*---------------------------------------------------------------------------*/
// Releases the owned ambiguity array (allocated with new[] in ReadPermConfig,
// terminated by -1). NOTE(review): the defaulted constructor leaves Ambigs
// indeterminate — confirm every instance assigns it before destruction.
PERM_CONFIG_STRUCT::~PERM_CONFIG_STRUCT() {
  delete[] Ambigs;
}
// Builds an empty adapted class: no permanent configs/protos, no temporary
// protos, and every config slot cleared.
ADAPT_CLASS_STRUCT::ADAPT_CLASS_STRUCT() {
  NumPermConfigs = 0;
  MaxNumTimesSeen = 0;
  TempProtos = NIL_LIST;
  // Allocate the "is permanent" bit vectors and clear them so nothing starts
  // out marked permanent.
  PermProtos = NewBitVector(MAX_NUM_PROTOS);
  PermConfigs = NewBitVector(MAX_NUM_CONFIGS);
  zero_all_bits(PermProtos, WordsInVectorOfSize(MAX_NUM_PROTOS));
  zero_all_bits(PermConfigs, WordsInVectorOfSize(MAX_NUM_CONFIGS));
  // Null every config pointer; Temp/Perm share a union, so nulling Temp
  // nulls the whole slot.
  for (int i = 0; i < MAX_NUM_CONFIGS; i++) {
    TempConfigFor(this, i) = nullptr;
  }
}
// Frees everything this class owns. The Config[] union is discriminated by
// the PermConfigs bit vector: a set bit means the slot holds a
// PERM_CONFIG_STRUCT*, otherwise a TEMP_CONFIG_STRUCT*.
ADAPT_CLASS_STRUCT::~ADAPT_CLASS_STRUCT() {
  for (int i = 0; i < MAX_NUM_CONFIGS; i++) {
    if (ConfigIsPermanent(this, i) && PermConfigFor(this, i) != nullptr) {
      delete PermConfigFor(this, i);
    } else if (!ConfigIsPermanent(this, i) && TempConfigFor(this, i) != nullptr) {
      delete TempConfigFor(this, i);
    }
  }
  FreeBitVector(PermProtos);
  FreeBitVector(PermConfigs);
  // Walk the temporary-proto list, deleting each payload and popping the
  // list cell as we go.
  auto list = TempProtos;
  while (list != nullptr) {
    delete reinterpret_cast<TEMP_PROTO_STRUCT *>(list->node);
    list = pop(list);
  }
}
/// Constructor for adapted templates.
/// Add an empty class for each char in unicharset to the newly created templates.
ADAPT_TEMPLATES_STRUCT::ADAPT_TEMPLATES_STRUCT(UNICHARSET &unicharset) {
  Templates = new INT_TEMPLATES_STRUCT;
  NumPermClasses = 0;
  NumNonEmptyClasses = 0;
  /* Insert an empty class for each unichar id in unicharset */
  for (unsigned i = 0; i < MAX_NUM_CLASSES; i++) {
    // Null the whole array so the destructor can safely delete every slot,
    // even the ones beyond unicharset.size() that never get a class.
    Class[i] = nullptr;
    if (i < unicharset.size()) {
      AddAdaptedClass(this, new ADAPT_CLASS_STRUCT, i);
    }
  }
}
// Deletes the per-class adaptive data, then the underlying integer templates.
ADAPT_TEMPLATES_STRUCT::~ADAPT_TEMPLATES_STRUCT() {
  const unsigned num_classes = Templates->NumClasses;
  for (unsigned idx = 0; idx < num_classes; ++idx) {
    delete Class[idx];
  }
  delete Templates;
}
// Returns FontinfoId of the given config of the given adapted class.
// The config union is discriminated by the class's PermConfigs bit vector.
int Classify::GetFontinfoId(ADAPT_CLASS_STRUCT *Class, uint8_t ConfigId) {
  if (ConfigIsPermanent(Class, ConfigId)) {
    return PermConfigFor(Class, ConfigId)->FontinfoId;
  }
  return TempConfigFor(Class, ConfigId)->FontinfoId;
}
/// Allocates a new temporary config covering protos [0, maxProtoId],
/// with an all-zero proto membership bit vector and a seen-count of 1.
///
/// @param maxProtoId max id of any proto in new config
/// @param fontinfoId font information from pre-trained templates
TEMP_CONFIG_STRUCT::TEMP_CONFIG_STRUCT(int maxProtoId, int fontinfoId) {
  const int num_protos = maxProtoId + 1;
  MaxProtoId = maxProtoId;
  FontinfoId = fontinfoId;
  NumTimesSeen = 1;
  ProtoVectorSize = WordsInVectorOfSize(num_protos);
  Protos = NewBitVector(num_protos);
  zero_all_bits(Protos, ProtoVectorSize);
}
// Frees the owned proto bit vector. NOTE(review): the defaulted constructor
// leaves Protos indeterminate — confirm every instance assigns it before
// destruction.
TEMP_CONFIG_STRUCT::~TEMP_CONFIG_STRUCT() {
  FreeBitVector(Protos);
}
/*---------------------------------------------------------------------------*/
/**
 * This routine prints a summary of the adapted templates
 * in Templates to File.
 *
 * Columns: Id = class id, NC = number of configs, NPC = number of permanent
 * configs, NP = number of protos, NPP = number of permanent protos
 * (total protos minus the temporary ones).
 *
 * @param File open text file to print Templates to
 * @param Templates adapted templates to print to File
 *
 * @note Globals: none
 */
void Classify::PrintAdaptedTemplates(FILE *File, ADAPT_TEMPLATES_STRUCT *Templates) {
  INT_CLASS_STRUCT *IClass;
  ADAPT_CLASS_STRUCT *AClass;
  fprintf(File, "\n\nSUMMARY OF ADAPTED TEMPLATES:\n\n");
  fprintf(File, "Num classes = %d; Num permanent classes = %d\n\n", Templates->NumNonEmptyClasses,
          Templates->NumPermClasses);
  fprintf(File, "   Id  NC NPC  NP NPP\n");
  fprintf(File, "------------------------\n");
  for (unsigned i = 0; i < (Templates->Templates)->NumClasses; i++) {
    // The integer class and the adapted class for a given id live in
    // parallel arrays.
    IClass = Templates->Templates->Class[i];
    AClass = Templates->Class[i];
    if (!IsEmptyAdaptedClass(AClass)) {
      fprintf(File, "%5u  %s %3d %3d %3d %3zd\n", i, unicharset.id_to_unichar(i), IClass->NumConfigs,
              AClass->NumPermConfigs, IClass->NumProtos,
              IClass->NumProtos - AClass->TempProtos->size());
    }
  }
  fprintf(File, "\n");
} /* PrintAdaptedTemplates */
/*---------------------------------------------------------------------------*/
/**
 * Read an adapted class description from file and return
 * a ptr to the adapted class.
 *
 * The on-disk layout mirrors WriteAdaptedClass exactly: raw struct image,
 * permanent proto/config bit vectors, temp-proto count + protos, then the
 * per-config records. The read order must not change.
 *
 * @param fp open file to read adapted class from
 * @return Ptr to new adapted class.
 *
 * @note Globals: none
 */
ADAPT_CLASS_STRUCT *ReadAdaptedClass(TFile *fp) {
  int NumTempProtos;
  int NumConfigs;
  int i;
  ADAPT_CLASS_STRUCT *Class;
  /* first read high level adapted class structure */
  // The raw FRead clobbers the pointers the constructor just set up; every
  // pointer member is re-created from file data below.
  Class = new ADAPT_CLASS_STRUCT;
  fp->FRead(Class, sizeof(ADAPT_CLASS_STRUCT), 1);
  /* then read in the definitions of the permanent protos and configs */
  Class->PermProtos = NewBitVector(MAX_NUM_PROTOS);
  Class->PermConfigs = NewBitVector(MAX_NUM_CONFIGS);
  fp->FRead(Class->PermProtos, sizeof(uint32_t), WordsInVectorOfSize(MAX_NUM_PROTOS));
  fp->FRead(Class->PermConfigs, sizeof(uint32_t), WordsInVectorOfSize(MAX_NUM_CONFIGS));
  /* then read in the list of temporary protos */
  fp->FRead(&NumTempProtos, sizeof(int), 1);
  Class->TempProtos = NIL_LIST;
  for (i = 0; i < NumTempProtos; i++) {
    auto TempProto = new TEMP_PROTO_STRUCT;
    fp->FRead(TempProto, sizeof(TEMP_PROTO_STRUCT), 1);
    // push_last keeps the list in the same order it was written.
    Class->TempProtos = push_last(Class->TempProtos, TempProto);
  }
  /* then read in the adapted configs */
  // PermConfigs (read above) discriminates which union member each config
  // slot uses, so it must be consulted before reading each record.
  fp->FRead(&NumConfigs, sizeof(int), 1);
  for (i = 0; i < NumConfigs; i++) {
    if (test_bit(Class->PermConfigs, i)) {
      Class->Config[i].Perm = ReadPermConfig(fp);
    } else {
      Class->Config[i].Temp = ReadTempConfig(fp);
    }
  }
  return (Class);
} /* ReadAdaptedClass */
/*---------------------------------------------------------------------------*/
/**
 * Read a set of adapted templates from file and return
 * a ptr to the templates.
 *
 * Layout mirrors WriteAdaptedTemplates: raw struct image, integer
 * templates, then one adapted class per integer class.
 *
 * @param fp open text file to read adapted templates from
 * @return Ptr to adapted templates read from file.
 *
 * @note Globals: none
 */
ADAPT_TEMPLATES_STRUCT *Classify::ReadAdaptedTemplates(TFile *fp) {
  auto Templates = new ADAPT_TEMPLATES_STRUCT;
  /* first read the high level adaptive template struct */
  // Raw overwrite of the whole object; the pointer members are replaced
  // with freshly read data below.
  fp->FRead(Templates, sizeof(ADAPT_TEMPLATES_STRUCT), 1);
  /* then read in the basic integer templates */
  Templates->Templates = ReadIntTemplates(fp);
  /* then read in the adaptive info for each class */
  for (unsigned i = 0; i < (Templates->Templates)->NumClasses; i++) {
    Templates->Class[i] = ReadAdaptedClass(fp);
  }
  return (Templates);
} /* ReadAdaptedTemplates */
/*---------------------------------------------------------------------------*/
/**
 * Read a permanent configuration description from file
 * and return a ptr to it.
 *
 * Layout mirrors WritePermConfig: ambiguity count (uint8_t), that many
 * UNICHAR_IDs, then the font info id.
 *
 * @param fp open file to read permanent config from
 * @return Ptr to new permanent configuration description.
 *
 * @note Globals: none
 */
PERM_CONFIG_STRUCT *ReadPermConfig(TFile *fp) {
  auto Config = new PERM_CONFIG_STRUCT;
  uint8_t NumAmbigs;
  fp->FRead(&NumAmbigs, sizeof(NumAmbigs), 1);
  // +1 for the -1 terminator appended below; callers scan to it rather
  // than carrying a length.
  Config->Ambigs = new UNICHAR_ID[NumAmbigs + 1];
  fp->FRead(Config->Ambigs, sizeof(UNICHAR_ID), NumAmbigs);
  Config->Ambigs[NumAmbigs] = -1;
  fp->FRead(&(Config->FontinfoId), sizeof(int), 1);
  return (Config);
} /* ReadPermConfig */
/*---------------------------------------------------------------------------*/
/**
 * Read a temporary configuration description from file
 * and return a ptr to it.
 *
 * Layout mirrors WriteTempConfig: raw struct image followed by the proto
 * bit vector words.
 *
 * @param fp open file to read temporary config from
 * @return Ptr to new temporary configuration description.
 *
 * @note Globals: none
 */
TEMP_CONFIG_STRUCT *ReadTempConfig(TFile *fp) {
  auto Config = new TEMP_CONFIG_STRUCT;
  // Raw overwrite of the whole struct, including the Protos pointer, which
  // is immediately replaced with a freshly allocated vector.
  fp->FRead(Config, sizeof(TEMP_CONFIG_STRUCT), 1);
  // ProtoVectorSize was just read from file; allocate enough bits to cover
  // that many words, then fill them from the stream.
  Config->Protos = NewBitVector(Config->ProtoVectorSize * BITSINLONG);
  fp->FRead(Config->Protos, sizeof(uint32_t), Config->ProtoVectorSize);
  return (Config);
} /* ReadTempConfig */
/*---------------------------------------------------------------------------*/
/**
 * This routine writes a binary representation of Class
 * to File.
 *
 * Must stay in lock-step with ReadAdaptedClass: struct image, permanent
 * bit vectors, temp-proto count + protos, per-config records.
 *
 * @param File open file to write Class to
 * @param Class adapted class to write to File
 * @param NumConfigs number of configs in Class
 *
 * @note Globals: none
 */
void WriteAdaptedClass(FILE *File, ADAPT_CLASS_STRUCT *Class, int NumConfigs) {
  /* first write high level adapted class structure */
  fwrite(Class, sizeof(ADAPT_CLASS_STRUCT), 1, File);
  /* then write out the definitions of the permanent protos and configs */
  fwrite(Class->PermProtos, sizeof(uint32_t), WordsInVectorOfSize(MAX_NUM_PROTOS), File);
  fwrite(Class->PermConfigs, sizeof(uint32_t), WordsInVectorOfSize(MAX_NUM_CONFIGS), File);
  /* then write out the list of temporary protos */
  // NOTE(review): written as uint32_t but ReadAdaptedClass reads an int;
  // matches only where sizeof(int) == 4 — confirm on target platforms.
  uint32_t NumTempProtos = Class->TempProtos->size();
  fwrite(&NumTempProtos, sizeof(NumTempProtos), 1, File);
  auto TempProtos = Class->TempProtos;
  iterate(TempProtos) {
    void *proto = TempProtos->node;
    fwrite(proto, sizeof(TEMP_PROTO_STRUCT), 1, File);
  }
  /* then write out the adapted configs */
  // PermConfigs discriminates the Config[] union, exactly as on the read
  // side.
  fwrite(&NumConfigs, sizeof(int), 1, File);
  for (int i = 0; i < NumConfigs; i++) {
    if (test_bit(Class->PermConfigs, i)) {
      WritePermConfig(File, Class->Config[i].Perm);
    } else {
      WriteTempConfig(File, Class->Config[i].Temp);
    }
  }
} /* WriteAdaptedClass */
/*---------------------------------------------------------------------------*/
/**
 * This routine saves Templates to File in a binary format.
 *
 * Must stay in lock-step with ReadAdaptedTemplates: struct image, integer
 * templates, then one adapted class per integer class.
 *
 * @param File open text file to write Templates to
 * @param Templates set of adapted templates to write to File
 *
 * @note Globals: none
 */
void Classify::WriteAdaptedTemplates(FILE *File, ADAPT_TEMPLATES_STRUCT *Templates) {
  /* first write the high level adaptive template struct */
  fwrite(Templates, sizeof(ADAPT_TEMPLATES_STRUCT), 1, File);
  /* then write out the basic integer templates */
  WriteIntTemplates(File, Templates->Templates, unicharset);
  /* then write out the adaptive info for each class */
  for (unsigned i = 0; i < (Templates->Templates)->NumClasses; i++) {
    WriteAdaptedClass(File, Templates->Class[i], Templates->Templates->Class[i]->NumConfigs);
  }
} /* WriteAdaptedTemplates */
/*---------------------------------------------------------------------------*/
/**
 * This routine writes a binary representation of a
 * permanent configuration to File.
 *
 * Layout matches ReadPermConfig: ambiguity count (uint8_t), the ids, then
 * the font info id.
 *
 * @param File open file to write Config to
 * @param Config permanent config to write to File
 *
 * @note Globals: none
 */
void WritePermConfig(FILE *File, PERM_CONFIG_STRUCT *Config) {
  uint8_t NumAmbigs = 0;
  assert(Config != nullptr);
  // Scan to the terminator written by ReadPermConfig (-1). NOTE(review):
  // the `> 0` test also stops at unichar id 0 — confirm id 0 can never
  // appear in an ambiguity list.
  while (Config->Ambigs[NumAmbigs] > 0) {
    ++NumAmbigs;
  }
  fwrite(&NumAmbigs, sizeof(uint8_t), 1, File);
  fwrite(Config->Ambigs, sizeof(UNICHAR_ID), NumAmbigs, File);
  fwrite(&(Config->FontinfoId), sizeof(int), 1, File);
} /* WritePermConfig */
/*---------------------------------------------------------------------------*/
/**
 * This routine writes a binary representation of a
 * temporary configuration to File.
 *
 * Layout matches ReadTempConfig: raw struct image (the embedded Protos
 * pointer value is ignored by the reader) followed by the bit vector words.
 *
 * @param File open file to write Config to
 * @param Config temporary config to write to File
 *
 * @note Globals: none
 */
void WriteTempConfig(FILE *File, TEMP_CONFIG_STRUCT *Config) {
  assert(Config != nullptr);
  fwrite(Config, sizeof(TEMP_CONFIG_STRUCT), 1, File);
  fwrite(Config->Protos, sizeof(uint32_t), Config->ProtoVectorSize, File);
} /* WriteTempConfig */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/adaptive.cpp
|
C++
|
apache-2.0
| 13,307
|
/******************************************************************************
** Filename: adaptive.h
** Purpose: Interface to adaptive matcher.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef ADAPTIVE_H
#define ADAPTIVE_H
#include "intproto.h"
#include "oldlist.h"
#include <cstdio>
namespace tesseract {
// A temporary (not-yet-permanent) proto: the id it will occupy in the
// integer class plus the floating-point proto definition itself.
struct TEMP_PROTO_STRUCT {
  uint16_t ProtoId;    // slot in the owning class's proto array
  PROTO_STRUCT Proto;  // the proto parameters
};
// A temporary adapted config: tracks which protos belong to it and how many
// times it has matched. Owns the Protos bit vector (freed in the dtor).
struct TEMP_CONFIG_STRUCT {
  // Default-constructed instances are normally overwritten wholesale by
  // TFile::FRead (see ReadTempConfig); members are still initialized so that
  // destroying an instance that was never filled in is well-defined —
  // previously Protos was indeterminate and the dtor freed it.
  TEMP_CONFIG_STRUCT() = default;
  TEMP_CONFIG_STRUCT(int MaxProtoId, int FontinfoId);
  ~TEMP_CONFIG_STRUCT();
  uint8_t NumTimesSeen = 0;     // match count (capped by the caller)
  uint8_t ProtoVectorSize = 0;  // number of uint32_t words in Protos
  PROTO_ID MaxProtoId = 0;      // highest proto id covered by Protos
  BIT_VECTOR Protos = nullptr;  // owned membership bit vector
  int FontinfoId = 0;           // font information inferred from pre-trained templates
};
// A permanent adapted config: its ambiguity list and font info.
struct PERM_CONFIG_STRUCT {
  // Ambigs is initialized to nullptr so the destructor's delete[] is safe
  // even if an instance is destroyed before ReadPermConfig (or a caller)
  // assigns the array — previously it was indeterminate, making early
  // destruction undefined behavior.
  PERM_CONFIG_STRUCT() = default;
  ~PERM_CONFIG_STRUCT();
  UNICHAR_ID *Ambigs = nullptr;  // owned, -1 terminated array (new[] in ReadPermConfig)
  int FontinfoId = 0;            // font information inferred from pre-trained templates
};
// One config slot of an adapted class. Discriminated externally by the
// owning class's PermConfigs bit vector (set bit => Perm is active).
union ADAPTED_CONFIG {
  TEMP_CONFIG_STRUCT *Temp;
  PERM_CONFIG_STRUCT *Perm;
};
// Adaptive data for one character class, held in parallel with the integer
// class of the same id. Owns its bit vectors, temp-proto list, and configs.
struct ADAPT_CLASS_STRUCT {
  ADAPT_CLASS_STRUCT();
  ~ADAPT_CLASS_STRUCT();
  uint8_t NumPermConfigs;
  uint8_t MaxNumTimesSeen; // maximum number of times any TEMP_CONFIG_STRUCT was seen
                           // (cut at matcher_min_examples_for_prototyping)
  BIT_VECTOR PermProtos;   // bit set => proto is permanent
  BIT_VECTOR PermConfigs;  // bit set => Config[i].Perm is the active union member
  LIST TempProtos;         // list of TEMP_PROTO_STRUCT* not yet made permanent
  ADAPTED_CONFIG Config[MAX_NUM_CONFIGS];
};
// A full set of adapted templates: the underlying integer templates plus
// one ADAPT_CLASS_STRUCT per class (parallel arrays indexed by class id).
class ADAPT_TEMPLATES_STRUCT {
public:
  // The defaulted constructor leaves members unset; it is only used as an
  // FRead target in Classify::ReadAdaptedTemplates, which fills everything.
  ADAPT_TEMPLATES_STRUCT() = default;
  ADAPT_TEMPLATES_STRUCT(UNICHARSET &unicharset);
  ~ADAPT_TEMPLATES_STRUCT();
  INT_TEMPLATES_STRUCT *Templates;  // owned integer templates
  int NumNonEmptyClasses;
  uint8_t NumPermClasses;
  ADAPT_CLASS_STRUCT *Class[MAX_NUM_CLASSES];  // owned; entry i pairs with Templates->Class[i]
};
/*----------------------------------------------------------------------------
          Public Function Prototypes
----------------------------------------------------------------------------*/
// Number of classes in Template that have at least one config or proto.
#define NumNonEmptyClassesIn(Template) ((Template)->NumNonEmptyClasses)
// True when Class has no permanent configs and no temporary protos.
#define IsEmptyAdaptedClass(Class) ((Class)->NumPermConfigs == 0 && (Class)->TempProtos == NIL_LIST)
// Query/set permanence bits for a config or proto of Class.
#define ConfigIsPermanent(Class, ConfigId) (test_bit((Class)->PermConfigs, ConfigId))
#define MakeConfigPermanent(Class, ConfigId) (SET_BIT((Class)->PermConfigs, ConfigId))
#define MakeProtoPermanent(Class, ProtoId) (SET_BIT((Class)->PermProtos, ProtoId))
// Accessors for the two members of the ADAPTED_CONFIG union; the caller must
// know which member is live (see ConfigIsPermanent).
#define TempConfigFor(Class, ConfigId) ((Class)->Config[ConfigId].Temp)
#define PermConfigFor(Class, ConfigId) ((Class)->Config[ConfigId].Perm)
// Bumps the number of times a temporary config has been seen.
#define IncreaseConfidence(TempConfig) ((TempConfig)->NumTimesSeen++)
void AddAdaptedClass(ADAPT_TEMPLATES_STRUCT *Templates, ADAPT_CLASS_STRUCT *Class, CLASS_ID ClassId);
// Deserialization helpers (defined in adaptive.cpp).
ADAPT_CLASS_STRUCT *ReadAdaptedClass(tesseract::TFile *File);
PERM_CONFIG_STRUCT *ReadPermConfig(tesseract::TFile *File);
TEMP_CONFIG_STRUCT *ReadTempConfig(tesseract::TFile *File);
// Serialization helpers (defined in adaptive.cpp).
void WriteAdaptedClass(FILE *File, ADAPT_CLASS_STRUCT *Class, int NumConfigs);
void WritePermConfig(FILE *File, PERM_CONFIG_STRUCT *Config);
void WriteTempConfig(FILE *File, TEMP_CONFIG_STRUCT *Config);
} // namespace tesseract
#endif
// ---------------------------------------------------------------------------
// NOTE: dataset concatenation boundary. The content above is the end of
// src/classify/adaptive.h (repo 2301_81045437/tesseract, C++, apache-2.0,
// 3,699 bytes); everything below is a separate translation unit
// (adaptmatch.cpp) that was appended to it by the extraction process.
// ---------------------------------------------------------------------------
/******************************************************************************
** Filename: adaptmatch.cpp
** Purpose: High level adaptive matcher.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*-----------------------------------------------------------------------------
Include Files and Type Defines
-----------------------------------------------------------------------------*/
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#include "adaptive.h" // for ADAPT_CLASS
#include "ambigs.h" // for UnicharIdVector, UnicharAmbigs
#include "bitvec.h" // for FreeBitVector, NewBitVector, BIT_VECTOR
#include "blobs.h" // for TBLOB, TWERD
#include "classify.h" // for Classify, CST_FRAGMENT, CST_WHOLE
#include "dict.h" // for Dict
#include "errcode.h" // for ASSERT_HOST
#include "featdefs.h" // for CharNormDesc
#include "float2int.h" // for BASELINE_Y_SHIFT
#include "fontinfo.h" // for ScoredFont, FontSet
#include "intfx.h" // for BlobToTrainingSample, INT_FX_RESULT_S...
#include "intmatcher.h" // for CP_RESULT_STRUCT, IntegerMatcher
#include "intproto.h" // for INT_FEATURE_STRUCT, (anonymous), Clas...
#include "matchdefs.h" // for CLASS_ID, FEATURE_ID, PROTO_ID, NO_PROTO
#include "mfoutline.h" // for baseline, character, MF_SCALE_FACTOR
#include "normalis.h" // for DENORM, kBlnBaselineOffset, kBlnXHeight
#include "normfeat.h" // for ActualOutlineLength, CharNormLength
#include "ocrfeatures.h" // for FEATURE_STRUCT, FEATURE
#include "oldlist.h" // for push, delete_d
#include "outfeat.h" // for OutlineFeatDir, OutlineFeatLength
#include "pageres.h" // for WERD_RES
#include "params.h" // for IntParam, BoolParam, DoubleParam, Str...
#include "picofeat.h" // for PicoFeatDir, PicoFeatX, PicoFeatY
#include "protos.h" // for PROTO_STRUCT, FillABC
#include "ratngs.h" // for BLOB_CHOICE_IT, BLOB_CHOICE_LIST, BLO...
#include "rect.h" // for TBOX
#include "scrollview.h" // for ScrollView, ScrollView::BROWN, Scroll...
#include "seam.h" // for SEAM
#include "shapeclassifier.h" // for ShapeClassifier
#include "shapetable.h" // for UnicharRating, ShapeTable, Shape, Uni...
#include "tessclassifier.h" // for TessClassifier
#include "tessdatamanager.h" // for TessdataManager, TESSDATA_INTTEMP
#include "tprintf.h" // for tprintf
#include "trainingsample.h" // for TrainingSample
#include "unicharset.h" // for UNICHARSET, CHAR_FRAGMENT, UNICHAR_SPACE
#include "unicity_table.h" // for UnicityTable
#include <tesseract/unichar.h> // for UNICHAR_ID, INVALID_UNICHAR_ID
#include "helpers.h" // for IntCastRounded, ClipToRange
#include "serialis.h" // for TFile
#include <algorithm> // for max, min
#include <cassert> // for assert
#include <cmath> // for fabs
#include <cstdint> // for INT32_MAX, UINT8_MAX
#include <cstdio> // for fflush, fclose, fopen, stdout, FILE
#include <cstring> // for strstr, memset, strcmp
namespace tesseract {
// TODO: The parameter classify_enable_adaptive_matcher can cause
// a segmentation fault if it is set to false (issue #256),
// so override it here.
#define classify_enable_adaptive_matcher true
// File-name suffix for saved adapted templates (imagefile + ".a").
#define ADAPT_TEMPLATE_SUFFIX ".a"
// Match-count cap (used by code later in this file).
#define MAX_MATCHES 10
// Feature counts above this bound are treated as unlikely/bad extractions.
#define UNLIKELY_NUM_FEAT 200
// Debug-flag value for IntegerMatcher::Match meaning "no debugging".
#define NO_DEBUG 0
// Words longer than this are never used for adaptation (see AdaptableWord).
#define MAX_ADAPTABLE_WERD_SIZE 40
// Slack added to segment_penalty_dict_case_ok when deciding adaptability.
#define ADAPTABLE_WERD_ADJUSTMENT (0.05)
// Offset converting baseline-normalized Y (-0.25..0.75) to the matcher's
// -0.5..0.5 range (see the note in InitAdaptedClass).
#define Y_DIM_OFFSET (Y_SHIFT - BASELINE_Y_SHIFT)
// Ratings are higher-is-better in this file, so 0 is the worst score.
#define WORST_POSSIBLE_RATING (0.0f)
// Accumulator for all candidate classifications of a single blob, plus the
// running best match.
struct ADAPT_RESULTS {
  int32_t BlobLength;  // blob length metric; set elsewhere (see DoAdaptiveMatch)
  bool HasNonfragment; // presumably set when a whole-char match is added — confirm at call sites
  UNICHAR_ID best_unichar_id; // winner, or INVALID_UNICHAR_ID if no matches
  int best_match_index;       // index of winner in match, or -1
  float best_rating;          // winner's rating (higher is better)
  std::vector<UnicharRating> match;        // one entry per matched class
  std::vector<CP_RESULT_STRUCT> CPResults; // raw class pruner results
  /// Initializes data members to the default values. Sets the initial
  /// best rating to the worst possible rating — WORST_POSSIBLE_RATING,
  /// i.e. 0.0, since ratings here are higher-is-better.
  /// NOTE(review): does not clear match/CPResults; callers in this file only
  /// Initialize() freshly-constructed objects — confirm before reusing one.
  inline void Initialize() {
    BlobLength = INT32_MAX;
    HasNonfragment = false;
    ComputeBest();
  }
  // Computes best_unichar_id, best_match_index and best_rating.
  void ComputeBest() {
    best_unichar_id = INVALID_UNICHAR_ID;
    best_match_index = -1;
    best_rating = WORST_POSSIBLE_RATING;
    for (unsigned i = 0; i < match.size(); ++i) {
      if (match[i].rating > best_rating) {
        best_rating = match[i].rating;
        best_unichar_id = match[i].unichar_id;
        best_match_index = i;
      }
    }
  }
};
// Bundles a templates pointer with a class id and a config id, identifying
// one config of one class within a set of adapted templates.
struct PROTO_KEY {
  ADAPT_TEMPLATES_STRUCT *Templates; // templates the config belongs to
  CLASS_ID ClassId;                  // class within Templates
  int ConfigId;                      // config within that class
};
// std::sort comparator ordering UnicharRatings best-first (highest rating
// first); rating ties are broken by ascending unichar id so the ordering is
// strict-weak and deterministic.
static bool SortDescendingRating(const UnicharRating &a, const UnicharRating &b) {
  if (a.rating == b.rating) {
    return a.unichar_id < b.unichar_id;
  }
  return a.rating > b.rating;
}
/*-----------------------------------------------------------------------------
Private Macros
-----------------------------------------------------------------------------*/
// A match is "marginal" when its error (1 - confidence) exceeds the great
// threshold, i.e. it is not good enough to be trusted outright.
inline bool MarginalMatch(float confidence, float matcher_great_threshold) {
  const float match_error = 1.0f - confidence;
  return match_error > matcher_great_threshold;
}
/*-----------------------------------------------------------------------------
Private Function Prototypes
-----------------------------------------------------------------------------*/
// Returns the index of the given id in results, if present, or the size of
// the vector (the index a new entry would be appended at) if not present.
static unsigned FindScoredUnichar(UNICHAR_ID id, const ADAPT_RESULTS &results) {
  unsigned index = 0;
  for (const auto &entry : results.match) {
    if (entry.unichar_id == id) {
      break;
    }
    ++index;
  }
  return index;
}
// Returns the current rating for a unichar id if we have rated it, defaulting
// to WORST_POSSIBLE_RATING for ids that have not been scored yet.
static float ScoredUnichar(UNICHAR_ID id, const ADAPT_RESULTS &results) {
  const unsigned index = FindScoredUnichar(id, results);
  return index < results.match.size() ? results.match[index].rating
                                      : WORST_POSSIBLE_RATING;
}
// Forward declarations for helpers defined later in this file.
void InitMatcherRatings(float *Rating);
int MakeTempProtoPerm(void *item1, void *item2);
void SetAdaptiveThreshold(float Threshold);
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
/**
* This routine calls the adaptive matcher
* which returns (in an array) the class id of each
* class matched.
*
* It also returns the number of classes matched.
* For each class matched it places the best rating
* found for that class into the Ratings array.
*
* Bad matches are then removed so that they don't
* need to be sorted. The remaining good matches are
* then sorted and converted to choices.
*
* This routine also performs some simple speckle
* filtering.
*
* @param Blob blob to be classified
* @param[out] Choices List of choices found by adaptive matcher.
* filled on return with the choices found by the
* class pruner and the ratings there from. Also
* contains the detailed results of the integer matcher.
*
*/
void Classify::AdaptiveClassifier(TBLOB *Blob, BLOB_CHOICE_LIST *Choices) {
  assert(Choices != nullptr);
  ASSERT_HOST(AdaptedTemplates != nullptr);
  // Gather raw matches for the blob.
  auto *results = new ADAPT_RESULTS;
  results->Initialize();
  DoAdaptiveMatch(Blob, results);
  // Prune, order, and convert the surviving matches into choices.
  RemoveBadMatches(results);
  std::sort(results->match.begin(), results->match.end(), SortDescendingRating);
  RemoveExtraPuncs(results);
  results->ComputeBest();
  ConvertMatchesToChoices(Blob->denorm(), Blob->bounding_box(), results, Choices);
  // TODO(rays) Move to before ConvertMatchesToChoices!
  if (LargeSpeckle(*Blob) || Choices->empty()) {
    AddLargeSpeckleTo(results->BlobLength, Choices);
  }
  if (matcher_debug_level >= 1) {
    tprintf("AD Matches = ");
    PrintAdaptiveMatchResults(*results);
  }
#ifndef GRAPHICS_DISABLED
  if (classify_enable_adaptive_debugger) {
    DebugAdaptiveClassifier(Blob, results);
  }
#endif
  delete results;
} /* AdaptiveClassifier */
#ifndef GRAPHICS_DISABLED
// If *win is nullptr, sets it to a new ScrollView() object with title msg.
// Clears the window and draws baselines.
void Classify::RefreshDebugWindow(ScrollView **win, const char *msg, int y_offset,
                                  const TBOX &wbox) {
  const int kSampleSpaceWidth = 500; // half-width of the drawing area
  if (*win == nullptr) {
    // Lazily create the window on first use; it is then reused and cleared.
    *win = new ScrollView(msg, 100, y_offset, kSampleSpaceWidth * 2, 200, kSampleSpaceWidth * 2,
                          200, true);
  }
  (*win)->Clear();
  // Grey pen, then the baseline and x-height reference lines in
  // baseline-normalized coordinates.
  (*win)->Pen(64, 64, 64);
  (*win)->Line(-kSampleSpaceWidth, kBlnBaselineOffset, kSampleSpaceWidth, kBlnBaselineOffset);
  (*win)->Line(-kSampleSpaceWidth, kBlnXHeight + kBlnBaselineOffset, kSampleSpaceWidth,
               kBlnXHeight + kBlnBaselineOffset);
  // Zoom so the word's bounding box fills the view.
  (*win)->ZoomToRectangle(wbox.left(), wbox.top(), wbox.right(), wbox.bottom());
}
#endif // !GRAPHICS_DISABLED
// Learns the given word using its chopped_word, seam_array, denorm,
// box_word, best_state, and correct_text to learn both correctly and
// incorrectly segmented blobs. If fontname is not nullptr, then LearnBlob
// is called and the data will be saved in an internal buffer.
// Otherwise AdaptToBlob is called for adaption within a document.
void Classify::LearnWord(const char *fontname, WERD_RES *word) {
  // One correct_text entry per recognized character.
  int word_len = word->correct_text.size();
  if (word_len == 0) {
    return;
  }
  float *thresholds = nullptr;
  if (fontname == nullptr) {
    // Adaption mode.
    if (!EnableLearning || word->best_choice == nullptr) {
      return; // Can't or won't adapt.
    }
    if (classify_learning_debug_level >= 1) {
      tprintf("\n\nAdapting to word = %s\n", word->best_choice->debug_string().c_str());
    }
    // One adaptation threshold per character, passed down to AdaptToChar
    // via LearnPieces.
    thresholds = new float[word_len];
    word->ComputeAdaptionThresholds(getDict().certainty_scale, matcher_perfect_threshold,
                                    matcher_good_threshold, matcher_rating_margin, thresholds);
  }
  int start_blob = 0; // index of the first chopped blob of the current char
#ifndef GRAPHICS_DISABLED
  if (classify_debug_character_fragments) {
    if (learn_fragmented_word_debug_win_ != nullptr) {
      learn_fragmented_word_debug_win_->Wait();
    }
    RefreshDebugWindow(&learn_fragments_debug_win_, "LearnPieces", 400,
                       word->chopped_word->bounding_box());
    RefreshDebugWindow(&learn_fragmented_word_debug_win_, "LearnWord", 200,
                       word->chopped_word->bounding_box());
    word->chopped_word->plot(learn_fragmented_word_debug_win_);
    ScrollView::Update();
  }
#endif // !GRAPHICS_DISABLED
  for (int ch = 0; ch < word_len; ++ch) {
    if (classify_debug_character_fragments) {
      tprintf("\nLearning %s\n", word->correct_text[ch].c_str());
    }
    if (word->correct_text[ch].length() > 0) {
      float threshold = thresholds != nullptr ? thresholds[ch] : 0.0f;
      // Learn the whole character first; best_state[ch] is the number of
      // chopped blobs it spans.
      LearnPieces(fontname, start_blob, word->best_state[ch], threshold, CST_WHOLE,
                  word->correct_text[ch].c_str(), word);
      if (word->best_state[ch] > 1 && !disable_character_fragments) {
        // Check that the character breaks into meaningful fragments
        // that each match a whole character with at least
        // classify_character_fragments_garbage_certainty_threshold
        bool garbage = false;
        int frag;
        for (frag = 0; frag < word->best_state[ch]; ++frag) {
          TBLOB *frag_blob = word->chopped_word->blobs[start_blob + frag];
          if (classify_character_fragments_garbage_certainty_threshold < 0) {
            garbage |= LooksLikeGarbage(frag_blob);
          }
        }
        // Learn the fragments.
        if (!garbage) {
          bool pieces_all_natural = word->PiecesAllNatural(start_blob, word->best_state[ch]);
          if (pieces_all_natural || !prioritize_division) {
            for (frag = 0; frag < word->best_state[ch]; ++frag) {
              // Replace the first token of the label with its fragment form,
              // keeping any trailing tokens unchanged.
              std::vector<std::string> tokens = split(word->correct_text[ch], ' ');
              tokens[0] = CHAR_FRAGMENT::to_string(tokens[0].c_str(), frag, word->best_state[ch],
                                                   pieces_all_natural);
              std::string full_string;
              for (unsigned i = 0; i < tokens.size(); i++) {
                full_string += tokens[i];
                if (i != tokens.size() - 1) {
                  full_string += ' ';
                }
              }
              LearnPieces(fontname, start_blob + frag, 1, threshold, CST_FRAGMENT,
                          full_string.c_str(), word);
            }
          }
        }
      }
      // TODO(rays): re-enable this part of the code when we switch to the
      // new classifier that needs to see examples of garbage.
      /*
      if (word->best_state[ch] > 1) {
        // If the next blob is good, make junk with the rightmost fragment.
        if (ch + 1 < word_len && word->correct_text[ch + 1].length() > 0) {
          LearnPieces(fontname, start_blob + word->best_state[ch] - 1,
                      word->best_state[ch + 1] + 1,
                      threshold, CST_IMPROPER, INVALID_UNICHAR, word);
        }
        // If the previous blob is good, make junk with the leftmost fragment.
        if (ch > 0 && word->correct_text[ch - 1].length() > 0) {
          LearnPieces(fontname, start_blob - word->best_state[ch - 1],
                      word->best_state[ch - 1] + 1,
                      threshold, CST_IMPROPER, INVALID_UNICHAR, word);
        }
      }
      // If the next blob is good, make a join with it.
      if (ch + 1 < word_len && word->correct_text[ch + 1].length() > 0) {
        std::string joined_text = word->correct_text[ch];
        joined_text += word->correct_text[ch + 1];
        LearnPieces(fontname, start_blob,
                    word->best_state[ch] + word->best_state[ch + 1],
                    threshold, CST_NGRAM, joined_text.c_str(), word);
      }
      */
    }
    // Advance past the blobs consumed by this character.
    start_blob += word->best_state[ch];
  }
  delete[] thresholds;
} // LearnWord.
// Builds a blob of length fragments, from the word, starting at start,
// and then learns it, as having the given correct_text.
// If fontname is not nullptr, then LearnBlob is called and the data will be
// saved in an internal buffer for static training.
// Otherwise AdaptToBlob is called for adaption within a document.
// threshold is a magic number required by AdaptToChar and generated by
// ComputeAdaptionThresholds.
// Although it can be partly inferred from the string, segmentation is
// provided to explicitly clarify the character segmentation.
void Classify::LearnPieces(const char *fontname, int start, int length, float threshold,
                           CharSegmentationType segmentation, const char *correct_text,
                           WERD_RES *word) {
  // TODO(daria) Remove/modify this if/when we want
  // to train and/or adapt to n-grams.
  if (segmentation != CST_WHOLE && (segmentation != CST_FRAGMENT || disable_character_fragments)) {
    return;
  }
  // Temporarily fuse blobs [start, start+length) into one; undone by
  // BreakPieces at the bottom of this function.
  if (length > 1) {
    SEAM::JoinPieces(word->seam_array, word->chopped_word->blobs, start, start + length - 1);
  }
  TBLOB *blob = word->chopped_word->blobs[start];
  // Rotate the blob if needed for classification.
  TBLOB *rotated_blob = blob->ClassifyNormalizeIfNeeded();
  if (rotated_blob == nullptr) {
    rotated_blob = blob; // no rotation needed; use the original in place
  }
#ifndef GRAPHICS_DISABLED
  // Draw debug windows showing the blob that is being learned if needed.
  if (strcmp(classify_learn_debug_str.c_str(), correct_text) == 0) {
    RefreshDebugWindow(&learn_debug_win_, "LearnPieces", 600, word->chopped_word->bounding_box());
    rotated_blob->plot(learn_debug_win_, ScrollView::GREEN, ScrollView::BROWN);
    learn_debug_win_->Update();
    learn_debug_win_->Wait();
  }
  if (classify_debug_character_fragments && segmentation == CST_FRAGMENT) {
    ASSERT_HOST(learn_fragments_debug_win_ != nullptr); // set up in LearnWord
    blob->plot(learn_fragments_debug_win_, ScrollView::BLUE, ScrollView::BROWN);
    learn_fragments_debug_win_->Update();
  }
#endif // !GRAPHICS_DISABLED
  if (fontname != nullptr) {
    // Static-training path: extract features and buffer them via LearnBlob.
    classify_norm_method.set_value(character); // force char norm spc 30/11/93
    tess_bn_matching.set_value(false);         // turn it off
    tess_cn_matching.set_value(false);
    DENORM bl_denorm, cn_denorm;
    INT_FX_RESULT_STRUCT fx_info;
    SetupBLCNDenorms(*rotated_blob, classify_nonlinear_norm, &bl_denorm, &cn_denorm, &fx_info);
    LearnBlob(fontname, rotated_blob, cn_denorm, fx_info, correct_text);
  } else if (unicharset.contains_unichar(correct_text)) {
    // In-document adaption path.
    UNICHAR_ID class_id = unicharset.unichar_to_id(correct_text);
    int font_id = word->fontinfo != nullptr ? fontinfo_table_.get_index(*word->fontinfo) : 0;
    if (classify_learning_debug_level >= 1) {
      tprintf("Adapting to char = %s, thr= %g font_id= %d\n", unicharset.id_to_unichar(class_id),
              threshold, font_id);
    }
    // If filename is not nullptr we are doing recognition
    // (as opposed to training), so we must have already set word fonts.
    AdaptToChar(rotated_blob, class_id, font_id, threshold, AdaptedTemplates);
    if (BackupAdaptedTemplates != nullptr) {
      // Adapt the backup templates too. They will be used if the primary gets
      // too full.
      AdaptToChar(rotated_blob, class_id, font_id, threshold, BackupAdaptedTemplates);
    }
  } else if (classify_debug_level >= 1) {
    tprintf("Can't adapt to %s not in unicharset\n", correct_text);
  }
  if (rotated_blob != blob) {
    delete rotated_blob; // only delete if ClassifyNormalizeIfNeeded allocated
  }
  // Undo the temporary join so the word is left exactly as we found it.
  SEAM::BreakPieces(word->seam_array, word->chopped_word->blobs, start, start + length - 1);
} // LearnPieces.
/*---------------------------------------------------------------------------*/
/**
* This routine performs cleanup operations
* on the adaptive classifier. It should be called
* before the program is terminated. Its main function
* is to save the adapted templates to a file.
*
* Globals:
* - #AdaptedTemplates current set of adapted templates
* - #classify_save_adapted_templates true if templates should be saved
* - #classify_enable_adaptive_matcher true if adaptive matcher is enabled
*/
void Classify::EndAdaptiveClassifier() {
  // Persist the adapted templates first, if the user asked for that.
  if (AdaptedTemplates != nullptr && classify_enable_adaptive_matcher &&
      classify_save_adapted_templates) {
    const std::string filename = imagefile + ADAPT_TEMPLATE_SUFFIX;
    FILE *fp = fopen(filename.c_str(), "wb");
    if (fp == nullptr) {
      tprintf("Unable to save adapted templates to %s!\n", filename.c_str());
    } else {
      tprintf("\nSaving adapted templates to %s ...", filename.c_str());
      fflush(stdout);
      WriteAdaptedTemplates(fp, AdaptedTemplates);
      tprintf("\n");
      fclose(fp);
    }
  }
  // Release all classifier state. delete on a null pointer is a no-op, so
  // no guards are required.
  delete AdaptedTemplates;
  AdaptedTemplates = nullptr;
  delete BackupAdaptedTemplates;
  BackupAdaptedTemplates = nullptr;
  delete PreTrainedTemplates;
  PreTrainedTemplates = nullptr;
  getDict().EndDangerousAmbigs();
  FreeNormProtos();
  // The four bit vectors are allocated together in InitAdaptiveClassifier,
  // so one null check covers all of them.
  if (AllProtosOn != nullptr) {
    FreeBitVector(AllProtosOn);
    FreeBitVector(AllConfigsOn);
    FreeBitVector(AllConfigsOff);
    FreeBitVector(TempProtoMask);
    AllProtosOn = nullptr;
    AllConfigsOn = nullptr;
    AllConfigsOff = nullptr;
    TempProtoMask = nullptr;
  }
  delete shape_table_;
  shape_table_ = nullptr;
  delete static_classifier_;
  static_classifier_ = nullptr;
} /* EndAdaptiveClassifier */
/*---------------------------------------------------------------------------*/
/**
* This routine reads in the training
* information needed by the adaptive classifier
* and saves it into global variables.
* Parameters:
* load_pre_trained_templates Indicates whether the pre-trained
* templates (inttemp, normproto and pffmtable components)
* should be loaded. Should only be set to true if the
* necessary classifier components are present in the
* [lang].traineddata file.
* Globals:
* BuiltInTemplatesFile file to get built-in temps from
* BuiltInCutoffsFile file to get avg. feat per class from
* classify_use_pre_adapted_templates
* enables use of pre-adapted templates
*/
void Classify::InitAdaptiveClassifier(TessdataManager *mgr) {
  if (!classify_enable_adaptive_matcher) {
    return;
  }
  if (AllProtosOn != nullptr) {
    EndAdaptiveClassifier(); // Don't leak with multiple inits.
  }
  // If there is no language_data_path_prefix, the classifier will be
  // adaptive only.
  if (language_data_path_prefix.length() > 0 && mgr != nullptr) {
    TFile fp;
    // inttemp is mandatory when pre-trained data is present.
    ASSERT_HOST(mgr->GetComponent(TESSDATA_INTTEMP, &fp));
    PreTrainedTemplates = ReadIntTemplates(&fp);
    // The shape table is optional; a failed deserialize degrades gracefully.
    if (mgr->GetComponent(TESSDATA_SHAPE_TABLE, &fp)) {
      shape_table_ = new ShapeTable(unicharset);
      if (!shape_table_->DeSerialize(&fp)) {
        tprintf("Error loading shape table!\n");
        delete shape_table_;
        shape_table_ = nullptr;
      }
    }
    // pffmtable (cutoffs) and normproto are mandatory.
    ASSERT_HOST(mgr->GetComponent(TESSDATA_PFFMTABLE, &fp));
    ReadNewCutoffs(&fp, CharNormCutoffs);
    ASSERT_HOST(mgr->GetComponent(TESSDATA_NORMPROTO, &fp));
    NormProtos = ReadNormProtos(&fp);
    static_classifier_ = new TessClassifier(false, this);
  }
  InitIntegerFX();
  // Shared all-on / all-off masks used throughout matching; freed in
  // EndAdaptiveClassifier.
  AllProtosOn = NewBitVector(MAX_NUM_PROTOS);
  AllConfigsOn = NewBitVector(MAX_NUM_CONFIGS);
  AllConfigsOff = NewBitVector(MAX_NUM_CONFIGS);
  TempProtoMask = NewBitVector(MAX_NUM_PROTOS);
  set_all_bits(AllProtosOn, WordsInVectorOfSize(MAX_NUM_PROTOS));
  set_all_bits(AllConfigsOn, WordsInVectorOfSize(MAX_NUM_CONFIGS));
  zero_all_bits(AllConfigsOff, WordsInVectorOfSize(MAX_NUM_CONFIGS));
  for (uint16_t &BaselineCutoff : BaselineCutoffs) {
    BaselineCutoff = 0;
  }
  if (classify_use_pre_adapted_templates) {
    // Try to resume from templates saved by a previous run; fall back to
    // fresh empty templates if the file cannot be opened.
    TFile fp;
    std::string Filename = imagefile;
    Filename += ADAPT_TEMPLATE_SUFFIX;
    if (!fp.Open(Filename.c_str(), nullptr)) {
      AdaptedTemplates = new ADAPT_TEMPLATES_STRUCT(unicharset);
    } else {
      tprintf("\nReading pre-adapted templates from %s ...\n", Filename.c_str());
      fflush(stdout);
      AdaptedTemplates = ReadAdaptedTemplates(&fp);
      tprintf("\n");
      PrintAdaptedTemplates(stdout, AdaptedTemplates);
      // Seed the baseline cutoffs from the char-norm cutoffs.
      for (unsigned i = 0; i < AdaptedTemplates->Templates->NumClasses; i++) {
        BaselineCutoffs[i] = CharNormCutoffs[i];
      }
    }
  } else {
    delete AdaptedTemplates;
    AdaptedTemplates = new ADAPT_TEMPLATES_STRUCT(unicharset);
  }
} /* InitAdaptiveClassifier */
// Discards all adaptation state (backup and primary templates) and starts
// over with fresh, empty adapted templates.
void Classify::ResetAdaptiveClassifierInternal() {
  if (classify_learning_debug_level > 0) {
    tprintf("Resetting adaptive classifier (NumAdaptationsFailed=%d)\n", NumAdaptationsFailed);
  }
  // Drop the backup first, then replace the primary with an empty set.
  delete BackupAdaptedTemplates;
  BackupAdaptedTemplates = nullptr;
  delete AdaptedTemplates;
  AdaptedTemplates = new ADAPT_TEMPLATES_STRUCT(unicharset);
  NumAdaptationsFailed = 0;
}
// If there are backup adapted templates, switches to those, otherwise resets
// the main adaptive classifier (because it is full.)
void Classify::SwitchAdaptiveClassifier() {
  if (BackupAdaptedTemplates == nullptr) {
    // No backup to switch to: fall back to a full reset.
    ResetAdaptiveClassifierInternal();
    return;
  }
  if (classify_learning_debug_level > 0) {
    tprintf("Switch to backup adaptive classifier (NumAdaptationsFailed=%d)\n",
            NumAdaptationsFailed);
  }
  // Promote the backup to primary; the backup slot becomes empty.
  delete AdaptedTemplates;
  AdaptedTemplates = BackupAdaptedTemplates;
  BackupAdaptedTemplates = nullptr;
  NumAdaptationsFailed = 0;
}
// Resets the backup adaptive classifier to empty.
void Classify::StartBackupAdaptiveClassifier() {
  // Replace any existing backup with a fresh, empty set of templates.
  delete BackupAdaptedTemplates;
  BackupAdaptedTemplates = new ADAPT_TEMPLATES_STRUCT(unicharset);
}
/*---------------------------------------------------------------------------*/
/**
* This routine prepares the adaptive
* matcher for the start
* of the first pass. Learning is enabled (unless it
* is disabled for the whole program).
*
* @note this is somewhat redundant, it simply says that if learning is
* enabled then it will remain enabled on the first pass. If it is
* disabled, then it will remain disabled. This is only put here to
* make it very clear that learning is controlled directly by the global
* setting of EnableLearning.
*
* Globals:
* - #EnableLearning
* set to true by this routine
*/
void Classify::SettupPass1() {
  // Pass 1 follows the global learning switch.
  EnableLearning = classify_enable_learning;
  getDict().SettupStopperPass1();
} /* SettupPass1 */
/*---------------------------------------------------------------------------*/
/**
* This routine prepares the adaptive
* matcher for the start of the second pass. Further
* learning is disabled.
*
* Globals:
* - #EnableLearning set to false by this routine
*/
void Classify::SettupPass2() {
  // No further learning on the second pass.
  EnableLearning = false;
  getDict().SettupStopperPass2();
} /* SettupPass2 */
/*---------------------------------------------------------------------------*/
/**
* This routine creates a new adapted
* class and uses Blob as the model for the first
* config in that class.
*
* @param Blob blob to model new class after
* @param ClassId id of the class to be initialized
* @param FontinfoId font information inferred from pre-trained templates
* @param Class adapted class to be initialized
* @param Templates adapted templates to add new class to
*
* Globals:
* - #AllProtosOn dummy mask with all 1's
* - BaselineCutoffs kludge needed to get cutoffs
* - #PreTrainedTemplates kludge needed to get cutoffs
*/
void Classify::InitAdaptedClass(TBLOB *Blob, CLASS_ID ClassId, int FontinfoId, ADAPT_CLASS_STRUCT *Class,
                                ADAPT_TEMPLATES_STRUCT *Templates) {
  FEATURE_SET Features;
  int Fid, Pid;
  FEATURE Feature;
  int NumFeatures;
  PROTO_STRUCT *Proto;
  INT_CLASS_STRUCT *IClass;
  TEMP_CONFIG_STRUCT *Config;
  // Extract baseline-normalized outline features from the model blob.
  classify_norm_method.set_value(baseline);
  Features = ExtractOutlineFeatures(Blob);
  NumFeatures = Features->NumFeatures;
  // Reject empty or implausibly large feature sets.
  if (NumFeatures > UNLIKELY_NUM_FEAT || NumFeatures <= 0) {
    delete Features;
    return;
  }
  // The new class's first (and only) config is temporary, covering protos
  // 0..NumFeatures-1.
  Config = new TEMP_CONFIG_STRUCT(NumFeatures - 1, FontinfoId);
  TempConfigFor(Class, 0) = Config;
  /* this is a kludge to construct cutoffs for adapted templates */
  if (Templates == AdaptedTemplates) {
    BaselineCutoffs[ClassId] = CharNormCutoffs[ClassId];
  }
  IClass = ClassForClassId(Templates->Templates, ClassId);
  // One proto per outline feature.
  for (Fid = 0; Fid < Features->NumFeatures; Fid++) {
    Pid = AddIntProto(IClass);
    assert(Pid != NO_PROTO);
    Feature = Features->Features[Fid];
    auto TempProto = new TEMP_PROTO_STRUCT;
    Proto = &(TempProto->Proto);
    /* compute proto params - NOTE that Y_DIM_OFFSET must be used because
       ConvertProto assumes that the Y dimension varies from -0.5 to 0.5
       instead of the -0.25 to 0.75 used in baseline normalization */
    Proto->Angle = Feature->Params[OutlineFeatDir];
    Proto->X = Feature->Params[OutlineFeatX];
    Proto->Y = Feature->Params[OutlineFeatY] - Y_DIM_OFFSET;
    Proto->Length = Feature->Params[OutlineFeatLength];
    FillABC(Proto);
    TempProto->ProtoId = Pid;
    SET_BIT(Config->Protos, Pid);
    ConvertProto(Proto, Pid, IClass);
    AddProtoToProtoPruner(Proto, Pid, IClass, classify_learning_debug_level >= 2);
    Class->TempProtos = push(Class->TempProtos, TempProto);
  }
  delete Features;
  // Register the config with the integer class, using all protos.
  AddIntConfig(IClass);
  ConvertConfig(AllProtosOn, 0, IClass);
  if (classify_learning_debug_level >= 1) {
    tprintf("Added new class '%s' with class id %d and %d protos.\n",
            unicharset.id_to_unichar(ClassId), ClassId, NumFeatures);
#ifndef GRAPHICS_DISABLED
    if (classify_learning_debug_level > 1) {
      DisplayAdaptedChar(Blob, IClass);
    }
#endif
  }
  // NOTE(review): at this point TempProtos has already been pushed to, so
  // IsEmptyAdaptedClass appears unable to be true here and the counter may
  // never be incremented. The intent was presumably to test emptiness BEFORE
  // the class was populated — verify against upstream before changing.
  if (IsEmptyAdaptedClass(Class)) {
    (Templates->NumNonEmptyClasses)++;
  }
} /* InitAdaptedClass */
/*---------------------------------------------------------------------------*/
/**
* This routine sets up the feature
* extractor to extract baseline normalized
* pico-features.
*
* The extracted pico-features are converted
* to integer form and placed in IntFeatures. The
* original floating-pt. features are returned in
* FloatFeatures.
*
* Globals: none
* @param Blob blob to extract features from
* @param[out] IntFeatures array to fill with integer features
* @param[out] FloatFeatures place to return actual floating-pt features
*
* @return Number of pico-features returned (0 if
* an error occurred)
*/
int Classify::GetAdaptiveFeatures(TBLOB *Blob, INT_FEATURE_ARRAY IntFeatures,
                                  FEATURE_SET *FloatFeatures) {
  // Pico-features are extracted under baseline normalization.
  classify_norm_method.set_value(baseline);
  FEATURE_SET features = ExtractPicoFeatures(Blob);
  const int num_features = features->NumFeatures;
  if (num_features == 0 || num_features > UNLIKELY_NUM_FEAT) {
    // Nothing usable was extracted; free the set and report failure.
    delete features;
    return 0;
  }
  // Convert to integer form; ownership of the float set passes to the caller.
  ComputeIntFeatures(features, IntFeatures);
  *FloatFeatures = features;
  return num_features;
} /* GetAdaptiveFeatures */
/*-----------------------------------------------------------------------------
Private Code
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/**
* Return true if the specified word is acceptable for adaptation.
*
* Globals: none
*
* @param word current word
*
* @return true or false
*/
bool Classify::AdaptableWord(WERD_RES *word) {
  if (word->best_choice == nullptr) {
    return false;
  }
  const auto choice_length = word->best_choice->length();
  const float adaptable_score =
      getDict().segment_penalty_dict_case_ok + ADAPTABLE_WERD_ADJUSTMENT;
  // Rules that apply in general - simplest to compute first.
  if (choice_length <= 0 || choice_length > MAX_ADAPTABLE_WERD_SIZE) {
    return false;
  }
  if (choice_length != word->rebuild_word->NumBlobs()) {
    return false;
  }
  // This basically ensures that the word is at least a dictionary match
  // (freq word, user word, system dawg word, etc).
  // Since all the other adjustments will make adjust factor higher
  // than adaptable_score=1.1+0.05=1.15
  // Since these are other flags that ensure that the word is dict word,
  // this check could be at times redundant.
  if (word->best_choice->adjust_factor() > adaptable_score) {
    return false;
  }
  // Make sure that alternative choices are not dictionary words.
  return word->AlternativeChoiceAdjustmentsWorseThan(adaptable_score);
}
/*---------------------------------------------------------------------------*/
/**
* @param Blob blob to add to templates for ClassId
* @param ClassId class to add blob to
* @param FontinfoId font information from pre-trained templates
* @param Threshold minimum match rating to existing template
* @param adaptive_templates current set of adapted templates
*
* Globals:
* - AllProtosOn dummy mask to match against all protos
* - AllConfigsOn dummy mask to match against all configs
*/
void Classify::AdaptToChar(TBLOB *Blob, CLASS_ID ClassId, int FontinfoId, float Threshold,
                           ADAPT_TEMPLATES_STRUCT *adaptive_templates) {
  int NumFeatures;
  INT_FEATURE_ARRAY IntFeatures;
  UnicharRating int_result;
  INT_CLASS_STRUCT *IClass;
  ADAPT_CLASS_STRUCT *Class;
  TEMP_CONFIG_STRUCT *TempConfig;
  FEATURE_SET FloatFeatures;
  int NewTempConfigId;
  if (!LegalClassId(ClassId)) {
    return;
  }
  int_result.unichar_id = ClassId;
  Class = adaptive_templates->Class[ClassId];
  assert(Class != nullptr);
  if (IsEmptyAdaptedClass(Class)) {
    // First sample ever for this class: build a brand-new config from it.
    InitAdaptedClass(Blob, ClassId, FontinfoId, Class, adaptive_templates);
  } else {
    IClass = ClassForClassId(adaptive_templates->Templates, ClassId);
    NumFeatures = GetAdaptiveFeatures(Blob, IntFeatures, &FloatFeatures);
    if (NumFeatures <= 0) {
      return; // Features already freed by GetAdaptiveFeatures.
    }
    // Only match configs with the matching font.
    // NOTE(review): sized with MAX_NUM_PROTOS although only NumConfigs bits
    // are used — appears oversized but harmless; confirm MAX_NUM_CONFIGS
    // would suffice before changing.
    BIT_VECTOR MatchingFontConfigs = NewBitVector(MAX_NUM_PROTOS);
    for (int cfg = 0; cfg < IClass->NumConfigs; ++cfg) {
      if (GetFontinfoId(Class, cfg) == FontinfoId) {
        SET_BIT(MatchingFontConfigs, cfg);
      } else {
        reset_bit(MatchingFontConfigs, cfg);
      }
    }
    im_.Match(IClass, AllProtosOn, MatchingFontConfigs, NumFeatures, IntFeatures, &int_result,
              classify_adapt_feature_threshold, NO_DEBUG, matcher_debug_separate_windows);
    FreeBitVector(MatchingFontConfigs);
    SetAdaptiveThreshold(Threshold);
    // rating is higher-is-better, so (1 - rating) is the match error.
    if (1.0f - int_result.rating <= Threshold) {
      // Good match against an existing config.
      if (ConfigIsPermanent(Class, int_result.config)) {
        // Nothing to learn from a match to an already-permanent config.
        if (classify_learning_debug_level >= 1) {
          tprintf("Found good match to perm config %d = %4.1f%%.\n", int_result.config,
                  int_result.rating * 100.0);
        }
        delete FloatFeatures;
        return;
      }
      // Reinforce the matching temporary config; promote it once it has
      // been seen often enough (TempConfigReliable).
      TempConfig = TempConfigFor(Class, int_result.config);
      IncreaseConfidence(TempConfig);
      if (TempConfig->NumTimesSeen > Class->MaxNumTimesSeen) {
        Class->MaxNumTimesSeen = TempConfig->NumTimesSeen;
      }
      if (classify_learning_debug_level >= 1) {
        tprintf("Increasing reliability of temp config %d to %d.\n", int_result.config,
                TempConfig->NumTimesSeen);
      }
      if (TempConfigReliable(ClassId, TempConfig)) {
        MakePermanent(adaptive_templates, ClassId, int_result.config, Blob);
        UpdateAmbigsGroup(ClassId, Blob);
      }
    } else {
      // Poor match: create a new temporary config from this sample.
      if (classify_learning_debug_level >= 1) {
        tprintf("Found poor match to temp config %d = %4.1f%%.\n", int_result.config,
                int_result.rating * 100.0);
#ifndef GRAPHICS_DISABLED
        if (classify_learning_debug_level > 2) {
          DisplayAdaptedChar(Blob, IClass);
        }
#endif
      }
      NewTempConfigId = MakeNewTemporaryConfig(adaptive_templates, ClassId, FontinfoId, NumFeatures,
                                               IntFeatures, FloatFeatures);
      if (NewTempConfigId >= 0 &&
          TempConfigReliable(ClassId, TempConfigFor(Class, NewTempConfigId))) {
        MakePermanent(adaptive_templates, ClassId, NewTempConfigId, Blob);
        UpdateAmbigsGroup(ClassId, Blob);
      }
#ifndef GRAPHICS_DISABLED
      if (classify_learning_debug_level > 1) {
        DisplayAdaptedChar(Blob, IClass);
      }
#endif
    }
    delete FloatFeatures;
  }
} /* AdaptToChar */
#ifndef GRAPHICS_DISABLED
void Classify::DisplayAdaptedChar(TBLOB *blob, INT_CLASS_STRUCT *int_class) {
  // Debug aid: re-extracts baseline-normalized features from blob and shows
  // how well it matches int_class in the match display window.
  INT_FX_RESULT_STRUCT fx_info;
  std::vector<INT_FEATURE_STRUCT> bl_features;
  TrainingSample *sample =
      BlobToTrainingSample(*blob, classify_nonlinear_norm, &fx_info, &bl_features);
  if (sample == nullptr) {
    return;  // Feature extraction failed; nothing to display.
  }
  UnicharRating int_result;
  // First match with all protos/configs enabled to find the best config.
  im_.Match(int_class, AllProtosOn, AllConfigsOn, bl_features.size(), &bl_features[0], &int_result,
            classify_adapt_feature_threshold, NO_DEBUG, matcher_debug_separate_windows);
  tprintf("Best match to temp config %d = %4.1f%%.\n", int_result.config,
          int_result.rating * 100.0);
  if (classify_learning_debug_level >= 2) {
    // Re-run the match restricted to only the best config, with matcher debug
    // output enabled so the display windows are populated.
    uint32_t ConfigMask;
    ConfigMask = 1 << int_result.config;
    ShowMatchDisplay();
    // NOTE(review): 6 | 0x19 (== 0x1f) is a set of matcher debug-flag bits;
    // exact meaning is defined by the IntegerMatcher — confirm before changing.
    im_.Match(int_class, AllProtosOn, static_cast<BIT_VECTOR>(&ConfigMask), bl_features.size(),
              &bl_features[0], &int_result, classify_adapt_feature_threshold, 6 | 0x19,
              matcher_debug_separate_windows);
    UpdateMatchDisplay();
  }
  delete sample;
}
#endif
/**
* This routine adds the result of a classification into
* Results. If the new rating is much worse than the current
* best rating, it is not entered into results because it
* would end up being stripped later anyway. If the new rating
* is better than the old rating for the class, it replaces the
* old rating. If this is the first rating for the class, the
* class is added to the list of matched classes in Results.
* If the new rating is better than the best so far, it
* becomes the best so far.
*
* Globals:
* - #matcher_bad_match_pad defines limits of an acceptable match
*
* @param new_result new result to add
* @param[out] results results to add new result to
*/
void Classify::AddNewResult(const UnicharRating &new_result, ADAPT_RESULTS *results) {
  // Look up any existing entry for this unichar.
  const auto prior = FindScoredUnichar(new_result.unichar_id, *results);
  const bool have_prior = prior < results->match.size();
  // Reject if far below the current best (it would be stripped later anyway).
  if (new_result.rating + matcher_bad_match_pad < results->best_rating) {
    return;
  }
  // Reject if it does not improve on the existing rating for this class.
  if (have_prior && new_result.rating <= results->match[prior].rating) {
    return;
  }
  const bool is_fragment = unicharset.get_fragment(new_result.unichar_id) != nullptr;
  if (!is_fragment) {
    results->HasNonfragment = true;
  }
  if (have_prior) {
    results->match[prior].rating = new_result.rating;
  } else {
    // prior == match.size() here, so it becomes the index of the new entry.
    results->match.push_back(new_result);
  }
  // Ensure that fragments do not affect best rating, class and config.
  // This is needed so that at least one non-fragmented character is
  // always present in the results.
  // TODO(daria): verify that this helps accuracy and does not
  // hurt performance.
  if (new_result.rating > results->best_rating && !is_fragment) {
    results->best_match_index = prior;
    results->best_rating = new_result.rating;
    results->best_unichar_id = new_result.unichar_id;
  }
} /* AddNewResult */
/*---------------------------------------------------------------------------*/
/**
* This routine is identical to CharNormClassifier()
* except that it does no class pruning. It simply matches
* the unknown blob against the classes listed in
* Ambiguities.
*
* Globals:
* - #AllProtosOn mask that enables all protos
* - #AllConfigsOn mask that enables all configs
*
* @param blob blob to be classified
* @param templates built-in templates to classify against
* @param classes adapted class templates
* @param ambiguities array of unichar id's to match against
* @param[out] results place to put match results
* @param int_features
* @param fx_info
*/
void Classify::AmbigClassifier(const std::vector<INT_FEATURE_STRUCT> &int_features,
const INT_FX_RESULT_STRUCT &fx_info, const TBLOB *blob,
INT_TEMPLATES_STRUCT *templates, ADAPT_CLASS_STRUCT **classes,
UNICHAR_ID *ambiguities, ADAPT_RESULTS *results) {
if (int_features.empty()) {
return;
}
auto *CharNormArray = new uint8_t[unicharset.size()];
UnicharRating int_result;
results->BlobLength = GetCharNormFeature(fx_info, templates, nullptr, CharNormArray);
bool debug = matcher_debug_level >= 2 || classify_debug_level > 1;
if (debug) {
tprintf("AM Matches = ");
}
int top = blob->bounding_box().top();
int bottom = blob->bounding_box().bottom();
while (*ambiguities >= 0) {
CLASS_ID class_id = *ambiguities;
int_result.unichar_id = class_id;
im_.Match(ClassForClassId(templates, class_id), AllProtosOn, AllConfigsOn, int_features.size(),
&int_features[0], &int_result, classify_adapt_feature_threshold, NO_DEBUG,
matcher_debug_separate_windows);
ExpandShapesAndApplyCorrections(nullptr, debug, class_id, bottom, top, 0, results->BlobLength,
classify_integer_matcher_multiplier, CharNormArray, &int_result,
results);
ambiguities++;
}
delete[] CharNormArray;
} /* AmbigClassifier */
/*---------------------------------------------------------------------------*/
/// Factored-out calls to IntegerMatcher based on class pruner results.
/// Returns integer matcher results inside CLASS_PRUNER_RESULTS structure.
void Classify::MasterMatcher(INT_TEMPLATES_STRUCT *templates, int16_t num_features,
                             const INT_FEATURE_STRUCT *features, const uint8_t *norm_factors,
                             ADAPT_CLASS_STRUCT **classes, int debug, int matcher_multiplier,
                             const TBOX &blob_box, const std::vector<CP_RESULT_STRUCT> &results,
                             ADAPT_RESULTS *final_results) {
  // Runs the integer matcher on every class the pruner kept, then corrects
  // each rating and folds it into final_results.
  const int top = blob_box.top();
  const int bottom = blob_box.bottom();
  const bool verbose = matcher_debug_level >= 2 || classify_debug_level > 1;
  UnicharRating int_result;
  for (const CP_RESULT_STRUCT &pruner_result : results) {
    const CLASS_ID class_id = pruner_result.Class;
    // Adapted templates restrict matching to their permanent protos/configs;
    // pre-trained templates use everything.
    const bool adapted = classes != nullptr;
    BIT_VECTOR protos = adapted ? classes[class_id]->PermProtos : AllProtosOn;
    BIT_VECTOR configs = adapted ? classes[class_id]->PermConfigs : AllConfigsOn;
    int_result.unichar_id = class_id;
    im_.Match(ClassForClassId(templates, class_id), protos, configs, num_features, features,
              &int_result, classify_adapt_feature_threshold, debug, matcher_debug_separate_windows);
    ExpandShapesAndApplyCorrections(classes, verbose, class_id, bottom, top, pruner_result.Rating,
                                    final_results->BlobLength, matcher_multiplier, norm_factors,
                                    &int_result, final_results);
  }
}
// Converts configs to fonts, and if the result is not adapted, and a
// shape_table_ is present, the shape is expanded to include all
// unichar_ids represented, before applying a set of corrections to the
// distance rating in int_result, (see ComputeCorrectedRating.)
// The results are added to the final_results output.
void Classify::ExpandShapesAndApplyCorrections(ADAPT_CLASS_STRUCT **classes, bool debug, int class_id,
                                               int bottom, int top, float cp_rating,
                                               int blob_length, int matcher_multiplier,
                                               const uint8_t *cn_factors, UnicharRating *int_result,
                                               ADAPT_RESULTS *final_results) {
  if (classes != nullptr) {
    // Adapted result. Convert configs to fontinfo_ids.
    int_result->adapted = true;
    for (auto &font : int_result->fonts) {
      font.fontinfo_id = GetFontinfoId(classes[class_id], font.fontinfo_id);
    }
  } else {
    // Pre-trained result. Map fonts using font_sets_.
    int_result->adapted = false;
    for (auto &font : int_result->fonts) {
      font.fontinfo_id = ClassAndConfigIDToFontOrShapeID(class_id, font.fontinfo_id);
    }
    if (shape_table_ != nullptr) {
      // Two possible cases:
      // 1. Flat shapetable. All unichar-ids of the shapes referenced by
      // int_result->fonts are the same. In this case build a new vector of
      // mapped fonts and replace the fonts in int_result.
      // 2. Multi-unichar shapetable. Variable unichars in the shapes referenced
      // by int_result. In this case, build a vector of UnicharRating to
      // gather together different font-ids for each unichar. Also covers case1.
      std::vector<UnicharRating> mapped_results;
      for (auto &f : int_result->fonts) {
        // After the mapping above, fontinfo_id actually holds a shape id here.
        int shape_id = f.fontinfo_id;
        const Shape &shape = shape_table_->GetShape(shape_id);
        for (int c = 0; c < shape.size(); ++c) {
          int unichar_id = shape[c].unichar_id;
          if (!unicharset.get_enabled(unichar_id)) {
            continue;  // Skip unichars disabled in the current unicharset.
          }
          // Find the mapped_result for unichar_id (linear scan; the list of
          // distinct unichars per shape is expected to be small).
          unsigned r = 0;
          for (r = 0; r < mapped_results.size() && mapped_results[r].unichar_id != unichar_id;
               ++r) {
          }
          if (r == mapped_results.size()) {
            // First time this unichar is seen: clone the rating, then collect
            // its fonts fresh below.
            mapped_results.push_back(*int_result);
            mapped_results[r].unichar_id = unichar_id;
            mapped_results[r].fonts.clear();
          }
          for (int font_id : shape[c].font_ids) {
            mapped_results[r].fonts.emplace_back(font_id, f.score);
          }
        }
      }
      // Correct each gathered unichar's rating and record it.
      for (auto &m : mapped_results) {
        m.rating = ComputeCorrectedRating(debug, m.unichar_id, cp_rating, int_result->rating,
                                          int_result->feature_misses, bottom, top, blob_length,
                                          matcher_multiplier, cn_factors);
        AddNewResult(m, final_results);
      }
      return;
    }
  }
  // Adapted result, or pre-trained with no shape table: correct and record
  // the single class result directly.
  if (unicharset.get_enabled(class_id)) {
    int_result->rating = ComputeCorrectedRating(debug, class_id, cp_rating, int_result->rating,
                                                int_result->feature_misses, bottom, top,
                                                blob_length, matcher_multiplier, cn_factors);
    AddNewResult(*int_result, final_results);
  }
}
// Applies a set of corrections to the confidence im_rating,
// including the cn_correction, miss penalty and additional penalty
// for non-alnums being vertical misfits. Returns the corrected confidence.
double Classify::ComputeCorrectedRating(bool debug, int unichar_id, double cp_rating,
                                        double im_rating, int feature_misses, int bottom, int top,
                                        int blob_length, int matcher_multiplier,
                                        const uint8_t *cn_factors) {
  // Compute class feature corrections.
  // im_rating is a confidence in [0, 1]; (1 - im_rating) is the distance that
  // the char-norm correction operates on.
  double cn_corrected = im_.ApplyCNCorrection(1.0 - im_rating, blob_length, cn_factors[unichar_id],
                                              matcher_multiplier);
  double miss_penalty = tessedit_class_miss_scale * feature_misses;
  double vertical_penalty = 0.0;
  // Penalize non-alnums for being vertical misfits.
  if (!unicharset.get_isalpha(unichar_id) && !unicharset.get_isdigit(unichar_id) &&
      cn_factors[unichar_id] != 0 && classify_misfit_junk_penalty > 0.0) {
    int min_bottom, max_bottom, min_top, max_top;
    unicharset.get_top_bottom(unichar_id, &min_bottom, &max_bottom, &min_top, &max_top);
    if (debug) {
      tprintf("top=%d, vs [%d, %d], bottom=%d, vs [%d, %d]\n", top, min_top, max_top, bottom,
              min_bottom, max_bottom);
    }
    // Outside the expected vertical range for this unichar => misfit.
    if (top < min_top || top > max_top || bottom < min_bottom || bottom > max_bottom) {
      vertical_penalty = classify_misfit_junk_penalty;
    }
  }
  // Convert accumulated distance back to a confidence, clipped below at the
  // worst representable rating.
  double result = 1.0 - (cn_corrected + miss_penalty + vertical_penalty);
  if (result < WORST_POSSIBLE_RATING) {
    result = WORST_POSSIBLE_RATING;
  }
  if (debug) {
    tprintf("%s: %2.1f%%(CP%2.1f, IM%2.1f + CN%.2f(%d) + MP%2.1f + VP%2.1f)\n",
            unicharset.id_to_unichar(unichar_id), result * 100.0, cp_rating * 100.0,
            (1.0 - im_rating) * 100.0, (cn_corrected - (1.0 - im_rating)) * 100.0,
            cn_factors[unichar_id], miss_penalty * 100.0, vertical_penalty * 100.0);
  }
  return result;
}
/*---------------------------------------------------------------------------*/
/**
* This routine extracts baseline normalized features
* from the unknown character and matches them against the
* specified set of templates. The classes which match
* are added to Results.
*
* Globals:
* - BaselineCutoffs expected num features for each class
*
* @param Blob blob to be classified
* @param Templates current set of adapted templates
* @param Results place to put match results
* @param int_features
* @param fx_info
*
* @return Array of possible ambiguous chars that should be checked.
*/
UNICHAR_ID *Classify::BaselineClassifier(TBLOB *Blob,
                                         const std::vector<INT_FEATURE_STRUCT> &int_features,
                                         const INT_FX_RESULT_STRUCT &fx_info,
                                         ADAPT_TEMPLATES_STRUCT *Templates, ADAPT_RESULTS *Results) {
  if (int_features.empty()) {
    return nullptr;
  }
  auto *CharNormArray = new uint8_t[unicharset.size()];
  // Baseline matching uses no char-norm adjustments, so zero the array.
  ClearCharNormArray(CharNormArray);
  Results->BlobLength = IntCastRounded(fx_info.Length / kStandardFeatureLength);
  PruneClasses(Templates->Templates, int_features.size(), -1, &int_features[0], CharNormArray,
               BaselineCutoffs, &Results->CPResults);
  if (matcher_debug_level >= 2 || classify_debug_level > 1) {
    tprintf("BL Matches = ");
  }
  // matcher_multiplier is 0: no char-norm correction for baseline matches.
  MasterMatcher(Templates->Templates, int_features.size(), &int_features[0], CharNormArray,
                Templates->Class, matcher_debug_flags, 0, Blob->bounding_box(), Results->CPResults,
                Results);
  delete[] CharNormArray;
  CLASS_ID ClassId = Results->best_unichar_id;
  if (ClassId == INVALID_UNICHAR_ID || Results->best_match_index < 0) {
    return nullptr;
  }
  // Return the (non-owned) ambiguity list stored with the best permanent
  // config. NOTE(review): assumes the best match refers to a permanent
  // config (Perm side of the union) — confirm against MakePermanent usage.
  return Templates->Class[ClassId]
      ->Config[Results->match[Results->best_match_index].config]
      .Perm->Ambigs;
} /* BaselineClassifier */
/*---------------------------------------------------------------------------*/
/**
* This routine extracts character normalized features
* from the unknown character and matches them against the
* specified set of templates. The classes which match
* are added to Results.
*
* @param blob blob to be classified
* @param sample templates to classify unknown against
* @param adapt_results place to put match results
*
* Globals:
* - CharNormCutoffs expected num features for each class
* - AllProtosOn mask that enables all protos
* - AllConfigsOn mask that enables all configs
*/
int Classify::CharNormClassifier(TBLOB *blob, const TrainingSample &sample,
                                 ADAPT_RESULTS *adapt_results) {
  // This is the length that is used for scaling ratings vs certainty.
  adapt_results->BlobLength = IntCastRounded(sample.outline_length() / kStandardFeatureLength);
  // Run the static (pre-trained) classifier on the sample.
  std::vector<UnicharRating> unichar_results;
  static_classifier_->UnicharClassifySample(sample, blob->denorm().pix(), 0, -1, &unichar_results);
  // Merge every static result into the format used internally by
  // AdaptiveClassifier.
  for (const UnicharRating &rating : unichar_results) {
    AddNewResult(rating, adapt_results);
  }
  return sample.num_features();
} /* CharNormClassifier */
// As CharNormClassifier, but operates on a TrainingSample and outputs to
// a vector of ShapeRating without conversion to classes.
int Classify::CharNormTrainingSample(bool pruner_only, int keep_this, const TrainingSample &sample,
                                     std::vector<UnicharRating> *results) {
  results->clear();
  std::unique_ptr<ADAPT_RESULTS> adapt_results(new ADAPT_RESULTS());
  adapt_results->Initialize();
  // Compute the bounding box of the features.
  uint32_t num_features = sample.num_features();
  // Only the top and bottom of the blob_box are used by MasterMatcher, so
  // fabricate right and left using top and bottom.
  TBOX blob_box(sample.geo_feature(GeoBottom), sample.geo_feature(GeoBottom),
                sample.geo_feature(GeoTop), sample.geo_feature(GeoTop));
  // Compute the char_norm_array from the saved cn_feature.
  FEATURE norm_feature = sample.GetCNFeature();
  std::vector<uint8_t> char_norm_array(unicharset.size());
  // The pruner array must be large enough for either indexing scheme
  // (unichar ids or template class ids).
  auto num_pruner_classes = std::max(static_cast<unsigned>(unicharset.size()), PreTrainedTemplates->NumClasses);
  std::vector<uint8_t> pruner_norm_array(num_pruner_classes);
  adapt_results->BlobLength = static_cast<int>(ActualOutlineLength(norm_feature) * 20 + 0.5f);
  // ComputeCharNormArrays deletes norm_feature.
  ComputeCharNormArrays(norm_feature, PreTrainedTemplates, &char_norm_array[0], &pruner_norm_array[0]);
  PruneClasses(PreTrainedTemplates, num_features, keep_this, sample.features(), &pruner_norm_array[0],
               shape_table_ != nullptr ? &shapetable_cutoffs_[0] : CharNormCutoffs,
               &adapt_results->CPResults);
  if (keep_this >= 0) {
    // Caller asked for a single specific class: force it to be the only
    // pruner result.
    adapt_results->CPResults[0].Class = keep_this;
    adapt_results->CPResults.resize(1);
  }
  if (pruner_only) {
    // Convert pruner results to output format.
    for (auto &it : adapt_results->CPResults) {
      int class_id = it.Class;
      results->push_back(UnicharRating(class_id, 1.0f - it.Rating));
    }
  } else {
    MasterMatcher(PreTrainedTemplates, num_features, sample.features(), &char_norm_array[0], nullptr,
                  matcher_debug_flags, classify_integer_matcher_multiplier, blob_box,
                  adapt_results->CPResults, adapt_results.get());
    // Convert master matcher results to output format.
    for (auto &i : adapt_results->match) {
      results->push_back(i);
    }
    if (results->size() > 1) {
      std::sort(results->begin(), results->end(), SortDescendingRating);
    }
  }
  return num_features;
} /* CharNormTrainingSample */
/*---------------------------------------------------------------------------*/
/**
* This routine computes a rating which reflects the
* likelihood that the blob being classified is a noise
* blob. NOTE: assumes that the blob length has already been
* computed and placed into Results.
*
* @param results results to add noise classification to
*
* Globals:
* - matcher_avg_noise_size avg. length of a noise blob
*/
void Classify::ClassifyAsNoise(ADAPT_RESULTS *results) {
  // Noise likelihood grows with blob length relative to an average noise
  // blob: r = x^2 / (1 + x^2) where x = BlobLength / matcher_avg_noise_size.
  const float ratio = results->BlobLength / matcher_avg_noise_size;
  const float squared = ratio * ratio;
  const float noise_rating = squared / (1 + squared);
  AddNewResult(UnicharRating(UNICHAR_SPACE, 1.0f - noise_rating), results);
} /* ClassifyAsNoise */
/// The function converts the given match ratings to the list of blob
/// choices with ratings and certainties (used by the context checkers).
/// If character fragments are present in the results, this function also makes
/// sure that there is at least one non-fragmented classification included.
/// For each classification result check the unicharset for "definite"
/// ambiguities and modify the resulting Choices accordingly.
void Classify::ConvertMatchesToChoices(const DENORM &denorm, const TBOX &box,
                                       ADAPT_RESULTS *Results, BLOB_CHOICE_LIST *Choices) {
  assert(Choices != nullptr);
  float Rating;
  float Certainty;
  BLOB_CHOICE_IT temp_it;
  bool contains_nonfrag = false;
  temp_it.set_to_list(Choices);
  int choices_length = 0;
  // With no shape_table_ maintain the previous MAX_MATCHES as the maximum
  // number of returned results, but with a shape_table_ we want to have room
  // for at least the biggest shape (which might contain hundreds of Indic
  // grapheme fragments) and more, so use double the size of the biggest shape
  // if that is more than the default.
  int max_matches = MAX_MATCHES;
  if (shape_table_ != nullptr) {
    max_matches = shape_table_->MaxNumUnichars() * 2;
    if (max_matches < MAX_MATCHES) {
      max_matches = MAX_MATCHES;
    }
  }
  float best_certainty = -FLT_MAX;
  for (auto &it : Results->match) {
    const UnicharRating &result = it;
    bool adapted = result.adapted;
    bool current_is_frag = (unicharset.get_fragment(result.unichar_id) != nullptr);
    if (temp_it.length() + 1 == max_matches && !contains_nonfrag && current_is_frag) {
      continue; // look for a non-fragmented character to fill the
                // last spot in Choices if only fragments are present
    }
    // BlobLength can never be legally 0, this means recognition failed.
    // But we must return a classification result because some invoking
    // functions (chopper/permuter) do not anticipate a null blob choice.
    // So we need to assign a poor, but not infinitely bad score.
    if (Results->BlobLength == 0) {
      Certainty = -20;
      Rating = 100; // should be -certainty * real_blob_length
    } else {
      // Convert confidence (1 = perfect) to rating/certainty scales.
      Rating = Certainty = (1.0f - result.rating);
      Rating *= rating_scale * Results->BlobLength;
      Certainty *= -(getDict().certainty_scale);
    }
    // Adapted results, by their very nature, should have good certainty.
    // Those that don't are at best misleading, and often lead to errors,
    // so don't accept adapted results that are too far behind the best result,
    // whether adapted or static.
    // TODO(rays) find some way of automatically tuning these constants.
    if (Certainty > best_certainty) {
      best_certainty = std::min(Certainty, static_cast<float>(classify_adapted_pruning_threshold));
    } else if (adapted && Certainty / classify_adapted_pruning_factor < best_certainty) {
      continue; // Don't accept bad adapted results.
    }
    float min_xheight, max_xheight, yshift;
    // Compute the x-height range this unichar implies for the given blob box.
    denorm.XHeightRange(result.unichar_id, unicharset, box, &min_xheight, &max_xheight, &yshift);
    auto *choice = new BLOB_CHOICE(
        result.unichar_id, Rating, Certainty, unicharset.get_script(result.unichar_id), min_xheight,
        max_xheight, yshift, adapted ? BCC_ADAPTED_CLASSIFIER : BCC_STATIC_CLASSIFIER);
    choice->set_fonts(result.fonts);
    temp_it.add_to_end(choice);
    contains_nonfrag |= !current_is_frag; // update contains_nonfrag
    choices_length++;
    if (choices_length >= max_matches) {
      break;
    }
  }
  // Drop any surplus matches beyond what was converted to choices.
  Results->match.resize(choices_length);
} // ConvertMatchesToChoices
/*---------------------------------------------------------------------------*/
#ifndef GRAPHICS_DISABLED
/**
*
* @param blob blob whose classification is being debugged
* @param Results results of match being debugged
*
* Globals: none
*/
void Classify::DebugAdaptiveClassifier(TBLOB *blob, ADAPT_RESULTS *Results) {
  // Debug aid: shows the static classifier's view of blob, highlighting the
  // current best unichar from Results.
  if (static_classifier_ == nullptr) {
    return;
  }
  INT_FX_RESULT_STRUCT fx_info;
  std::vector<INT_FEATURE_STRUCT> bl_features;
  TrainingSample *sample = BlobToTrainingSample(*blob, false, &fx_info, &bl_features);
  if (sample == nullptr) {
    return;  // Feature extraction failed; nothing to display.
  }
  static_classifier_->DebugDisplay(*sample, blob->denorm().pix(), Results->best_unichar_id);
  // Fix: free the sample allocated by BlobToTrainingSample — every other
  // caller in this file (DoAdaptiveMatch, GetAmbiguities, DisplayAdaptedChar)
  // deletes it; previously it leaked here.
  delete sample;
} /* DebugAdaptiveClassifier */
#endif
/*---------------------------------------------------------------------------*/
/**
* This routine performs an adaptive classification.
* If we have not yet adapted to enough classes, a simple
* classification to the pre-trained templates is performed.
* Otherwise, we match the blob against the adapted templates.
* If the adapted templates do not match well, we try a
* match against the pre-trained templates. If an adapted
* template match is found, we do a match to any pre-trained
* templates which could be ambiguous. The results from all
* of these classifications are merged together into Results.
*
* @param Blob blob to be classified
* @param Results place to put match results
*
* Globals:
* - PreTrainedTemplates built-in training templates
* - AdaptedTemplates templates adapted for this page
* - matcher_reliable_adaptive_result rating limit for a great match
*/
void Classify::DoAdaptiveMatch(TBLOB *Blob, ADAPT_RESULTS *Results) {
  UNICHAR_ID *Ambiguities;
  INT_FX_RESULT_STRUCT fx_info;
  std::vector<INT_FEATURE_STRUCT> bl_features;
  TrainingSample *sample =
      BlobToTrainingSample(*Blob, classify_nonlinear_norm, &fx_info, &bl_features);
  if (sample == nullptr) {
    return;  // Feature extraction failed; no classification possible.
  }
  // TODO: With LSTM, static_classifier_ is nullptr.
  // Return to avoid crash in CharNormClassifier.
  if (static_classifier_ == nullptr) {
    delete sample;
    return;
  }
  // Until enough classes have been adapted (or if char-norm matching is
  // forced), rely solely on the static classifier.
  if (AdaptedTemplates->NumPermClasses < matcher_permanent_classes_min || tess_cn_matching) {
    CharNormClassifier(Blob, *sample, Results);
  } else {
    Ambiguities = BaselineClassifier(Blob, bl_features, fx_info, AdaptedTemplates, Results);
    // If the adapted match is marginal (or produced nothing), fall back to
    // the static classifier; otherwise check pre-trained templates only for
    // classes known to be ambiguous with the adapted match.
    if ((!Results->match.empty() &&
         MarginalMatch(Results->best_rating, matcher_reliable_adaptive_result) &&
         !tess_bn_matching) ||
        Results->match.empty()) {
      CharNormClassifier(Blob, *sample, Results);
    } else if (Ambiguities && *Ambiguities >= 0 && !tess_bn_matching) {
      AmbigClassifier(bl_features, fx_info, Blob, PreTrainedTemplates, AdaptedTemplates->Class,
                      Ambiguities, Results);
    }
  }
  // Force the blob to be classified as noise
  // if the results contain only fragments.
  // TODO(daria): verify that this is better than
  // just adding a nullptr classification.
  if (!Results->HasNonfragment || Results->match.empty()) {
    ClassifyAsNoise(Results);
  }
  delete sample;
} /* DoAdaptiveMatch */
/*---------------------------------------------------------------------------*/
/**
* This routine matches blob to the built-in templates
* to find out if there are any classes other than the correct
* class which are potential ambiguities.
*
* @param Blob blob to get classification ambiguities for
* @param CorrectClass correct class for Blob
*
* Globals:
* - CurrentRatings used by qsort compare routine
* - PreTrainedTemplates built-in templates
*
* @return String containing all possible ambiguous classes.
*/
UNICHAR_ID *Classify::GetAmbiguities(TBLOB *Blob, CLASS_ID CorrectClass) {
  auto *Results = new ADAPT_RESULTS();
  UNICHAR_ID *Ambiguities;
  Results->Initialize();
  INT_FX_RESULT_STRUCT fx_info;
  std::vector<INT_FEATURE_STRUCT> bl_features;
  TrainingSample *sample =
      BlobToTrainingSample(*Blob, classify_nonlinear_norm, &fx_info, &bl_features);
  if (sample == nullptr) {
    delete Results;
    return nullptr;
  }
  // Classify against the pre-trained templates only.
  CharNormClassifier(Blob, *sample, Results);
  delete sample;
  RemoveBadMatches(Results);
  std::sort(Results->match.begin(), Results->match.end(), SortDescendingRating);
  /* copy the class id's into an string of ambiguities - don't copy if
     the correct class is the only class id matched */
  // Caller owns the returned -1-terminated array.
  Ambiguities = new UNICHAR_ID[Results->match.size() + 1];
  if (Results->match.size() > 1 ||
      (Results->match.size() == 1 && Results->match[0].unichar_id != CorrectClass)) {
    unsigned i;
    for (i = 0; i < Results->match.size(); i++) {
      Ambiguities[i] = Results->match[i].unichar_id;
    }
    Ambiguities[i] = -1;  // terminator
  } else {
    Ambiguities[0] = -1;  // empty list: only the correct class matched
  }
  delete Results;
  return Ambiguities;
} /* GetAmbiguities */
// Returns true if the given blob looks too dissimilar to any character
// present in the classifier templates.
bool Classify::LooksLikeGarbage(TBLOB *blob) {
  // Classify the blob and inspect the first whole-character (non-fragment)
  // rating: if its certainty is below the garbage threshold — or there is no
  // whole character at all — the blob is considered garbage.
  auto *ratings = new BLOB_CHOICE_LIST();
  AdaptiveClassifier(blob, ratings);
  BLOB_CHOICE_IT ratings_it(ratings);
  const UNICHARSET &unicharset = getDict().getUnicharset();
  if (classify_debug_character_fragments) {
    print_ratings_list("======================\nLooksLikeGarbage() got ", ratings, unicharset);
  }
  bool is_garbage = true;  // default: no whole characters in ratings
  for (ratings_it.mark_cycle_pt(); !ratings_it.cycled_list(); ratings_it.forward()) {
    if (unicharset.get_fragment(ratings_it.data()->unichar_id()) == nullptr) {
      // First non-fragment result decides the outcome.
      is_garbage = ratings_it.data()->certainty() <
                   classify_character_fragments_garbage_certainty_threshold;
      break;
    }
  }
  delete ratings;
  return is_garbage;
}
/*---------------------------------------------------------------------------*/
/**
* This routine calls the integer (Hardware) feature
* extractor if it has not been called before for this blob.
*
* The results from the feature extractor are placed into
* globals so that they can be used in other routines without
* re-extracting the features.
*
* It then copies the char norm features into the IntFeatures
* array provided by the caller.
*
* @param templates used to compute char norm adjustments
* @param pruner_norm_array Array of factors from blob normalization
* process
* @param char_norm_array array to fill with dummy char norm adjustments
* @param fx_info
*
* Globals:
*
* @return Number of features extracted or 0 if an error occurred.
*/
int Classify::GetCharNormFeature(const INT_FX_RESULT_STRUCT &fx_info, INT_TEMPLATES_STRUCT *templates,
                                 uint8_t *pruner_norm_array, uint8_t *char_norm_array) {
  // Build the char-norm feature from the blob's moments and fill the
  // normalization arrays; returns the standardized feature count.
  auto *norm_feature = new FEATURE_STRUCT(&CharNormDesc);
  const float baseline = kBlnBaselineOffset;
  const float scale = MF_SCALE_FACTOR;
  norm_feature->Params[CharNormY] = (fx_info.Ymean - baseline) * scale;
  norm_feature->Params[CharNormLength] = fx_info.Length * scale / LENGTH_COMPRESSION;
  norm_feature->Params[CharNormRx] = fx_info.Rx * scale;
  norm_feature->Params[CharNormRy] = fx_info.Ry * scale;
  // ComputeCharNormArrays takes ownership of norm_feature and deletes it.
  ComputeCharNormArrays(norm_feature, templates, char_norm_array, pruner_norm_array);
  return IntCastRounded(fx_info.Length / kStandardFeatureLength);
} /* GetCharNormFeature */
// Computes the char_norm_array for the unicharset and, if not nullptr, the
// pruner_array as appropriate according to the existence of the shape_table.
// Computes the char_norm_array for the unicharset and, if not nullptr, the
// pruner_array as appropriate according to the existence of the shape_table.
// Takes ownership of (and deletes) norm_feature.
void Classify::ComputeCharNormArrays(FEATURE_STRUCT *norm_feature, INT_TEMPLATES_STRUCT *templates,
                                     uint8_t *char_norm_array, uint8_t *pruner_array) {
  ComputeIntCharNormArray(*norm_feature, char_norm_array);
  // Fix: restore the null guard that was commented out. AmbigClassifier calls
  // GetCharNormFeature with a nullptr pruner array, which reaches here and
  // previously dereferenced nullptr (ComputeIntCharNormArray/memset below).
  if (pruner_array != nullptr) {
    if (shape_table_ == nullptr) {
      // No shape table: pruner entries are indexed by unichar, same as the
      // char norm array.
      ComputeIntCharNormArray(*norm_feature, pruner_array);
    } else {
      memset(&pruner_array[0], UINT8_MAX, templates->NumClasses * sizeof(pruner_array[0]));
      // Each entry in the pruner norm array is the MIN of all the entries of
      // the corresponding unichars in the CharNormArray.
      for (unsigned id = 0; id < templates->NumClasses; ++id) {
        int font_set_id = templates->Class[id]->font_set_id;
        const FontSet &fs = fontset_table_.at(font_set_id);
        for (auto f : fs) {
          const Shape &shape = shape_table_->GetShape(f);
          for (int c = 0; c < shape.size(); ++c) {
            if (char_norm_array[shape[c].unichar_id] < pruner_array[id]) {
              pruner_array[id] = char_norm_array[shape[c].unichar_id];
            }
          }
        }
      }
    }
  }
  delete norm_feature;
}
/*---------------------------------------------------------------------------*/
/**
*
* @param Templates adapted templates to add new config to
* @param ClassId class id to associate with new config
* @param FontinfoId font information inferred from pre-trained templates
* @param NumFeatures number of features in IntFeatures
* @param Features features describing model for new config
* @param FloatFeatures floating-pt representation of features
*
* @return The id of the new config created, a negative integer in
* case of error.
*/
int Classify::MakeNewTemporaryConfig(ADAPT_TEMPLATES_STRUCT *Templates, CLASS_ID ClassId, int FontinfoId,
                                     int NumFeatures, INT_FEATURE_ARRAY Features,
                                     FEATURE_SET FloatFeatures) {
  INT_CLASS_STRUCT *IClass;
  ADAPT_CLASS_STRUCT *Class;
  PROTO_ID OldProtos[MAX_NUM_PROTOS];
  FEATURE_ID BadFeatures[MAX_NUM_INT_FEATURES];
  int NumOldProtos;
  int NumBadFeatures;
  int MaxProtoId, OldMaxProtoId;
  int MaskSize;
  int ConfigId;
  int i;
  int debug_level = NO_DEBUG;
  if (classify_learning_debug_level >= 3) {
    debug_level = PRINT_MATCH_SUMMARY | PRINT_FEATURE_MATCHES | PRINT_PROTO_MATCHES;
  }
  IClass = ClassForClassId(Templates->Templates, ClassId);
  Class = Templates->Class[ClassId];
  if (IClass->NumConfigs >= MAX_NUM_CONFIGS) {
    ++NumAdaptationsFailed;
    if (classify_learning_debug_level >= 1) {
      tprintf("Cannot make new temporary config: maximum number exceeded.\n");
    }
    return -1;
  }
  OldMaxProtoId = IClass->NumProtos - 1;
  // Find existing protos that already match the features well enough to
  // reuse in the new config.
  NumOldProtos = im_.FindGoodProtos(IClass, AllProtosOn, AllConfigsOff, NumFeatures, Features,
                                    OldProtos, classify_adapt_proto_threshold, debug_level);
  // Build a proto mask containing the reusable protos.
  MaskSize = WordsInVectorOfSize(MAX_NUM_PROTOS);
  zero_all_bits(TempProtoMask, MaskSize);
  for (i = 0; i < NumOldProtos; i++) {
    SET_BIT(TempProtoMask, OldProtos[i]);
  }
  // Features not covered by the good protos become seeds for new protos.
  NumBadFeatures = im_.FindBadFeatures(IClass, TempProtoMask, AllConfigsOn, NumFeatures, Features,
                                       BadFeatures, classify_adapt_feature_threshold, debug_level);
  // MakeNewTempProtos also adds the new protos to TempProtoMask.
  MaxProtoId =
      MakeNewTempProtos(FloatFeatures, NumBadFeatures, BadFeatures, IClass, Class, TempProtoMask);
  if (MaxProtoId == NO_PROTO) {
    ++NumAdaptationsFailed;
    if (classify_learning_debug_level >= 1) {
      tprintf("Cannot make new temp protos: maximum number exceeded.\n");
    }
    return -1;
  }
  // Register the new config built from the combined old+new proto mask.
  ConfigId = AddIntConfig(IClass);
  ConvertConfig(TempProtoMask, ConfigId, IClass);
  auto Config = new TEMP_CONFIG_STRUCT(MaxProtoId, FontinfoId);
  TempConfigFor(Class, ConfigId) = Config;
  copy_all_bits(TempProtoMask, Config->Protos, Config->ProtoVectorSize);
  if (classify_learning_debug_level >= 1) {
    tprintf(
        "Making new temp config %d fontinfo id %d"
        " using %d old and %d new protos.\n",
        ConfigId, Config->FontinfoId, NumOldProtos, MaxProtoId - OldMaxProtoId);
  }
  return ConfigId;
} /* MakeNewTemporaryConfig */
/*---------------------------------------------------------------------------*/
/**
* This routine finds sets of sequential bad features
* that all have the same angle and converts each set into
* a new temporary proto. The temp proto is added to the
* proto pruner for IClass, pushed onto the list of temp
* protos in Class, and added to TempProtoMask.
*
* @param Features floating-pt features describing new character
* @param NumBadFeat number of bad features to turn into protos
* @param BadFeat feature id's of bad features
* @param IClass integer class templates to add new protos to
* @param Class adapted class templates to add new protos to
* @param TempProtoMask proto mask to add new protos to
*
* Globals: none
*
* @return Max proto id in class after all protos have been added.
*/
PROTO_ID Classify::MakeNewTempProtos(FEATURE_SET Features, int NumBadFeat, FEATURE_ID BadFeat[],
                                     INT_CLASS_STRUCT *IClass, ADAPT_CLASS_STRUCT *Class,
                                     BIT_VECTOR TempProtoMask) {
  FEATURE_ID *ProtoStart;
  FEATURE_ID *ProtoEnd;
  FEATURE_ID *LastBad;
  PROTO_STRUCT *Proto;
  FEATURE F1, F2;
  float X1, X2, Y1, Y2;
  float A1, A2, AngleDelta;
  float SegmentLength;
  PROTO_ID Pid;
  // Greedily group runs of consecutive bad features that share (roughly) the
  // same direction and are spatially close; each run becomes one new proto.
  for (ProtoStart = BadFeat, LastBad = ProtoStart + NumBadFeat; ProtoStart < LastBad;
       ProtoStart = ProtoEnd) {
    F1 = Features->Features[*ProtoStart];
    X1 = F1->Params[PicoFeatX];
    Y1 = F1->Params[PicoFeatY];
    A1 = F1->Params[PicoFeatDir];
    // Extend the run while direction stays similar and the next feature is
    // within the current segment length of the run's start.
    for (ProtoEnd = ProtoStart + 1, SegmentLength = GetPicoFeatureLength(); ProtoEnd < LastBad;
         ProtoEnd++, SegmentLength += GetPicoFeatureLength()) {
      F2 = Features->Features[*ProtoEnd];
      X2 = F2->Params[PicoFeatX];
      Y2 = F2->Params[PicoFeatY];
      A2 = F2->Params[PicoFeatDir];
      AngleDelta = std::fabs(A1 - A2);
      // Directions wrap around in [0, 1), so take the shorter way round.
      if (AngleDelta > 0.5f) {
        AngleDelta = 1 - AngleDelta;
      }
      if (AngleDelta > matcher_clustering_max_angle_delta || std::fabs(X1 - X2) > SegmentLength ||
          std::fabs(Y1 - Y2) > SegmentLength) {
        break;
      }
    }
    // Use the last feature actually included in the run as the far endpoint.
    F2 = Features->Features[*(ProtoEnd - 1)];
    X2 = F2->Params[PicoFeatX];
    Y2 = F2->Params[PicoFeatY];
    A2 = F2->Params[PicoFeatDir];
    Pid = AddIntProto(IClass);
    if (Pid == NO_PROTO) {
      return (NO_PROTO);  // Class is full; caller aborts the adaptation.
    }
    auto TempProto = new TEMP_PROTO_STRUCT;
    Proto = &(TempProto->Proto);
    /* compute proto params - NOTE that Y_DIM_OFFSET must be used because
       ConvertProto assumes that the Y dimension varies from -0.5 to 0.5
       instead of the -0.25 to 0.75 used in baseline normalization */
    Proto->Length = SegmentLength;
    Proto->Angle = A1;
    Proto->X = (X1 + X2) / 2;
    Proto->Y = (Y1 + Y2) / 2 - Y_DIM_OFFSET;
    FillABC(Proto);
    // Register the new proto: mask bit, integer form, proto pruner, and the
    // class's temp proto list.
    TempProto->ProtoId = Pid;
    SET_BIT(TempProtoMask, Pid);
    ConvertProto(Proto, Pid, IClass);
    AddProtoToProtoPruner(Proto, Pid, IClass, classify_learning_debug_level >= 2);
    Class->TempProtos = push(Class->TempProtos, TempProto);
  }
  return IClass->NumProtos - 1;
} /* MakeNewTempProtos */
/*---------------------------------------------------------------------------*/
/**
*
* @param Templates current set of adaptive templates
* @param ClassId class containing config to be made permanent
* @param ConfigId config to be made permanent
* @param Blob current blob being adapted to
*
* Globals: none
*/
void Classify::MakePermanent(ADAPT_TEMPLATES_STRUCT *Templates, CLASS_ID ClassId, int ConfigId,
                             TBLOB *Blob) {
  UNICHAR_ID *Ambigs;
  PROTO_KEY ProtoKey;
  auto Class = Templates->Class[ClassId];
  auto Config = TempConfigFor(Class, ConfigId);
  MakeConfigPermanent(Class, ConfigId);
  // The first permanent config in a class also makes the class count as
  // permanent in the template set.
  if (Class->NumPermConfigs == 0) {
    Templates->NumPermClasses++;
  }
  Class->NumPermConfigs++;
  // Initialize permanent config.
  Ambigs = GetAmbiguities(Blob, ClassId);
  auto Perm = new PERM_CONFIG_STRUCT;
  Perm->Ambigs = Ambigs;
  Perm->FontinfoId = Config->FontinfoId;
  // Free memory associated with temporary config (since ADAPTED_CONFIG
  // is a union we need to clean up before we record permanent config).
  ProtoKey.Templates = Templates;
  ProtoKey.ClassId = ClassId;
  ProtoKey.ConfigId = ConfigId;
  // Promote every temp proto used by this config; MakeTempProtoPerm deletes
  // promoted protos and delete_d unlinks them from the list.
  Class->TempProtos = delete_d(Class->TempProtos, &ProtoKey, MakeTempProtoPerm);
  delete Config;
  // Record permanent config.
  PermConfigFor(Class, ConfigId) = Perm;
  if (classify_learning_debug_level >= 1) {
    tprintf(
        "Making config %d for %s (ClassId %d) permanent:"
        " fontinfo id %d, ambiguities '",
        ConfigId, getDict().getUnicharset().debug_str(ClassId).c_str(), ClassId,
        PermConfigFor(Class, ConfigId)->FontinfoId);
    // Ambigs is terminated by a negative unichar id.
    for (UNICHAR_ID *AmbigsPointer = Ambigs; *AmbigsPointer >= 0; ++AmbigsPointer) {
      tprintf("%s", unicharset.id_to_unichar(*AmbigsPointer));
    }
    tprintf("'.\n");
  }
} /* MakePermanent */
/*---------------------------------------------------------------------------*/
/**
* This routine converts TempProto to be permanent if
* its proto id is used by the configuration specified in
* ProtoKey.
*
* @param item1 (TEMP_PROTO) temporary proto to compare to key
* @param item2 (PROTO_KEY) defines which protos to make permanent
*
* Globals: none
*
* @return true if TempProto is converted, false otherwise
*/
int MakeTempProtoPerm(void *item1, void *item2) {
  // item1 is the TEMP_PROTO_STRUCT under test; item2 is the PROTO_KEY naming
  // the class/config whose protos should be promoted.
  auto *candidate = static_cast<TEMP_PROTO_STRUCT *>(item1);
  auto *key = static_cast<PROTO_KEY *>(item2);
  auto adapt_class = key->Templates->Class[key->ClassId];
  auto config = TempConfigFor(adapt_class, key->ConfigId);
  // Only promote protos that the target config actually references.
  const bool used_by_config =
      candidate->ProtoId <= config->MaxProtoId && test_bit(config->Protos, candidate->ProtoId);
  if (!used_by_config) {
    return false;
  }
  MakeProtoPermanent(adapt_class, candidate->ProtoId);
  AddProtoToClassPruner(&(candidate->Proto), key->ClassId, key->Templates->Templates);
  // The caller (delete_d) unlinks promoted entries; free the temp proto here.
  delete candidate;
  return true;
} /* MakeTempProtoPerm */
/*---------------------------------------------------------------------------*/
/**
* This routine writes the matches in Results to File.
*
* @param results match results to write to File
*
* Globals: none
*/
void Classify::PrintAdaptiveMatchResults(const ADAPT_RESULTS &results) {
  // Dump every candidate match: debug unichar string followed by the
  // rating's own Print() output.
  const auto &matches = results.match;
  for (size_t i = 0; i < matches.size(); ++i) {
    tprintf("%s ", unicharset.debug_str(matches[i].unichar_id).c_str());
    matches[i].Print();
  }
} /* PrintAdaptiveMatchResults */
/*---------------------------------------------------------------------------*/
/**
* This routine steps through each matching class in Results
* and removes it from the match list if its rating
* is worse than the BestRating plus a pad. In other words,
* all good matches get moved to the front of the classes
* array.
*
* @param Results contains matches to be filtered
*
* Globals:
* - matcher_bad_match_pad defines a "bad match"
*/
void Classify::RemoveBadMatches(ADAPT_RESULTS *Results) {
  unsigned Next, NextGood;
  float BadMatchThreshold;
  static const char *romans = "i v x I V X";
  // Keep only matches whose rating is within matcher_bad_match_pad of the
  // best rating (ratings compare with >=, so higher rating = better here).
  BadMatchThreshold = Results->best_rating - matcher_bad_match_pad;
  if (classify_bln_numeric_mode) {
    UNICHAR_ID unichar_id_one =
        unicharset.contains_unichar("1") ? unicharset.unichar_to_id("1") : -1;
    UNICHAR_ID unichar_id_zero =
        unicharset.contains_unichar("0") ? unicharset.unichar_to_id("0") : -1;
    float scored_one = ScoredUnichar(unichar_id_one, *Results);
    float scored_zero = ScoredUnichar(unichar_id_zero, *Results);
    for (Next = NextGood = 0; Next < Results->match.size(); Next++) {
      const UnicharRating &match = Results->match[Next];
      if (match.rating >= BadMatchThreshold) {
        if (!unicharset.get_isalpha(match.unichar_id) ||
            strstr(romans, unicharset.id_to_unichar(match.unichar_id)) != nullptr) {
          // Non-alpha or roman-numeral letters are kept as-is in numeric mode.
        } else if (unicharset.eq(match.unichar_id, "l") && scored_one < BadMatchThreshold) {
          // 'l' is almost certainly '1' in numeric mode; substitute if '1'
          // itself did not already score well enough.
          Results->match[Next].unichar_id = unichar_id_one;
        } else if (unicharset.eq(match.unichar_id, "O") && scored_zero < BadMatchThreshold) {
          // Likewise 'O' -> '0'.
          Results->match[Next].unichar_id = unichar_id_zero;
        } else {
          Results->match[Next].unichar_id = INVALID_UNICHAR_ID; // Don't copy.
        }
        // Compact surviving matches to the front of the array in place.
        if (Results->match[Next].unichar_id != INVALID_UNICHAR_ID) {
          if (NextGood == Next) {
            ++NextGood;
          } else {
            Results->match[NextGood++] = Results->match[Next];
          }
        }
      }
    }
  } else {
    // Non-numeric mode: simple threshold filter with in-place compaction.
    for (Next = NextGood = 0; Next < Results->match.size(); Next++) {
      if (Results->match[Next].rating >= BadMatchThreshold) {
        if (NextGood == Next) {
          ++NextGood;
        } else {
          Results->match[NextGood++] = Results->match[Next];
        }
      }
    }
  }
  Results->match.resize(NextGood);
} /* RemoveBadMatches */
/*----------------------------------------------------------------------------*/
/**
* This routine discards extra digits or punctuation from the results.
* We keep only the top 2 punctuation answers and the top 1 digit answer if
* present.
*
* @param Results contains matches to be filtered
*/
void Classify::RemoveExtraPuncs(ADAPT_RESULTS *Results) {
  // Membership tables probed with strstr below (each entry is a single char
  // separated by spaces). Declared const so the shared static data is
  // read-only; the originals were mutable static buffers for no reason.
  static const char punc_chars[] = ". , ; : / ` ~ ' - = \\ | \" ! _ ^";
  static const char digit_chars[] = "0 1 2 3 4 5 6 7 8 9";
  unsigned Next, NextGood;
  int punc_count = 0;  // punctuation answers seen so far
  int digit_count = 0; // digit answers seen so far
  for (Next = NextGood = 0; Next < Results->match.size(); Next++) {
    const UnicharRating &match = Results->match[Next];
    bool keep = true;
    if (strstr(punc_chars, unicharset.id_to_unichar(match.unichar_id)) != nullptr) {
      // Keep only the top two punctuation answers.
      if (punc_count >= 2) {
        keep = false;
      }
      punc_count++;
    } else {
      if (strstr(digit_chars, unicharset.id_to_unichar(match.unichar_id)) != nullptr) {
        // Keep only the top digit answer.
        if (digit_count >= 1) {
          keep = false;
        }
        digit_count++;
      }
    }
    if (keep) {
      // Compact kept matches to the front of the array in place.
      if (NextGood == Next) {
        ++NextGood;
      } else {
        Results->match[NextGood++] = match;
      }
    }
  }
  Results->match.resize(NextGood);
} /* RemoveExtraPuncs */
/*---------------------------------------------------------------------------*/
/**
* This routine resets the internal thresholds inside
* the integer matcher to correspond to the specified
* threshold.
*
* @param Threshold threshold for creating new templates
*
* Globals:
* - matcher_good_threshold default good match rating
*/
void Classify::SetAdaptiveThreshold(float Threshold) {
  // Map the 0-1 match threshold onto the integer matcher's 0-255 scale.
  // The default good-match threshold maps to the fixed value 0.9.
  float scaled;
  if (Threshold == matcher_good_threshold) {
    scaled = 0.9f;
  } else {
    scaled = 1 - Threshold;
  }
  const int clipped = ClipToRange<int>(255 * scaled, 0, 255);
  classify_adapt_proto_threshold.set_value(clipped);
  classify_adapt_feature_threshold.set_value(clipped);
} /* SetAdaptiveThreshold */
#ifndef GRAPHICS_DISABLED
/*---------------------------------------------------------------------------*/
/**
* This routine displays debug information for the best config
* of the given shape_id for the given set of features.
*
* @param shape_id classifier id to work with
* @param features features of the unknown character
* @param num_features Number of features in the features array.
*/
void Classify::ShowBestMatchFor(int shape_id, const INT_FEATURE_STRUCT *features,
                                int num_features) {
  uint32_t config_mask;
  if (UnusedClassIdIn(PreTrainedTemplates, shape_id)) {
    tprintf("No built-in templates for class/shape %d\n", shape_id);
    return;
  }
  if (num_features <= 0) {
    tprintf("Illegal blob (char norm features)!\n");
    return;
  }
  UnicharRating cn_result;
  classify_norm_method.set_value(character);
  // First pass: match against all protos and configs (no debug output) just
  // to discover the best config for this shape.
  im_.Match(ClassForClassId(PreTrainedTemplates, shape_id), AllProtosOn, AllConfigsOn, num_features,
            features, &cn_result, classify_adapt_feature_threshold, NO_DEBUG,
            matcher_debug_separate_windows);
  tprintf("\n");
  // Restrict the second pass to only the winning config.
  config_mask = 1 << cn_result.config;
  tprintf("Static Shape ID: %d\n", shape_id);
  ShowMatchDisplay();
  // Second pass: rematch with debug flags enabled so the display shows only
  // the best config's protos/features.
  im_.Match(ClassForClassId(PreTrainedTemplates, shape_id), AllProtosOn, &config_mask, num_features,
            features, &cn_result, classify_adapt_feature_threshold, matcher_debug_flags,
            matcher_debug_separate_windows);
  UpdateMatchDisplay();
} /* ShowBestMatchFor */
#endif // !GRAPHICS_DISABLED
// Returns a string for the classifier class_id: either the corresponding
// unicharset debug_str or the shape_table_ debug str.
std::string Classify::ClassIDToDebugStr(const INT_TEMPLATES_STRUCT *templates, int class_id,
                                        int config_id) const {
  // For the static (pre-trained) templates with a shape table, describe the
  // shape; otherwise fall back to the unicharset's debug string.
  if (templates == PreTrainedTemplates && shape_table_ != nullptr) {
    const int shape_id = ClassAndConfigIDToFontOrShapeID(class_id, config_id);
    return shape_table_->DebugStr(shape_id);
  }
  return unicharset.debug_str(class_id);
}
// Converts a classifier class_id index to a shape_table_ index
int Classify::ClassAndConfigIDToFontOrShapeID(int class_id, int int_result_config) const {
int font_set_id = PreTrainedTemplates->Class[class_id]->font_set_id;
// Older inttemps have no font_ids.
if (font_set_id < 0) {
return kBlankFontinfoId;
}
const FontSet &fs = fontset_table_.at(font_set_id);
return fs.at(int_result_config);
}
// Converts a shape_table_ index to a classifier class_id index (not a
// unichar-id!). Uses a search, so not fast.
int Classify::ShapeIDToClassID(int shape_id) const {
for (unsigned id = 0; id < PreTrainedTemplates->NumClasses; ++id) {
int font_set_id = PreTrainedTemplates->Class[id]->font_set_id;
ASSERT_HOST(font_set_id >= 0);
const FontSet &fs = fontset_table_.at(font_set_id);
for (auto f : fs) {
if (f == shape_id) {
return id;
}
}
}
tprintf("Shape %d not found\n", shape_id);
return -1;
}
// Returns true if the given TEMP_CONFIG_STRUCT is good enough to make it
// a permanent config.
bool Classify::TempConfigReliable(CLASS_ID class_id, const TEMP_CONFIG_STRUCT *config) {
  if (classify_learning_debug_level >= 1) {
    tprintf("NumTimesSeen for config of %s is %d\n",
            getDict().getUnicharset().debug_str(class_id).c_str(), config->NumTimesSeen);
  }
  // Seen at least the "sufficient" count: reliable unconditionally.
  if (config->NumTimesSeen >= matcher_sufficient_examples_for_prototyping) {
    return true;
  } else if (config->NumTimesSeen < matcher_min_examples_for_prototyping) {
    // Below the minimum count: never reliable yet.
    return false;
  } else if (use_ambigs_for_adaption) {
    // Go through the ambigs vector and see whether we have already seen
    // enough times all the characters represented by the ambigs vector.
    const UnicharIdVector *ambigs = getDict().getUnicharAmbigs().AmbigsForAdaption(class_id);
    int ambigs_size = (ambigs == nullptr) ? 0 : ambigs->size();
    for (int ambig = 0; ambig < ambigs_size; ++ambig) {
      ADAPT_CLASS_STRUCT *ambig_class = AdaptedTemplates->Class[(*ambigs)[ambig]];
      assert(ambig_class != nullptr);
      // An ambiguous class with no permanent config and too few sightings
      // blocks promotion of this config.
      if (ambig_class->NumPermConfigs == 0 &&
          ambig_class->MaxNumTimesSeen < matcher_min_examples_for_prototyping) {
        if (classify_learning_debug_level >= 1) {
          tprintf(
              "Ambig %s has not been seen enough times,"
              " not making config for %s permanent\n",
              getDict().getUnicharset().debug_str((*ambigs)[ambig]).c_str(),
              getDict().getUnicharset().debug_str(class_id).c_str());
        }
        return false;
      }
    }
  }
  return true;
}
void Classify::UpdateAmbigsGroup(CLASS_ID class_id, TBLOB *Blob) {
  // For every class listed by ReverseAmbigsForAdaption(class_id), re-check
  // whether any of its still-temporary configs has now become reliable, and
  // if so make that config permanent using the current blob.
  const UnicharIdVector *ambigs = getDict().getUnicharAmbigs().ReverseAmbigsForAdaption(class_id);
  int ambigs_size = (ambigs == nullptr) ? 0 : ambigs->size();
  if (classify_learning_debug_level >= 1) {
    tprintf("Running UpdateAmbigsGroup for %s class_id=%d\n",
            getDict().getUnicharset().debug_str(class_id).c_str(), class_id);
  }
  for (int ambig = 0; ambig < ambigs_size; ++ambig) {
    CLASS_ID ambig_class_id = (*ambigs)[ambig];
    const ADAPT_CLASS_STRUCT *ambigs_class = AdaptedTemplates->Class[ambig_class_id];
    for (int cfg = 0; cfg < MAX_NUM_CONFIGS; ++cfg) {
      // Skip configs that are already permanent.
      if (ConfigIsPermanent(ambigs_class, cfg)) {
        continue;
      }
      const TEMP_CONFIG_STRUCT *config = TempConfigFor(AdaptedTemplates->Class[ambig_class_id], cfg);
      if (config != nullptr && TempConfigReliable(ambig_class_id, config)) {
        if (classify_learning_debug_level >= 1) {
          tprintf("Making config %d of %s permanent\n", cfg,
                  getDict().getUnicharset().debug_str(ambig_class_id).c_str());
        }
        MakePermanent(AdaptedTemplates, ambig_class_id, cfg, Blob);
      }
    }
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/adaptmatch.cpp
|
C++
|
apache-2.0
| 85,688
|
/******************************************************************************
** Filename: blobclass.c
** Purpose: High level blob classification and training routines.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include <cstdio>
#include "classify.h"
#include "featdefs.h"
#include "mf.h"
#include "normfeat.h"
namespace tesseract {
/*---------------------------------------------------------------------------*/
// Extracts features from the given blob and saves them in the tr_file_data_
// member variable.
// fontname: Name of font that this blob was printed in.
// cn_denorm: Character normalization transformation to apply to the blob.
// fx_info: Character normalization parameters computed with cn_denorm.
// blob_text: Ground truth text for the blob.
void Classify::LearnBlob(const std::string &fontname, TBLOB *blob, const DENORM &cn_denorm,
                         const INT_FX_RESULT_STRUCT &fx_info, const char *blob_text) {
  std::unique_ptr<CHAR_DESC_STRUCT> CharDesc(new CHAR_DESC_STRUCT(feature_defs_));
  // Extract the four feature sets used for static training: micro features,
  // char-norm features, and the two integer feature variants.
  CharDesc->FeatureSets[0] = ExtractMicros(blob, cn_denorm);
  CharDesc->FeatureSets[1] = ExtractCharNormFeatures(fx_info);
  CharDesc->FeatureSets[2] = ExtractIntCNFeatures(*blob, fx_info);
  CharDesc->FeatureSets[3] = ExtractIntGeoFeatures(*blob, fx_info);
  if (ValidCharDescription(feature_defs_, CharDesc.get())) {
    // Label the features with a class name and font name.
    tr_file_data_ += "\n";
    tr_file_data_ += fontname;
    tr_file_data_ += " ";
    tr_file_data_ += blob_text;
    tr_file_data_ += "\n";
    // Append the serialized feature sets to the in-memory .tr buffer;
    // WriteTRFile() later flushes it to disk.
    WriteCharDescription(feature_defs_, CharDesc.get(), tr_file_data_);
  } else {
    tprintf("Blob learned was invalid!\n");
  }
} // LearnBlob
// Writes stored training data to a .tr file based on the given filename.
// Returns false on error.
bool Classify::WriteTRFile(const char *filename) {
  // Appends ".tr" to filename and dumps the accumulated training buffer.
  // Note: the internal buffer is cleared whether or not the write succeeds.
  std::string tr_filename(filename);
  tr_filename += ".tr";
  bool ok = false;
  if (FILE *fp = fopen(tr_filename.c_str(), "wb")) {
    ok = tesseract::Serialize(fp, &tr_file_data_[0], tr_file_data_.length());
    fclose(fp);
  }
  tr_file_data_.resize(0);
  return ok;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/blobclass.cpp
|
C++
|
apache-2.0
| 2,935
|
///////////////////////////////////////////////////////////////////////
// File: classify.cpp
// Description: classify class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#include "classify.h"
#ifdef DISABLED_LEGACY_ENGINE
# include <string.h>
namespace tesseract {
// Minimal constructor for builds with the legacy engine disabled: registers
// only the parameters that other code still references.
Classify::Classify()
    : INT_MEMBER(classify_debug_level, 0, "Classify debug level", this->params())
    ,
    BOOL_MEMBER(classify_bln_numeric_mode, 0, "Assume the input is numbers [0-9].", this->params())
    ,
    double_MEMBER(classify_max_rating_ratio, 1.5, "Veto ratio between classifier ratings",
                  this->params())
    ,
    double_MEMBER(classify_max_certainty_margin, 5.5,
                  "Veto difference between classifier certainties", this->params())
    ,
    dict_(this) {}
// Out-of-line to keep the vtable anchor here; trivially defaulted body.
Classify::~Classify() = default;
} // namespace tesseract
#else // DISABLED_LEGACY_ENGINE not defined
# include <cstring>
# include "fontinfo.h"
# include "intproto.h"
# include "mfoutline.h"
# include "scrollview.h"
# include "shapeclassifier.h"
# include "shapetable.h"
# include "unicity_table.h"
namespace tesseract {
// Full (legacy-engine) constructor: every tunable knob is registered with
// the params() list via the *_MEMBER macros, then the integer matcher is
// wired to the debug level and feature definitions are loaded.
Classify::Classify()
    : BOOL_MEMBER(allow_blob_division, true, "Use divisible blobs chopping", this->params())
    , BOOL_MEMBER(prioritize_division, false, "Prioritize blob division over chopping",
                  this->params())
    , BOOL_MEMBER(classify_enable_learning, true, "Enable adaptive classifier", this->params())
    , INT_MEMBER(classify_debug_level, 0, "Classify debug level", this->params())
    , INT_MEMBER(classify_norm_method, character, "Normalization Method ...", this->params())
    , double_MEMBER(classify_char_norm_range, 0.2, "Character Normalization Range ...",
                    this->params())
    , double_MEMBER(classify_max_rating_ratio, 1.5, "Veto ratio between classifier ratings",
                    this->params())
    , double_MEMBER(classify_max_certainty_margin, 5.5,
                    "Veto difference between classifier certainties", this->params())
    , BOOL_MEMBER(tess_cn_matching, 0, "Character Normalized Matching", this->params())
    , BOOL_MEMBER(tess_bn_matching, 0, "Baseline Normalized Matching", this->params())
    , BOOL_MEMBER(classify_enable_adaptive_matcher, 1, "Enable adaptive classifier", this->params())
    , BOOL_MEMBER(classify_use_pre_adapted_templates, 0, "Use pre-adapted classifier templates",
                  this->params())
    , BOOL_MEMBER(classify_save_adapted_templates, 0, "Save adapted templates to a file",
                  this->params())
    , BOOL_MEMBER(classify_enable_adaptive_debugger, 0, "Enable match debugger", this->params())
    , BOOL_MEMBER(classify_nonlinear_norm, 0, "Non-linear stroke-density normalization",
                  this->params())
    , INT_MEMBER(matcher_debug_level, 0, "Matcher Debug Level", this->params())
    , INT_MEMBER(matcher_debug_flags, 0, "Matcher Debug Flags", this->params())
    , INT_MEMBER(classify_learning_debug_level, 0, "Learning Debug Level: ", this->params())
    , double_MEMBER(matcher_good_threshold, 0.125, "Good Match (0-1)", this->params())
    , double_MEMBER(matcher_reliable_adaptive_result, 0.0, "Great Match (0-1)", this->params())
    , double_MEMBER(matcher_perfect_threshold, 0.02, "Perfect Match (0-1)", this->params())
    , double_MEMBER(matcher_bad_match_pad, 0.15, "Bad Match Pad (0-1)", this->params())
    , double_MEMBER(matcher_rating_margin, 0.1, "New template margin (0-1)", this->params())
    , double_MEMBER(matcher_avg_noise_size, 12.0, "Avg. noise blob length", this->params())
    , INT_MEMBER(matcher_permanent_classes_min, 1, "Min # of permanent classes", this->params())
    , INT_MEMBER(matcher_min_examples_for_prototyping, 3, "Reliable Config Threshold",
                 this->params())
    , INT_MEMBER(matcher_sufficient_examples_for_prototyping, 5,
                 "Enable adaption even if the ambiguities have not been seen", this->params())
    , double_MEMBER(matcher_clustering_max_angle_delta, 0.015,
                    "Maximum angle delta for prototype clustering", this->params())
    , double_MEMBER(classify_misfit_junk_penalty, 0.0,
                    "Penalty to apply when a non-alnum is vertically out of "
                    "its expected textline position",
                    this->params())
    , double_MEMBER(rating_scale, 1.5, "Rating scaling factor", this->params())
    , double_MEMBER(tessedit_class_miss_scale, 0.00390625, "Scale factor for features not used",
                    this->params())
    , double_MEMBER(classify_adapted_pruning_factor, 2.5,
                    "Prune poor adapted results this much worse than best result", this->params())
    , double_MEMBER(classify_adapted_pruning_threshold, -1.0,
                    "Threshold at which classify_adapted_pruning_factor starts", this->params())
    , INT_MEMBER(classify_adapt_proto_threshold, 230,
                 "Threshold for good protos during adaptive 0-255", this->params())
    , INT_MEMBER(classify_adapt_feature_threshold, 230,
                 "Threshold for good features during adaptive 0-255", this->params())
    , BOOL_MEMBER(disable_character_fragments, true,
                  "Do not include character fragments in the"
                  " results of the classifier",
                  this->params())
    , double_MEMBER(classify_character_fragments_garbage_certainty_threshold, -3.0,
                    "Exclude fragments that do not look like whole"
                    " characters from training and adaption",
                    this->params())
    , BOOL_MEMBER(classify_debug_character_fragments, false,
                  "Bring up graphical debugging windows for fragments training", this->params())
    , BOOL_MEMBER(matcher_debug_separate_windows, false,
                  "Use two different windows for debugging the matching: "
                  "One for the protos and one for the features.",
                  this->params())
    , STRING_MEMBER(classify_learn_debug_str, "", "Class str to debug learning", this->params())
    , INT_MEMBER(classify_class_pruner_threshold, 229, "Class Pruner Threshold 0-255",
                 this->params())
    , INT_MEMBER(classify_class_pruner_multiplier, 15,
                 "Class Pruner Multiplier 0-255: ", this->params())
    , INT_MEMBER(classify_cp_cutoff_strength, 7,
                 "Class Pruner CutoffStrength: ", this->params())
    , INT_MEMBER(classify_integer_matcher_multiplier, 10,
                 "Integer Matcher Multiplier 0-255: ", this->params())
    , BOOL_MEMBER(classify_bln_numeric_mode, 0, "Assume the input is numbers [0-9].",
                  this->params())
    , double_MEMBER(speckle_large_max_size, 0.30, "Max large speckle size", this->params())
    , double_MEMBER(speckle_rating_penalty, 10.0, "Penalty to add to worst rating for noise",
                    this->params())
    , im_(&classify_debug_level)
    , dict_(this) {
  using namespace std::placeholders; // for _1, _2
  // Font info entries own heap data; install the deleter for table clears.
  fontinfo_table_.set_clear_callback(std::bind(FontInfoDeleteCallback, _1));
  InitFeatureDefs(&feature_defs_);
}
Classify::~Classify() {
  // Tear down adaptive-classifier state (see EndAdaptiveClassifier).
  EndAdaptiveClassifier();
#ifndef GRAPHICS_DISABLED
  // Debug windows are owned by this object; deleting nullptr is harmless.
  delete learn_debug_win_;
  delete learn_fragmented_word_debug_win_;
  delete learn_fragments_debug_win_;
#endif
}
// Takes ownership of the given classifier, and uses it for future calls
// to CharNormClassifier.
void Classify::SetStaticClassifier(ShapeClassifier *static_classifier) {
  // Adopt the new classifier, then release the previously-owned one.
  ShapeClassifier *previous = static_classifier_;
  static_classifier_ = static_classifier;
  delete previous;
}
// Moved from speckle.cpp
// Adds a noise classification result that is a bit worse than the worst
// current result, or the worst possible result if no current results.
void Classify::AddLargeSpeckleTo(int blob_length, BLOB_CHOICE_LIST *choices) {
  BLOB_CHOICE_IT bc_it(choices);
  // If there is no classifier result, we will use the worst possible certainty
  // and corresponding rating.
  float certainty = -getDict().certainty_scale;
  float rating = rating_scale * blob_length;
  if (!choices->empty() && blob_length > 0) {
    // Choices are ordered, so the last entry is the current worst.
    bc_it.move_to_last();
    BLOB_CHOICE *worst_choice = bc_it.data();
    // Add speckle_rating_penalty to worst rating, matching old value.
    rating = worst_choice->rating() + speckle_rating_penalty;
    // Compute the rating to correspond to the certainty. (Used to be kept
    // the same, but that messes up the language model search.)
    certainty = -rating * getDict().certainty_scale / (rating_scale * blob_length);
  }
  // Append the synthetic "space" choice at the end (worst position).
  auto *blob_choice = new BLOB_CHOICE(UNICHAR_SPACE, rating, certainty, -1, 0.0f, FLT_MAX, 0,
                                      BCC_SPECKLE_CLASSIFIER);
  bc_it.add_to_end(blob_choice);
}
// Returns true if the blob is small enough to be a large speckle.
bool Classify::LargeSpeckle(const TBLOB &blob) {
double speckle_size = kBlnXHeight * speckle_large_max_size;
TBOX bbox = blob.bounding_box();
return bbox.width() < speckle_size && bbox.height() < speckle_size;
}
} // namespace tesseract
#endif // def DISABLED_LEGACY_ENGINE
|
2301_81045437/tesseract
|
src/classify/classify.cpp
|
C++
|
apache-2.0
| 9,722
|
///////////////////////////////////////////////////////////////////////
// File: classify.h
// Description: classify class.
// Author: Samuel Charron
//
// (C) Copyright 2006, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////
#ifndef TESSERACT_CLASSIFY_CLASSIFY_H_
#define TESSERACT_CLASSIFY_CLASSIFY_H_
// Include automatically generated configuration file if running autoconf.
#ifdef HAVE_CONFIG_H
# include "config_auto.h"
#endif
#ifdef DISABLED_LEGACY_ENGINE
# include "ccstruct.h"
# include "dict.h"
namespace tesseract {
// Stub Classify used when the legacy engine is compiled out
// (DISABLED_LEGACY_ENGINE): keeps only the dictionary and the handful of
// parameters that other code still references.
class Classify : public CCStruct {
public:
  Classify();
  virtual ~Classify();
  // Accessor for the dictionary owned by this classifier.
  virtual Dict &getDict() {
    return dict_;
  }
  // Member variables.
  INT_VAR_H(classify_debug_level);
  BOOL_VAR_H(classify_bln_numeric_mode);
  double_VAR_H(classify_max_rating_ratio);
  double_VAR_H(classify_max_certainty_margin);
private:
  Dict dict_;
};
} // namespace tesseract
#else // DISABLED_LEGACY_ENGINE not defined
# include "adaptive.h"
# include "ccstruct.h"
# include "dict.h"
# include "featdefs.h"
# include "fontinfo.h"
# include "intfx.h"
# include "intmatcher.h"
# include "normalis.h"
# include "ocrfeatures.h"
# include "ratngs.h"
# include "unicity_table.h"
namespace tesseract {
class ScrollView;
class WERD_CHOICE;
class WERD_RES;
struct ADAPT_RESULTS;
struct NORM_PROTOS;
static const int kUnknownFontinfoId = -1;
static const int kBlankFontinfoId = -2;
class ShapeClassifier;
struct ShapeRating;
class ShapeTable;
struct UnicharRating;
// How segmented is a blob. In this enum, character refers to a classifiable
// unit, but that is too long and character is usually easier to understand.
enum CharSegmentationType {
  CST_FRAGMENT, // A partial character.
  CST_WHOLE,    // A correctly segmented character.
  CST_IMPROPER, // More than 1 but less than 2 characters (an improper split).
  CST_NGRAM     // Multiple characters joined as one unit.
};
class TESS_API Classify : public CCStruct {
public:
Classify();
~Classify() override;
virtual Dict &getDict() {
return dict_;
}
const ShapeTable *shape_table() const {
return shape_table_;
}
// Takes ownership of the given classifier, and uses it for future calls
// to CharNormClassifier.
void SetStaticClassifier(ShapeClassifier *static_classifier);
// Adds a noise classification result that is a bit worse than the worst
// current result, or the worst possible result if no current results.
void AddLargeSpeckleTo(int blob_length, BLOB_CHOICE_LIST *choices);
// Returns true if the blob is small enough to be a large speckle.
bool LargeSpeckle(const TBLOB &blob);
/* adaptive.cpp ************************************************************/
int GetFontinfoId(ADAPT_CLASS_STRUCT *Class, uint8_t ConfigId);
// Runs the class pruner from int_templates on the given features, returning
// the number of classes output in results.
// int_templates Class pruner tables
// num_features Number of features in blob
// features Array of features
// normalization_factors (input) Array of int_templates->NumClasses fudge
// factors from blob normalization process.
// (Indexed by CLASS_INDEX)
// expected_num_features (input) Array of int_templates->NumClasses
// expected number of features for each class.
// (Indexed by CLASS_INDEX)
// results (output) Sorted Array of pruned classes.
// Array must be sized to take the maximum possible
// number of outputs : int_templates->NumClasses.
int PruneClasses(const INT_TEMPLATES_STRUCT *int_templates, int num_features, int keep_this,
const INT_FEATURE_STRUCT *features, const uint8_t *normalization_factors,
const uint16_t *expected_num_features, std::vector<CP_RESULT_STRUCT> *results);
void ReadNewCutoffs(TFile *fp, uint16_t *Cutoffs);
void PrintAdaptedTemplates(FILE *File, ADAPT_TEMPLATES_STRUCT *Templates);
void WriteAdaptedTemplates(FILE *File, ADAPT_TEMPLATES_STRUCT *Templates);
ADAPT_TEMPLATES_STRUCT *ReadAdaptedTemplates(TFile *File);
/* normmatch.cpp ************************************************************/
float ComputeNormMatch(CLASS_ID ClassId, const FEATURE_STRUCT &feature, bool DebugMatch);
void FreeNormProtos();
NORM_PROTOS *ReadNormProtos(TFile *fp);
/* protos.cpp ***************************************************************/
void ConvertProto(PROTO_STRUCT *Proto, int ProtoId, INT_CLASS_STRUCT *Class);
INT_TEMPLATES_STRUCT *CreateIntTemplates(CLASSES FloatProtos, const UNICHARSET &target_unicharset);
/* adaptmatch.cpp ***********************************************************/
// Learns the given word using its chopped_word, seam_array, denorm,
// box_word, best_state, and correct_text to learn both correctly and
// incorrectly segmented blobs. If fontname is not nullptr, then LearnBlob
// is called and the data will be saved in an internal buffer.
// Otherwise AdaptToBlob is called for adaption within a document.
void LearnWord(const char *fontname, WERD_RES *word);
// Builds a blob of length fragments, from the word, starting at start,
// and then learns it, as having the given correct_text.
// If fontname is not nullptr, then LearnBlob is called and the data will be
// saved in an internal buffer for static training.
// Otherwise AdaptToBlob is called for adaption within a document.
// threshold is a magic number required by AdaptToChar and generated by
// ComputeAdaptionThresholds.
// Although it can be partly inferred from the string, segmentation is
// provided to explicitly clarify the character segmentation.
void LearnPieces(const char *fontname, int start, int length, float threshold,
CharSegmentationType segmentation, const char *correct_text, WERD_RES *word);
void InitAdaptiveClassifier(TessdataManager *mgr);
void InitAdaptedClass(TBLOB *Blob, CLASS_ID ClassId, int FontinfoId, ADAPT_CLASS_STRUCT *Class,
ADAPT_TEMPLATES_STRUCT *Templates);
void AmbigClassifier(const std::vector<INT_FEATURE_STRUCT> &int_features,
const INT_FX_RESULT_STRUCT &fx_info, const TBLOB *blob,
INT_TEMPLATES_STRUCT *templates, ADAPT_CLASS_STRUCT **classes, UNICHAR_ID *ambiguities,
ADAPT_RESULTS *results);
void MasterMatcher(INT_TEMPLATES_STRUCT *templates, int16_t num_features,
const INT_FEATURE_STRUCT *features, const uint8_t *norm_factors,
ADAPT_CLASS_STRUCT **classes, int debug, int matcher_multiplier, const TBOX &blob_box,
const std::vector<CP_RESULT_STRUCT> &results, ADAPT_RESULTS *final_results);
// Converts configs to fonts, and if the result is not adapted, and a
// shape_table_ is present, the shape is expanded to include all
// unichar_ids represented, before applying a set of corrections to the
// distance rating in int_result, (see ComputeCorrectedRating.)
// The results are added to the final_results output.
void ExpandShapesAndApplyCorrections(ADAPT_CLASS_STRUCT **classes, bool debug, int class_id, int bottom,
int top, float cp_rating, int blob_length,
int matcher_multiplier, const uint8_t *cn_factors,
UnicharRating *int_result, ADAPT_RESULTS *final_results);
// Applies a set of corrections to the distance im_rating,
// including the cn_correction, miss penalty and additional penalty
// for non-alnums being vertical misfits. Returns the corrected distance.
double ComputeCorrectedRating(bool debug, int unichar_id, double cp_rating, double im_rating,
int feature_misses, int bottom, int top, int blob_length,
int matcher_multiplier, const uint8_t *cn_factors);
void ConvertMatchesToChoices(const DENORM &denorm, const TBOX &box, ADAPT_RESULTS *Results,
BLOB_CHOICE_LIST *Choices);
void AddNewResult(const UnicharRating &new_result, ADAPT_RESULTS *results);
int GetAdaptiveFeatures(TBLOB *Blob, INT_FEATURE_ARRAY IntFeatures, FEATURE_SET *FloatFeatures);
# ifndef GRAPHICS_DISABLED
void DebugAdaptiveClassifier(TBLOB *Blob, ADAPT_RESULTS *Results);
# endif
PROTO_ID MakeNewTempProtos(FEATURE_SET Features, int NumBadFeat, FEATURE_ID BadFeat[],
INT_CLASS_STRUCT *IClass, ADAPT_CLASS_STRUCT *Class, BIT_VECTOR TempProtoMask);
int MakeNewTemporaryConfig(ADAPT_TEMPLATES_STRUCT *Templates, CLASS_ID ClassId, int FontinfoId,
int NumFeatures, INT_FEATURE_ARRAY Features,
FEATURE_SET FloatFeatures);
void MakePermanent(ADAPT_TEMPLATES_STRUCT *Templates, CLASS_ID ClassId, int ConfigId, TBLOB *Blob);
void PrintAdaptiveMatchResults(const ADAPT_RESULTS &results);
void RemoveExtraPuncs(ADAPT_RESULTS *Results);
void RemoveBadMatches(ADAPT_RESULTS *Results);
void SetAdaptiveThreshold(float Threshold);
void ShowBestMatchFor(int shape_id, const INT_FEATURE_STRUCT *features, int num_features);
// Returns a string for the classifier class_id: either the corresponding
// unicharset debug_str or the shape_table_ debug str.
std::string ClassIDToDebugStr(const INT_TEMPLATES_STRUCT *templates, int class_id,
int config_id) const;
// Converts a classifier class_id index with a config ID to:
// shape_table_ present: a shape_table_ index OR
// No shape_table_: a font ID.
// Without shape training, each class_id, config pair represents a single
// unichar id/font combination, so this function looks up the corresponding
// font id.
// With shape training, each class_id, config pair represents a single
// shape table index, so the fontset_table stores the shape table index,
// and the shape_table_ must be consulted to obtain the actual unichar_id/
// font combinations that the shape represents.
int ClassAndConfigIDToFontOrShapeID(int class_id, int int_result_config) const;
// Converts a shape_table_ index to a classifier class_id index (not a
// unichar-id!). Uses a search, so not fast.
int ShapeIDToClassID(int shape_id) const;
UNICHAR_ID *BaselineClassifier(TBLOB *Blob, const std::vector<INT_FEATURE_STRUCT> &int_features,
const INT_FX_RESULT_STRUCT &fx_info, ADAPT_TEMPLATES_STRUCT *Templates,
ADAPT_RESULTS *Results);
int CharNormClassifier(TBLOB *blob, const TrainingSample &sample, ADAPT_RESULTS *adapt_results);
// As CharNormClassifier, but operates on a TrainingSample and outputs to
// a vector of ShapeRating without conversion to classes.
int CharNormTrainingSample(bool pruner_only, int keep_this, const TrainingSample &sample,
std::vector<UnicharRating> *results);
UNICHAR_ID *GetAmbiguities(TBLOB *Blob, CLASS_ID CorrectClass);
void DoAdaptiveMatch(TBLOB *Blob, ADAPT_RESULTS *Results);
void AdaptToChar(TBLOB *Blob, CLASS_ID ClassId, int FontinfoId, float Threshold,
ADAPT_TEMPLATES_STRUCT *adaptive_templates);
void DisplayAdaptedChar(TBLOB *blob, INT_CLASS_STRUCT *int_class);
bool AdaptableWord(WERD_RES *word);
void EndAdaptiveClassifier();
void SettupPass1();
void SettupPass2();
void AdaptiveClassifier(TBLOB *Blob, BLOB_CHOICE_LIST *Choices);
void ClassifyAsNoise(ADAPT_RESULTS *Results);
void ResetAdaptiveClassifierInternal();
void SwitchAdaptiveClassifier();
void StartBackupAdaptiveClassifier();
int GetCharNormFeature(const INT_FX_RESULT_STRUCT &fx_info, INT_TEMPLATES_STRUCT *templates,
uint8_t *pruner_norm_array, uint8_t *char_norm_array);
// Computes the char_norm_array for the unicharset and, if not nullptr, the
// pruner_array as appropriate according to the existence of the shape_table.
// The norm_feature is deleted as it is almost certainly no longer needed.
void ComputeCharNormArrays(FEATURE_STRUCT *norm_feature, INT_TEMPLATES_STRUCT *templates,
uint8_t *char_norm_array, uint8_t *pruner_array);
bool TempConfigReliable(CLASS_ID class_id, const TEMP_CONFIG_STRUCT *config);
void UpdateAmbigsGroup(CLASS_ID class_id, TBLOB *Blob);
  // Returns true once any adaptation attempt has been recorded as failed
  // (NumAdaptationsFailed > 0); the caller treats that as the adaptive
  // templates being full, triggering a switch to the backup templates.
  bool AdaptiveClassifierIsFull() const {
    return NumAdaptationsFailed > 0;
  }
  // Returns true if the current adapted templates contain no permanent
  // classes, i.e. nothing has been committed to them yet.
  bool AdaptiveClassifierIsEmpty() const {
    return AdaptedTemplates->NumPermClasses == 0;
  }
bool LooksLikeGarbage(TBLOB *blob);
#ifndef GRAPHICS_DISABLED
void RefreshDebugWindow(ScrollView **win, const char *msg, int y_offset, const TBOX &wbox);
#endif
// intfx.cpp
// Computes the DENORMS for bl(baseline) and cn(character) normalization
// during feature extraction. The input denorm describes the current state
// of the blob, which is usually a baseline-normalized word.
// The Transforms setup are as follows:
// Baseline Normalized (bl) Output:
// We center the grapheme by aligning the x-coordinate of its centroid with
// x=128 and leaving the already-baseline-normalized y as-is.
//
// Character Normalized (cn) Output:
// We align the grapheme's centroid at the origin and scale it
// asymmetrically in x and y so that the 2nd moments are a standard value
// (51.2) ie the result is vaguely square.
// If classify_nonlinear_norm is true:
// A non-linear normalization is setup that attempts to evenly distribute
// edges across x and y.
//
// Some of the fields of fx_info are also setup:
// Length: Total length of outline.
// Rx: Rounded y second moment. (Reversed by convention.)
// Ry: rounded x second moment.
// Xmean: Rounded x center of mass of the blob.
// Ymean: Rounded y center of mass of the blob.
static void SetupBLCNDenorms(const TBLOB &blob, bool nonlinear_norm, DENORM *bl_denorm,
DENORM *cn_denorm, INT_FX_RESULT_STRUCT *fx_info);
// Extracts sets of 3-D features of length kStandardFeatureLength (=12.8), as
// (x,y) position and angle as measured counterclockwise from the vector
// <-1, 0>, from blob using two normalizations defined by bl_denorm and
// cn_denorm. See SetpuBLCNDenorms for definitions.
// If outline_cn_counts is not nullptr, on return it contains the cumulative
// number of cn features generated for each outline in the blob (in order).
// Thus after the first outline, there were (*outline_cn_counts)[0] features,
// after the second outline, there were (*outline_cn_counts)[1] features etc.
static void ExtractFeatures(const TBLOB &blob, bool nonlinear_norm,
std::vector<INT_FEATURE_STRUCT> *bl_features,
std::vector<INT_FEATURE_STRUCT> *cn_features,
INT_FX_RESULT_STRUCT *results, std::vector<int> *outline_cn_counts);
/* float2int.cpp ************************************************************/
void ClearCharNormArray(uint8_t *char_norm_array);
void ComputeIntCharNormArray(const FEATURE_STRUCT &norm_feature, uint8_t *char_norm_array);
void ComputeIntFeatures(FEATURE_SET Features, INT_FEATURE_ARRAY IntFeatures);
/* intproto.cpp *************************************************************/
INT_TEMPLATES_STRUCT *ReadIntTemplates(TFile *fp);
void WriteIntTemplates(FILE *File, INT_TEMPLATES_STRUCT *Templates, const UNICHARSET &target_unicharset);
CLASS_ID GetClassToDebug(const char *Prompt, bool *adaptive_on, bool *pretrained_on,
int *shape_id);
void ShowMatchDisplay();
/* font detection ***********************************************************/
  // Mutable accessor for the font-information table used by font detection.
  UnicityTable<FontInfo> &get_fontinfo_table() {
    return fontinfo_table_;
  }
  // Read-only accessor for the font-information table.
  const UnicityTable<FontInfo> &get_fontinfo_table() const {
    return fontinfo_table_;
  }
  // Mutable accessor for the font-set table. Entries hold font ids per
  // config without shape training, or shape_table_ indices with it (see
  // the comment on the fontset_table_ member).
  UnicityTable<FontSet> &get_fontset_table() {
    return fontset_table_;
  }
/* mfoutline.cpp ***********************************************************/
void NormalizeOutlines(LIST Outlines, float *XScale, float *YScale);
/* outfeat.cpp ***********************************************************/
FEATURE_SET ExtractOutlineFeatures(TBLOB *Blob);
/* picofeat.cpp ***********************************************************/
FEATURE_SET ExtractPicoFeatures(TBLOB *Blob);
FEATURE_SET ExtractIntCNFeatures(const TBLOB &blob, const INT_FX_RESULT_STRUCT &fx_info);
FEATURE_SET ExtractIntGeoFeatures(const TBLOB &blob, const INT_FX_RESULT_STRUCT &fx_info);
/* blobclass.cpp ***********************************************************/
// Extracts features from the given blob and saves them in the tr_file_data_
// member variable.
// fontname: Name of font that this blob was printed in.
// cn_denorm: Character normalization transformation to apply to the blob.
// fx_info: Character normalization parameters computed with cn_denorm.
// blob_text: Ground truth text for the blob.
void LearnBlob(const std::string &fontname, TBLOB *Blob, const DENORM &cn_denorm,
const INT_FX_RESULT_STRUCT &fx_info, const char *blob_text);
// Writes stored training data to a .tr file based on the given filename.
// Returns false on error.
bool WriteTRFile(const char *filename);
// Member variables.
// Parameters.
// Set during training (in lang.config) to indicate whether the divisible
// blobs chopper should be used (true for latin script.)
BOOL_VAR_H(allow_blob_division);
// Set during training (in lang.config) to indicate whether the divisible
// blobs chopper should be used in preference to chopping. Set to true for
// southern Indic scripts.
BOOL_VAR_H(prioritize_division);
BOOL_VAR_H(classify_enable_learning);
INT_VAR_H(classify_debug_level);
/* mfoutline.cpp ***********************************************************/
/* control knobs used to control normalization of outlines */
INT_VAR_H(classify_norm_method);
double_VAR_H(classify_char_norm_range);
double_VAR_H(classify_max_rating_ratio);
double_VAR_H(classify_max_certainty_margin);
/* adaptmatch.cpp ***********************************************************/
BOOL_VAR_H(tess_cn_matching);
BOOL_VAR_H(tess_bn_matching);
BOOL_VAR_H(classify_enable_adaptive_matcher);
BOOL_VAR_H(classify_use_pre_adapted_templates);
BOOL_VAR_H(classify_save_adapted_templates);
BOOL_VAR_H(classify_enable_adaptive_debugger);
BOOL_VAR_H(classify_nonlinear_norm);
INT_VAR_H(matcher_debug_level);
INT_VAR_H(matcher_debug_flags);
INT_VAR_H(classify_learning_debug_level);
double_VAR_H(matcher_good_threshold);
double_VAR_H(matcher_reliable_adaptive_result);
double_VAR_H(matcher_perfect_threshold);
double_VAR_H(matcher_bad_match_pad);
double_VAR_H(matcher_rating_margin);
double_VAR_H(matcher_avg_noise_size);
INT_VAR_H(matcher_permanent_classes_min);
INT_VAR_H(matcher_min_examples_for_prototyping);
INT_VAR_H(matcher_sufficient_examples_for_prototyping);
double_VAR_H(matcher_clustering_max_angle_delta);
double_VAR_H(classify_misfit_junk_penalty);
double_VAR_H(rating_scale);
double_VAR_H(tessedit_class_miss_scale);
double_VAR_H(classify_adapted_pruning_factor);
double_VAR_H(classify_adapted_pruning_threshold);
INT_VAR_H(classify_adapt_proto_threshold);
INT_VAR_H(classify_adapt_feature_threshold);
BOOL_VAR_H(disable_character_fragments);
double_VAR_H(classify_character_fragments_garbage_certainty_threshold);
BOOL_VAR_H(classify_debug_character_fragments);
BOOL_VAR_H(matcher_debug_separate_windows);
STRING_VAR_H(classify_learn_debug_str);
/* intmatcher.cpp **********************************************************/
INT_VAR_H(classify_class_pruner_threshold);
INT_VAR_H(classify_class_pruner_multiplier);
INT_VAR_H(classify_cp_cutoff_strength);
INT_VAR_H(classify_integer_matcher_multiplier);
BOOL_VAR_H(classify_bln_numeric_mode);
double_VAR_H(speckle_large_max_size);
double_VAR_H(speckle_rating_penalty);
// Use class variables to hold onto built-in templates and adapted templates.
INT_TEMPLATES_STRUCT *PreTrainedTemplates = nullptr;
ADAPT_TEMPLATES_STRUCT *AdaptedTemplates = nullptr;
// The backup adapted templates are created from the previous page (only)
// so they are always ready and reasonably well trained if the primary
// adapted templates become full.
ADAPT_TEMPLATES_STRUCT *BackupAdaptedTemplates = nullptr;
// Create dummy proto and config masks for use with the built-in templates.
BIT_VECTOR AllProtosOn = nullptr;
BIT_VECTOR AllConfigsOn = nullptr;
BIT_VECTOR AllConfigsOff = nullptr;
BIT_VECTOR TempProtoMask = nullptr;
/* normmatch.cpp */
NORM_PROTOS *NormProtos = nullptr;
/* font detection ***********************************************************/
UnicityTable<FontInfo> fontinfo_table_;
// Without shape training, each class_id, config pair represents a single
// unichar id/font combination, so each fontset_table_ entry holds font ids
// for each config in the class.
// With shape training, each class_id, config pair represents a single
// shape_table_ index, so the fontset_table_ stores the shape_table_ index,
// and the shape_table_ must be consulted to obtain the actual unichar_id/
// font combinations that the shape represents.
UnicityTable<FontSet> fontset_table_;
protected:
IntegerMatcher im_;
FEATURE_DEFS_STRUCT feature_defs_;
// If a shape_table_ is present, it is used to remap classifier output in
// ExpandShapesAndApplyCorrections. font_ids referenced by configs actually
// mean an index to the shape_table_ and the choices returned are *all* the
// shape_table_ entries at that index.
ShapeTable *shape_table_ = nullptr;
private:
// The currently active static classifier.
ShapeClassifier *static_classifier_ = nullptr;
#ifndef GRAPHICS_DISABLED
ScrollView *learn_debug_win_ = nullptr;
ScrollView *learn_fragmented_word_debug_win_ = nullptr;
ScrollView *learn_fragments_debug_win_ = nullptr;
#endif
// Training data gathered here for all the images in a document.
std::string tr_file_data_;
Dict dict_;
std::vector<uint16_t> shapetable_cutoffs_;
/* variables used to hold performance statistics */
int NumAdaptationsFailed = 0;
// Expected number of features in the class pruner, used to penalize
// unknowns that have too few features (like a c being classified as e) so
// it doesn't recognize everything as '@' or '#'.
// CharNormCutoffs is for the static classifier (with no shapetable).
// BaselineCutoffs gets a copy of CharNormCutoffs as an estimate of the real
// value in the adaptive classifier. Both are indexed by unichar_id.
// shapetable_cutoffs_ provides a similar value for each shape in the
// shape_table_
uint16_t CharNormCutoffs[MAX_NUM_CLASSES];
uint16_t BaselineCutoffs[MAX_NUM_CLASSES];
public:
bool EnableLearning = true;
};
} // namespace tesseract
#endif // DISABLED_LEGACY_ENGINE
#endif // TESSERACT_CLASSIFY_CLASSIFY_H_
|
2301_81045437/tesseract
|
src/classify/classify.h
|
C++
|
apache-2.0
| 23,787
|
/******************************************************************************
** Filename: cluster.cpp
** Purpose: Routines for clustering points in N-D space
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#define _USE_MATH_DEFINES // for M_PI
#include "cluster.h"
#include "genericheap.h"
#include "kdpair.h"
#include "matrix.h"
#include "tprintf.h"
#include "helpers.h"
#include <cfloat> // for FLT_MAX
#include <cmath> // for M_PI
#include <vector> // for std::vector
namespace tesseract {
#define HOTELLING 1 // If true use Hotelling's test to decide where to split.
#define FTABLE_X 10 // Size of FTable.
#define FTABLE_Y 100 // Size of FTable.
// Approximate critical values of the F-distribution at the 1% significance
// level (99% confidence). Rows are indexed by denominator degrees of freedom
// (1..FTABLE_Y) and columns by numerator degrees of freedom (1..FTABLE_X).
const double FTable[FTABLE_Y][FTABLE_X] = {
{
4052.19,
4999.52,
5403.34,
5624.62,
5763.65,
5858.97,
5928.33,
5981.10,
6022.50,
6055.85,
},
{
98.502,
99.000,
99.166,
99.249,
99.300,
99.333,
99.356,
99.374,
99.388,
99.399,
},
{
34.116,
30.816,
29.457,
28.710,
28.237,
27.911,
27.672,
27.489,
27.345,
27.229,
},
{
21.198,
18.000,
16.694,
15.977,
15.522,
15.207,
14.976,
14.799,
14.659,
14.546,
},
{
16.258,
13.274,
12.060,
11.392,
10.967,
10.672,
10.456,
10.289,
10.158,
10.051,
},
{
13.745,
10.925,
9.780,
9.148,
8.746,
8.466,
8.260,
8.102,
7.976,
7.874,
},
{
12.246,
9.547,
8.451,
7.847,
7.460,
7.191,
6.993,
6.840,
6.719,
6.620,
},
{
11.259,
8.649,
7.591,
7.006,
6.632,
6.371,
6.178,
6.029,
5.911,
5.814,
},
{
10.561,
8.022,
6.992,
6.422,
6.057,
5.802,
5.613,
5.467,
5.351,
5.257,
},
{
10.044,
7.559,
6.552,
5.994,
5.636,
5.386,
5.200,
5.057,
4.942,
4.849,
},
{
9.646,
7.206,
6.217,
5.668,
5.316,
5.069,
4.886,
4.744,
4.632,
4.539,
},
{
9.330,
6.927,
5.953,
5.412,
5.064,
4.821,
4.640,
4.499,
4.388,
4.296,
},
{
9.074,
6.701,
5.739,
5.205,
4.862,
4.620,
4.441,
4.302,
4.191,
4.100,
},
{
8.862,
6.515,
5.564,
5.035,
4.695,
4.456,
4.278,
4.140,
4.030,
3.939,
},
{
8.683,
6.359,
5.417,
4.893,
4.556,
4.318,
4.142,
4.004,
3.895,
3.805,
},
{
8.531,
6.226,
5.292,
4.773,
4.437,
4.202,
4.026,
3.890,
3.780,
3.691,
},
{
8.400,
6.112,
5.185,
4.669,
4.336,
4.102,
3.927,
3.791,
3.682,
3.593,
},
{
8.285,
6.013,
5.092,
4.579,
4.248,
4.015,
3.841,
3.705,
3.597,
3.508,
},
{
8.185,
5.926,
5.010,
4.500,
4.171,
3.939,
3.765,
3.631,
3.523,
3.434,
},
{
8.096,
5.849,
4.938,
4.431,
4.103,
3.871,
3.699,
3.564,
3.457,
3.368,
},
{
8.017,
5.780,
4.874,
4.369,
4.042,
3.812,
3.640,
3.506,
3.398,
3.310,
},
{
7.945,
5.719,
4.817,
4.313,
3.988,
3.758,
3.587,
3.453,
3.346,
3.258,
},
{
7.881,
5.664,
4.765,
4.264,
3.939,
3.710,
3.539,
3.406,
3.299,
3.211,
},
{
7.823,
5.614,
4.718,
4.218,
3.895,
3.667,
3.496,
3.363,
3.256,
3.168,
},
{
7.770,
5.568,
4.675,
4.177,
3.855,
3.627,
3.457,
3.324,
3.217,
3.129,
},
{
7.721,
5.526,
4.637,
4.140,
3.818,
3.591,
3.421,
3.288,
3.182,
3.094,
},
{
7.677,
5.488,
4.601,
4.106,
3.785,
3.558,
3.388,
3.256,
3.149,
3.062,
},
{
7.636,
5.453,
4.568,
4.074,
3.754,
3.528,
3.358,
3.226,
3.120,
3.032,
},
{
7.598,
5.420,
4.538,
4.045,
3.725,
3.499,
3.330,
3.198,
3.092,
3.005,
},
{
7.562,
5.390,
4.510,
4.018,
3.699,
3.473,
3.305,
3.173,
3.067,
2.979,
},
{
7.530,
5.362,
4.484,
3.993,
3.675,
3.449,
3.281,
3.149,
3.043,
2.955,
},
{
7.499,
5.336,
4.459,
3.969,
3.652,
3.427,
3.258,
3.127,
3.021,
2.934,
},
{
7.471,
5.312,
4.437,
3.948,
3.630,
3.406,
3.238,
3.106,
3.000,
2.913,
},
{
7.444,
5.289,
4.416,
3.927,
3.611,
3.386,
3.218,
3.087,
2.981,
2.894,
},
{
7.419,
5.268,
4.396,
3.908,
3.592,
3.368,
3.200,
3.069,
2.963,
2.876,
},
{
7.396,
5.248,
4.377,
3.890,
3.574,
3.351,
3.183,
3.052,
2.946,
2.859,
},
{
7.373,
5.229,
4.360,
3.873,
3.558,
3.334,
3.167,
3.036,
2.930,
2.843,
},
{
7.353,
5.211,
4.343,
3.858,
3.542,
3.319,
3.152,
3.021,
2.915,
2.828,
},
{
7.333,
5.194,
4.327,
3.843,
3.528,
3.305,
3.137,
3.006,
2.901,
2.814,
},
{
7.314,
5.179,
4.313,
3.828,
3.514,
3.291,
3.124,
2.993,
2.888,
2.801,
},
{
7.296,
5.163,
4.299,
3.815,
3.501,
3.278,
3.111,
2.980,
2.875,
2.788,
},
{
7.280,
5.149,
4.285,
3.802,
3.488,
3.266,
3.099,
2.968,
2.863,
2.776,
},
{
7.264,
5.136,
4.273,
3.790,
3.476,
3.254,
3.087,
2.957,
2.851,
2.764,
},
{
7.248,
5.123,
4.261,
3.778,
3.465,
3.243,
3.076,
2.946,
2.840,
2.754,
},
{
7.234,
5.110,
4.249,
3.767,
3.454,
3.232,
3.066,
2.935,
2.830,
2.743,
},
{
7.220,
5.099,
4.238,
3.757,
3.444,
3.222,
3.056,
2.925,
2.820,
2.733,
},
{
7.207,
5.087,
4.228,
3.747,
3.434,
3.213,
3.046,
2.916,
2.811,
2.724,
},
{
7.194,
5.077,
4.218,
3.737,
3.425,
3.204,
3.037,
2.907,
2.802,
2.715,
},
{
7.182,
5.066,
4.208,
3.728,
3.416,
3.195,
3.028,
2.898,
2.793,
2.706,
},
{
7.171,
5.057,
4.199,
3.720,
3.408,
3.186,
3.020,
2.890,
2.785,
2.698,
},
{
7.159,
5.047,
4.191,
3.711,
3.400,
3.178,
3.012,
2.882,
2.777,
2.690,
},
{
7.149,
5.038,
4.182,
3.703,
3.392,
3.171,
3.005,
2.874,
2.769,
2.683,
},
{
7.139,
5.030,
4.174,
3.695,
3.384,
3.163,
2.997,
2.867,
2.762,
2.675,
},
{
7.129,
5.021,
4.167,
3.688,
3.377,
3.156,
2.990,
2.860,
2.755,
2.668,
},
{
7.119,
5.013,
4.159,
3.681,
3.370,
3.149,
2.983,
2.853,
2.748,
2.662,
},
{
7.110,
5.006,
4.152,
3.674,
3.363,
3.143,
2.977,
2.847,
2.742,
2.655,
},
{
7.102,
4.998,
4.145,
3.667,
3.357,
3.136,
2.971,
2.841,
2.736,
2.649,
},
{
7.093,
4.991,
4.138,
3.661,
3.351,
3.130,
2.965,
2.835,
2.730,
2.643,
},
{
7.085,
4.984,
4.132,
3.655,
3.345,
3.124,
2.959,
2.829,
2.724,
2.637,
},
{
7.077,
4.977,
4.126,
3.649,
3.339,
3.119,
2.953,
2.823,
2.718,
2.632,
},
{
7.070,
4.971,
4.120,
3.643,
3.333,
3.113,
2.948,
2.818,
2.713,
2.626,
},
{
7.062,
4.965,
4.114,
3.638,
3.328,
3.108,
2.942,
2.813,
2.708,
2.621,
},
{
7.055,
4.959,
4.109,
3.632,
3.323,
3.103,
2.937,
2.808,
2.703,
2.616,
},
{
7.048,
4.953,
4.103,
3.627,
3.318,
3.098,
2.932,
2.803,
2.698,
2.611,
},
{
7.042,
4.947,
4.098,
3.622,
3.313,
3.093,
2.928,
2.798,
2.693,
2.607,
},
{
7.035,
4.942,
4.093,
3.618,
3.308,
3.088,
2.923,
2.793,
2.689,
2.602,
},
{
7.029,
4.937,
4.088,
3.613,
3.304,
3.084,
2.919,
2.789,
2.684,
2.598,
},
{
7.023,
4.932,
4.083,
3.608,
3.299,
3.080,
2.914,
2.785,
2.680,
2.593,
},
{
7.017,
4.927,
4.079,
3.604,
3.295,
3.075,
2.910,
2.781,
2.676,
2.589,
},
{
7.011,
4.922,
4.074,
3.600,
3.291,
3.071,
2.906,
2.777,
2.672,
2.585,
},
{
7.006,
4.917,
4.070,
3.596,
3.287,
3.067,
2.902,
2.773,
2.668,
2.581,
},
{
7.001,
4.913,
4.066,
3.591,
3.283,
3.063,
2.898,
2.769,
2.664,
2.578,
},
{
6.995,
4.908,
4.062,
3.588,
3.279,
3.060,
2.895,
2.765,
2.660,
2.574,
},
{
6.990,
4.904,
4.058,
3.584,
3.275,
3.056,
2.891,
2.762,
2.657,
2.570,
},
{
6.985,
4.900,
4.054,
3.580,
3.272,
3.052,
2.887,
2.758,
2.653,
2.567,
},
{
6.981,
4.896,
4.050,
3.577,
3.268,
3.049,
2.884,
2.755,
2.650,
2.563,
},
{
6.976,
4.892,
4.047,
3.573,
3.265,
3.046,
2.881,
2.751,
2.647,
2.560,
},
{
6.971,
4.888,
4.043,
3.570,
3.261,
3.042,
2.877,
2.748,
2.644,
2.557,
},
{
6.967,
4.884,
4.040,
3.566,
3.258,
3.039,
2.874,
2.745,
2.640,
2.554,
},
{
6.963,
4.881,
4.036,
3.563,
3.255,
3.036,
2.871,
2.742,
2.637,
2.551,
},
{
6.958,
4.877,
4.033,
3.560,
3.252,
3.033,
2.868,
2.739,
2.634,
2.548,
},
{
6.954,
4.874,
4.030,
3.557,
3.249,
3.030,
2.865,
2.736,
2.632,
2.545,
},
{
6.950,
4.870,
4.027,
3.554,
3.246,
3.027,
2.863,
2.733,
2.629,
2.542,
},
{
6.947,
4.867,
4.024,
3.551,
3.243,
3.025,
2.860,
2.731,
2.626,
2.539,
},
{
6.943,
4.864,
4.021,
3.548,
3.240,
3.022,
2.857,
2.728,
2.623,
2.537,
},
{
6.939,
4.861,
4.018,
3.545,
3.238,
3.019,
2.854,
2.725,
2.621,
2.534,
},
{
6.935,
4.858,
4.015,
3.543,
3.235,
3.017,
2.852,
2.723,
2.618,
2.532,
},
{
6.932,
4.855,
4.012,
3.540,
3.233,
3.014,
2.849,
2.720,
2.616,
2.529,
},
{
6.928,
4.852,
4.010,
3.538,
3.230,
3.012,
2.847,
2.718,
2.613,
2.527,
},
{
6.925,
4.849,
4.007,
3.535,
3.228,
3.009,
2.845,
2.715,
2.611,
2.524,
},
{
6.922,
4.846,
4.004,
3.533,
3.225,
3.007,
2.842,
2.713,
2.609,
2.522,
},
{
6.919,
4.844,
4.002,
3.530,
3.223,
3.004,
2.840,
2.711,
2.606,
2.520,
},
{
6.915,
4.841,
3.999,
3.528,
3.221,
3.002,
2.838,
2.709,
2.604,
2.518,
},
{
6.912,
4.838,
3.997,
3.525,
3.218,
3.000,
2.835,
2.706,
2.602,
2.515,
},
{
6.909,
4.836,
3.995,
3.523,
3.216,
2.998,
2.833,
2.704,
2.600,
2.513,
},
{
6.906,
4.833,
3.992,
3.521,
3.214,
2.996,
2.831,
2.702,
2.598,
2.511,
},
{
6.904,
4.831,
3.990,
3.519,
3.212,
2.994,
2.829,
2.700,
2.596,
2.509,
},
{
6.901,
4.829,
3.988,
3.517,
3.210,
2.992,
2.827,
2.698,
2.594,
2.507,
},
{
6.898,
4.826,
3.986,
3.515,
3.208,
2.990,
2.825,
2.696,
2.592,
2.505,
},
{6.895, 4.824, 3.984, 3.513, 3.206, 2.988, 2.823, 2.694, 2.590, 2.503}};
/** define the variance which will be used as a minimum variance for any
dimension of any feature. Since most features are calculated from numbers
with a precision no better than 1 in 128, the variance should never be
less than the square of this number for parameters whose range is 1. */
#define MINVARIANCE 0.0004
/** define the absolute minimum number of samples which must be present in
order to accurately test hypotheses about underlying probability
distributions. Define separately the minimum samples that are needed
before a statistical analysis is attempted; this number should be
equal to MINSAMPLES but can be set to a lower number for early testing
when very few samples are available. */
#define MINSAMPLESPERBUCKET 5
#define MINSAMPLES (MINBUCKETS * MINSAMPLESPERBUCKET)
#define MINSAMPLESNEEDED 1
/** define the size of the table which maps normalized samples to
histogram buckets. Also define the number of standard deviations
in a normal distribution which are considered to be significant.
The mapping table will be defined in such a way that it covers
the specified number of standard deviations on either side of
the mean. BUCKETTABLESIZE should always be even. */
#define BUCKETTABLESIZE 1024
#define NORMALEXTENT 3.0
// A potential merge during bottom-up cluster-tree construction: a cluster
// paired with its current nearest neighbor (see MakePotentialClusters).
struct TEMPCLUSTER {
  CLUSTER *Cluster; // cluster being considered for merging
  CLUSTER *Neighbor; // its nearest neighbor in the kd-tree
};
using ClusterPair = tesseract::KDPairInc<float, TEMPCLUSTER *>;
using ClusterHeap = tesseract::GenericHeap<ClusterPair>;
// Summary statistics for the samples under a cluster in an n-dimensional
// feature space. CoVariance is the flattened n x n covariance matrix.
struct STATISTICS {
  // Sizes the containers for an n-dimensional feature space.
  STATISTICS(size_t n) : CoVariance(n * n), Min(n), Max(n) {
  }
  float AvgVariance = 1.0f;
  std::vector<float> CoVariance;
  std::vector<float> Min; // largest negative distance from the mean
  std::vector<float> Max; // largest positive distance from the mean
};
// Histogram used to test whether a set of samples matches a hypothesized
// probability distribution (filled and checked by the bucket routines,
// e.g. FillBuckets / DistributionOK).
struct BUCKETS {
  // Allocates a histogram with n cells; the Bucket mapping table and the
  // expected counts are filled in later by the bucket-construction code.
  BUCKETS(size_t n) : NumberOfBuckets(n), Count(n), ExpectedCount(n) {
  }
  // No user-declared destructor (Rule of Zero): the vectors release their
  // own storage, and omitting it keeps the implicit move operations that a
  // user-provided (even empty) destructor would suppress.
  DISTRIBUTION Distribution = normal; // distribution being tested for
  uint32_t SampleCount = 0; // # of samples in histogram
  double Confidence = 0.0; // confidence level of test
  double ChiSquared = 0.0; // test threshold
  uint16_t NumberOfBuckets; // number of cells in histogram
  uint16_t Bucket[BUCKETTABLESIZE]; // mapping to histogram buckets
  std::vector<uint32_t> Count; // frequency of occurrence histogram
  std::vector<float> ExpectedCount; // expected histogram
};
// Holds a chi-squared value together with its degrees of freedom and the
// alpha (significance) level it corresponds to.
struct CHISTRUCT {
  /// This constructor allocates a new data structure which is used
  /// to hold a chi-squared value along with its associated
  /// number of degrees of freedom and alpha value.
  ///
  /// @param degreesOfFreedom degrees of freedom for new chi value
  /// @param alpha confidence level for new chi value
  CHISTRUCT(uint16_t degreesOfFreedom, double alpha) : DegreesOfFreedom(degreesOfFreedom), Alpha(alpha) {
  }
  uint16_t DegreesOfFreedom = 0; // degrees of freedom of the distribution
  double Alpha = 0.0; // significance level associated with ChiSquared
  double ChiSquared = 0.0; // chi-squared value; filled in after construction
};
// For use with KDWalk / MakePotentialClusters
struct ClusteringContext {
ClusterHeap *heap; // heap used to hold temp clusters, "best" on top
TEMPCLUSTER *candidates; // array of potential clusters
KDTREE *tree; // kd-tree to be searched for neighbors
int32_t next; // next candidate to be used
};
using DENSITYFUNC = double (*)(int32_t);
using SOLVEFUNC = double (*)(CHISTRUCT *, double);
#define Odd(N) ((N) % 2)
#define Mirror(N, R) ((R) - (N)-1)
#define Abs(N) (((N) < 0) ? (-(N)) : (N))
//--------------Global Data Definitions and Declarations----------------------
/** the following variables describe a discrete normal distribution
which is used by NormalDensity() and NormalBucket(). The
constant NORMALEXTENT determines how many standard
deviations of the distribution are mapped onto the fixed
discrete range of x. x=0 is mapped to -NORMALEXTENT standard
deviations and x=BUCKETTABLESIZE is mapped to
+NORMALEXTENT standard deviations. */
#define SqrtOf2Pi 2.506628275
static const double kNormalStdDev = BUCKETTABLESIZE / (2.0 * NORMALEXTENT);
static const double kNormalVariance =
(BUCKETTABLESIZE * BUCKETTABLESIZE) / (4.0 * NORMALEXTENT * NORMALEXTENT);
static const double kNormalMagnitude = (2.0 * NORMALEXTENT) / (SqrtOf2Pi * BUCKETTABLESIZE);
static const double kNormalMean = BUCKETTABLESIZE / 2;
/** define lookup tables used to compute the number of histogram buckets
that should be used for a given number of samples. */
#define LOOKUPTABLESIZE 8
#define MAXDEGREESOFFREEDOM MAXBUCKETS
static const uint32_t kCountTable[LOOKUPTABLESIZE] = {MINSAMPLES, 200, 400, 600, 800,
1000, 1500, 2000}; // number of samples
static const uint16_t kBucketsTable[LOOKUPTABLESIZE] = {
MINBUCKETS, 16, 20, 24, 27, 30, 35, MAXBUCKETS}; // number of buckets
/*-------------------------------------------------------------------------
Private Function Prototypes
--------------------------------------------------------------------------*/
static void CreateClusterTree(CLUSTERER *Clusterer);
static void MakePotentialClusters(ClusteringContext *context, CLUSTER *Cluster, int32_t Level);
static CLUSTER *FindNearestNeighbor(KDTREE *Tree, CLUSTER *Cluster, float *Distance);
static CLUSTER *MakeNewCluster(CLUSTERER *Clusterer, TEMPCLUSTER *TempCluster);
static void ComputePrototypes(CLUSTERER *Clusterer, CLUSTERCONFIG *Config);
static PROTOTYPE *MakePrototype(CLUSTERER *Clusterer, CLUSTERCONFIG *Config, CLUSTER *Cluster);
static PROTOTYPE *MakeDegenerateProto(uint16_t N, CLUSTER *Cluster, STATISTICS *Statistics,
PROTOSTYLE Style, int32_t MinSamples);
static PROTOTYPE *TestEllipticalProto(CLUSTERER *Clusterer, CLUSTERCONFIG *Config, CLUSTER *Cluster,
STATISTICS *Statistics);
static PROTOTYPE *MakeSphericalProto(CLUSTERER *Clusterer, CLUSTER *Cluster, STATISTICS *Statistics,
BUCKETS *Buckets);
static PROTOTYPE *MakeEllipticalProto(CLUSTERER *Clusterer, CLUSTER *Cluster,
STATISTICS *Statistics, BUCKETS *Buckets);
static PROTOTYPE *MakeMixedProto(CLUSTERER *Clusterer, CLUSTER *Cluster, STATISTICS *Statistics,
BUCKETS *NormalBuckets, double Confidence);
static void MakeDimRandom(uint16_t i, PROTOTYPE *Proto, PARAM_DESC *ParamDesc);
static void MakeDimUniform(uint16_t i, PROTOTYPE *Proto, STATISTICS *Statistics);
static STATISTICS *ComputeStatistics(int16_t N, PARAM_DESC ParamDesc[], CLUSTER *Cluster);
static PROTOTYPE *NewSphericalProto(uint16_t N, CLUSTER *Cluster, STATISTICS *Statistics);
static PROTOTYPE *NewEllipticalProto(int16_t N, CLUSTER *Cluster, STATISTICS *Statistics);
static PROTOTYPE *NewMixedProto(int16_t N, CLUSTER *Cluster, STATISTICS *Statistics);
static PROTOTYPE *NewSimpleProto(int16_t N, CLUSTER *Cluster);
static bool Independent(PARAM_DESC *ParamDesc, int16_t N, float *CoVariance, float Independence);
static BUCKETS *GetBuckets(CLUSTERER *clusterer, DISTRIBUTION Distribution, uint32_t SampleCount,
double Confidence);
static BUCKETS *MakeBuckets(DISTRIBUTION Distribution, uint32_t SampleCount, double Confidence);
static uint16_t OptimumNumberOfBuckets(uint32_t SampleCount);
static double ComputeChiSquared(uint16_t DegreesOfFreedom, double Alpha);
static double NormalDensity(int32_t x);
static double UniformDensity(int32_t x);
static double Integral(double f1, double f2, double Dx);
static void FillBuckets(BUCKETS *Buckets, CLUSTER *Cluster, uint16_t Dim, PARAM_DESC *ParamDesc,
float Mean, float StdDev);
static uint16_t NormalBucket(PARAM_DESC *ParamDesc, float x, float Mean, float StdDev);
static uint16_t UniformBucket(PARAM_DESC *ParamDesc, float x, float Mean, float StdDev);
static bool DistributionOK(BUCKETS *Buckets);
static uint16_t DegreesOfFreedom(DISTRIBUTION Distribution, uint16_t HistogramBuckets);
static void AdjustBuckets(BUCKETS *Buckets, uint32_t NewSampleCount);
static void InitBuckets(BUCKETS *Buckets);
static int AlphaMatch(void *arg1, // CHISTRUCT *ChiStruct,
void *arg2); // CHISTRUCT *SearchKey);
static double Solve(SOLVEFUNC Function, void *FunctionParams, double InitialGuess, double Accuracy);
static double ChiArea(CHISTRUCT *ChiParams, double x);
static bool MultipleCharSamples(CLUSTERER *Clusterer, CLUSTER *Cluster, float MaxIllegal);
static double InvertMatrix(const float *input, int size, float *inv);
//--------------------------Public Code--------------------------------------
/**
* This routine creates a new clusterer data structure,
* initializes it, and returns a pointer to it.
*
* @param SampleSize number of dimensions in feature space
* @param ParamDesc description of each dimension
* @return pointer to the new clusterer data structure
*/
/**
 * Creates a new clusterer data structure, initializes it, and
 * returns a pointer to it. The caller owns the returned object.
 *
 * @param SampleSize number of dimensions in feature space
 * @param ParamDesc description of each dimension
 * @return pointer to the new clusterer data structure
 */
CLUSTERER *MakeClusterer(int16_t SampleSize, const PARAM_DESC ParamDesc[]) {
  // Allocate the main clusterer structure and set the simple fields.
  auto clusterer = new CLUSTERER;
  clusterer->SampleSize = SampleSize;
  clusterer->NumberOfSamples = 0;
  clusterer->NumChar = 0;
  // Fields that are not needed until clustering starts begin empty.
  clusterer->Root = nullptr;
  clusterer->ProtoList = NIL_LIST;
  // Keep a private copy of the parameter descriptors, with the derived
  // range fields computed up front.
  clusterer->ParamDesc = new PARAM_DESC[SampleSize];
  for (int dim = 0; dim < SampleSize; ++dim) {
    const PARAM_DESC &src = ParamDesc[dim];
    PARAM_DESC &dst = clusterer->ParamDesc[dim];
    dst.Circular = src.Circular;
    dst.NonEssential = src.NonEssential;
    dst.Min = src.Min;
    dst.Max = src.Max;
    dst.Range = src.Max - src.Min;
    dst.HalfRange = dst.Range / 2;
    dst.MidRange = (src.Max + src.Min) / 2;
  }
  // Allocate a kd-tree to hold the samples.
  clusterer->KDTree = MakeKDTree(SampleSize, ParamDesc);
  // Start with an empty cache of histogram buckets so that they are
  // computed on demand and then reused.
  for (auto &distribution_cache : clusterer->bucket_cache) {
    for (auto &bucket : distribution_cache) {
      bucket = nullptr;
    }
  }
  return clusterer;
} // MakeClusterer
/**
* This routine creates a new sample data structure to hold
* the specified feature. This sample is added to the clusterer
* data structure (so that it knows which samples are to be
* clustered later), and a pointer to the sample is returned to
* the caller.
*
* @param Clusterer clusterer data structure to add sample to
* @param Feature feature to be added to clusterer
* @param CharID unique ident. of char that sample came from
*
* @return Pointer to the new sample data structure
*/
SAMPLE *MakeSample(CLUSTERER *Clusterer, const float *Feature, uint32_t CharID) {
  // Samples cannot be added once the samples have been clustered.
  ASSERT_HOST(Clusterer->Root == nullptr);

  // Build the new leaf sample and copy the feature vector into it.
  auto *Sample = new SAMPLE(Clusterer->SampleSize);
  Sample->Clustered = false;
  Sample->Prototype = false;
  Sample->SampleCount = 1;
  Sample->Left = nullptr;
  Sample->Right = nullptr;
  Sample->CharID = CharID;
  for (int dim = 0; dim < Clusterer->SampleSize; ++dim) {
    Sample->Mean[dim] = Feature[dim];
  }

  // Insert the sample into the kd-tree and update the running totals.
  Clusterer->NumberOfSamples++;
  KDStore(Clusterer->KDTree, &Sample->Mean[0], Sample);
  if (CharID >= Clusterer->NumChar) {
    Clusterer->NumChar = CharID + 1;
  }

  // execute hook for monitoring clustering operation
  // (*SampleCreationHook)(Sample);
  return Sample;
} // MakeSample
/**
* This routine first checks to see if the samples in this
* clusterer have already been clustered before; if so, it does
* not bother to recreate the cluster tree. It simply recomputes
* the prototypes based on the new Config info.
*
* If the samples have not been clustered before, the
* samples in the KD tree are formed into a cluster tree and then
* the prototypes are computed from the cluster tree.
*
* In either case this routine returns a pointer to a
* list of prototypes that best represent the samples given
* the constraints specified in Config.
*
* @param Clusterer data struct containing samples to be clustered
* @param Config parameters which control clustering process
*
* @return Pointer to a list of prototypes
*/
LIST ClusterSamples(CLUSTERER *Clusterer, CLUSTERCONFIG *Config) {
  // Build the cluster tree only on the first call; later calls merely
  // recompute the prototypes under the (possibly new) Config.
  if (Clusterer->Root == nullptr) {
    CreateClusterTree(Clusterer);
  }

  // Throw away any prototype list from a previous configuration.
  FreeProtoList(&Clusterer->ProtoList);
  Clusterer->ProtoList = NIL_LIST;

  // Derive a fresh prototype list from the cluster tree.
  ComputePrototypes(Clusterer, Config);

  // Null the back-pointers into the cluster tree so that deleting the
  // clusterer cannot leave dangling pointers inside the protos.
  LIST remaining = Clusterer->ProtoList;
  iterate(remaining) {
    auto *proto = reinterpret_cast<PROTOTYPE *>(remaining->first_node());
    proto->Cluster = nullptr;
  }
  return Clusterer->ProtoList;
} // ClusterSamples
/**
* This routine frees all of the memory allocated to the
* specified data structure. It will not, however, free
* the memory used by the prototype list. The pointers to
* the clusters for each prototype in the list will be set
* to nullptr to indicate that the cluster data structures no
* longer exist. Any sample lists that have been obtained
* via calls to GetSamples are no longer valid.
* @param Clusterer pointer to data structure to be freed
*/
void FreeClusterer(CLUSTERER *Clusterer) {
  // Deleting a null clusterer is a harmless no-op.
  if (Clusterer == nullptr) {
    return;
  }
  delete[] Clusterer->ParamDesc;
  delete Clusterer->KDTree;
  delete Clusterer->Root;
  // Release every buckets structure held in the cache.
  for (auto &distribution_row : Clusterer->bucket_cache) {
    for (auto &entry : distribution_row) {
      delete entry;
    }
  }
  delete Clusterer;
} // FreeClusterer
/**
* This routine frees all of the memory allocated to the
* specified list of prototypes. The clusters which are
* pointed to by the prototypes are not freed.
* @param ProtoList pointer to list of prototypes to be freed
*/
void FreeProtoList(LIST *ProtoList) {
  // Apply FreePrototype to every list node, then destroy the list itself.
  destroy_nodes(*ProtoList, FreePrototype);
} // FreeProtoList
/**
* This routine deallocates the memory consumed by the specified
* prototype and modifies the corresponding cluster so that it
* is no longer marked as a prototype. The cluster is NOT
* deallocated by this routine.
* @param arg prototype data structure to be deallocated
*/
void FreePrototype(void *arg) { // PROTOTYPE *Prototype)
  auto *Prototype = static_cast<PROTOTYPE *>(arg);
  // unmark the corresponding cluster (if there is one)
  if (Prototype->Cluster != nullptr) {
    Prototype->Cluster->Prototype = false;
  }
  // deallocate the prototype statistics and then the prototype itself;
  // spherical protos store their statistics inline, every other style
  // owns heap-allocated per-dimension arrays which must be released
  if (Prototype->Style != spherical) {
    delete[] Prototype->Variance.Elliptical;
    delete[] Prototype->Magnitude.Elliptical;
    delete[] Prototype->Weight.Elliptical;
  }
  delete Prototype;
} // FreePrototype
/**
* This routine is used to find all of the samples which
* belong to a cluster. It starts by removing the top
* cluster on the cluster list (SearchState). If this cluster is
* a leaf it is returned. Otherwise, the right subcluster
* is pushed on the list and we continue the search in the
* left subcluster. This continues until a leaf is found.
* If all samples have been found, nullptr is returned.
* InitSampleSearch() must be called
* before NextSample() to initialize the search.
* @param SearchState ptr to list containing clusters to be searched
* @return Pointer to the next leaf cluster (sample) or nullptr.
*/
CLUSTER *NextSample(LIST *SearchState) {
  // An empty search stack means every sample has been visited.
  if (*SearchState == NIL_LIST) {
    return nullptr;
  }
  // Pop the next pending cluster off the search stack.
  auto *node = reinterpret_cast<CLUSTER *>((*SearchState)->first_node());
  *SearchState = pop(*SearchState);
  // Descend left until a leaf (an original sample) is reached, saving
  // each right subtree on the stack for a later visit.
  while (node->Left != nullptr) {
    *SearchState = push(*SearchState, node->Right);
    node = node->Left;
  }
  return node;
} // NextSample
/**
* This routine returns the mean of the specified
* prototype in the indicated dimension.
* @param Proto prototype to return mean of
* @param Dimension dimension whose mean is to be returned
* @return Mean of Prototype in Dimension
*/
float Mean(PROTOTYPE *Proto, uint16_t Dimension) {
  // The mean vector is stored directly on the prototype.
  return Proto->Mean[Dimension];
} // Mean
/**
* This routine returns the standard deviation of the
* prototype in the indicated dimension.
* @param Proto prototype to return standard deviation of
* @param Dimension dimension whose stddev is to be returned
* @return Standard deviation of Prototype in Dimension
*/
float StandardDeviation(PROTOTYPE *Proto, uint16_t Dimension) {
  if (Proto->Style == spherical) {
    // One shared variance covers every dimension.
    return std::sqrt(Proto->Variance.Spherical);
  }
  if (Proto->Style == elliptical) {
    return std::sqrt(Proto->Variance.Elliptical[Dimension]);
  }
  if (Proto->Style == mixed) {
    switch (Proto->Distrib[Dimension]) {
      case normal:
        return std::sqrt(Proto->Variance.Elliptical[Dimension]);
      case uniform:
      case D_random:
        // For these distributions the stored value is a half-range
        // (see MakeDimRandom/MakeDimUniform), so no square root.
        return Proto->Variance.Elliptical[Dimension];
      case DISTRIBUTION_COUNT:
        ASSERT_HOST(!"Distribution count not allowed!");
    }
  }
  return 0.0f;
} // StandardDeviation
/*---------------------------------------------------------------------------
Private Code
----------------------------------------------------------------------------*/
/**
* This routine performs a bottoms-up clustering on the samples
* held in the kd-tree of the Clusterer data structure. The
* result is a cluster tree. Each node in the tree represents
* a cluster which conceptually contains a subset of the samples.
* More precisely, the cluster contains all of the samples which
* are contained in its two sub-clusters. The leaves of the
* tree are the individual samples themselves; they have no
* sub-clusters. The root node of the tree conceptually contains
* all of the samples.
* The Clusterer data structure is changed.
* @param Clusterer data structure holdings samples to be clustered
*/
static void CreateClusterTree(CLUSTERER *Clusterer) {
  ClusteringContext context;
  ClusterPair HeapEntry;
  // each sample and its nearest neighbor form a "potential" cluster
  // save these in a heap with the "best" potential clusters on top
  context.tree = Clusterer->KDTree;
  context.candidates = new TEMPCLUSTER[Clusterer->NumberOfSamples];
  context.next = 0;
  context.heap = new ClusterHeap(Clusterer->NumberOfSamples);
  KDWalk(context.tree, MakePotentialClusters, &context);
  // form potential clusters into actual clusters - always do "best" first
  while (context.heap->Pop(&HeapEntry)) {
    TEMPCLUSTER *PotentialCluster = HeapEntry.data();
    // if main cluster of potential cluster is already in another cluster
    // then we don't need to worry about it
    if (PotentialCluster->Cluster->Clustered) {
      continue;
    }
    // if main cluster is not yet clustered, but its nearest neighbor is
    // then we must find a new nearest neighbor
    else if (PotentialCluster->Neighbor->Clustered) {
      PotentialCluster->Neighbor =
          FindNearestNeighbor(context.tree, PotentialCluster->Cluster, &HeapEntry.key());
      // re-queue the pair under the key updated by FindNearestNeighbor
      if (PotentialCluster->Neighbor != nullptr) {
        context.heap->Push(&HeapEntry);
      }
    }
    // if neither cluster is already clustered, form permanent cluster
    else {
      PotentialCluster->Cluster = MakeNewCluster(Clusterer, PotentialCluster);
      // the merged cluster itself becomes a candidate for further merging
      PotentialCluster->Neighbor =
          FindNearestNeighbor(context.tree, PotentialCluster->Cluster, &HeapEntry.key());
      if (PotentialCluster->Neighbor != nullptr) {
        context.heap->Push(&HeapEntry);
      }
    }
  }
  // the root node in the cluster tree is now the only node in the kd-tree
  // (NOTE: RootOf is a macro; its expansion must be parenthesized for this
  // cast-without-parentheses spelling to compile)
  Clusterer->Root = static_cast<CLUSTER *> RootOf(Clusterer->KDTree);
  // free up the memory used by the K-D tree, heap, and temp clusters
  delete context.tree;
  Clusterer->KDTree = nullptr;
  delete context.heap;
  delete[] context.candidates;
} // CreateClusterTree
/**
* This routine is designed to be used in concert with the
* KDWalk routine. It will create a potential cluster for
* each sample in the kd-tree that is being walked. This
* potential cluster will then be pushed on the heap.
* @param context ClusteringContext (see definition above)
* @param Cluster current cluster being visited in kd-tree walk
* @param Level level of this cluster in the kd-tree
*/
static void MakePotentialClusters(ClusteringContext *context, CLUSTER *Cluster, int32_t /*Level*/) {
  ClusterPair HeapEntry;
  const int slot = context->next;
  // Pair the visited cluster with its nearest neighbor; the distance
  // between them becomes the heap key, so the closest pairs pop first.
  context->candidates[slot].Cluster = Cluster;
  HeapEntry.data() = &(context->candidates[slot]);
  context->candidates[slot].Neighbor =
      FindNearestNeighbor(context->tree, context->candidates[slot].Cluster, &HeapEntry.key());
  if (context->candidates[slot].Neighbor == nullptr) {
    return; // no neighbor found: nothing to pair with
  }
  context->heap->Push(&HeapEntry);
  context->next++;
} // MakePotentialClusters
/**
* This routine searches the specified kd-tree for the nearest
* neighbor of the specified cluster. It actually uses the
* kd routines to find the 2 nearest neighbors since one of them
* will be the original cluster. A pointer to the nearest
* neighbor is returned, if it can be found, otherwise nullptr is
* returned. The distance between the 2 nodes is placed
* in the specified variable.
* @param Tree kd-tree to search in for nearest neighbor
* @param Cluster cluster whose nearest neighbor is to be found
* @param Distance ptr to variable to report distance found
* @return Pointer to the nearest neighbor of Cluster, or nullptr
*/
static CLUSTER *FindNearestNeighbor(KDTREE *Tree, CLUSTER *Cluster, float *Distance)
#define MAXNEIGHBORS 2
#define MAXDISTANCE FLT_MAX
{
  CLUSTER *Neighbor[MAXNEIGHBORS];
  float Dist[MAXNEIGHBORS];
  int NumberOfNeighbors;
  // Ask the kd-tree for the 2 closest nodes; one of them is expected to
  // be Cluster itself, the other its true nearest neighbor.
  KDNearestNeighborSearch(Tree, &Cluster->Mean[0], MAXNEIGHBORS, MAXDISTANCE, &NumberOfNeighbors,
                          reinterpret_cast<void **>(Neighbor), Dist);
  // Keep the closest result that is not the query cluster itself.
  CLUSTER *BestNeighbor = nullptr;
  *Distance = MAXDISTANCE;
  for (int32_t i = 0; i < NumberOfNeighbors; i++) {
    if (Neighbor[i] != Cluster && Dist[i] < *Distance) {
      *Distance = Dist[i];
      BestNeighbor = Neighbor[i];
    }
  }
  return BestNeighbor;
} // FindNearestNeighbor
/**
* This routine creates a new permanent cluster from the
* clusters specified in TempCluster. The 2 clusters in
* TempCluster are marked as "clustered" and deleted from
* the kd-tree. The new cluster is then added to the kd-tree.
* @param Clusterer current clustering environment
* @param TempCluster potential cluster to make permanent
* @return Pointer to the new permanent cluster
*/
static CLUSTER *MakeNewCluster(CLUSTERER *Clusterer, TEMPCLUSTER *TempCluster) {
  // Create an interior node whose children are the two merged clusters.
  auto *Cluster = new CLUSTER(Clusterer->SampleSize);
  Cluster->Clustered = false;
  Cluster->Prototype = false;
  Cluster->Left = TempCluster->Cluster;
  Cluster->Right = TempCluster->Neighbor;
  Cluster->CharID = -1; // interior nodes do not belong to one character

  // The children are now consumed: mark them as clustered and take them
  // out of the kd-tree so they can never be paired again.
  CLUSTER *left = Cluster->Left;
  CLUSTER *right = Cluster->Right;
  left->Clustered = true;
  right->Clustered = true;
  KDDelete(Clusterer->KDTree, &left->Mean[0], left);
  KDDelete(Clusterer->KDTree, &right->Mean[0], right);

  // Merge the child means/counts into the parent, then reinsert it into
  // the kd-tree as a new candidate for further merging.
  Cluster->SampleCount =
      MergeClusters(Clusterer->SampleSize, Clusterer->ParamDesc, left->SampleCount,
                    right->SampleCount, &Cluster->Mean[0], &left->Mean[0], &right->Mean[0]);
  KDStore(Clusterer->KDTree, &Cluster->Mean[0], Cluster);
  return Cluster;
} // MakeNewCluster
/**
* This routine merges two clusters into one larger cluster.
* To do this it computes the number of samples in the new
* cluster and the mean of the new cluster. The ParamDesc
* information is used to ensure that circular dimensions
* are handled correctly.
* @param N # of dimensions (size of arrays)
* @param ParamDesc array of dimension descriptions
* @param n1, n2 number of samples in each old cluster
* @param m array to hold mean of new cluster
* @param m1, m2 arrays containing means of old clusters
* @return The number of samples in the new cluster.
*/
int32_t MergeClusters(int16_t N, PARAM_DESC ParamDesc[], int32_t n1, int32_t n2, float m[],
float m1[], float m2[]) {
int32_t i, n;
n = n1 + n2;
for (i = N; i > 0; i--, ParamDesc++, m++, m1++, m2++) {
if (ParamDesc->Circular) {
// if distance between means is greater than allowed
// reduce upper point by one "rotation" to compute mean
// then normalize the mean back into the accepted range
if ((*m2 - *m1) > ParamDesc->HalfRange) {
*m = (n1 * *m1 + n2 * (*m2 - ParamDesc->Range)) / n;
if (*m < ParamDesc->Min) {
*m += ParamDesc->Range;
}
} else if ((*m1 - *m2) > ParamDesc->HalfRange) {
*m = (n1 * (*m1 - ParamDesc->Range) + n2 * *m2) / n;
if (*m < ParamDesc->Min) {
*m += ParamDesc->Range;
}
} else {
*m = (n1 * *m1 + n2 * *m2) / n;
}
} else {
*m = (n1 * *m1 + n2 * *m2) / n;
}
}
return n;
} // MergeClusters
/**
* This routine decides which clusters in the cluster tree
* should be represented by prototypes, forms a list of these
* prototypes, and places the list in the Clusterer data
* structure.
* @param Clusterer data structure holding cluster tree
* @param Config parameters used to control prototype generation
*/
static void ComputePrototypes(CLUSTERER *Clusterer, CLUSTERCONFIG *Config) {
  // Depth-first walk of the cluster tree using an explicit stack;
  // initially the only entry is the root cluster (if any).
  LIST ClusterStack = NIL_LIST;
  if (Clusterer->Root != nullptr) {
    ClusterStack = push(NIL_LIST, Clusterer->Root);
  }
  while (ClusterStack != NIL_LIST) {
    // Pop the next candidate cluster.
    auto *Cluster = reinterpret_cast<CLUSTER *>(ClusterStack->first_node());
    ClusterStack = pop(ClusterStack);
    // A successful prototype represents the whole subtree; otherwise the
    // cluster is split and both children are examined separately.
    PROTOTYPE *Prototype = MakePrototype(Clusterer, Config, Cluster);
    if (Prototype != nullptr) {
      Clusterer->ProtoList = push(Clusterer->ProtoList, Prototype);
    } else {
      ClusterStack = push(ClusterStack, Cluster->Right);
      ClusterStack = push(ClusterStack, Cluster->Left);
    }
  }
} // ComputePrototypes
/**
* This routine attempts to create a prototype from the
* specified cluster that conforms to the distribution
* specified in Config. If there are too few samples in the
* cluster to perform a statistical analysis, then a prototype
* is generated but labelled as insignificant. If the
* dimensions of the cluster are not independent, no prototype
* is generated and nullptr is returned. If a prototype can be
* found that matches the desired distribution then a pointer
* to it is returned, otherwise nullptr is returned.
* @param Clusterer data structure holding cluster tree
* @param Config parameters used to control prototype generation
* @param Cluster cluster to be made into a prototype
* @return Pointer to new prototype or nullptr
*/
static PROTOTYPE *MakePrototype(CLUSTERER *Clusterer, CLUSTERCONFIG *Config, CLUSTER *Cluster) {
  PROTOTYPE *Proto;
  BUCKETS *Buckets;
  // filter out clusters which contain samples from the same character
  if (MultipleCharSamples(Clusterer, Cluster, Config->MaxIllegal)) {
    return nullptr;
  }
  // compute the covariance matrix and ranges for the cluster
  auto Statistics = ComputeStatistics(Clusterer->SampleSize, Clusterer->ParamDesc, Cluster);
  // check for degenerate clusters which need not be analyzed further
  // note that the MinSamples test assumes that all clusters with multiple
  // character samples have been removed (as above)
  Proto = MakeDegenerateProto(Clusterer->SampleSize, Cluster, Statistics, Config->ProtoStyle,
                              static_cast<int32_t>(Config->MinSamples * Clusterer->NumChar));
  if (Proto != nullptr) {
    delete Statistics;
    return Proto;
  }
  // check to ensure that all dimensions are independent
  if (!Independent(Clusterer->ParamDesc, Clusterer->SampleSize, &Statistics->CoVariance[0],
                   Config->Independence)) {
    delete Statistics;
    return nullptr;
  }
  // for elliptical protos, optionally try Hotelling's T-squared test
  // before falling back to the per-dimension histogram tests below
  if (HOTELLING && Config->ProtoStyle == elliptical) {
    Proto = TestEllipticalProto(Clusterer, Config, Cluster, Statistics);
    if (Proto != nullptr) {
      delete Statistics;
      return Proto;
    }
  }
  // create a histogram data structure used to evaluate distributions
  // (not freed here - presumably owned by the clusterer's bucket cache;
  // see FreeClusterer)
  Buckets = GetBuckets(Clusterer, normal, Cluster->SampleCount, Config->Confidence);
  // create a prototype based on the statistics and test it
  switch (Config->ProtoStyle) {
    case spherical:
      Proto = MakeSphericalProto(Clusterer, Cluster, Statistics, Buckets);
      break;
    case elliptical:
      Proto = MakeEllipticalProto(Clusterer, Cluster, Statistics, Buckets);
      break;
    case mixed:
      Proto = MakeMixedProto(Clusterer, Cluster, Statistics, Buckets, Config->Confidence);
      break;
    case automatic:
      // automatic: try the simplest (spherical) style first and fall
      // back to more general styles only when the simpler ones fail
      Proto = MakeSphericalProto(Clusterer, Cluster, Statistics, Buckets);
      if (Proto != nullptr) {
        break;
      }
      Proto = MakeEllipticalProto(Clusterer, Cluster, Statistics, Buckets);
      if (Proto != nullptr) {
        break;
      }
      Proto = MakeMixedProto(Clusterer, Cluster, Statistics, Buckets, Config->Confidence);
      break;
  }
  delete Statistics;
  return Proto;
} // MakePrototype
/**
* This routine checks for clusters which are degenerate and
* therefore cannot be analyzed in a statistically valid way.
* A cluster is defined as degenerate if it does not have at
* least MINSAMPLESNEEDED samples in it. If the cluster is
* found to be degenerate, a prototype of the specified style
* is generated and marked as insignificant. A cluster is
* also degenerate if it does not have at least MinSamples
* samples in it.
*
* If the cluster is not degenerate, nullptr is returned.
*
* @param N number of dimensions
* @param Cluster cluster being analyzed
* @param Statistics statistical info about cluster
* @param Style type of prototype to be generated
* @param MinSamples minimum number of samples in a cluster
* @return Pointer to degenerate prototype or nullptr.
*/
static PROTOTYPE *MakeDegenerateProto( // this was MinSample
    uint16_t N, CLUSTER *Cluster, STATISTICS *Statistics, PROTOSTYLE Style, int32_t MinSamples) {
  // Never accept a statistical analysis on fewer than MINSAMPLESNEEDED.
  if (MinSamples < MINSAMPLESNEEDED) {
    MinSamples = MINSAMPLESNEEDED;
  }
  if (Cluster->SampleCount >= MinSamples) {
    return nullptr; // large enough for a proper statistical analysis
  }
  // Too few samples: build a proto of the requested style anyway, but
  // flag it as insignificant.
  PROTOTYPE *Proto = nullptr;
  switch (Style) {
    case spherical:
      Proto = NewSphericalProto(N, Cluster, Statistics);
      break;
    case elliptical:
    case automatic:
      Proto = NewEllipticalProto(N, Cluster, Statistics);
      break;
    case mixed:
      Proto = NewMixedProto(N, Cluster, Statistics);
      break;
  }
  Proto->Significant = false;
  return Proto;
} // MakeDegenerateProto
/**
* This routine tests the specified cluster to see if **
* there is a statistically significant difference between
* the sub-clusters that would be made if the cluster were to
* be split. If not, then a new prototype is formed and
* returned to the caller. If there is, then nullptr is returned
* to the caller.
* @param Clusterer data struct containing samples being clustered
* @param Config provides the magic number of samples that make a good cluster
* @param Cluster cluster to be made into an elliptical prototype
* @param Statistics statistical info about cluster
* @return Pointer to new elliptical prototype or nullptr.
*/
static PROTOTYPE *TestEllipticalProto(CLUSTERER *Clusterer, CLUSTERCONFIG *Config, CLUSTER *Cluster,
                                      STATISTICS *Statistics) {
  // Fraction of the number of samples used as a range around 1 within
  // which a cluster has the magic size that allows a boost to the
  // FTable by kFTableBoostMargin, thus allowing clusters near the
  // magic size (equal to the number of sample characters) to be more
  // likely to stay together.
  const double kMagicSampleMargin = 0.0625;
  const double kFTableBoostMargin = 2.0;
  int N = Clusterer->SampleSize;
  CLUSTER *Left = Cluster->Left;
  CLUSTER *Right = Cluster->Right;
  // a leaf cluster cannot be split, so there is nothing to test
  if (Left == nullptr || Right == nullptr) {
    return nullptr;
  }
  int TotalDims = Left->SampleCount + Right->SampleCount;
  // too few samples for the F statistic below to be defined
  if (TotalDims < N + 1 || TotalDims < 2) {
    return nullptr;
  }
  std::vector<float> Covariance(static_cast<size_t>(N) * N);
  std::vector<float> Inverse(static_cast<size_t>(N) * N);
  std::vector<float> Delta(N);
  // Compute a new covariance matrix that only uses essential features.
  // Non-essential rows/columns are replaced by identity rows so the
  // matrix stays invertible.
  for (int i = 0; i < N; ++i) {
    int row_offset = i * N;
    if (!Clusterer->ParamDesc[i].NonEssential) {
      for (int j = 0; j < N; ++j) {
        if (!Clusterer->ParamDesc[j].NonEssential) {
          Covariance[j + row_offset] = Statistics->CoVariance[j + row_offset];
        } else {
          Covariance[j + row_offset] = 0.0f;
        }
      }
    } else {
      for (int j = 0; j < N; ++j) {
        if (i == j) {
          Covariance[j + row_offset] = 1.0f;
        } else {
          Covariance[j + row_offset] = 0.0f;
        }
      }
    }
  }
  double err = InvertMatrix(&Covariance[0], N, &Inverse[0]);
  if (err > 1) {
    tprintf("Clustering error: Matrix inverse failed with error %g\n", err);
  }
  // Difference of the child means, restricted to essential dimensions;
  // EssentialN counts how many dimensions actually participate.
  int EssentialN = 0;
  for (int dim = 0; dim < N; ++dim) {
    if (!Clusterer->ParamDesc[dim].NonEssential) {
      Delta[dim] = Left->Mean[dim] - Right->Mean[dim];
      ++EssentialN;
    } else {
      Delta[dim] = 0.0f;
    }
  }
  // Compute Hotelling's T-squared: Delta' * Inverse * Delta.
  double Tsq = 0.0;
  for (int x = 0; x < N; ++x) {
    double temp = 0.0;
    for (int y = 0; y < N; ++y) {
      temp += static_cast<double>(Inverse[y + N * x]) * Delta[y];
    }
    Tsq += Delta[x] * temp;
  }
  // Changed this function to match the formula in
  // Statistical Methods in Medical Research p 473
  // By Peter Armitage, Geoffrey Berry, J. N. S. Matthews.
  // Tsq *= Left->SampleCount * Right->SampleCount / TotalDims;
  double F = Tsq * (TotalDims - EssentialN - 1) / ((TotalDims - 2) * EssentialN);
  // Clamp both indices into the statically-sized F table.
  int Fx = EssentialN;
  if (Fx > FTABLE_X) {
    Fx = FTABLE_X;
  }
  --Fx;
  int Fy = TotalDims - EssentialN - 1;
  if (Fy > FTABLE_Y) {
    Fy = FTABLE_Y;
  }
  --Fy;
  double FTarget = FTable[Fy][Fx];
  if (Config->MagicSamples > 0 && TotalDims >= Config->MagicSamples * (1.0 - kMagicSampleMargin) &&
      TotalDims <= Config->MagicSamples * (1.0 + kMagicSampleMargin)) {
    // Give magic-sized clusters a magic FTable boost.
    FTarget += kFTableBoostMargin;
  }
  // No significant difference between the sub-clusters: keep them
  // together as one elliptical prototype.
  if (F < FTarget) {
    return NewEllipticalProto(Clusterer->SampleSize, Cluster, Statistics);
  }
  return nullptr;
}
/**
* This routine tests the specified cluster to see if it can
* be approximated by a spherical normal distribution. If it
* can be, then a new prototype is formed and returned to the
* caller. If it can't be, then nullptr is returned to the caller.
* @param Clusterer data struct containing samples being clustered
* @param Cluster cluster to be made into a spherical prototype
* @param Statistics statistical info about cluster
* @param Buckets histogram struct used to analyze distribution
* @return Pointer to new spherical prototype or nullptr.
*/
static PROTOTYPE *MakeSphericalProto(CLUSTERER *Clusterer, CLUSTER *Cluster, STATISTICS *Statistics,
                                     BUCKETS *Buckets) {
  int dim;
  // Every essential dimension must pass a normality test against the
  // single pooled (average) variance of the cluster.
  for (dim = 0; dim < Clusterer->SampleSize; dim++) {
    if (Clusterer->ParamDesc[dim].NonEssential) {
      continue;
    }
    FillBuckets(Buckets, Cluster, dim, &(Clusterer->ParamDesc[dim]), Cluster->Mean[dim],
                sqrt(static_cast<double>(Statistics->AvgVariance)));
    if (!DistributionOK(Buckets)) {
      break;
    }
  }
  // A spherical proto is made only if no dimension failed the test.
  if (dim < Clusterer->SampleSize) {
    return nullptr;
  }
  return NewSphericalProto(Clusterer->SampleSize, Cluster, Statistics);
} // MakeSphericalProto
/**
* This routine tests the specified cluster to see if it can
* be approximated by an elliptical normal distribution. If it
* can be, then a new prototype is formed and returned to the
* caller. If it can't be, then nullptr is returned to the caller.
* @param Clusterer data struct containing samples being clustered
* @param Cluster cluster to be made into an elliptical prototype
* @param Statistics statistical info about cluster
* @param Buckets histogram struct used to analyze distribution
* @return Pointer to new elliptical prototype or nullptr.
*/
static PROTOTYPE *MakeEllipticalProto(CLUSTERER *Clusterer, CLUSTER *Cluster,
                                      STATISTICS *Statistics, BUCKETS *Buckets) {
  int dim;
  // Every essential dimension must individually look normal, tested
  // against its own diagonal element of the covariance matrix.
  for (dim = 0; dim < Clusterer->SampleSize; dim++) {
    if (Clusterer->ParamDesc[dim].NonEssential) {
      continue;
    }
    FillBuckets(
        Buckets, Cluster, dim, &(Clusterer->ParamDesc[dim]), Cluster->Mean[dim],
        sqrt(static_cast<double>(Statistics->CoVariance[dim * (Clusterer->SampleSize + 1)])));
    if (!DistributionOK(Buckets)) {
      break;
    }
  }
  // An elliptical proto is made only if no dimension failed the test.
  if (dim < Clusterer->SampleSize) {
    return nullptr;
  }
  return NewEllipticalProto(Clusterer->SampleSize, Cluster, Statistics);
} // MakeEllipticalProto
/**
* This routine tests each dimension of the specified cluster to
* see what distribution would best approximate that dimension.
* Each dimension is compared to the following distributions
* in order: normal, random, uniform. If each dimension can
* be represented by one of these distributions,
* then a new prototype is formed and returned to the
* caller. If it can't be, then nullptr is returned to the caller.
* @param Clusterer data struct containing samples being clustered
* @param Cluster cluster to be made into a prototype
* @param Statistics statistical info about cluster
* @param NormalBuckets histogram struct used to analyze distribution
* @param Confidence confidence level for alternate distributions
* @return Pointer to new mixed prototype or nullptr.
*/
static PROTOTYPE *MakeMixedProto(CLUSTERER *Clusterer, CLUSTER *Cluster, STATISTICS *Statistics,
                                 BUCKETS *NormalBuckets, double Confidence) {
  PROTOTYPE *Proto;
  int i;
  // buckets for the alternate distributions are created lazily, only if
  // some dimension actually fails the normal test
  BUCKETS *UniformBuckets = nullptr;
  BUCKETS *RandomBuckets = nullptr;
  // create a mixed proto to work on - initially assume all dimensions normal
  Proto = NewMixedProto(Clusterer->SampleSize, Cluster, Statistics);
  // find the proper distribution for each dimension, trying in order:
  // normal, random, uniform; stop at the first distribution that fits
  for (i = 0; i < Clusterer->SampleSize; i++) {
    if (Clusterer->ParamDesc[i].NonEssential) {
      continue;
    }
    // 1st choice: normal distribution
    FillBuckets(NormalBuckets, Cluster, i, &(Clusterer->ParamDesc[i]), Proto->Mean[i],
                std::sqrt(Proto->Variance.Elliptical[i]));
    if (DistributionOK(NormalBuckets)) {
      continue;
    }
    // 2nd choice: random distribution
    if (RandomBuckets == nullptr) {
      RandomBuckets = GetBuckets(Clusterer, D_random, Cluster->SampleCount, Confidence);
    }
    MakeDimRandom(i, Proto, &(Clusterer->ParamDesc[i]));
    FillBuckets(RandomBuckets, Cluster, i, &(Clusterer->ParamDesc[i]), Proto->Mean[i],
                Proto->Variance.Elliptical[i]);
    if (DistributionOK(RandomBuckets)) {
      continue;
    }
    // 3rd choice: uniform distribution
    if (UniformBuckets == nullptr) {
      UniformBuckets = GetBuckets(Clusterer, uniform, Cluster->SampleCount, Confidence);
    }
    MakeDimUniform(i, Proto, Statistics);
    FillBuckets(UniformBuckets, Cluster, i, &(Clusterer->ParamDesc[i]), Proto->Mean[i],
                Proto->Variance.Elliptical[i]);
    if (DistributionOK(UniformBuckets)) {
      continue;
    }
    // nothing fits this dimension - give up on the whole proto
    break;
  }
  // if any dimension failed to match a distribution, discard the proto
  if (i < Clusterer->SampleSize) {
    FreePrototype(Proto);
    Proto = nullptr;
  }
  return (Proto);
} // MakeMixedProto
/**
* This routine alters the ith dimension of the specified
* mixed prototype to be D_random.
* @param i index of dimension to be changed
* @param Proto prototype whose dimension is to be altered
* @param ParamDesc description of specified dimension
*/
static void MakeDimRandom(uint16_t i, PROTOTYPE *Proto, PARAM_DESC *ParamDesc) {
  Proto->Distrib[i] = D_random;
  Proto->Mean[i] = ParamDesc->MidRange;
  // for D_random the "variance" slot actually stores the half-range
  Proto->Variance.Elliptical[i] = ParamDesc->HalfRange;
  // subtract out the previous magnitude of this dimension from the total,
  // then multiply in the new per-dimension magnitude (uniform density
  // over the full range)
  Proto->TotalMagnitude /= Proto->Magnitude.Elliptical[i];
  Proto->Magnitude.Elliptical[i] = 1.0 / ParamDesc->Range;
  Proto->TotalMagnitude *= Proto->Magnitude.Elliptical[i];
  Proto->LogMagnitude = log(static_cast<double>(Proto->TotalMagnitude));
  // note that the proto Weight is irrelevant for D_random protos
} // MakeDimRandom
/**
* This routine alters the ith dimension of the specified
* mixed prototype to be uniform.
* @param i index of dimension to be changed
* @param Proto prototype whose dimension is to be altered
* @param Statistics statistical info about prototype
*/
static void MakeDimUniform(uint16_t i, PROTOTYPE *Proto, STATISTICS *Statistics) {
  Proto->Distrib[i] = uniform;
  // Center the dimension on the midpoint of the observed sample range
  // (Statistics Min/Max are offsets relative to the cluster mean).
  Proto->Mean[i] = Proto->Cluster->Mean[i] + (Statistics->Min[i] + Statistics->Max[i]) / 2;
  // For uniform the "variance" slot stores the half-range, floored at
  // MINVARIANCE to avoid a degenerate (zero-width) distribution.
  float half_range = (Statistics->Max[i] - Statistics->Min[i]) / 2;
  if (half_range < MINVARIANCE) {
    half_range = MINVARIANCE;
  }
  Proto->Variance.Elliptical[i] = half_range;
  // Replace this dimension's factor in the overall magnitude: divide out
  // the old contribution, then multiply in the new one.
  Proto->TotalMagnitude /= Proto->Magnitude.Elliptical[i];
  Proto->Magnitude.Elliptical[i] = 1.0 / (2.0 * Proto->Variance.Elliptical[i]);
  Proto->TotalMagnitude *= Proto->Magnitude.Elliptical[i];
  Proto->LogMagnitude = log(static_cast<double>(Proto->TotalMagnitude));
  // note that the proto Weight is irrelevant for uniform protos
} // MakeDimUniform
/**
 * This routine searches the cluster tree for all leaf nodes
 * which are samples in the specified cluster. It computes
 * a full covariance matrix for these samples as well as
 * keeping track of the ranges (min and max) for each
 * dimension. A special data structure is allocated to
 * return this information to the caller. An incremental
 * algorithm for computing statistics is not used because
 * it will not work with circular dimensions.
 * @param N number of dimensions
 * @param ParamDesc array of dimension descriptions
 * @param Cluster cluster whose stats are to be computed
 * @return Pointer to new data structure containing statistics
 */
static STATISTICS *ComputeStatistics(int16_t N, PARAM_DESC ParamDesc[], CLUSTER *Cluster) {
  int i, j;
  LIST SearchState;
  SAMPLE *Sample;
  uint32_t SampleCountAdjustedForBias;
  // allocate memory to hold the statistics results
  auto Statistics = new STATISTICS(N);
  // allocate temporary memory to hold the sample to mean distances
  std::vector<float> Distance(N);
  // find each sample in the cluster and merge it into the statistics
  InitSampleSearch(SearchState, Cluster);
  while ((Sample = NextSample(&SearchState)) != nullptr) {
    for (i = 0; i < N; i++) {
      // offset of this sample from the cluster mean in dimension i
      Distance[i] = Sample->Mean[i] - Cluster->Mean[i];
      if (ParamDesc[i].Circular) {
        // wrap circular dimensions so the offset lies within +/- HalfRange
        if (Distance[i] > ParamDesc[i].HalfRange) {
          Distance[i] -= ParamDesc[i].Range;
        }
        if (Distance[i] < -ParamDesc[i].HalfRange) {
          Distance[i] += ParamDesc[i].Range;
        }
      }
      // track per-dimension extremes (as offsets from the mean)
      if (Distance[i] < Statistics->Min[i]) {
        Statistics->Min[i] = Distance[i];
      }
      if (Distance[i] > Statistics->Max[i]) {
        Statistics->Max[i] = Distance[i];
      }
    }
    // accumulate the outer product Distance * Distance^T into the
    // (row-major) covariance matrix
    auto CoVariance = &Statistics->CoVariance[0];
    for (i = 0; i < N; i++) {
      for (j = 0; j < N; j++, CoVariance++) {
        *CoVariance += Distance[i] * Distance[j];
      }
    }
  }
  // normalize the variances by the total number of samples
  // use SampleCount-1 instead of SampleCount to get an unbiased estimate
  // also compute the geometic mean of the diagonal variances
  // ensure that clusters with only 1 sample are handled correctly
  if (Cluster->SampleCount > 1) {
    SampleCountAdjustedForBias = Cluster->SampleCount - 1;
  } else {
    SampleCountAdjustedForBias = 1;
  }
  auto CoVariance = &Statistics->CoVariance[0];
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++, CoVariance++) {
      *CoVariance /= SampleCountAdjustedForBias;
      if (j == i) {
        // clip diagonal (variance) entries and fold them into the product
        // used for the geometric mean below
        if (*CoVariance < MINVARIANCE) {
          *CoVariance = MINVARIANCE;
        }
        Statistics->AvgVariance *= *CoVariance;
      }
    }
  }
  // AvgVariance currently holds the product of the N diagonal variances;
  // take the Nth root to get their geometric mean
  Statistics->AvgVariance =
      static_cast<float>(pow(static_cast<double>(Statistics->AvgVariance), 1.0 / N));
  return Statistics;
} // ComputeStatistics
/**
 * Build a spherical prototype approximating the samples in a cluster.
 * A spherical prototype models all dimensions as independent normal
 * distributions sharing one common variance.
 * @param N number of dimensions
 * @param Cluster cluster to be made into a spherical prototype
 * @param Statistics statistical info about samples in cluster
 * @return Pointer to a new spherical prototype data structure
 */
static PROTOTYPE *NewSphericalProto(uint16_t N, CLUSTER *Cluster, STATISTICS *Statistics) {
  PROTOTYPE *Proto = NewSimpleProto(N, Cluster);
  // shared variance is the geometric mean of per-dimension variances,
  // clipped below so the magnitude stays finite
  float Variance = Statistics->AvgVariance;
  if (Variance < MINVARIANCE) {
    Variance = MINVARIANCE;
  }
  Proto->Variance.Spherical = Variance;
  Proto->Magnitude.Spherical = 1.0 / sqrt(2.0 * M_PI * Variance);
  // total magnitude = per-dimension magnitude raised to the Nth power,
  // since every dimension contributes the same factor
  Proto->TotalMagnitude = static_cast<float>(
      pow(static_cast<double>(Proto->Magnitude.Spherical), static_cast<double>(N)));
  Proto->Weight.Spherical = 1.0 / Variance;
  Proto->LogMagnitude = log(static_cast<double>(Proto->TotalMagnitude));
  return Proto;
} // NewSphericalProto
/**
 * Build an elliptical prototype approximating the samples in a cluster.
 * An elliptical prototype models each dimension as an independent normal
 * distribution with its own variance.
 * @param N number of dimensions
 * @param Cluster cluster to be made into an elliptical prototype
 * @param Statistics statistical info about samples in cluster
 * @return Pointer to a new elliptical prototype data structure
 */
static PROTOTYPE *NewEllipticalProto(int16_t N, CLUSTER *Cluster, STATISTICS *Statistics) {
  PROTOTYPE *Proto = NewSimpleProto(N, Cluster);
  Proto->Variance.Elliptical = new float[N];
  Proto->Magnitude.Elliptical = new float[N];
  Proto->Weight.Elliptical = new float[N];
  // per-dimension variances are the diagonal entries of the (row-major)
  // covariance matrix, hence the stride of N + 1
  auto Diagonal = &Statistics->CoVariance[0];
  Proto->TotalMagnitude = 1.0;
  for (int Dim = 0; Dim < N; ++Dim, Diagonal += N + 1) {
    // clip tiny variances so magnitudes and weights stay finite
    float Var = *Diagonal;
    if (Var < MINVARIANCE) {
      Var = MINVARIANCE;
    }
    Proto->Variance.Elliptical[Dim] = Var;
    Proto->Magnitude.Elliptical[Dim] = 1.0f / sqrt(2.0f * M_PI * Var);
    Proto->Weight.Elliptical[Dim] = 1.0f / Var;
    Proto->TotalMagnitude *= Proto->Magnitude.Elliptical[Dim];
  }
  Proto->LogMagnitude = log(static_cast<double>(Proto->TotalMagnitude));
  Proto->Style = elliptical;
  return Proto;
} // NewEllipticalProto
/**
 * Build a mixed prototype approximating the samples in a cluster.
 * Mixed prototypes allow a different distribution per dimension (all
 * independent). The structure starts out as an elliptical prototype
 * with every dimension marked normal; callers may later retag
 * individual dimensions.
 * @param N number of dimensions
 * @param Cluster cluster to be made into a mixed prototype
 * @param Statistics statistical info about samples in cluster
 * @return Pointer to a new mixed prototype data structure
 */
static PROTOTYPE *NewMixedProto(int16_t N, CLUSTER *Cluster, STATISTICS *Statistics) {
  PROTOTYPE *Proto = NewEllipticalProto(N, Cluster, Statistics);
  // start with every dimension tagged as normally distributed
  Proto->Distrib.assign(N, normal);
  Proto->Style = mixed;
  return Proto;
} // NewMixedProto
/**
 * Allocate and initialize a bare prototype: mean copied from the
 * cluster, no per-dimension distributions or variances yet.
 * @param N number of dimensions (unused here; kept for interface symmetry)
 * @param Cluster cluster to be made into a prototype
 * @return Pointer to new simple prototype
 */
static PROTOTYPE *NewSimpleProto(int16_t N, CLUSTER *Cluster) {
  auto *Proto = new PROTOTYPE;
  Proto->Mean = Cluster->Mean;
  Proto->Distrib.clear();
  Proto->Significant = true;
  Proto->Merged = false;
  Proto->Style = spherical;
  Proto->NumSamples = Cluster->SampleCount;
  Proto->Cluster = Cluster;
  // mark the source cluster as being represented by a prototype
  Cluster->Prototype = true;
  return Proto;
} // NewSimpleProto
/**
 * This routine returns true if the specified covariance
 * matrix indicates that all N dimensions are independent of
 * one another. One dimension is judged to be independent of
 * another when the magnitude of the corresponding correlation
 * coefficient is
 * less than the specified Independence factor. The
 * correlation coefficient is calculated as: (see Duda and
 * Hart, pg. 247)
 * coeff[ij] = stddev[ij] / sqrt (stddev[ii] * stddev[jj])
 * The covariance matrix is assumed to be symmetric (which
 * should always be true).
 * @param ParamDesc descriptions of each feature space dimension
 * @param N number of dimensions
 * @param CoVariance ptr to a covariance matrix
 * @param Independence max off-diagonal correlation coefficient
 * @return true if dimensions are independent, false otherwise
 */
static bool Independent(PARAM_DESC *ParamDesc, int16_t N, float *CoVariance, float Independence) {
  int i, j;
  float *VARii; // points to ith on-diagonal element
  float *VARjj; // points to jth on-diagonal element
  float CorrelationCoeff;
  VARii = CoVariance;
  for (i = 0; i < N; i++, VARii += N + 1) {
    // dimensions marked NonEssential are excluded from the test
    if (ParamDesc[i].NonEssential) {
      continue;
    }
    // only the upper triangle is examined; symmetry is assumed
    VARjj = VARii + N + 1;
    CoVariance = VARii + 1;
    for (j = i + 1; j < N; j++, CoVariance++, VARjj += N + 1) {
      if (ParamDesc[j].NonEssential) {
        continue;
      }
      // a zero diagonal variance makes the coefficient undefined;
      // treat that pair as uncorrelated
      if ((*VARii == 0.0) || (*VARjj == 0.0)) {
        CorrelationCoeff = 0.0;
      } else {
        // NOTE(review): the inner sqrt already yields |coeff[ij]|; the
        // outer sqrt means sqrt(|coeff|) is what gets compared against
        // Independence, which is stricter than the formula in the header
        // comment — confirm the double sqrt is intentional.
        CorrelationCoeff = sqrt(std::sqrt(*CoVariance * *CoVariance / (*VARii * *VARjj)));
      }
      if (CorrelationCoeff > Independence) {
        return false;
      }
    }
  }
  return true;
} // Independent
/**
 * Return a histogram (BUCKETS) structure suitable for testing
 * SampleCount samples against the given distribution at the given
 * confidence level. Structures are cached per distribution and bucket
 * count in the clusterer, so repeated requests reuse (and re-adjust)
 * an existing structure instead of rebuilding it.
 * @param clusterer which keeps a bucket_cache for us.
 * @param Distribution type of probability distribution to test for
 * @param SampleCount number of samples that are available
 * @param Confidence probability of a Type I error
 * @return Bucket data structure
 */
static BUCKETS *GetBuckets(CLUSTERER *clusterer, DISTRIBUTION Distribution, uint32_t SampleCount,
                           double Confidence) {
  const uint16_t NumberOfBuckets = OptimumNumberOfBuckets(SampleCount);
  // cache slot for this (distribution, bucket-count) combination
  BUCKETS *&CacheSlot = clusterer->bucket_cache[Distribution][NumberOfBuckets - MINBUCKETS];
  if (CacheSlot == nullptr) {
    // cache miss: build a fresh structure and remember it
    CacheSlot = MakeBuckets(Distribution, SampleCount, Confidence);
    return CacheSlot;
  }
  // cache hit: adapt the cached structure to the current request
  BUCKETS *Buckets = CacheSlot;
  if (SampleCount != Buckets->SampleCount) {
    AdjustBuckets(Buckets, SampleCount);
  }
  if (Confidence != Buckets->Confidence) {
    Buckets->Confidence = Confidence;
    Buckets->ChiSquared =
        ComputeChiSquared(DegreesOfFreedom(Distribution, Buckets->NumberOfBuckets), Confidence);
  }
  InitBuckets(Buckets);
  return Buckets;
} // GetBuckets
/**
 * This routine creates a histogram data structure which can
 * be used by other routines to place samples into histogram
 * buckets, and then apply a goodness of fit test to the
 * histogram data to determine if the samples belong to the
 * specified probability distribution. The buckets are
 * allocated in such a way that the expected frequency of
 * samples in each bucket is approximately the same. In
 * order to make this possible, a mapping table is
 * computed which maps "normalized" samples into the
 * appropriate bucket.
 * @param Distribution type of probability distribution to test for
 * @param SampleCount number of samples that are available
 * @param Confidence probability of a Type I error
 * @return Pointer to new histogram data structure
 */
static BUCKETS *MakeBuckets(DISTRIBUTION Distribution, uint32_t SampleCount, double Confidence) {
  // density functions indexed by DISTRIBUTION; D_random shares the
  // uniform density
  const DENSITYFUNC DensityFunction[] = {NormalDensity, UniformDensity, UniformDensity};
  int i, j;
  double BucketProbability;
  double NextBucketBoundary;
  double Probability;
  double ProbabilityDelta;
  double LastProbDensity;
  double ProbDensity;
  uint16_t CurrentBucket;
  bool Symmetrical;
  // allocate memory needed for data structure
  auto Buckets = new BUCKETS(OptimumNumberOfBuckets(SampleCount));
  Buckets->SampleCount = SampleCount;
  Buckets->Confidence = Confidence;
  // initialize simple fields
  Buckets->Distribution = Distribution;
  // all currently defined distributions are symmetrical
  Symmetrical = true;
  Buckets->ChiSquared =
      ComputeChiSquared(DegreesOfFreedom(Distribution, Buckets->NumberOfBuckets), Confidence);
  if (Symmetrical) {
    // allocate buckets so that all have approx. equal probability
    BucketProbability = 1.0 / static_cast<double>(Buckets->NumberOfBuckets);
    // distribution is symmetric so fill in upper half then copy
    CurrentBucket = Buckets->NumberOfBuckets / 2;
    // with an odd bucket count the middle bucket straddles the center,
    // so only half its probability lies in the upper half
    if (Odd(Buckets->NumberOfBuckets)) {
      NextBucketBoundary = BucketProbability / 2;
    } else {
      NextBucketBoundary = BucketProbability;
    }
    // walk the upper half of the density table, integrating by
    // trapezoids; advance to the next bucket each time the accumulated
    // probability crosses a boundary
    Probability = 0.0;
    LastProbDensity = (*DensityFunction[static_cast<int>(Distribution)])(BUCKETTABLESIZE / 2);
    for (i = BUCKETTABLESIZE / 2; i < BUCKETTABLESIZE; i++) {
      ProbDensity = (*DensityFunction[static_cast<int>(Distribution)])(i + 1);
      ProbabilityDelta = Integral(LastProbDensity, ProbDensity, 1.0);
      Probability += ProbabilityDelta;
      if (Probability > NextBucketBoundary) {
        if (CurrentBucket < Buckets->NumberOfBuckets - 1) {
          CurrentBucket++;
        }
        NextBucketBoundary += BucketProbability;
      }
      Buckets->Bucket[i] = CurrentBucket;
      Buckets->ExpectedCount[CurrentBucket] += static_cast<float>(ProbabilityDelta * SampleCount);
      LastProbDensity = ProbDensity;
    }
    // place any leftover probability into the last bucket
    Buckets->ExpectedCount[CurrentBucket] += static_cast<float>((0.5 - Probability) * SampleCount);
    // copy upper half of distribution to lower half
    for (i = 0, j = BUCKETTABLESIZE - 1; i < j; i++, j--) {
      Buckets->Bucket[i] = Mirror(Buckets->Bucket[j], Buckets->NumberOfBuckets);
    }
    // copy upper half of expected counts to lower half
    for (i = 0, j = Buckets->NumberOfBuckets - 1; i <= j; i++, j--) {
      Buckets->ExpectedCount[i] += Buckets->ExpectedCount[j];
    }
  }
  return Buckets;
} // MakeBuckets
/**
 * Compute the optimum number of histogram buckets for a chi-squared
 * goodness of fit test on SampleCount samples, based on Table 4.1 on
 * pg. 147 of "Measurement and Analysis of Random Data" by Bendat &
 * Piersol. Linear interpolation is used between table entries. The
 * table is intended for a 0.05 level of significance (alpha); this
 * routine assumes it is equally valid for other alphas, which may not
 * be true.
 * @param SampleCount number of samples to be tested
 * @return Optimum number of histogram buckets
 */
static uint16_t OptimumNumberOfBuckets(uint32_t SampleCount) {
  // below the table's range: clamp to the smallest entry
  if (SampleCount < kCountTable[0]) {
    return kBucketsTable[0];
  }
  // interpolate between the two table entries bracketing SampleCount
  for (uint8_t Next = 1; Next < LOOKUPTABLESIZE; ++Next) {
    const uint8_t Last = Next - 1;
    if (SampleCount <= kCountTable[Next]) {
      const float Slope = static_cast<float>(kBucketsTable[Next] - kBucketsTable[Last]) /
                          static_cast<float>(kCountTable[Next] - kCountTable[Last]);
      return static_cast<uint16_t>(kBucketsTable[Last] +
                                   Slope * (SampleCount - kCountTable[Last]));
    }
  }
  // above the table's range: clamp to the largest entry
  return kBucketsTable[LOOKUPTABLESIZE - 1];
} // OptimumNumberOfBuckets
/**
 * This routine computes the chi-squared value which will
 * leave a cumulative probability of Alpha in the right tail
 * of a chi-squared distribution with the specified number of
 * degrees of freedom. Alpha must be between 0 and 1.
 * DegreesOfFreedom must be even. The routine maintains an
 * array of lists. Each list corresponds to a different
 * number of degrees of freedom. Each entry in the list
 * corresponds to a different alpha value and its corresponding
 * chi-squared value. Therefore, once a particular chi-squared
 * value is computed, it is stored in the list and never
 * needs to be computed again.
 * @param DegreesOfFreedom determines shape of distribution
 * @param Alpha probability of right tail
 * @return Desired chi-squared value
 */
static double ComputeChiSquared(uint16_t DegreesOfFreedom, double Alpha)
#define CHIACCURACY 0.01
#define MINALPHA (1e-200)
{
  // per-degrees-of-freedom cache of previously solved chi-squared values
  static LIST ChiWith[MAXDEGREESOFFREEDOM + 1];
  // limit the minimum alpha that can be used - if alpha is too small
  // it may not be possible to compute chi-squared.
  Alpha = ClipToRange(Alpha, MINALPHA, 1.0);
  // round odd degrees of freedom up to even (ChiArea only handles even)
  if (Odd(DegreesOfFreedom)) {
    DegreesOfFreedom++;
  }
  /* find the list of chi-squared values which have already been computed
   for the specified number of degrees of freedom. Search the list for
   the desired chi-squared. */
  CHISTRUCT SearchKey(0.0, Alpha);
  auto *found = search(ChiWith[DegreesOfFreedom], &SearchKey, AlphaMatch);
  auto OldChiSquared = reinterpret_cast<CHISTRUCT *>(found ? found->first_node() : nullptr);
  if (OldChiSquared == nullptr) {
    // cache miss: solve ChiArea(x) = 0 numerically, starting the search
    // at x = DegreesOfFreedom, then remember the result
    OldChiSquared = new CHISTRUCT(DegreesOfFreedom, Alpha);
    OldChiSquared->ChiSquared =
        Solve(ChiArea, OldChiSquared, static_cast<double>(DegreesOfFreedom), CHIACCURACY);
    ChiWith[DegreesOfFreedom] = push(ChiWith[DegreesOfFreedom], OldChiSquared);
  } else {
    // further optimization might move OldChiSquared to front of list
  }
  return (OldChiSquared->ChiSquared);
} // ComputeChiSquared
/**
 * Probability density of the discrete normal distribution defined by
 * the file-level constants kNormalMean, kNormalVariance, and
 * kNormalMagnitude (the magnitude is precomputed for efficiency).
 * @param x number to compute the normal probability density for
 * @note Globals:
 * kNormalMean mean of a discrete normal distribution
 * kNormalVariance variance of a discrete normal distribution
 * kNormalMagnitude magnitude of a discrete normal distribution
 * @return The value of the normal distribution at x.
 */
static double NormalDensity(int32_t x) {
  const double Offset = x - kNormalMean;
  const double Exponent = -0.5 * Offset * Offset / kNormalVariance;
  return kNormalMagnitude * exp(Exponent);
} // NormalDensity
/**
 * Probability density of a uniform distribution over the range
 * [0, BUCKETTABLESIZE]; zero everywhere outside that range.
 * @param x number to compute the uniform probability density for
 * @return The value of the uniform distribution at x.
 */
static double UniformDensity(int32_t x) {
  constexpr auto UniformDistributionDensity = 1.0 / BUCKETTABLESIZE;
  const bool InRange = (x >= 0) && (x <= BUCKETTABLESIZE);
  return InRange ? UniformDistributionDensity : 0.0;
} // UniformDensity
/**
 * Trapezoidal approximation to the integral of a function over a
 * small delta in x.
 * @param f1 value of function at x1
 * @param f2 value of function at x2
 * @param Dx x2 - x1 (should always be positive)
 * @return Approximation of the integral of the function from x1 to x2.
 */
static double Integral(double f1, double f2, double Dx) {
  // area of the trapezoid with parallel sides f1 and f2 and width Dx
  return 0.5 * Dx * (f1 + f2);
} // Integral
/**
 * This routine counts the number of cluster samples which
 * fall within the various histogram buckets in Buckets. Only
 * one dimension of each sample is examined. The exact meaning
 * of the Mean and StdDev parameters depends on the
 * distribution which is being analyzed (this info is in the
 * Buckets data structure). For normal distributions, Mean
 * and StdDev have the expected meanings. For uniform and
 * random distributions the Mean is the center point of the
 * range and the StdDev is 1/2 the range. A dimension with
 * zero standard deviation cannot be statistically analyzed.
 * In this case, a pseudo-analysis is used.
 * The Buckets data structure is filled in.
 * @param Buckets histogram buckets to count samples
 * @param Cluster cluster whose samples are being analyzed
 * @param Dim dimension of samples which is being analyzed
 * @param ParamDesc description of the dimension
 * @param Mean "mean" of the distribution
 * @param StdDev "standard deviation" of the distribution
 */
static void FillBuckets(BUCKETS *Buckets, CLUSTER *Cluster, uint16_t Dim, PARAM_DESC *ParamDesc,
                        float Mean, float StdDev) {
  uint16_t BucketID;
  int i;
  LIST SearchState;
  SAMPLE *Sample;
  // initialize the histogram bucket counts to 0
  for (i = 0; i < Buckets->NumberOfBuckets; i++) {
    Buckets->Count[i] = 0;
  }
  if (StdDev == 0.0) {
    /* if the standard deviation is zero, then we can't statistically
     analyze the cluster. Use a pseudo-analysis: samples exactly on
     the mean are distributed evenly across all buckets. Samples greater
     than the mean are placed in the last bucket; samples less than the
     mean are placed in the first bucket. */
    InitSampleSearch(SearchState, Cluster);
    // i cycles 0..NumberOfBuckets-1 to spread on-mean samples evenly
    i = 0;
    while ((Sample = NextSample(&SearchState)) != nullptr) {
      if (Sample->Mean[Dim] > Mean) {
        BucketID = Buckets->NumberOfBuckets - 1;
      } else if (Sample->Mean[Dim] < Mean) {
        BucketID = 0;
      } else {
        BucketID = i;
      }
      Buckets->Count[BucketID] += 1;
      i++;
      if (i >= Buckets->NumberOfBuckets) {
        i = 0;
      }
    }
  } else {
    // search for all samples in the cluster and add to histogram buckets
    InitSampleSearch(SearchState, Cluster);
    while ((Sample = NextSample(&SearchState)) != nullptr) {
      // normalize the sample value into an index of the density table
      switch (Buckets->Distribution) {
        case normal:
          BucketID = NormalBucket(ParamDesc, Sample->Mean[Dim], Mean, StdDev);
          break;
        case D_random:
        case uniform:
          BucketID = UniformBucket(ParamDesc, Sample->Mean[Dim], Mean, StdDev);
          break;
        default:
          BucketID = 0;
      }
      // Bucket[] maps the normalized table index to the histogram bucket
      Buckets->Count[Buckets->Bucket[BucketID]] += 1;
    }
  }
} // FillBuckets
/**
 * Determine which table slot x falls into in the discrete normal
 * distribution defined by kNormalMean and kNormalStdDev. Values of x
 * that exceed the range of the discrete distribution are clipped.
 * @param ParamDesc used to identify circular dimensions
 * @param x value to be normalized
 * @param Mean mean of normal distribution
 * @param StdDev standard deviation of normal distribution
 * @return Bucket number into which x falls
 */
static uint16_t NormalBucket(PARAM_DESC *ParamDesc, float x, float Mean, float StdDev) {
  // wrap circular parameters into the half-range window around Mean
  if (ParamDesc->Circular) {
    const float Offset = x - Mean;
    if (Offset > ParamDesc->HalfRange) {
      x -= ParamDesc->Range;
    } else if (Offset < -ParamDesc->HalfRange) {
      x += ParamDesc->Range;
    }
  }
  // rescale x into the discrete distribution's coordinate system
  const float X = ((x - Mean) / StdDev) * kNormalStdDev + kNormalMean;
  if (X < 0) {
    return 0;
  }
  if (X > BUCKETTABLESIZE - 1) {
    return static_cast<uint16_t>(BUCKETTABLESIZE - 1);
  }
  return static_cast<uint16_t>(floor(static_cast<double>(X)));
} // NormalBucket
/**
 * Determine which table slot x falls into in the discrete uniform
 * distribution defined by BUCKETTABLESIZE. Values of x that exceed
 * the range of the discrete distribution are clipped.
 * @param ParamDesc used to identify circular dimensions
 * @param x value to be normalized
 * @param Mean center of range of uniform distribution
 * @param StdDev 1/2 the range of the uniform distribution
 * @return Bucket number into which x falls
 */
static uint16_t UniformBucket(PARAM_DESC *ParamDesc, float x, float Mean, float StdDev) {
  // wrap circular parameters into the half-range window around Mean
  if (ParamDesc->Circular) {
    const float Offset = x - Mean;
    if (Offset > ParamDesc->HalfRange) {
      x -= ParamDesc->Range;
    } else if (Offset < -ParamDesc->HalfRange) {
      x += ParamDesc->Range;
    }
  }
  // map [Mean - StdDev, Mean + StdDev] onto [0, BUCKETTABLESIZE], clipping
  const float X = ((x - Mean) / (2 * StdDev) * BUCKETTABLESIZE + BUCKETTABLESIZE / 2.0);
  if (X < 0) {
    return 0;
  }
  if (X > BUCKETTABLESIZE - 1) {
    return static_cast<uint16_t>(BUCKETTABLESIZE - 1);
  }
  return static_cast<uint16_t>(floor(static_cast<double>(X)));
} // UniformBucket
/**
 * Perform a chi-square goodness of fit test on the histogram data in
 * Buckets. Returns true if the observed counts match the probability
 * distribution the Buckets structure was built for, false otherwise.
 * @param Buckets histogram data to perform chi-square test on
 * @return true if samples match distribution, false otherwise
 */
static bool DistributionOK(BUCKETS *Buckets) {
  // accumulate the chi-square statistic: sum of
  // (observed - expected)^2 / expected over all buckets
  float TotalDifference = 0.0;
  for (int i = 0; i < Buckets->NumberOfBuckets; i++) {
    const float Diff = Buckets->Count[i] - Buckets->ExpectedCount[i];
    TotalDifference += (Diff * Diff) / Buckets->ExpectedCount[i];
  }
  // fit is acceptable when the statistic is within the precomputed
  // chi-squared threshold
  return TotalDifference <= Buckets->ChiSquared;
} // DistributionOK
/**
 * Compute the degrees of freedom for a chi-squared test with the
 * specified number of histogram buckets. The result is always rounded
 * up to the next even number so the chi-squared value can be computed
 * more easily; this makes the test slightly more lenient than optimum.
 * @param Distribution distribution being tested for
 * @param HistogramBuckets number of buckets in chi-square test
 * @return The number of degrees of freedom for a chi-square test
 */
static uint16_t DegreesOfFreedom(DISTRIBUTION Distribution, uint16_t HistogramBuckets) {
  // per-distribution reduction in degrees of freedom (indexed by
  // DISTRIBUTION value)
  static const uint8_t DegreeOffsets[] = {3, 3, 1};
  uint16_t Adjusted = HistogramBuckets - DegreeOffsets[static_cast<int>(Distribution)];
  // round odd values up to even
  if (Odd(Adjusted)) {
    ++Adjusted;
  }
  return Adjusted;
} // DegreesOfFreedom
/**
 * Rescale every ExpectedCount histogram entry by
 * NewSampleCount/OldSampleCount so the histogram reflects the new
 * sample count.
 * @param Buckets histogram data structure to adjust
 * @param NewSampleCount new sample count to adjust to
 */
static void AdjustBuckets(BUCKETS *Buckets, uint32_t NewSampleCount) {
  const double AdjustFactor =
      static_cast<double>(NewSampleCount) / static_cast<double>(Buckets->SampleCount);
  for (int i = 0; i < Buckets->NumberOfBuckets; i++) {
    Buckets->ExpectedCount[i] *= AdjustFactor;
  }
  Buckets->SampleCount = NewSampleCount;
} // AdjustBuckets
/**
 * Reset the observed bucket counts in the specified histogram to zero.
 * @param Buckets histogram data structure to init
 */
static void InitBuckets(BUCKETS *Buckets) {
  for (int Bucket = 0; Bucket < Buckets->NumberOfBuckets; ++Bucket) {
    Buckets->Count[Bucket] = 0;
  }
} // InitBuckets
/**
 * List-search predicate: compare the Alpha fields of two pre-computed
 * chi-squared structures. Used when searching the cached chi-squared
 * lists for an entry matching a given alpha.
 *
 * @param arg1 chi-squared struct being tested for a match (CHISTRUCT *)
 * @param arg2 chi-squared struct that is the search key (CHISTRUCT *)
 * @return true if ChiStruct's Alpha matches SearchKey's Alpha
 */
static int AlphaMatch(void *arg1, // CHISTRUCT *ChiStruct,
                      void *arg2) { // CHISTRUCT *SearchKey)
  const auto *Candidate = static_cast<CHISTRUCT *>(arg1);
  const auto *Key = static_cast<CHISTRUCT *>(arg2);
  return Candidate->Alpha == Key->Alpha;
} // AlphaMatch
/**
 * This routine attempts to find an x value at which Function
 * goes to zero (i.e. a root of the function). It will only
 * work correctly if a solution actually exists and there
 * are no extrema between the solution and the InitialGuess.
 * The algorithms used are extremely primitive.
 *
 * @param Function function whose zero is to be found
 * @param FunctionParams arbitrary data to pass to function
 * @param InitialGuess point to start solution search at
 * @param Accuracy maximum allowed error
 * @return Solution of function (x for which f(x) = 0).
 */
static double Solve(SOLVEFUNC Function, void *FunctionParams, double InitialGuess, double Accuracy)
#define INITIALDELTA 0.1
#define DELTARATIO 0.1
{
  double x;
  double f;
  double Slope;
  double Delta;
  double NewDelta;
  double xDelta;
  double LastPosX, LastNegX; // tightest x seen so far with f >= 0 / f < 0
  x = InitialGuess;
  Delta = INITIALDELTA;
  LastPosX = FLT_MAX;
  LastNegX = -FLT_MAX;
  f = (*Function)(static_cast<CHISTRUCT *>(FunctionParams), x);
  // Newton-style iteration with a finite-difference slope; stop when the
  // bracketing bounds are within the requested accuracy
  while (Abs(LastPosX - LastNegX) > Accuracy) {
    // keep track of outer bounds of current estimate
    if (f < 0) {
      LastNegX = x;
    } else {
      LastPosX = x;
    }
    // compute the approx. slope of f(x) at the current point
    Slope = ((*Function)(static_cast<CHISTRUCT *>(FunctionParams), x + Delta) - f) / Delta;
    // compute the next solution guess */
    xDelta = f / Slope;
    x -= xDelta;
    // reduce the delta used for computing slope to be a fraction of
    // the amount moved to get to the new guess
    NewDelta = Abs(xDelta) * DELTARATIO;
    if (NewDelta < Delta) {
      Delta = NewDelta;
    }
    // compute the value of the function at the new guess
    f = (*Function)(static_cast<CHISTRUCT *>(FunctionParams), x);
  }
  return (x);
} // Solve
/**
 * Compute the area under a chi density curve from 0 to x, minus the
 * desired area under the curve (ChiParams->Alpha). The number of
 * degrees of freedom of the chi curve is taken from ChiParams. This
 * routine is intended to be passed to Solve() to find the chi-squared
 * value that yields a desired right-tail area, and only works for even
 * degrees of freedom. The series below comes from integrating the chi
 * density curve by parts.
 * @param ChiParams contains degrees of freedom and alpha
 * @param x value of chi-squared to evaluate
 * @return Error between actual and desired area under the chi curve.
 */
static double ChiArea(CHISTRUCT *ChiParams, double x) {
  const int Terms = ChiParams->DegreesOfFreedom / 2 - 1;
  // evaluate the series sum_{i=0..Terms} x^i / (2^i * i!)
  double SeriesTotal = 1;
  double Denominator = 1;
  double PowerOfx = 1;
  for (int i = 1; i <= Terms; i++) {
    Denominator *= 2 * i;
    PowerOfx *= x;
    SeriesTotal += PowerOfx / Denominator;
  }
  // right-tail area is SeriesTotal * e^(-x/2); return its error vs Alpha
  return SeriesTotal * exp(-0.5 * x) - ChiParams->Alpha;
} // ChiArea
/**
 * This routine looks at all samples in the specified cluster.
 * It computes a running estimate of the percentage of the
 * characters which have more than 1 sample in the cluster.
 * When this percentage exceeds MaxIllegal, true is returned.
 * Otherwise false is returned. The CharID
 * fields must contain integers which identify the training
 * characters which were used to generate the sample. One
 * integer is used for each sample. The NumChar field in
 * the Clusterer must contain the number of characters in the
 * training set. All CharID fields must be between 0 and
 * NumChar-1. The main function of this routine is to help
 * identify clusters which need to be split further, i.e. if
 * numerous training characters have 2 or more features which are
 * contained in the same cluster, then the cluster should be
 * split.
 *
 * @param Clusterer data structure holding cluster tree
 * @param Cluster cluster containing samples to be tested
 * @param MaxIllegal max percentage of samples allowed to have
 * more than 1 feature in the cluster
 * @return true if the cluster should be split, false otherwise.
 */
static bool MultipleCharSamples(CLUSTERER *Clusterer, CLUSTER *Cluster, float MaxIllegal)
#define ILLEGAL_CHAR 2
{
  // per-character tri-state flags, reused across calls:
  // 0 = not seen, 1 = seen once, ILLEGAL_CHAR (2) = seen more than once
  static std::vector<uint8_t> CharFlags;
  LIST SearchState;
  SAMPLE *Sample;
  int32_t CharID;
  int32_t NumCharInCluster;
  int32_t NumIllegalInCluster;
  float PercentIllegal;
  // initial estimate assumes that no illegal chars exist in the cluster
  NumCharInCluster = Cluster->SampleCount;
  NumIllegalInCluster = 0;
  // grow (never shrink) the flag buffer to cover all character ids
  if (Clusterer->NumChar > CharFlags.size()) {
    CharFlags.resize(Clusterer->NumChar);
  }
  // reset all flags to "not seen"
  for (auto &CharFlag : CharFlags) {
    CharFlag = false;
  }
  // find each sample in the cluster and check if we have seen it before
  InitSampleSearch(SearchState, Cluster);
  while ((Sample = NextSample(&SearchState)) != nullptr) {
    CharID = Sample->CharID;
    if (CharFlags[CharID] == false) {
      CharFlags[CharID] = true;
    } else {
      // duplicate: count the character as illegal only the first time
      // (flag equals true); later duplicates find ILLEGAL_CHAR instead
      if (CharFlags[CharID] == true) {
        NumIllegalInCluster++;
        CharFlags[CharID] = ILLEGAL_CHAR;
      }
      // each duplicate reduces the distinct-character count
      NumCharInCluster--;
      PercentIllegal = static_cast<float>(NumIllegalInCluster) / NumCharInCluster;
      if (PercentIllegal > MaxIllegal) {
        // abandon the walk early; release the search state before return
        destroy(SearchState);
        return true;
      }
    }
  }
  return false;
} // MultipleCharSamples
/**
 * Compute the inverse of a matrix using LU decomposition with partial
 * pivoting. input and inv are size x size matrices stored row-major in
 * flat arrays. The return value is the sum of norms of the off-diagonal
 * terms of the product of input and inv (a measure of the error).
 */
static double InvertMatrix(const float *input, int size, float *inv) {
  // Working matrices: u starts as a copy of input, l as the identity.
  std::vector<std::vector<double>> u(size, std::vector<double>(size, 0.0));
  std::vector<std::vector<double>> u_inv(size, std::vector<double>(size, 0.0));
  std::vector<std::vector<double>> l(size, std::vector<double>(size, 0.0));
  for (int row = 0; row < size; row++) {
    for (int col = 0; col < size; col++) {
      u[row][col] = input[row * size + col];
      l[row][col] = (row == col) ? 1.0 : 0.0;
    }
  }
  // Forward elimination with partial pivoting: reduce u to
  // upper-triangular form while accumulating the same row operations
  // (including row swaps) in l.
  for (int col = 0; col < size; ++col) {
    // Find the row with the largest magnitude in this column.
    int best_row = 0;
    double best_pivot = -1.0;
    for (int row = col; row < size; ++row) {
      const double candidate = std::fabs(u[row][col]);
      if (candidate > best_pivot) {
        best_pivot = candidate;
        best_row = row;
      }
    }
    // Exchange pivot rows.
    if (best_row != col) {
      u[best_row].swap(u[col]);
      l[best_row].swap(l[col]);
    }
    // Eliminate the column below the pivot.
    for (int row = col + 1; row < size; ++row) {
      const double ratio = -u[row][col] / u[col][col];
      for (int j = col; j < size; ++j) {
        u[row][j] += u[col][j] * ratio;
      }
      for (int k = 0; k < size; ++k) {
        l[row][k] += l[col][k] * ratio;
      }
    }
  }
  // Back substitution: invert the upper-triangular u.
  for (int col = 0; col < size; ++col) {
    u_inv[col][col] = 1.0 / u[col][col];
    for (int row = col - 1; row >= 0; --row) {
      double total = 0.0;
      for (int k = col; k > row; --k) {
        total += u[row][k] * u_inv[k][col];
      }
      u_inv[row][col] = -total / u[row][row];
    }
  }
  // The inverse of input is u_inv * l (row operations folded into l).
  for (int row = 0; row < size; row++) {
    for (int col = 0; col < size; col++) {
      double sum = 0.0;
      for (int k = row; k < size; ++k) {
        sum += u_inv[row][k] * l[k][col];
      }
      inv[row * size + col] = sum;
    }
  }
  // Error measure: sum of |off-diagonal| entries of input * inv.
  double error_sum = 0.0;
  for (int row = 0; row < size; row++) {
    for (int col = 0; col < size; col++) {
      double sum = 0.0;
      for (int k = 0; k < size; ++k) {
        sum += static_cast<double>(input[row * size + k]) * inv[k * size + col];
      }
      if (row != col) {
        error_sum += std::fabs(sum);
      }
    }
  }
  return error_sum;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/cluster.cpp
|
C++
|
apache-2.0
| 96,909
|
/******************************************************************************
** Filename: cluster.h
** Purpose: Definition of feature space clustering routines
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#ifndef CLUSTER_H
#define CLUSTER_H
#include "kdtree.h"
#include "oldlist.h"
namespace tesseract {
struct BUCKETS;
#define MINBUCKETS 5
#define MAXBUCKETS 39
/*----------------------------------------------------------------------
Types
----------------------------------------------------------------------*/
// A node in the binary cluster tree. Leaves are individual samples; interior
// nodes are merged clusters. Left/Right are owned and deleted recursively.
struct CLUSTER {
  // Construct an unclustered node with an n-dimensional mean and no
  // children. Every member is initialized so that destroying a node that
  // was never linked into a tree is safe (the destructor deletes
  // Left/Right, which previously were uninitialized garbage).
  CLUSTER(size_t n)
      : Clustered(false)
      , Prototype(false)
      , SampleCount(0)
      , Left(nullptr)
      , Right(nullptr)
      , CharID(0)
      , Mean(n) {
  }
  // NOTE: this type owns Left/Right but keeps the default copy operations;
  // copying a CLUSTER would double-delete. Do not copy.
  ~CLUSTER() {
    delete Left;
    delete Right;
  }
  bool Clustered : 1; // true if included in a higher cluster
  bool Prototype : 1; // true if cluster represented by a proto
  unsigned SampleCount : 30; // number of samples in this cluster
  CLUSTER *Left; // ptr to left sub-cluster
  CLUSTER *Right; // ptr to right sub-cluster
  int32_t CharID; // identifier of char sample came from
  std::vector<float> Mean; // mean of cluster - SampleSize floats
};
// A sample is just a leaf-level cluster, so the two names are interchangeable.
using SAMPLE = CLUSTER; // can refer to as either sample or cluster
// Shape of the prototypes the clusterer should produce; automatic lets the
// clusterer choose a style per cluster.
typedef enum { spherical, elliptical, mixed, automatic } PROTOSTYLE;
struct CLUSTERCONFIG { // parameters to control clustering
  PROTOSTYLE ProtoStyle; // specifies types of protos to be made
  float MinSamples; // min # of samples per proto - % of total
  float MaxIllegal; // max percentage of samples in a cluster which
  // have more than 1 feature in that cluster
  float Independence; // desired independence between dimensions
  double Confidence; // desired confidence in prototypes created
  int MagicSamples; // Ideal number of samples in a cluster.
};
// Statistical distribution assumed for a single feature dimension.
typedef enum { normal, uniform, D_random, DISTRIBUTION_COUNT } DISTRIBUTION;
// Per-proto scalar (Spherical) or per-dimension heap array (Elliptical);
// the active member is selected by the owning prototype's Style.
union FLOATUNION {
  float Spherical;
  float *Elliptical;
};
// Statistics of one cluster in the form used by the classifier. For
// elliptical protos the FLOATUNION members hold heap arrays of one float
// per dimension (see ReadPrototype for how they are filled).
struct PROTOTYPE {
  bool Significant : 1; // true if prototype is significant
  bool Merged : 1; // Merged after clustering so do not output
  // but kept for display purposes. If it has no
  // samples then it was actually merged.
  // Otherwise it matched an already significant
  // cluster.
  unsigned Style : 2; // spherical, elliptical, or mixed
  unsigned NumSamples : 28; // number of samples in the cluster
  CLUSTER *Cluster; // ptr to cluster which made prototype
  std::vector<DISTRIBUTION> Distrib; // different distribution for each dimension
  std::vector<float> Mean; // prototype mean
  float TotalMagnitude; // total magnitude over all dimensions
  float LogMagnitude; // log base e of TotalMagnitude
  FLOATUNION Variance; // prototype variance
  FLOATUNION Magnitude; // magnitude of density function
  FLOATUNION Weight; // weight of density function
  // NOTE(review): no constructor — the bitfields and scalars start
  // uninitialized, so every creator must set each field explicitly.
};
// Working state for one clustering run: the samples (indexed by a KD-tree),
// the cluster tree rooted at Root, and the prototypes derived from it.
struct CLUSTERER {
  int16_t SampleSize; // number of parameters per sample
  PARAM_DESC *ParamDesc; // description of each parameter
  int32_t NumberOfSamples; // total number of samples being clustered
  KDTREE *KDTree; // for optimal nearest neighbor searching
  CLUSTER *Root; // ptr to root cluster of cluster tree
  LIST ProtoList; // list of prototypes
  uint32_t NumChar; // # of characters represented by samples
  // cache of reusable histograms by distribution type and number of buckets.
  BUCKETS *bucket_cache[DISTRIBUTION_COUNT][MAXBUCKETS + 1 - MINBUCKETS];
};
// Fixed-capacity list of sample pointers. Sample[1] is the classic C
// "flexible array" idiom: the creator presumably over-allocates the struct
// to hold MaxNumSamples entries — confirm at the allocation site.
struct SAMPLELIST {
  int32_t NumSamples; // number of samples in list
  int32_t MaxNumSamples; // maximum size of list
  SAMPLE *Sample[1]; // array of ptrs to sample data structures
};
// low level cluster tree analysis routines.
#define InitSampleSearch(S, C) (((C) == nullptr) ? (S = NIL_LIST) : (S = push(NIL_LIST, (C))))
/*--------------------------------------------------------------------------
Public Function Prototypes
--------------------------------------------------------------------------*/
TESS_API
CLUSTERER *MakeClusterer(int16_t SampleSize, const PARAM_DESC ParamDesc[]);
TESS_API
SAMPLE *MakeSample(CLUSTERER *Clusterer, const float *Feature, uint32_t CharID);
TESS_API
LIST ClusterSamples(CLUSTERER *Clusterer, CLUSTERCONFIG *Config);
TESS_API
void FreeClusterer(CLUSTERER *Clusterer);
TESS_API
void FreeProtoList(LIST *ProtoList);
void FreePrototype(void *arg); // PROTOTYPE *Prototype);
CLUSTER *NextSample(LIST *SearchState);
float Mean(PROTOTYPE *Proto, uint16_t Dimension);
float StandardDeviation(PROTOTYPE *Proto, uint16_t Dimension);
TESS_API
int32_t MergeClusters(int16_t N, PARAM_DESC ParamDesc[], int32_t n1, int32_t n2, float m[],
float m1[], float m2[]);
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/cluster.h
|
C++
|
apache-2.0
| 5,517
|
/******************************************************************************
** Filename: clusttool.cpp
** Purpose: Misc. tools for use with the clustering routines
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*****************************************************************************/
#define _USE_MATH_DEFINES // for M_PI
#include "clusttool.h"
#include <cmath> // for M_PI, std::isnan
#include <locale> // for std::locale::classic
#include <sstream> // for std::stringstream
namespace tesseract {
//---------------Global Data Definitions and Declarations--------------------
#define TOKENSIZE 80 ///< max size of tokens read from an input file
#define QUOTED_TOKENSIZE "79"
#define MAXSAMPLESIZE 65535 ///< max num of dimensions in feature space
/**
 * This routine reads N floats from one line of the specified
 * text file and places them into Buffer.
 * @param fp open text file to read floats from
 * @param N number of floats to read
 * @param Buffer caller-supplied array with room for at least N floats
 * @return true if all N floats were read, false on EOF or parse failure
 * @note Globals: None
 */
static bool ReadNFloats(TFile *fp, uint16_t N, float Buffer[]) {
  const int kMaxLineSize = 1024;
  char line[kMaxLineSize];
  if (fp->FGets(line, kMaxLineSize) == nullptr) {
    tprintf("Hit EOF in ReadNFloats!\n");
    return false;
  }
  std::stringstream stream(line);
  // Use "C" locale (needed for float values Buffer[i]).
  stream.imbue(std::locale::classic());
  for (uint16_t i = 0; i < N; i++) {
    float f = NAN;
    stream >> f;
    // Since C++11 a failed extraction stores 0 (not the old NAN sentinel)
    // into f, so the stream state must be tested as well as the value.
    if (stream.fail() || std::isnan(f)) {
      tprintf("Read of %u floats failed!\n", N);
      return false;
    }
    Buffer[i] = f;
  }
  return true;
}
/**
 * Write a one-line text representation of the first N entries of Array to
 * File: each value as " %9.6f", terminated by a newline.
 * @param File open text file to write N floats to
 * @param N number of floats to write
 * @param Array array of floats to write
 */
static void WriteNFloats(FILE *File, uint16_t N, float Array[]) {
  const float *stop = Array + N;
  for (const float *value = Array; value != stop; ++value) {
    fprintf(File, " %9.6f", *value);
  }
  fprintf(File, "\n");
}
/**
 * Write the textual name of ProtoStyle ("spherical", "elliptical",
 * "mixed" or "automatic") to File, with no trailing newline.
 * @param File open text file to write prototype style to
 * @param ProtoStyle prototype style to write
 */
static void WriteProtoStyle(FILE *File, PROTOSTYLE ProtoStyle) {
  const char *name = nullptr;
  switch (ProtoStyle) {
    case spherical:
      name = "spherical";
      break;
    case elliptical:
      name = "elliptical";
      break;
    case mixed:
      name = "mixed";
      break;
    case automatic:
      name = "automatic";
      break;
  }
  if (name != nullptr) {
    fprintf(File, "%s", name);
  }
}
/**
 * Read one line containing a single integer from fp and validate that it
 * lies between 0 and MAXSAMPLESIZE (inclusive).
 * @param fp open text file to read sample size from
 * @return Sample size
 * @note Globals: None
 */
uint16_t ReadSampleSize(TFile *fp) {
  const int kMaxLineSize = 100;
  char line[kMaxLineSize];
  ASSERT_HOST(fp->FGets(line, kMaxLineSize) != nullptr);
  int size_read = 0;
  ASSERT_HOST(sscanf(line, "%d", &size_read) == 1);
  ASSERT_HOST(size_read >= 0 && size_read <= MAXSAMPLESIZE);
  return size_read;
}
/**
 * This routine reads textual descriptions of sets of parameters
 * which describe the characteristics of feature dimensions.
 *
 * Each of the N lines has the form (see WriteParamDesc):
 *   {circular|linear} {essential|non-essential} min max
 *
 * @param fp open text file to read N parameter descriptions from
 * @param N number of parameter descriptions to read
 * @return Pointer to an array of parameter descriptors.
 * @note Globals: None
 */
PARAM_DESC *ReadParamDesc(TFile *fp, uint16_t N) {
  auto ParamDesc = new PARAM_DESC[N];
  for (int i = 0; i < N; i++) {
    const int kMaxLineSize = TOKENSIZE * 4;
    char line[kMaxLineSize];
    ASSERT_HOST(fp->FGets(line, kMaxLineSize) != nullptr);
    std::istringstream stream(line);
    // Use "C" locale (needed for float values Min, Max).
    stream.imbue(std::locale::classic());
    std::string linear_token;
    stream >> linear_token;
    std::string essential_token;
    stream >> essential_token;
    stream >> ParamDesc[i].Min;
    stream >> ParamDesc[i].Max;
    ASSERT_HOST(!stream.fail());
    // Only the first letter of each keyword is significant.
    ParamDesc[i].Circular = (linear_token[0] == 'c');
    ParamDesc[i].NonEssential = (essential_token[0] != 'e');
    // Precompute the derived range values stored with the descriptor.
    ParamDesc[i].Range = ParamDesc[i].Max - ParamDesc[i].Min;
    ParamDesc[i].HalfRange = ParamDesc[i].Range / 2;
    ParamDesc[i].MidRange = (ParamDesc[i].Max + ParamDesc[i].Min) / 2;
  }
  return (ParamDesc);
}
/**
 * This routine reads a textual description of a prototype from
 * the specified file.
 *
 * @param fp open text file to read prototype from
 * @param N number of dimensions used in prototype
 * @return Newly allocated prototype, or nullptr if the description is
 *         missing, truncated or malformed.
 * @note Globals: None
 */
PROTOTYPE *ReadPrototype(TFile *fp, uint16_t N) {
  char sig_token[TOKENSIZE], shape_token[TOKENSIZE];
  int SampleCount;
  int i;
  const int kMaxLineSize = TOKENSIZE * 4;
  char line[kMaxLineSize];
  // Header line: significance keyword, style keyword and sample count.
  if (fp->FGets(line, kMaxLineSize) == nullptr ||
      sscanf(line, "%" QUOTED_TOKENSIZE "s %" QUOTED_TOKENSIZE "s %d", sig_token, shape_token,
             &SampleCount) != 3) {
    tprintf("Invalid prototype: %s\n", line);
    return nullptr;
  }
  auto Proto = new PROTOTYPE;
  Proto->Cluster = nullptr;
  Proto->Significant = (sig_token[0] == 's');
  // PROTOTYPE has no constructor, so initialize the remaining flag
  // explicitly instead of leaving it as uninitialized memory.
  Proto->Merged = false;
  switch (shape_token[0]) {
    case 's':
      Proto->Style = spherical;
      break;
    case 'e':
      Proto->Style = elliptical;
      break;
    case 'a':
      Proto->Style = automatic;
      break;
    default:
      tprintf("Invalid prototype style specification:%s\n", shape_token);
      Proto->Style = elliptical;
  }
  ASSERT_HOST(SampleCount >= 0);
  Proto->NumSamples = SampleCount;
  // Mean vector; fail cleanly on a truncated file instead of keeping a
  // prototype full of garbage values.
  Proto->Mean.resize(N);
  if (!ReadNFloats(fp, N, &Proto->Mean[0])) {
    delete Proto;
    return nullptr;
  }
  // Variance, and the magnitude/weight values derived from it.
  switch (Proto->Style) {
    case spherical:
      if (!ReadNFloats(fp, 1, &(Proto->Variance.Spherical))) {
        delete Proto;
        return nullptr;
      }
      Proto->Magnitude.Spherical = 1.0 / sqrt(2.0 * M_PI * Proto->Variance.Spherical);
      Proto->TotalMagnitude = std::pow(Proto->Magnitude.Spherical, static_cast<float>(N));
      Proto->LogMagnitude = log(static_cast<double>(Proto->TotalMagnitude));
      Proto->Weight.Spherical = 1.0 / Proto->Variance.Spherical;
      Proto->Distrib.clear();
      break;
    case elliptical:
      Proto->Variance.Elliptical = new float[N];
      if (!ReadNFloats(fp, N, Proto->Variance.Elliptical)) {
        delete[] Proto->Variance.Elliptical;
        delete Proto;
        return nullptr;
      }
      Proto->Magnitude.Elliptical = new float[N];
      Proto->Weight.Elliptical = new float[N];
      Proto->TotalMagnitude = 1.0;
      for (i = 0; i < N; i++) {
        Proto->Magnitude.Elliptical[i] = 1.0f / sqrt(2.0f * M_PI * Proto->Variance.Elliptical[i]);
        Proto->Weight.Elliptical[i] = 1.0f / Proto->Variance.Elliptical[i];
        Proto->TotalMagnitude *= Proto->Magnitude.Elliptical[i];
      }
      Proto->LogMagnitude = log(static_cast<double>(Proto->TotalMagnitude));
      Proto->Distrib.clear();
      break;
    default:
      // automatic (and any unexpected value) is not supported by this
      // reader and is rejected here.
      delete Proto;
      tprintf("Invalid prototype style\n");
      return nullptr;
  }
  return Proto;
}
/**
* This routine writes an array of dimension descriptors to
* the specified text file.
* @param File open text file to write param descriptors to
* @param N number of param descriptors to write
* @param ParamDesc array of param descriptors to write
*/
void WriteParamDesc(FILE *File, uint16_t N, const PARAM_DESC ParamDesc[]) {
int i;
for (i = 0; i < N; i++) {
if (ParamDesc[i].Circular) {
fprintf(File, "circular ");
} else {
fprintf(File, "linear ");
}
if (ParamDesc[i].NonEssential) {
fprintf(File, "non-essential ");
} else {
fprintf(File, "essential ");
}
fprintf(File, "%10.6f %10.6f\n", ParamDesc[i].Min, ParamDesc[i].Max);
}
}
/**
 * This routine writes a textual description of a prototype
 * to the specified text file, in the layout parsed by ReadPrototype
 * (except for the mixed style, which ReadPrototype does not accept).
 * @param File open text file to write prototype to
 * @param N number of dimensions in feature space
 * @param Proto prototype to write out
 */
void WritePrototype(FILE *File, uint16_t N, PROTOTYPE *Proto) {
  int i;
  if (Proto->Significant) {
    fprintf(File, "significant ");
  } else {
    fprintf(File, "insignificant ");
  }
  WriteProtoStyle(File, static_cast<PROTOSTYLE>(Proto->Style));
  fprintf(File, "%6d\n\t", Proto->NumSamples);
  WriteNFloats(File, N, &Proto->Mean[0]);
  fprintf(File, "\t");
  // The variance representation depends on the prototype style.
  switch (Proto->Style) {
    case spherical:
      WriteNFloats(File, 1, &(Proto->Variance.Spherical));
      break;
    case elliptical:
      WriteNFloats(File, N, Proto->Variance.Elliptical);
      break;
    case mixed:
      // For mixed protos: first one distribution name per dimension...
      for (i = 0; i < N; i++) {
        switch (Proto->Distrib[i]) {
          case normal:
            fprintf(File, " %9s", "normal");
            break;
          case uniform:
            fprintf(File, " %9s", "uniform");
            break;
          case D_random:
            fprintf(File, " %9s", "random");
            break;
          case DISTRIBUTION_COUNT:
            ASSERT_HOST(!"Distribution count not allowed!");
        }
      }
      fprintf(File, "\n\t");
      // ...then the per-dimension variances.
      WriteNFloats(File, N, Proto->Variance.Elliptical);
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/clusttool.cpp
|
C++
|
apache-2.0
| 9,930
|
/******************************************************************************
** Filename: clusttool.h
** Purpose: Definition of clustering utility tools
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef TESSERACT_CLASSIFY_CLUSTTOOL_H_
#define TESSERACT_CLASSIFY_CLUSTTOOL_H_
#include "cluster.h"
#include "serialis.h"
#include <cstdio>
namespace tesseract {
uint16_t ReadSampleSize(tesseract::TFile *fp);
PARAM_DESC *ReadParamDesc(tesseract::TFile *fp, uint16_t N);
PROTOTYPE *ReadPrototype(tesseract::TFile *fp, uint16_t N);
TESS_API
void WriteParamDesc(FILE *File, uint16_t N, const PARAM_DESC ParamDesc[]);
TESS_API
void WritePrototype(FILE *File, uint16_t N, PROTOTYPE *Proto);
} // namespace tesseract
#endif // TESSERACT_CLASSIFY_CLUSTTOOL_H_
|
2301_81045437/tesseract
|
src/classify/clusttool.h
|
C++
|
apache-2.0
| 1,439
|
/******************************************************************************
** Filename: cutoffs.c
** Purpose: Routines to manipulate an array of class cutoffs.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
/*----------------------------------------------------------------------------
Include Files and Type Defines
----------------------------------------------------------------------------*/
#include <cstdio>
#include <sstream> // for std::istringstream
#include <string> // for std::string
#include <tesseract/unichar.h>
#include "classify.h"
#include "helpers.h"
#include "serialis.h"
#define MAX_CUTOFF 1000
namespace tesseract {
/**
 * Open file, read in all of the class-id/cutoff pairs
 * and insert them into the Cutoffs array. Cutoffs are
 * indexed in the array by class id. Unused entries in the
 * array are set to an arbitrarily high cutoff value.
 * @param fp file containing cutoff definitions
 * @param Cutoffs array of MAX_NUM_CLASSES entries to put cutoffs into
 */
void Classify::ReadNewCutoffs(TFile *fp, uint16_t *Cutoffs) {
  int Cutoff;
  // When a shape table is present, its serialized cutoffs precede the
  // per-class text pairs in the same file.
  if (shape_table_ != nullptr) {
    if (!fp->DeSerialize(shapetable_cutoffs_)) {
      tprintf("Error during read of shapetable pffmtable!\n");
    }
  }
  // Default every class to the (arbitrarily high) maximum cutoff.
  for (int i = 0; i < MAX_NUM_CLASSES; i++) {
    Cutoffs[i] = MAX_CUTOFF;
  }
  const int kMaxLineSize = 100;
  char line[kMaxLineSize];
  // Each remaining line is "<unichar> <cutoff>"; stop at the first
  // malformed line or at EOF.
  while (fp->FGets(line, kMaxLineSize) != nullptr) {
    std::string Class;
    CLASS_ID ClassId;
    std::istringstream stream(line);
    stream.imbue(std::locale::classic());
    stream >> Class >> Cutoff;
    if (stream.fail()) {
      break;
    }
    // "NULL" stands in for the space character's class.
    if (Class.compare("NULL") == 0) {
      ClassId = unicharset.unichar_to_id(" ");
    } else {
      ClassId = unicharset.unichar_to_id(Class.c_str());
    }
    ASSERT_HOST(ClassId >= 0 && ClassId < MAX_NUM_CLASSES);
    Cutoffs[ClassId] = Cutoff;
  }
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/cutoffs.cpp
|
C++
|
apache-2.0
| 2,569
|
/******************************************************************************
** Filename: featdefs.cpp
** Purpose: Definitions of currently defined feature types.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "featdefs.h"
#include "picofeat.h" // for PicoFeatureLength
#include "scanutils.h"
#include <cstdio>
#include <cstring>
namespace tesseract {
#define PICO_FEATURE_LENGTH 0.05
/*-----------------------------------------------------------------------------
        Global Data Definitions and Declarations
-----------------------------------------------------------------------------*/
// Short names used to tag each feature set in the serialized format.
const char *const kMicroFeatureType = "mf";
const char *const kCNFeatureType = "cn";
const char *const kIntFeatureType = "if";
const char *const kGeoFeatureType = "tb";
// The StartParamDesc/DefineParam/EndParamDesc and DefineFeature macros
// below expand into the static parameter tables and FEATURE_DESC_STRUCT
// constants for each feature type (macros presumably come from
// ocrfeatures.h via featdefs.h — confirm).
// Define all of the parameters for the MicroFeature type.
StartParamDesc(MicroFeatureParams) DefineParam(0, 0, -0.5, 0.5) DefineParam(0, 0, -0.25, 0.75)
    DefineParam(0, 1, 0.0, 1.0) DefineParam(1, 0, 0.0, 1.0) DefineParam(0, 1, -0.5, 0.5)
    DefineParam(0, 1, -0.5, 0.5) EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(MicroFeatureDesc, 5, 1, kMicroFeatureType, MicroFeatureParams)
// Define all of the parameters for the NormFeat type.
StartParamDesc(CharNormParams) DefineParam(0, 0, -0.25, 0.75) DefineParam(0, 1, 0.0, 1.0)
    DefineParam(0, 0, 0.0, 1.0) DefineParam(0, 0, 0.0, 1.0) EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(CharNormDesc, 4, 0, kCNFeatureType, CharNormParams)
// Define all of the parameters for the IntFeature type
StartParamDesc(IntFeatParams) DefineParam(0, 0, 0.0, 255.0) DefineParam(0, 0, 0.0, 255.0)
    DefineParam(1, 0, 0.0, 255.0) EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(IntFeatDesc, 2, 1, kIntFeatureType, IntFeatParams)
// Define all of the parameters for the GeoFeature type
StartParamDesc(GeoFeatParams) DefineParam(0, 0, 0.0, 255.0) DefineParam(0, 0, 0.0, 255.0)
    DefineParam(0, 0, 0.0, 255.0) EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(GeoFeatDesc, 3, 0, kGeoFeatureType, GeoFeatParams)
// Other features used for training the adaptive classifier, but not used
// during normal training, therefore not in the DescDefs array.
// Define all of the parameters for the PicoFeature type
// define knob that can be used to adjust pico-feature length.
float PicoFeatureLength = PICO_FEATURE_LENGTH;
StartParamDesc(PicoFeatParams) DefineParam(0, 0, -0.25, 0.75) DefineParam(1, 0, 0.0, 1.0)
    DefineParam(0, 0, -0.5, 0.5) EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(PicoFeatDesc, 2, 1, "pf", PicoFeatParams)
// Define all of the parameters for the OutlineFeature type.
StartParamDesc(OutlineFeatParams) DefineParam(0, 0, -0.5, 0.5) DefineParam(0, 0, -0.25, 0.75)
    DefineParam(0, 0, 0.0, 1.0) DefineParam(1, 0, 0.0, 1.0) EndParamDesc
// Now define the feature type itself (see features.h for parameters).
DefineFeature(OutlineFeatDesc, 3, 1, "of", OutlineFeatParams)
// MUST be kept in-sync with ExtractorDefs in fxdefs.cpp.
static const FEATURE_DESC_STRUCT *DescDefs[NUM_FEATURE_TYPES] = {
    &MicroFeatureDesc, &CharNormDesc, &IntFeatDesc, &GeoFeatDesc};
/*-----------------------------------------------------------------------------
Public Code
-----------------------------------------------------------------------------*/
// Fill featuredefs with the full registry of feature-type descriptors,
// copied from DescDefs in order.
void InitFeatureDefs(FEATURE_DEFS_STRUCT *featuredefs) {
  featuredefs->NumFeatureTypes = NUM_FEATURE_TYPES;
  int slot = 0;
  for (const FEATURE_DESC_STRUCT *desc : DescDefs) {
    featuredefs->FeatureDesc[slot++] = desc;
  }
}
/*---------------------------------------------------------------------------*/
/**
 * Appends a textual representation of CharDesc to str: the number of
 * non-empty feature sets, then each non-empty set preceded by its
 * feature type's short name.
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @param str string to append CharDesc to
 * @param CharDesc character description to write to File
 */
void WriteCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs, CHAR_DESC_STRUCT *CharDesc, std::string &str) {
  // First pass: count the feature sets that will actually be written.
  int sets_to_write = 0;
  for (size_t type = 0; type < CharDesc->NumFeatureSets; type++) {
    if (CharDesc->FeatureSets[type] != nullptr) {
      ++sets_to_write;
    }
  }
  str += " " + std::to_string(sets_to_write);
  str += "\n";
  // Second pass: emit each present set, tagged with its short name.
  for (size_t type = 0; type < CharDesc->NumFeatureSets; type++) {
    if (CharDesc->FeatureSets[type] != nullptr) {
      str += FeatureDefs.FeatureDesc[type]->ShortName;
      str += " ";
      WriteFeatureSet(CharDesc->FeatureSets[type], str);
    }
  }
} /* WriteCharDescription */
// Return whether all of the fields of the given feature set
// are well defined (not inf or nan). Also requires that every feature
// set be present and that at least one finite parameter was seen.
bool ValidCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs, CHAR_DESC_STRUCT *CharDesc) {
  bool any_finite = false;
  bool all_finite = true;
  for (size_t type = 0; type < CharDesc->NumFeatureSets; type++) {
    if (!CharDesc->FeatureSets[type]) {
      // A missing feature set makes the whole description invalid.
      return false;
    }
    for (int f = 0; f < CharDesc->FeatureSets[type]->NumFeatures; f++) {
      FEATURE feat = CharDesc->FeatureSets[type]->Features[f];
      for (int p = 0; p < feat->Type->NumParams; p++) {
        const bool bad = std::isnan(feat->Params[p]) || std::isinf(feat->Params[p]);
        all_finite = all_finite && !bad;
        any_finite = any_finite || !bad;
      }
    }
  }
  return any_finite && all_finite;
} /* ValidCharDescription */
/*---------------------------------------------------------------------------*/
/**
 * Read a character description from File, and return
 * a data structure containing this information. The data
 * is formatted as follows:
 * @verbatim
     NumberOfSets
     ShortNameForSet1 Set1
     ShortNameForSet2 Set2
     ...
   @endverbatim
 *
 * Globals:
 * - none
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @param File open text file to read character description from
 * @return Character description read from File.
 */
CHAR_DESC_STRUCT *ReadCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs, FILE *File) {
  int NumSetsToRead;
  char ShortName[FEAT_NAME_SIZE];
  int Type;
  ASSERT_HOST(tfscanf(File, "%d", &NumSetsToRead) == 1);
  ASSERT_HOST(NumSetsToRead >= 0);
  ASSERT_HOST(NumSetsToRead <= FeatureDefs.NumFeatureTypes);
  auto CharDesc = new CHAR_DESC_STRUCT(FeatureDefs);
  for (; NumSetsToRead > 0; NumSetsToRead--) {
    // Check the read: previously a truncated file passed an
    // uninitialized/stale buffer on to ShortNameToFeatureType.
    ASSERT_HOST(tfscanf(File, "%s", ShortName) == 1);
    Type = ShortNameToFeatureType(FeatureDefs, ShortName);
    CharDesc->FeatureSets[Type] = ReadFeatureSet(File, FeatureDefs.FeatureDesc[Type]);
  }
  return CharDesc;
}
/*---------------------------------------------------------------------------*/
/**
 * Search through all features currently defined and return
 * the feature type for the feature with the specified short
 * name. Trap an error if the specified name is not found.
 *
 * Globals:
 * - none
 *
 * @param FeatureDefs definitions of feature types/extractors
 * @param ShortName short name of a feature type
 * @return Feature type which corresponds to ShortName.
 */
uint32_t ShortNameToFeatureType(const FEATURE_DEFS_STRUCT &FeatureDefs, const char *ShortName) {
  for (int type = 0; type < FeatureDefs.NumFeatureTypes; ++type) {
    if (strcmp(FeatureDefs.FeatureDesc[type]->ShortName, ShortName) == 0) {
      return static_cast<uint32_t>(type);
    }
  }
  // Reaching here means the name matched no registered feature type.
  ASSERT_HOST(!"Illegal short name for a feature");
  return 0;
}
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/featdefs.cpp
|
C++
|
apache-2.0
| 8,717
|
/******************************************************************************
** Filename: featdefs.h
** Purpose: Definitions of currently defined feature types.
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#ifndef FEATDEFS_H
#define FEATDEFS_H
#include "ocrfeatures.h"
#include <array> // for std::array
#include <string> // for std::string
namespace tesseract {
/* Enumerate the different types of features currently defined. */
#define NUM_FEATURE_TYPES 4
extern TESS_API const char *const kMicroFeatureType;
extern TESS_API const char *const kCNFeatureType;
extern TESS_API const char *const kIntFeatureType;
extern TESS_API const char *const kGeoFeatureType;
/* A character is described by multiple sets of extracted features. Each
set contains a number of features of a particular type, for example, a
set of bays, or a set of closures, or a set of microfeatures. Each
feature consists of a number of parameters. All features within a
feature set contain the same number of parameters.*/
// Registry of the feature extractors in use: a count plus one descriptor
// per feature type (filled in by InitFeatureDefs).
struct FEATURE_DEFS_STRUCT {
  int32_t NumFeatureTypes;
  const FEATURE_DESC_STRUCT *FeatureDesc[NUM_FEATURE_TYPES];
};
// Legacy pointer alias for the registry.
using FEATURE_DEFS = FEATURE_DEFS_STRUCT *;
struct CHAR_DESC_STRUCT {
/// Allocate a new character description, initialize its
/// feature sets to be empty, and return it.
CHAR_DESC_STRUCT(const FEATURE_DEFS_STRUCT &FeatureDefs) {
NumFeatureSets = FeatureDefs.NumFeatureTypes;
}
/// Release the memory consumed by the specified character
/// description and all of the features in that description.
~CHAR_DESC_STRUCT() {
for (size_t i = 0; i < NumFeatureSets; i++) {
delete FeatureSets[i];
}
}
uint32_t NumFeatureSets;
std::array<FEATURE_SET_STRUCT *, NUM_FEATURE_TYPES> FeatureSets;
};
/*----------------------------------------------------------------------
Generic functions for manipulating character descriptions
----------------------------------------------------------------------*/
TESS_API
void InitFeatureDefs(FEATURE_DEFS_STRUCT *featuredefs);
bool ValidCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs, CHAR_DESC_STRUCT *CharDesc);
void WriteCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs, CHAR_DESC_STRUCT *CharDesc, std::string &str);
TESS_API
CHAR_DESC_STRUCT *ReadCharDescription(const FEATURE_DEFS_STRUCT &FeatureDefs, FILE *File);
TESS_API
uint32_t ShortNameToFeatureType(const FEATURE_DEFS_STRUCT &FeatureDefs, const char *ShortName);
/**----------------------------------------------------------------------------
Global Data Definitions and Declarations
----------------------------------------------------------------------------**/
extern const FEATURE_DESC_STRUCT MicroFeatureDesc;
extern TESS_API const FEATURE_DESC_STRUCT PicoFeatDesc;
extern const FEATURE_DESC_STRUCT CharNormDesc;
extern const FEATURE_DESC_STRUCT OutlineFeatDesc;
extern const FEATURE_DESC_STRUCT IntFeatDesc;
extern const FEATURE_DESC_STRUCT GeoFeatDesc;
} // namespace tesseract
#endif
|
2301_81045437/tesseract
|
src/classify/featdefs.h
|
C++
|
apache-2.0
| 3,675
|
/******************************************************************************
** Filename: float2int.cpp
** Purpose: Routines for converting float features to int features
** Author: Dan Johnson
**
** (c) Copyright Hewlett-Packard Company, 1988.
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
** http://www.apache.org/licenses/LICENSE-2.0
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
******************************************************************************/
#include "float2int.h"
#include "classify.h"
#include "mfoutline.h"
#include "normmatch.h"
#include "picofeat.h"
#include "helpers.h"
#define MAX_INT_CHAR_NORM (INT_CHAR_NORM_RANGE - 1)
/*---------------------------------------------------------------------------*/
namespace tesseract {
/**
 * For each class in the unicharset, clears the corresponding
 * entry in char_norm_array. char_norm_array is indexed by unichar_id.
 *
 * Globals:
 * - none
 *
 * @param char_norm_array array of unicharset.size() entries to be cleared
 */
void Classify::ClearCharNormArray(uint8_t *char_norm_array) {
  // One byte per unichar id; zero them all in a single memset.
  memset(char_norm_array, 0, sizeof(*char_norm_array) * unicharset.size());
} /* ClearCharNormArray */
/*---------------------------------------------------------------------------*/
/**
 * For every class in the unicharset, scores norm_feature against that
 * class's normalization protos, scales the match into [0, MAX_INT_CHAR_NORM],
 * and stores it in char_norm_array (indexed by unichar_id). Classes that
 * have no built-in template are assigned the worst possible match.
 *
 * Globals:
 * - PreTrainedTemplates current set of built-in templates
 *
 * @param norm_feature character normalization feature
 * @param[out] char_norm_array place to put results of size unicharset.size()
 */
void Classify::ComputeIntCharNormArray(const FEATURE_STRUCT &norm_feature,
                                       uint8_t *char_norm_array) {
  const auto num_templates = PreTrainedTemplates->NumClasses;
  for (unsigned id = 0; id < unicharset.size(); ++id) {
    if (id >= num_templates) {
      // No template exists for this class (e.g. ambigs & ligatures):
      // default to the worst match.
      char_norm_array[id] = MAX_INT_CHAR_NORM;
    } else {
      const float match = ComputeNormMatch(id, norm_feature, false);
      const int scaled = static_cast<int>(INT_CHAR_NORM_RANGE * match);
      char_norm_array[id] = ClipToRange(scaled, 0, MAX_INT_CHAR_NORM);
    }
  }
} /* ComputeIntCharNormArray */
/*---------------------------------------------------------------------------*/
/**
 * Converts each floating point pico-feature in Features into the
 * integer representation and writes it into IntFeatures. X, Y and
 * direction are quantized into INT_FEAT_RANGE buckets; the Y shift
 * depends on whether baseline normalization is in effect.
 *
 * Globals:
 * - none
 *
 * @param Features floating point pico-features to be converted
 * @param[out] IntFeatures array to put converted features into
 */
void Classify::ComputeIntFeatures(FEATURE_SET Features, INT_FEATURE_ARRAY IntFeatures) {
  // Baseline normalization uses a different vertical origin.
  const float y_shift = (classify_norm_method == baseline) ? BASELINE_Y_SHIFT : Y_SHIFT;
  for (int index = 0; index < Features->NumFeatures; ++index) {
    const FEATURE src = Features->Features[index];
    auto &dest = IntFeatures[index];
    dest.X = Bucket8For(src->Params[PicoFeatX], X_SHIFT, INT_FEAT_RANGE);
    dest.Y = Bucket8For(src->Params[PicoFeatY], y_shift, INT_FEAT_RANGE);
    dest.Theta = CircBucketFor(src->Params[PicoFeatDir], ANGLE_SHIFT, INT_FEAT_RANGE);
    dest.CP_misses = 0;
  }
} /* ComputeIntFeatures */
} // namespace tesseract
|
2301_81045437/tesseract
|
src/classify/float2int.cpp
|
C++
|
apache-2.0
| 3,830
|