id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27,300 | list_table.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/list_table.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
class ListTable:
    """
    Parse the list table line. Make a string. Form a dictionary.
    Return the string and the dictionary.
    """
    def __init__(
        self,
        bug_handler,
        run_level=1,
    ):
        """
        Required:
            bug_handler -- exception raised on unrecoverable errors
        Optional:
            run_level -- debugging/strictness level; > 3 raises on
                unrecognized tokens after an open bracket
        """
        self.__bug_handler = bug_handler
        self.__run_level = run_level
        self.__initiate_values()
    def __initiate_values(self):
        """
        Initialize the state machine and the token lookup tables.
        """
        self.__list_table_final = ''
        self.__state = 'default'
        self.__final_dict = {}
        self.__list_dict = {}
        self.__all_lists = []
        self.__level_text_string = ''
        self.__level_text_list = []
        self.__found_level_text_length = 0
        self.__level_text_position = None
        self.__prefix_string = None
        self.__level_numbers_string = ''
        # current state => handler for one token line
        self.__state_dict = {
            'default' : self.__default_func,
            'level' : self.__level_func,
            'list' : self.__list_func,
            'unsure_ob' : self.__after_bracket_func,
            'level_number' : self.__level_number_func,
            'level_text' : self.__level_text_func,
            'list_name' : self.__list_name_func,
        }
        # tokens recognized inside a list group => attribute name
        self.__main_list_dict = {
            'cw<ls<ls-tem-id_' : 'list-template-id',
            'cw<ls<list-hybri' : 'list-hybrid',
            'cw<ls<lis-tbl-id' : 'list-table-id',
        }
        # tokens recognized inside a level group => attribute name
        self.__level_dict = {
            'cw<ls<level-star' : 'list-number-start',
            'cw<ls<level-spac' : 'list-space',
            'cw<ls<level-inde' : 'level-indent',
            'cw<ls<fir-ln-ind' : 'first-line-indent',
            'cw<ls<left-inden' : 'left-indent',
            'cw<ls<tab-stop__' : 'tabs',
            'cw<ls<level-type' : 'numbering-type',
            'cw<pf<right-inde' : 'right-indent',
            'cw<pf<left-inden' : 'left-indent',
            'cw<pf<fir-ln-ind' : 'first-line-indent',
            'cw<ci<italics___' : 'italics',
            'cw<ci<bold______' : 'bold',
            'cw<ss<para-style' : 'paragraph-style-name',
        }
        # Shape of self.__all_lists, roughly:
        #   [ [ {list attributes}, [ {level attributes} ], ... ], ... ]
        # i.e. each list is itself a list whose first item is the list's
        # attribute dictionary, followed by one inner list per level.
    def __parse_lines(self, line):
        """
        Required : line --line to parse
        Returns: nothing
        Logic:
            Split the lines into a list by a new line. Process the line
            according to the state.
        """
        lines = line.split('\n')
        self.__ob_count = 0
        self.__ob_group = 0
        for line in lines:
            self.__token_info = line[:16]
            if self.__token_info == 'ob<nu<open-brack':
                # bracket tokens carry their count in the trailing 4 chars
                self.__ob_count = line[-4:]
                self.__ob_group += 1
            if self.__token_info == 'cb<nu<clos-brack':
                self.__cb_count = line[-4:]
                self.__ob_group -= 1
            action = self.__state_dict.get(self.__state)
            if action is None:
                # Unknown state: report and skip the line instead of
                # crashing on a None call (previous behavior).
                print(self.__state)
                continue
            action(line)
        self.__write_final_string()
        # self.__add_to_final_line()
    def __default_func(self, line):
        """
        Requires: line --line to process
        Return: nothing
        Logic:
            This state is used at the start and end of a list. Look for an
            opening bracket, which marks the change of state.
        """
        if self.__token_info == 'ob<nu<open-brack':
            self.__state = 'unsure_ob'
    def __found_list_func(self, line):
        """
        Requires: line -- line to process
        Returns: nothing
        Logic:
            I have found \\list.
            Change the state to list
            Get the open bracket count so you know when this state ends.
            Append an empty list to all lists.
            Create a temporary dictionary. This dictionary has the key of
            "list-id" and the value of an empty list. Later, this empty list
            will be filled with all the ids for which the formatting is valid.
            Append the temporary dictionary to the new list.
        """
        self.__state = 'list'
        self.__list_ob_count = self.__ob_count
        self.__all_lists.append([])
        the_dict = {'list-id': []}
        self.__all_lists[-1].append(the_dict)
    def __list_func(self, line):
        """
        Requires: line --line to process
        Returns: nothing
        Logic:
            This method is called when you are in a list, but outside of a level.
            Check for the end of the list. Otherwise, use the self.__main_list_dict
            to determine if you need to add a line's values to the main list.
        """
        if self.__token_info == 'cb<nu<clos-brack' and\
            self.__cb_count == self.__list_ob_count:
            self.__state = 'default'
        elif self.__token_info == 'ob<nu<open-brack':
            self.__state = 'unsure_ob'
        else:
            att = self.__main_list_dict.get(self.__token_info)
            if att:
                value = line[20:]
                # dictionary is always the first item in the last list
                # [{att:value}, [], [att:value, []]
                self.__all_lists[-1][0][att] = value
    def __found_level_func(self, line):
        """
        Requires: line -- line to process
        Returns: nothing
        Logic:
            I have found \\listlevel.
            Change the state to level
            Get the open bracket count so you know when this state ends.
            Append an empty list to the last list inside all lists.
            Create a temporary dictionary.
            Append the temporary dictionary to the new list.
            self.__all_lists now looks like:
                [[{list-id:[]}, [{}]]]
            Where:
                self.__all_lists[-1] => a list. The first item is a dictionary.
                The second item is a list containing a dictionary:
                [{list-id:[]}, [{}]]
                self.__all_lists[-1][0] => a dictionary of the list attributes
                self.__all_lists[-1][-1] => a list with just a dictionary
                self.__all_lists[-1][-1][0] => the dictionary of level attributes
        """
        self.__state = 'level'
        self.__level_ob_count = self.__ob_count
        self.__all_lists[-1].append([])
        the_dict = {}
        self.__all_lists[-1][-1].append(the_dict)
    def __level_func(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing
        Logic:
            Look for the end of the this group.
            Change states if an open bracket is found.
            Add attributes to all_dicts if an appropriate token is found.
        """
        if self.__token_info == 'cb<nu<clos-brack' and\
            self.__cb_count == self.__level_ob_count:
            self.__state = 'list'
        elif self.__token_info == 'ob<nu<open-brack':
            self.__state = 'unsure_ob'
        else:
            att = self.__level_dict.get(self.__token_info)
            if att:
                value = line[20:]
                self.__all_lists[-1][-1][0][att] = value
    def __level_number_func(self, line):
        """
        Requires:
            line -- line to process
        Returns:
            nothing
        Logic:
            Check for the end of the group; when found, store the
            accumulated text as the 'level-numbers' attribute of the
            current level and reset the accumulator.
            Otherwise accumulate hexadecimal tokens (escaped with \\')
            and plain text tokens into self.__level_numbers_string.
        """
        if self.__token_info == 'cb<nu<clos-brack' and\
            self.__cb_count == self.__level_number_ob_count:
            self.__state = 'level'
            self.__all_lists[-1][-1][0]['level-numbers'] = self.__level_numbers_string
            self.__level_numbers_string = ''
        elif self.__token_info == 'tx<hx<__________':
            # re-escape the hex byte as an RTF \' control for the output
            self.__level_numbers_string += '\\\'%s' % line[18:]
        elif self.__token_info == 'tx<nu<__________':
            self.__level_numbers_string += line[17:]
    def __level_text_func(self, line):
        """
        Requires:
            line --line to process
        Returns:
            nothing
        Logic:
            Check for the end of the group.
            Otherwise, if the text is hexadecimal, call on the method
            __parse_level_text_length.
            Otherwise, if the text is regular text, create an attribute.
            This attribute indicates the punctuation after a certain level.
            An example is "level1-marker = '.'"
            Otherwise, check for a level-template-id.
        """
        if self.__token_info == 'cb<nu<clos-brack' and\
            self.__cb_count == self.__level_text_ob_count:
            if self.__prefix_string:
                if self.__all_lists[-1][-1][0]['numbering-type'] == 'bullet':
                    self.__prefix_string = self.__prefix_string.replace('_', '')
                    self.__all_lists[-1][-1][0]['bullet-type'] = self.__prefix_string
            self.__state = 'level'
            # self.__figure_level_text_func()
            self.__level_text_string = ''
            self.__found_level_text_length = 0
        elif self.__token_info == 'tx<hx<__________':
            self.__parse_level_text_length(line)
        elif self.__token_info == 'tx<nu<__________':
            text = line[17:]
            if text and text[-1] == ';':
                text = text.replace(';', '')
            if not self.__level_text_position:
                self.__prefix_string = text
            else:
                self.__all_lists[-1][-1][0][self.__level_text_position] = text
        elif self.__token_info == 'cw<ls<lv-tem-id_':
            value = line[20:]
            self.__all_lists[-1][-1][0]['level-template-id'] = value
    def __parse_level_text_length(self, line):
        """
        Requires:
            line --line with hexadecimal number
        Returns:
            nothing
        Logic:
            Method is used for to parse text in the \\leveltext group.
            The first hex number gives the total text length; subsequent
            numbers give the level a piece of punctuation follows.
        """
        num = line[18:]
        the_num = int(num, 16)
        if not self.__found_level_text_length:
            self.__all_lists[-1][-1][0]['list-text-length'] = str(the_num)
            self.__found_level_text_length = 1
        else:
            the_num += 1
            the_string = str(the_num)
            level_marker = 'level%s-suffix' % the_string
            show_marker = 'show-level%s' % the_string
            self.__level_text_position = level_marker
            self.__all_lists[-1][-1][0][show_marker] = 'true'
            if self.__prefix_string:
                prefix_marker = 'level%s-prefix' % the_string
                self.__all_lists[-1][-1][0][prefix_marker] = self.__prefix_string
                self.__prefix_string = None
    def __list_name_func(self, line):
        """
        Requires:
            line --line to process
        Returns:
            nothing
        Logic:
            Simply check for the end of the group and change states.
        """
        if self.__token_info == 'cb<nu<clos-brack' and\
            self.__cb_count == self.__list_name_ob_count:
            self.__state = 'list'
    def __after_bracket_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing.
        Logic:
            The last token found was "{". This method determines what group
            you are now in.
            WARNING: this could cause problems. If no group is found, the state will remain
            unsure_ob, which means no other text will be parsed.
        """
        if self.__token_info == 'cw<ls<level-text':
            self.__state = 'level_text'
            self.__level_text_ob_count = self.__ob_count
        elif self.__token_info == 'cw<ls<level-numb':
            self.__level_number_ob_count = self.__ob_count
            self.__state = 'level_number'
        elif self.__token_info == 'cw<ls<list-tb-le':
            self.__found_level_func(line)
        elif self.__token_info == 'cw<ls<list-in-tb':
            self.__found_list_func(line)
        elif self.__token_info == 'cw<ls<list-name_':
            self.__state = 'list_name'
            self.__list_name_ob_count = self.__ob_count
        else:
            if self.__run_level > 3:
                msg = 'No matching token after open bracket\n'
                msg += 'token is "%s\n"' % (line)
                # NOTE(review): msg is built for context but bug_handler
                # is raised without it, matching the rest of the package.
                raise self.__bug_handler
    def __add_to_final_line(self):
        """
        Method no longer used.
        """
        self.__list_table_final = 'mi<mk<listabbeg_\n'
        self.__list_table_final += 'mi<tg<open______<list-table\n' + \
            'mi<mk<listab-beg\n' + self.__list_table_final
        self.__list_table_final += \
            'mi<mk<listab-end\n' + 'mi<tg<close_____<list-table\n'
        self.__list_table_final += 'mi<mk<listabend_\n'
    def __write_final_string(self):
        """
        Requires:
            nothing
        Returns:
            nothing
        Logic:
            Write out the list-table start tag.
            Iterate through self.__all_lists. For each list, write out
            a list-in-table tag. Get the dictionary of this list
            (the first item). Print out the key => value pair.
            Remove the first item (the dictionary) form this list. Now iterate
            through what is left in the list. Each list will contain one item,
            a dictionary. Get this dictionary and print out key => value pair.
        """
        not_allow = ['list-id',]
        list_index = 0
        self.__list_table_final = 'mi<mk<listabbeg_\n'
        # NOTE(review): appending the existing string here repeats the
        # 'mi<mk<listabbeg_' marker after 'mi<mk<listab-beg'; preserved
        # because downstream stages expect this exact output.
        self.__list_table_final += 'mi<tg<open______<list-table\n' + \
            'mi<mk<listab-beg\n' + self.__list_table_final
        for cur_list in self.__all_lists:
            list_index += 1
            self.__list_table_final += 'mi<tg<open-att__<list-in-table'
            # self.__list_table_final += '<list-id>%s' % (str(list_index))
            the_dict = cur_list[0]
            for the_key in the_dict:
                if the_key in not_allow:
                    continue
                att = the_key
                value = the_dict[att]
                self.__list_table_final += f'<{att}>{value}'
            self.__list_table_final += '\n'
            levels = cur_list[1:]
            level_num = 0
            for level in levels:
                level_num += 1
                self.__list_table_final += 'mi<tg<empty-att_<level-in-table'
                self.__list_table_final += '<level>%s' % (str(level_num))
                the_dict2 = level[0]
                is_bullet = 0
                bullet_text = ''
                for the_key2 in the_dict2:
                    if the_key2 in not_allow:
                        continue
                    test_bullet = the_dict2.get('numbering-type')
                    if test_bullet == 'bullet':
                        is_bullet = 1
                    att2 = the_key2
                    value2 = the_dict2[att2]
                    # sys.stderr.write('%s\n' % att2[0:10])
                    if att2[0:10] == 'show-level' and is_bullet:
                        # sys.stderr.write('No print %s\n' % att2)
                        pass
                    elif att2[-6:] == 'suffix' and is_bullet:
                        # sys.stderr.write('%s\n' % att2)
                        bullet_text += value2
                    elif att2[-6:] == 'prefix' and is_bullet:
                        # sys.stderr.write('%s\n' % att2)
                        bullet_text += value2
                    else:
                        self.__list_table_final += f'<{att2}>{value2}'
                if is_bullet:
                    pass
                    # self.__list_table_final += '<bullet-type>%s' % (bullet_text)
                self.__list_table_final += '\n'
            self.__list_table_final += 'mi<tg<close_____<list-in-table\n'
        self.__list_table_final += \
            'mi<mk<listab-end\n' + 'mi<tg<close_____<list-table\n'
        self.__list_table_final += 'mi<mk<listabend_\n'
    def parse_list_table(self, line):
        """
        Requires:
            line -- line with border definition in it
        Returns:
            A string and the dictionary of list-table values and attributes.
        Logic:
            Call on the __parse_lines method, which splits the text string into
            lines (which will be tokens) and processes them.
        """
        self.__parse_lines(line)
        return self.__list_table_final, self.__all_lists
| 18,220 | Python | .py | 426 | 30.85446 | 91 | 0.487455 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,301 | pict.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/pict.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Pict:
    """Process graphic information: strip embedded \\pict groups out of the
    token stream and write them to a separate picts.rtf file in a sibling
    '<name>_rtf_pict_dir' directory."""
    def __init__(self,
            in_file,
            bug_handler,
            out_file,
            copy=None,
            orig_file=None,
            run_level=1,
        ):
        """
        Required:
            in_file -- tokenized file to read
            bug_handler -- exception raised on unrecoverable errors
            out_file -- final output file (used to locate the pict dir)
        Optional:
            copy -- whether to keep debugging copies of intermediate files
            orig_file -- original RTF file (names the pict directory)
            run_level -- debugging verbosity
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        self.__write_to = better_mktemp()
        self.__bracket_count = 0
        self.__ob_count = 0
        self.__cb_count = 0
        self.__pict_count = 0
        self.__in_pict = False
        self.__already_found_pict = False
        self.__orig_file = orig_file
        self.__initiate_pict_dict()
        self.__out_file = out_file
    def __initiate_pict_dict(self):
        # token => function returning the text to copy into picts.rtf
        self.__pict_dict = {
            'ob<nu<open-brack' : self.__open_br_func,
            'cb<nu<clos-brack' : self.__close_br_func,
            'tx<nu<__________' : self.__text_func,
        }
    def __open_br_func(self, line):
        """Re-emit an open bracket as literal RTF."""
        return "{\n"
    def __close_br_func(self, line):
        """Re-emit a close bracket as literal RTF."""
        return "}\n"
    def __text_func(self, line):
        # tx<nu<__________<true text
        return line[17:]
    def __make_dir(self):
        """ Make a directory to put the image data in"""
        base_name = os.path.basename(getattr(self.__orig_file, 'name',
            self.__orig_file))
        base_name = os.path.splitext(base_name)[0]
        if self.__out_file:
            dir_name = os.path.dirname(getattr(self.__out_file, 'name',
                self.__out_file))
        else:
            dir_name = os.path.dirname(self.__orig_file)
        self.__dir_name = base_name + "_rtf_pict_dir/"
        self.__dir_name = os.path.join(dir_name, self.__dir_name)
        if not os.path.isdir(self.__dir_name):
            try:
                os.mkdir(self.__dir_name)
            except OSError as msg:
                msg = f"{str(msg)}Couldn't make directory '{self.__dir_name}':\n"
                # Report the reason before bailing out (previously the
                # message was built but never shown anywhere).
                sys.stderr.write(msg)
                raise self.__bug_handler
        else:
            # Reuse of an existing directory: clear out stale pict files.
            if self.__run_level > 1:
                sys.stderr.write('Removing files from old pict directory...\n')
            all_files = os.listdir(self.__dir_name)
            for the_file in all_files:
                the_file = os.path.join(self.__dir_name, the_file)
                try:
                    os.remove(the_file)
                except OSError:
                    pass
            if self.__run_level > 1:
                sys.stderr.write('Files removed.\n')
    def __create_pict_file(self):
        """Create a file for all the pict data to be written to.
        """
        self.__pict_file = os.path.join(self.__dir_name, 'picts.rtf')
        self.__write_pic_obj = open_for_write(self.__pict_file, append=True)
    def __in_pict_func(self, line):
        """Copy one line of pict data; return True once the group closes."""
        if self.__cb_count == self.__pict_br_count:
            self.__in_pict = False
            self.__write_pic_obj.write("}\n")
            return True
        else:
            action = self.__pict_dict.get(self.__token_info)
            if action:
                self.__write_pic_obj.write(action(line))
            return False
    def __default(self, line, write_obj):
        """Determine if each token marks the beginning of pict data.
        If it does, create a new file to write data to (if that file
        has not already been created.) Set the self.__in_pict flag to true.
        If the line does not contain pict data, return True so the caller
        copies it through unchanged.
        """
        if self.__token_info == 'cw<gr<picture___':
            self.__pict_count += 1
            # write_obj.write("mi<tg<em<at<pict<num>%03d\n" % self.__pict_count)
            write_obj.write('mi<mk<pict-start\n')
            write_obj.write('mi<tg<empty-att_<pict<num>%03d\n' % self.__pict_count)
            write_obj.write('mi<mk<pict-end__\n')
            if not self.__already_found_pict:
                self.__create_pict_file()
                self.__already_found_pict = True
                self.__print_rtf_header()
            self.__in_pict = True
            self.__pict_br_count = self.__ob_count
            self.__cb_count = 0
            self.__write_pic_obj.write("{\\pict\n")
            return False
        return True
    def __print_rtf_header(self):
        """Print to pict file the necessary RTF data for the file to be
        recognized as an RTF file.
        """
        self.__write_pic_obj.write("{\\rtf1 \n{\\fonttbl\\f0\\null;} \n")
        self.__write_pic_obj.write("{\\colortbl\\red255\\green255\\blue255;} \n\\pard \n")
    def process_pict(self):
        """Main entry point: split pict data out of the token stream."""
        self.__make_dir()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    # bracket counts live in the 4 chars before the newline
                    if self.__token_info == 'ob<nu<open-brack':
                        self.__ob_count = line[-5:-1]
                    if self.__token_info == 'cb<nu<clos-brack':
                        self.__cb_count = line[-5:-1]
                    if not self.__in_pict:
                        to_print = self.__default(line, write_obj)
                        if to_print:
                            write_obj.write(line)
                    else:
                        to_print = self.__in_pict_func(line)
                        if to_print:
                            write_obj.write(line)
        if self.__already_found_pict:
            self.__write_pic_obj.write("}\n")
            self.__write_pic_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "pict.data")
            try:
                copy_obj.copy_file(self.__pict_file, "pict.rtf")
            except Exception:
                # self.__pict_file only exists if a pict was found;
                # best-effort debugging copy, so swallow the failure.
                pass
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        if self.__pict_count == 0:
            try:
                os.rmdir(self.__dir_name)
            except OSError:
                pass
| 7,220 | Python | .py | 167 | 32.017964 | 90 | 0.477767 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,302 | footnote.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/footnote.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Footnote:
    """
    Two public methods are available. The first separates all of the
    footnotes from the body and puts them at the bottom of the text, where
    they are easier to process. The second joins those footnotes to the
    proper places in the body.
    """
    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            in_file -- tokenized file to read
            bug_handler -- exception raised on unrecoverable errors
        Optional:
            copy -- whether to keep debugging copies of intermediate files
            run_level -- debugging verbosity (currently unused here)
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__write_to = better_mktemp()
        self.__found_a_footnote = 0
    def __first_line_func(self, line):
        """
        Print the tag info for footnotes. Check whether footnote is an
        endnote and make the tag according to that.
        """
        if self.__token_info == 'cw<nt<type______':
            self.__write_to_foot_obj.write(
                'mi<tg<open-att__<footnote<type>endnote<num>%s\n' % self.__footnote_count)
        else:
            self.__write_to_foot_obj.write(
                'mi<tg<open-att__<footnote<num>%s\n' % self.__footnote_count)
        self.__first_line = 0
    def __in_footnote_func(self, line):
        """Handle all tokens that are part of footnote"""
        if self.__first_line:
            self.__first_line_func(line)
        if self.__token_info == 'cw<ci<footnot-mk':
            # emit the footnote's own number inside its text
            num = str(self.__footnote_count)
            self.__write_to_foot_obj.write(line)
            self.__write_to_foot_obj.write(
                'tx<nu<__________<%s\n' % num
            )
        if self.__cb_count == self.__footnote_bracket_count:
            # closing bracket of the footnote group: return to body
            self.__in_footnote = 0
            self.__write_obj.write(line)
            self.__write_to_foot_obj.write(
                'mi<mk<foot___clo\n')
            self.__write_to_foot_obj.write(
                'mi<tg<close_____<footnote\n')
            self.__write_to_foot_obj.write(
                'mi<mk<footnt-clo\n')
        else:
            self.__write_to_foot_obj.write(line)
    def __found_footnote(self, line):
        """ Found a footnote"""
        self.__found_a_footnote = 1
        self.__in_footnote = 1
        self.__first_line = 1
        self.__footnote_count += 1
        # temporarily set this to zero so I can enter loop
        self.__cb_count = 0
        self.__footnote_bracket_count = self.__ob_count
        # leave a numbered marker in the body so the footnote can be
        # matched up again by join_footnotes()
        self.__write_obj.write(
            'mi<mk<footnt-ind<%04d\n' % self.__footnote_count)
        self.__write_to_foot_obj.write(
            'mi<mk<footnt-ope<%04d\n' % self.__footnote_count)
    def __default_sep(self, line):
        """Handle all tokens that are not footnote tokens"""
        if self.__token_info == 'cw<nt<footnote__':
            self.__found_footnote(line)
        self.__write_obj.write(line)
        if self.__token_info == 'cw<ci<footnot-mk':
            # a marker in the body refers to the *next* footnote
            num = str(self.__footnote_count + 1)
            self.__write_obj.write(
                'tx<nu<__________<%s\n' % num
            )
    def __initiate_sep_values(self):
        """
        initiate counters for separate_footnotes method.
        """
        self.__bracket_count = 0
        self.__ob_count = 0
        self.__cb_count = 0
        self.__footnote_bracket_count = 0
        self.__in_footnote = 0
        self.__first_line = 0  # have not processed the first line of footnote
        self.__footnote_count = 0
    def separate_footnotes(self):
        """
        Separate all the footnotes in an RTF file and put them at the bottom,
        where they are easier to process. Each time a footnote is found,
        print all of its contents to a temporary file. Close both the main and
        temporary file. Print the footnotes from the temporary file to the
        bottom of the main file.
        """
        self.__initiate_sep_values()
        self.__footnote_holder = better_mktemp()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                with open_for_write(self.__footnote_holder) as self.__write_to_foot_obj:
                    for line in read_obj:
                        self.__token_info = line[:16]
                        # keep track of opening and closing brackets
                        if self.__token_info == 'ob<nu<open-brack':
                            self.__ob_count = line[-5:-1]
                        if self.__token_info == 'cb<nu<clos-brack':
                            self.__cb_count = line[-5:-1]
                        # In the middle of footnote text
                        if self.__in_footnote:
                            self.__in_footnote_func(line)
                        # not in the middle of footnote text
                        else:
                            self.__default_sep(line)
        with open_for_read(self.__footnote_holder) as read_obj:
            with open_for_write(self.__write_to, append=True) as write_obj:
                write_obj.write(
                    'mi<mk<sect-close\n'
                    'mi<mk<body-close\n'
                    'mi<tg<close_____<section\n'
                    'mi<tg<close_____<body\n'
                    'mi<tg<close_____<doc\n'
                    'mi<mk<footnt-beg\n')
                for line in read_obj:
                    write_obj.write(line)
                write_obj.write(
                    'mi<mk<footnt-end\n')
        os.remove(self.__footnote_holder)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "footnote_separate.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
    def update_info(self, file, copy):
        """
        Unused method
        """
        self.__file = file
        self.__copy = copy
    def __get_foot_body_func(self, line):
        """
        Process lines in main body and look for beginning of footnotes.
        """
        # mi<mk<footnt-end
        if self.__token_info == 'mi<mk<footnt-beg':
            self.__state = 'foot'
        else:
            self.__write_obj.write(line)
    def __get_foot_foot_func(self, line):
        """
        Copy footnotes from bottom of file to a separate, temporary file.
        """
        if self.__token_info == 'mi<mk<footnt-end':
            self.__state = 'body'
        else:
            self.__write_to_foot_obj.write(line)
    def __get_footnotes(self):
        """
        Private method to remove footnotes from main file. Read one line from
        the main file at a time. If the state is 'body', call on the private
        __get_foot_body_func. Otherwise, call on the __get_foot_foot_func.
        These two functions do the work of separating the footnotes form the
        body.
        """
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                with open_for_write(self.__footnote_holder) as self.__write_to_foot_obj:
                    for line in read_obj:
                        self.__token_info = line[:16]
                        if self.__state == 'body':
                            self.__get_foot_body_func(line)
                        elif self.__state == 'foot':
                            self.__get_foot_foot_func(line)
    def __get_foot_from_temp(self, num):
        """
        Private method for joining footnotes to body. This method reads from
        the temporary file until the proper footnote marker is found. It
        collects all the tokens until the end of the footnote, and returns
        them as a string. Returns None if no matching marker is found.
        """
        look_for = 'mi<mk<footnt-ope<' + num + '\n'
        found_foot = 0
        string_to_return = ''
        for line in self.__read_from_foot_obj:
            if found_foot:
                if line == 'mi<mk<footnt-clo\n':
                    return string_to_return
                string_to_return = string_to_return + line
            else:
                if line == look_for:
                    found_foot = 1
    def __join_from_temp(self):
        """
        Private method for rejoining footnotes to body. Read from the
        newly-created, temporary file that contains the body text but no
        footnotes. Each time a footnote marker is found, call the private
        method __get_foot_from_temp(). This method will return a string to
        print out to the third file.
        If no footnote marker is found, simply print out the token (line).
        """
        with open_for_read(self.__footnote_holder) as self.__read_from_foot_obj:
            with open_for_read(self.__write_to) as read_obj:
                with open_for_write(self.__write_to2) as self.__write_obj:
                    for line in read_obj:
                        if line[:16] == 'mi<mk<footnt-ind':
                            foot_text = self.__get_foot_from_temp(line[17:-1])
                            if foot_text is None:
                                # No matching footnote in the temp file:
                                # drop the marker instead of crashing on
                                # write(None).
                                foot_text = ''
                            line = foot_text
                        self.__write_obj.write(line)
    def join_footnotes(self):
        """
        Join the footnotes from the bottom of the file and put them in their
        former places. First, remove the footnotes from the bottom of the
        input file, outputting them to a temporary file. This creates two new
        files, one without footnotes, and one of just footnotes. Open both
        these files to read. When a marker is found in the main file, find the
        corresponding marker in the footnote file. Output the mix of body and
        footnotes to a third file.
        """
        if not self.__found_a_footnote:
            return
        self.__write_to2 = better_mktemp()
        self.__state = 'body'
        self.__get_footnotes()
        self.__join_from_temp()
        # self.__write_obj.close()
        # self.__read_from_foot_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to2, "footnote_joined.data")
        copy_obj.rename(self.__write_to2, self.__file)
        os.remove(self.__write_to2)
        os.remove(self.__footnote_holder)
| 11,073 | Python | .py | 244 | 34.127049 | 88 | 0.524188 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,303 | preamble_div.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/preamble_div.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy, list_table, override_table
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class PreambleDiv:
"""
Break the preamble into divisions.
"""
def __init__(self, in_file,
bug_handler,
copy=None,
no_namespace=None,
run_level=1,
):
"""
Required:
'file'
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__no_namespace = no_namespace
self.__write_to = better_mktemp()
self.__run_level = run_level
    def __initiate_values(self):
        """
        Set values, including those for the dictionary.
        """
        self.__all_lists = {}
        # default page geometry in points; overwritten by margin tokens
        self.__page = {
            'margin-top' : 72,
            'margin-bottom' : 72,
            'margin-left' : 90,
            'margin-right' : 90,
            'gutter' : 0,
        }
        # bracket counts arrive as 4-char strings, hence '' not 0
        self.__cb_count = ''
        self.__ob_count = ''
        self.__state = 'preamble'
        self.__rtf_final = ''
        self.__close_group_count = ''
        self.__found_font_table = 0
        self.__list_table_final = ''
        self.__override_table_final = ''
        self.__revision_table_final = ''
        self.__doc_info_table_final = ''
        # maps both state names and 16-char token infos to handlers
        self.__state_dict = {
            'default' : self.__default_func,
            'rtf_header' : self.__rtf_head_func,
            'preamble' : self.__preamble_func,
            'font_table' : self.__font_table_func,
            'color_table' : self.__color_table_func,
            'style_sheet' : self.__style_sheet_func,
            'list_table' : self.__list_table_func,
            'override_table' : self.__override_table_func,
            'revision_table' : self.__revision_table_func,
            'doc_info' : self.__doc_info_func,
            'body' : self.__body_func,
            'ignore' : self.__ignore_func,
            'cw<ri<rtf_______' : self.__found_rtf_head_func,
            'cw<pf<par-def___' : self.__para_def_func,
            'tx<nu<__________' : self.__text_func,
            'cw<tb<row-def___' : self.__row_def_func,
            'cw<sc<section___' : self.__new_section_func,
            'cw<sc<sect-defin' : self.__new_section_func,
            'cw<it<font-table' : self.__found_font_table_func,
            'cw<it<colr-table' : self.__found_color_table_func,
            'cw<ss<style-shet' : self.__found_style_sheet_func,
            'cw<it<listtable_' : self.__found_list_table_func,
            'cw<it<lovr-table' : self.__found_override_table_func,
            'cw<it<revi-table' : self.__found_revision_table_func,
            'cw<di<doc-info__' : self.__found_doc_info_func,
            'cw<pa<margin-lef' : self.__margin_func,
            'cw<pa<margin-rig' : self.__margin_func,
            'cw<pa<margin-top' : self.__margin_func,
            'cw<pa<margin-bot' : self.__margin_func,
            'cw<pa<gutter____' : self.__margin_func,
            'cw<pa<paper-widt' : self.__margin_func,
            'cw<pa<paper-hght' : self.__margin_func,
            # 'cw<tb<columns___' : self.__section_func,
        }
        # token suffix (chars 6-16) => attribute name for page geometry
        self.__margin_dict = {
            'margin-lef' : 'margin-left',
            'margin-rig' : 'margin-right',
            'margin-top' : 'margin-top',
            'margin-bot' : 'margin-bottom',
            'gutter____' : 'gutter',
            'paper-widt' : 'paper-width',
            'paper-hght' : 'paper-height',
        }
        self.__translate_sec = {
            'columns___' : 'column',
        }
        self.__section = {}
        # self.__write_obj.write(self.__color_table_final)
        self.__color_table_final = ''
        self.__style_sheet_final = ''
        self.__individual_font = 0
        self.__old_font = 0
        self.__ob_group = 0 # depth of group
        self.__font_table_final = 0
        # delegate list-table parsing to its own module
        self.__list_table_obj = list_table.ListTable(
            run_level=self.__run_level,
            bug_handler=self.__bug_handler,
        )
def __ignore_func(self, line):
"""
Ignore all lines, until the bracket is found that marks the end of
the group.
"""
if self.__ignore_num == self.__cb_count:
self.__state = self.__previous_state
    def __found_rtf_head_func(self, line):
        # \rtf1 token found: start collecting the RTF header
        self.__state = 'rtf_header'
    def __rtf_head_func(self, line):
        # Collect header tokens until the second open bracket (count
        # '0002', compared as a string) or until body text appears.
        if self.__ob_count == '0002':
            self.__rtf_final = (
                'mi<mk<rtfhed-beg\n' +
                self.__rtf_final +
                'mi<mk<rtfhed-end\n'
            )
            self.__state = 'preamble'
        elif self.__token_info == 'tx<nu<__________' or \
            self.__token_info == 'cw<pf<par-def___':
            # Text or a paragraph definition before the preamble ended:
            # no font table will follow, so synthesize a default one and
            # flush the whole preamble before continuing with the body.
            self.__state = 'body'
            self.__rtf_final = (
                'mi<mk<rtfhed-beg\n' +
                self.__rtf_final +
                'mi<mk<rtfhed-end\n'
            )
            self.__make_default_font_table()
            self.__write_preamble()
            self.__write_obj.write(line)
        else:
            self.__rtf_final = self.__rtf_final + line
def __make_default_font_table(self):
"""
If not font table is found, need to write one out.
"""
self.__font_table_final = 'mi<tg<open______<font-table\n'
self.__font_table_final += 'mi<mk<fonttb-beg\n'
self.__font_table_final += 'mi<mk<fontit-beg\n'
self.__font_table_final += 'cw<ci<font-style<nu<0\n'
self.__font_table_final += 'tx<nu<__________<Times;\n'
self.__font_table_final += 'mi<mk<fontit-end\n'
self.__font_table_final += 'mi<mk<fonttb-end\n'
self.__font_table_final += 'mi<tg<close_____<font-table\n'
def __make_default_color_table(self):
"""
If no color table is found, write a string for a default one
"""
self.__color_table_final = 'mi<tg<open______<color-table\n'
self.__color_table_final += 'mi<mk<clrtbl-beg\n'
self.__color_table_final += 'cw<ci<red_______<nu<00\n'
self.__color_table_final += 'cw<ci<green_____<nu<00\n'
self.__color_table_final += 'cw<ci<blue______<en<00\n'
self.__color_table_final += 'mi<mk<clrtbl-end\n'
self.__color_table_final += 'mi<tg<close_____<color-table\n'
def __make_default_style_table(self):
"""
If not font table is found, make a string for a default one
"""
"""
self.__style_sheet_final = 'mi<tg<open______<style-table\n'
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final +=
self.__style_sheet_final += 'mi<tg<close_____<style-table\n'
"""
self.__style_sheet_final = """mi<tg<open______<style-table
mi<mk<styles-beg
mi<mk<stylei-beg
cw<ci<font-style<nu<0
tx<nu<__________<Normal;
mi<mk<stylei-end
mi<mk<stylei-beg
cw<ss<char-style<nu<0
tx<nu<__________<Default Paragraph Font;
mi<mk<stylei-end
mi<mk<styles-end
mi<tg<close_____<style-table
"""
    def __found_font_table_func(self, line):
        """
        Found a font-table group. Enter the font_table state, unless a font
        table was already parsed, in which case the duplicate group is
        ignored.
        """
        if self.__found_font_table:
            self.__state = 'ignore'
        else:
            self.__state = 'font_table'
        # remember which close-bracket count ends this group
        self.__font_table_final = ''
        self.__close_group_count = self.__ob_count
        self.__cb_count = 0
        self.__found_font_table = 1
    def __font_table_func(self, line):
        """
        Keep adding to the self.__individual_font string until end of group
        found. If a bracket is found, check that it is only one bracket deep.
        If it is, then set the marker for an individual font. If it is not,
        then ignore all data in this group.
        cw<ci<font-style<nu<0
        """
        if self.__cb_count == self.__close_group_count:
            # end of the whole font-table group: wrap with table markers
            self.__state = 'preamble'
            self.__font_table_final = 'mi<tg<open______<font-table\n' + \
                'mi<mk<fonttb-beg\n' + self.__font_table_final
            self.__font_table_final += \
                'mi<mk<fonttb-end\n' + 'mi<tg<close_____<font-table\n'
        elif self.__token_info == 'ob<nu<open-brack':
            if int(self.__ob_count) == int(self.__close_group_count) + 1:
                # exactly one level deep: this group is an individual font
                self.__font_table_final += \
                    'mi<mk<fontit-beg\n'
                self.__individual_font = 1
            else:
                # nested deeper: ignore everything until the group closes
                self.__previous_state = 'font_table'
                self.__state = 'ignore'
                self.__ignore_num = self.__ob_count
        elif self.__token_info == 'cb<nu<clos-brack':
            if int(self.__cb_count) == int(self.__close_group_count) + 1:
                self.__individual_font = 0
                self.__font_table_final += \
                    'mi<mk<fontit-end\n'
        elif self.__individual_font:
            if self.__old_font and self.__token_info == 'tx<nu<__________':
                # older RTF ends a font entry with ';' instead of a bracket
                if ';' in line:
                    self.__font_table_final += line
                    self.__font_table_final += 'mi<mk<fontit-end\n'
                    self.__individual_font = 0
            else:
                self.__font_table_final += line
        elif self.__token_info == 'cw<ci<font-style':
            # older RTF: a font-style control word starts a new font entry
            self.__old_font = 1
            self.__individual_font = 1
            self.__font_table_final += 'mi<mk<fontit-beg\n'
            self.__font_table_final += line
    def __old_font_func(self, line):
        """
        Required:
            line --line to parse
        Returns:
            nothing
        Logic:
            used for older forms of RTF:
            \f3\fswiss\fcharset77 Helvetica-Oblique;\f4\fnil\fcharset77 Geneva;}
            Note how each font is not divided by a bracket
        """
        # NOTE(review): intentionally empty -- old-style font entries are
        # actually handled inside __font_table_func via the __old_font flag.
    def __found_color_table_func(self, line):
        """
        all functions that start with __found operate the same. They set the
        state, initiate a string, determine the self.__close_group_count, and
        set self.__cb_count to zero.
        """
        self.__state = 'color_table'
        self.__color_table_final = ''
        # the close bracket with this count ends the color-table group
        self.__close_group_count = self.__ob_count
        self.__cb_count = 0
    def __color_table_func(self, line):
        """
        Accumulate color-table lines; at the end of the group, wrap the
        collected text with color-table tags and markers.
        """
        if int(self.__cb_count) == int(self.__close_group_count):
            self.__state = 'preamble'
            self.__color_table_final = 'mi<tg<open______<color-table\n' + \
                'mi<mk<clrtbl-beg\n' + self.__color_table_final
            self.__color_table_final += \
                'mi<mk<clrtbl-end\n' + 'mi<tg<close_____<color-table\n'
        else:
            self.__color_table_final += line
def __found_style_sheet_func(self, line):
self.__state = 'style_sheet'
self.__style_sheet_final = ''
self.__close_group_count = self.__ob_count
self.__cb_count = 0
    def __style_sheet_func(self, line):
        """
        Same logic as the font_table_func.
        """
        if self.__cb_count == self.__close_group_count:
            # end of the whole style-sheet group: wrap with table markers
            self.__state = 'preamble'
            self.__style_sheet_final = 'mi<tg<open______<style-table\n' + \
                'mi<mk<styles-beg\n' + self.__style_sheet_final
            self.__style_sheet_final += \
                'mi<mk<styles-end\n' + 'mi<tg<close_____<style-table\n'
        elif self.__token_info == 'ob<nu<open-brack':
            if int(self.__ob_count) == int(self.__close_group_count) + 1:
                # a group exactly one level deep is an individual style
                self.__style_sheet_final += \
                    'mi<mk<stylei-beg\n'
        elif self.__token_info == 'cb<nu<clos-brack':
            if int(self.__cb_count) == int(self.__close_group_count) + 1:
                self.__style_sheet_final += \
                    'mi<mk<stylei-end\n'
        else:
            self.__style_sheet_final += line
    def __found_list_table_func(self, line):
        """Enter the list_table state; same pattern as __found_color_table_func."""
        self.__state = 'list_table'
        self.__list_table_final = ''
        self.__close_group_count = self.__ob_count
        self.__cb_count = 0
def __list_table_func(self, line):
if self.__cb_count == self.__close_group_count:
self.__state = 'preamble'
self.__list_table_final, self.__all_lists =\
self.__list_table_obj.parse_list_table(
self.__list_table_final)
# sys.stderr.write(repr(all_lists))
elif self.__token_info == '':
pass
else:
self.__list_table_final += line
pass
    def __found_override_table_func(self, line):
        """
        Enter the override_table state and create the OverrideTable object
        that will parse the collected group; it needs the lists gathered
        from the list table.
        """
        self.__override_table_obj = override_table.OverrideTable(
            run_level=self.__run_level,
            list_of_lists=self.__all_lists,
        )
        self.__state = 'override_table'
        self.__override_table_final = ''
        self.__close_group_count = self.__ob_count
        self.__cb_count = 0
        # cw<it<lovr-table
    def __override_table_func(self, line):
        """
        Accumulate override-table lines; at the end of the group, hand the
        collected text to the OverrideTable object created by
        __found_override_table_func.
        """
        if self.__cb_count == self.__close_group_count:
            self.__state = 'preamble'
            self.__override_table_final, self.__all_lists =\
                self.__override_table_obj.parse_override_table(self.__override_table_final)
        elif self.__token_info == '':
            pass
        else:
            self.__override_table_final += line
    def __found_revision_table_func(self, line):
        """Enter the revision_table state; same pattern as __found_color_table_func."""
        self.__state = 'revision_table'
        self.__revision_table_final = ''
        self.__close_group_count = self.__ob_count
        self.__cb_count = 0
    def __revision_table_func(self, line):
        """
        Accumulate revision-table lines; at the end of the group, wrap the
        collected text with revision-table tags and markers.
        """
        if int(self.__cb_count) == int(self.__close_group_count):
            self.__state = 'preamble'
            self.__revision_table_final = 'mi<tg<open______<revision-table\n' + \
                'mi<mk<revtbl-beg\n' + self.__revision_table_final
            self.__revision_table_final += \
                'mi<mk<revtbl-end\n' + 'mi<tg<close_____<revision-table\n'
        else:
            self.__revision_table_final += line
    def __found_doc_info_func(self, line):
        """Enter the doc_info state; same pattern as __found_color_table_func."""
        self.__state = 'doc_info'
        self.__doc_info_table_final = ''
        self.__close_group_count = self.__ob_count
        self.__cb_count = 0
    def __doc_info_func(self, line):
        """
        Accumulate doc-information lines; sub-groups one level deep are
        marked as individual info items, and the whole group is wrapped
        with doc-information tags at the end.
        """
        if self.__cb_count == self.__close_group_count:
            self.__state = 'preamble'
            self.__doc_info_table_final = 'mi<tg<open______<doc-information\n' + \
                'mi<mk<doc-in-beg\n' + self.__doc_info_table_final
            self.__doc_info_table_final += \
                'mi<mk<doc-in-end\n' + 'mi<tg<close_____<doc-information\n'
        elif self.__token_info == 'ob<nu<open-brack':
            if int(self.__ob_count) == int(self.__close_group_count) + 1:
                self.__doc_info_table_final += \
                    'mi<mk<docinf-beg\n'
        elif self.__token_info == 'cb<nu<clos-brack':
            if int(self.__cb_count) == int(self.__close_group_count) + 1:
                self.__doc_info_table_final += \
                    'mi<mk<docinf-end\n'
        else:
            self.__doc_info_table_final += line
def __margin_func(self, line):
"""
Handles lines that describe page info. Add the appropriate info in the
token to the self.__margin_dict dictionary.
"""
info = line[6:16]
changed = self.__margin_dict.get(info)
if changed is None:
print('woops!')
else:
self.__page[changed] = line[20:-1]
# cw<pa<margin-lef<nu<1728
def __print_page_info(self):
self.__write_obj.write('mi<tg<empty-att_<page-definition')
for key in self.__page.keys():
self.__write_obj.write(
f'<{key}>{self.__page[key]}'
)
self.__write_obj.write('\n')
# mi<tg<open-att__<footn
def __print_sec_info(self):
"""
Check if there is any section info. If so, print it out.
If not, print out an empty tag to satisfy the dtd.
"""
if len(self.__section.keys()) == 0:
self.__write_obj.write(
'mi<tg<open______<section-definition\n'
)
else:
self.__write_obj.write(
'mi<tg<open-att__<section-definition')
keys = self.__section.keys()
for key in keys:
self.__write_obj.write(
'<%s>%s' % (key, self.__section[key])
)
self.__write_obj.write('\n')
    def __section_func(self, line):
        """
        Add info pertaining to section to the self.__section dictionary, to be
        printed out later.
        """
        # line[6:16] is the 10-character token name, e.g. 'columns___'
        info = self.__translate_sec.get(line[6:16])
        if info is None:
            sys.stderr.write('woops!\n')
        else:
            self.__section[info] = 'true'
    def __body_func(self, line):
        """In the body: pass every line through unchanged."""
        self.__write_obj.write(line)
    def __default_func(self, line):
        """Do nothing; used for tokens that need no special handling."""
        # either in preamble or in body
        pass
    def __para_def_func(self, line):
        """
        A paragraph definition while the close-bracket count is still 0002
        marks the start of the body: flush the preamble and switch states.
        """
        # if self.__ob_group == 1
        # this tells dept of group
        if self.__cb_count == '0002':
            self.__state = 'body'
            self.__write_preamble()
        self.__write_obj.write(line)
    def __text_func(self, line):
        """
        If the cb_count is less than 1, you have hit the body
        For older RTF
        Newer RTF should never have to use this function
        """
        # no close bracket seen yet: treat as if we were at count 0002
        if self.__cb_count == '':
            cb_count = '0002'
        else:
            cb_count = self.__cb_count
        # ignore previous lines
        # should be
        # if self.__ob_group == 1
        # this tells dept of group
        if cb_count == '0002':
            self.__state = 'body'
            self.__write_preamble()
        self.__write_obj.write(line)
    def __row_def_func(self, line):
        """
        A row definition at close-bracket count 0002 marks the start of the
        body: flush the preamble and switch states.
        """
        # if self.__ob_group == 1
        # this tells dept of group
        if self.__cb_count == '0002':
            self.__state = 'body'
            self.__write_preamble()
        self.__write_obj.write(line)
    def __new_section_func(self, line):
        """
        This is new. The start of a section marks the end of the preamble
        """
        if self.__cb_count == '0002':
            self.__state = 'body'
            self.__write_preamble()
        else:
            # unexpected bracket depth -- report but keep going
            sys.stderr.write('module is preamble_div\n')
            sys.stderr.write('method is __new_section_func\n')
            sys.stderr.write('bracket count should be 2?\n')
        self.__write_obj.write(line)
    def __write_preamble(self):
        """
        Write all the strings, which represent all the data in the preamble.
        Write a body and section beginning.

        Tables that were never found are replaced by defaults first; the
        write order (fonts, colors, styles, lists, overrides, revisions,
        doc info, page info) must match what later passes expect.
        """
        if self.__no_namespace:
            self.__write_obj.write(
                'mi<tg<open______<doc\n'
            )
        else:
            self.__write_obj.write(
                'mi<tg<open-att__<doc<xmlns>http://rtf2xml.sourceforge.net/\n')
        self.__write_obj.write('mi<tg<open______<preamble\n')
        self.__write_obj.write(self.__rtf_final)
        if not self.__color_table_final:
            self.__make_default_color_table()
        if not self.__font_table_final:
            self.__make_default_font_table()
        self.__write_obj.write(self.__font_table_final)
        self.__write_obj.write(self.__color_table_final)
        if not self.__style_sheet_final:
            self.__make_default_style_table()
        self.__write_obj.write(self.__style_sheet_final)
        self.__write_obj.write(self.__list_table_final)
        self.__write_obj.write(self.__override_table_final)
        self.__write_obj.write(self.__revision_table_final)
        self.__write_obj.write(self.__doc_info_table_final)
        self.__print_page_info()
        # re-open the two enclosing groups so the body tokens balance
        self.__write_obj.write('ob<nu<open-brack<0001\n')
        self.__write_obj.write('ob<nu<open-brack<0002\n')
        self.__write_obj.write('cb<nu<clos-brack<0002\n')
        self.__write_obj.write('mi<tg<close_____<preamble\n')
        self.__write_obj.write('mi<tg<open______<body\n')
        # self.__write_obj.write('mi<tg<open-att__<section<num>1\n')
        # self.__print_sec_info()
        # self.__write_obj.write('mi<tg<open______<headers-and-footers\n')
        # self.__write_obj.write('mi<mk<head_foot_<\n')
        # self.__write_obj.write('mi<tg<close_____<headers-and-footers\n')
        self.__write_obj.write('mi<mk<body-open_\n')
    def __preamble_func(self, line):
        """
        Check if the token info belongs to the dictionary. If so, take the
        appropriate action. Unrecognized tokens are silently ignored while
        in the preamble state.
        """
        action = self.__state_dict.get(self.__token_info)
        if action:
            action(line)
def make_preamble_divisions(self):
self.__initiate_values()
read_obj = open_for_read(self.__file)
self.__write_obj = open_for_write(self.__write_to)
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
if self.__token_info == 'ob<nu<open-brack':
self.__ob_count = line[-5:-1]
self.__ob_group += 1
if self.__token_info == 'cb<nu<clos-brack':
self.__cb_count = line[-5:-1]
self.__ob_group -= 1
action = self.__state_dict.get(self.__state)
if action is None:
print(self.__state)
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler=self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "preamble_div.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
return self.__all_lists
| 23,011 | Python | .py | 552 | 31.751812 | 91 | 0.51443 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,304 | options_trem.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/options_trem.py | import sys
class ParseOptions:
    """
    Requires:
        system_string --The string from the command line
        options_dict -- a dictionary with the key equal to the option, and
        a list describing that option. (See below)
    Returns:
        A tuple. The first item in the tuple is a dictionary containing
        the arguments for each options. The second is a list of the
        arguments.
        If invalid options are passed to the module, 0,0 is returned.
    Examples:
        Your script has the option '--indents', and '--output=file'.
        You want to give short option names as well:
        --i and -o=file
        Use this:
        options_dict = {'output': [1, 'o'],
                        'indents': [0, 'i']
                    }
        options_obj = ParseOptions(
            system_string = sys.argv,
            options_dict = options_dict
        )
        options, arguments = options_obj.parse_options()
        print(options)
        print(arguments)
        The result will be:
        {indents:None, output:'/home/paul/file'}, ['/home/paul/input']
    """
    def __init__(self, system_string, options_dict):
        # drop argv[0] (the program name)
        self.__system_string = system_string[1:]
        long_list = self.__make_long_list_func(options_dict)
        short_list = self.__make_short_list_func(options_dict)
        self.__legal_options = long_list + short_list
        self.__short_long_dict = self.__make_short_long_dict_func(options_dict)
        self.__opt_with_args = self.__make_options_with_arg_list(options_dict)
        self.__options_okay = 1
    def __make_long_list_func(self, options_dict):
        """
        Required:
            options_dict -- the dictionary mapping options to a list
        Returns:
            a list of legal long options ('--name')
        """
        return ['--' + key for key in options_dict.keys()]
    def __make_short_list_func(self, options_dict):
        """
        Required:
            options_dict --the dictionary mapping options to a list
        Returns:
            a list of legal short options ('-x'), for options that have one
        """
        legal_list = []
        for key, values in options_dict.items():
            try:
                legal_list.append('-' + values[1])
            except IndexError:
                # no short form declared for this option
                pass
        return legal_list
    def __make_short_long_dict_func(self, options_dict):
        """
        Required:
            options_dict --the dictionary mapping options to a list
        Returns:
            a dictionary with keys of short options and values of long options
        """
        short_long_dict = {}
        for key, values in options_dict.items():
            try:
                # renamed from 'long', which shadowed a builtin name
                short_opt = '-' + values[1]
                long_opt = '--' + key
                short_long_dict[short_opt] = long_opt
            except IndexError:
                pass
        return short_long_dict
    def __make_options_with_arg_list(self, options_dict):
        """
        Required:
            options_dict --the dictionary mapping options to a list
        Returns:
            a list of the long options that take arguments.
        """
        opt_with_arg = []
        for key, values in options_dict.items():
            try:
                if values[0]:
                    opt_with_arg.append('--' + key)
            except IndexError:
                pass
        return opt_with_arg
    def __sub_short_with_long(self):
        """
        Returns:
            a copy of the system string in which every short option has
            been replaced by its long form
        """
        new_string = []
        short_names = self.__short_long_dict.keys()
        for item in self.__system_string:
            if item in short_names:
                item = self.__short_long_dict[item]
            new_string.append(item)
        return new_string
    def __pair_arg_with_option(self):
        """
        Returns:
            a copy of the system string in which each option that takes an
            argument is fused with it: ['--foo', 'bar'] => ['--foo=bar'].
            Sets self.__options_okay to 0 when an argument is missing.
        """
        opt_len = len(self.__system_string)
        new_system_string = []
        counter = 0
        slurp_value = 0
        for arg in self.__system_string:
            # counter now indexes the item *after* arg
            counter += 1
            if slurp_value:
                # this item was already consumed as the previous option's
                # argument
                slurp_value = 0
                continue
            if arg[0] != '-':
                # a plain argument
                new_system_string.append(arg)
            elif '=' in arg:
                # option and argument already paired
                new_system_string.append(arg)
            else:
                if arg in self.__opt_with_args:
                    if counter + 1 > opt_len:
                        # option is last in the list: nothing to pair with
                        sys.stderr.write('option "%s" must take an argument\n' % arg)
                        new_system_string.append(arg)
                        self.__options_okay = 0
                    elif self.__system_string[counter][0] == '-':
                        # next item is another option, not an argument
                        sys.stderr.write('option "%s" must take an argument\n' % arg)
                        new_system_string.append(arg)
                        self.__options_okay = 0
                    else:
                        # fuse option with the following argument
                        new_system_string.append(arg + '=' + self.__system_string[counter])
                        slurp_value = 1
                else:
                    # this option takes no argument
                    new_system_string.append(arg)
        return new_system_string
    def __get_just_options(self):
        """
        Returns:
            (options, arguments). Everything up to and including the last
            token that starts with '-' counts as the option list; the rest
            are arguments. Flags any non-option found inside the option
            list.
        """
        highest = 0
        counter = 0
        found_options = 0
        for item in self.__system_string:
            if item[0] == '-':
                highest = counter
                found_options = 1
            counter += 1
        if found_options:
            just_options = self.__system_string[:highest + 1]
            arguments = self.__system_string[highest + 1:]
            for item in just_options:
                if item[0] != '-':
                    sys.stderr.write('%s is an argument in an option list\n' % item)
                    self.__options_okay = 0
        else:
            just_options = []
            arguments = self.__system_string
        return just_options, arguments
    def __is_legal_option_func(self):
        """
        Check each token against the legal options; flag (and report to
        stderr) any token that looks like an option but is not recognized.
        """
        illegal_options = []
        for arg in self.__system_string:
            if '=' in arg:
                arg = arg.split('=')[0]
            if arg not in self.__legal_options and arg[0] == '-':
                illegal_options.append(arg)
        if illegal_options:
            self.__options_okay = 0
            sys.stderr.write('The following options are not permitted:\n')
            for not_legal in illegal_options:
                sys.stderr.write('%s\n' % not_legal)
    def __make_options_dict(self, options):
        """
        Convert ['--opt=val', '--flag'] into {'opt': 'val', 'flag': None}.
        """
        options_dict = {}
        for item in options:
            if '=' in item:
                option, arg = item.split('=')
            else:
                option = item
                arg = None
            # strip one or two leading dashes
            if option[0] == '-':
                option = option[1:]
            if option[0] == '-':
                option = option[1:]
            options_dict[option] = arg
        return options_dict
    def parse_options(self):
        """
        Run the full pipeline and return (options_dict, arguments), or
        (0, 0) when anything on the command line was illegal.
        """
        self.__system_string = self.__sub_short_with_long()
        self.__system_string = self.__pair_arg_with_option()
        options, arguments = self.__get_just_options()
        self.__is_legal_option_func()
        if self.__options_okay:
            options_dict = self.__make_options_dict(options)
            return options_dict, arguments
        else:
            return 0,0
if __name__ == '__main__':
    # Manual smoke test: parse this script's own command line with a
    # sample options dictionary and print the result.
    this_dict = {
        'indents': [0, 'i'],
        'output': [1, 'o'],
        'test3': [1, 't'],
    }
    test_obj = ParseOptions(system_string=sys.argv,
                options_dict=this_dict
            )
    options, the_args = test_obj.parse_options()
    print(options, the_args)
    """
    this_options = ['--foo', '-o']
    this_opt_with_args = ['--foo']
    """
| 10,294 | Python | .py | 273 | 25.117216 | 95 | 0.505697 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,305 | old_rtf.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/old_rtf.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys
from . import open_for_read
class OldRtf:
    """
    Check to see if the RTF is an older version
    Logic:
        If allowable control word/properties happen in text without being enclosed
        in brackets the file will be considered old rtf
    """
    def __init__(self, in_file,
            bug_handler,
            run_level,
            ):
        """
        Required:
            'in_file' --file of tokens to parse
            'bug_handler' --exception class raised on internal errors
            'run_level' --verbosity; above 3, old-RTF constructions found
            are reported to standard error
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__run_level = run_level
        # inline properties that, seen at the body's base bracket depth,
        # mark the file as old RTF
        self.__allowable = [
            'annotation' ,
            'blue______' ,
            'bold______',
            'caps______',
            'char-style' ,
            'dbl-strike' ,
            'emboss____',
            'engrave___' ,
            'font-color',
            'font-down_' ,
            'font-size_',
            'font-style',
            'font-up___',
            'footnot-mk' ,
            'green_____' ,
            'hidden____',
            'italics___',
            'outline___',
            'red_______',
            'shadow____' ,
            'small-caps',
            'strike-thr',
            'subscript_',
            'superscrip' ,
            'underlined' ,
        ]
        # state -> handler for each line
        self.__action_dict = {
            'before_body' : self.__before_body_func,
            'in_body' : self.__check_tokens_func,
            'after_pard' : self.__after_pard_func,
        }
    def __initiate_values(self):
        # __previous_token is recorded but never read back in this class
        self.__previous_token = ''
        self.__state = 'before_body'
        self.__found_new = 0
        self.__ob_group = 0
    def __check_tokens_func(self, line):
        # An allowable inline property at the body's base group depth means
        # formatting outside any group -- the old-RTF giveaway.
        if self.__inline_info in self.__allowable:
            if self.__ob_group == self.__base_ob_count:
                return 'old_rtf'
            else:
                self.__found_new += 1
        elif self.__token_info == 'cw<pf<par-def___':
            self.__state = 'after_pard'
    def __before_body_func(self, line):
        # Wait for the body-open marker, then remember the base group depth.
        if self.__token_info == 'mi<mk<body-open_':
            self.__state = 'in_body'
            self.__base_ob_count = self.__ob_group
    def __after_pard_func(self, line):
        # Skip the control words immediately following a paragraph
        # definition; resume checking at the first non-control-word line.
        if line[0:2] != 'cw':
            self.__state = 'in_body'
    def check_if_old_rtf(self):
        """
        Requires:
            nothing
        Returns:
            True if file is older RTf
            False if file is newer RTF
        """
        self.__initiate_values()
        line_num = 0
        with open_for_read(self.__file) as read_obj:
            for line in read_obj:
                line_num += 1
                self.__token_info = line[:16]
                if self.__token_info == 'mi<mk<body-close':
                    return False
                if self.__token_info == 'ob<nu<open-brack':
                    self.__ob_group += 1
                    self.__ob_count = line[-5:-1]
                if self.__token_info == 'cb<nu<clos-brack':
                    self.__ob_group -= 1
                    self.__cb_count = line[-5:-1]
                self.__inline_info = line[6:16]
                if self.__state == 'after_body':
                    return False
                action = self.__action_dict.get(self.__state)
                if action is None:
                    # NOTE(review): bare except keeps the warning
                    # best-effort; if action is None, the action(line)
                    # call below still raises TypeError
                    try:
                        sys.stderr.write('No action for this state!\n')
                    except:
                        pass
                result = action(line)
                if result == 'new_rtf':
                    return False
                elif result == 'old_rtf':
                    if self.__run_level > 3:
                        sys.stderr.write(
                            'Old rtf construction {} (bracket {}, line {})\n'.format(
                                self.__inline_info, str(self.__ob_group), line_num)
                        )
                    return True
                self.__previous_token = line[6:16]
        return False
| 5,163 | Python | .py | 134 | 26.507463 | 85 | 0.408367 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,306 | ParseRtf.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/ParseRtf.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
# $Revision: 1.41 $
# $Date: 2006/03/24 23:50:07 $
import os
import sys
from calibre.ebooks.rtf2xml import (
add_brackets,
body_styles,
check_brackets,
check_encoding,
colors,
combine_borders,
convert_to_tags,
copy,
default_encoding,
delete_info,
fields_large,
fields_small,
fonts,
footnote,
group_borders,
group_styles,
header,
headings_to_sections,
hex_2_utf8,
info,
inline,
line_endings,
list_numbers,
make_lists,
output,
paragraph_def,
paragraphs,
pict,
preamble_div,
preamble_rest,
process_tokens,
sections,
styles,
table,
table_info,
tokenize,
)
from calibre.ebooks.rtf2xml.old_rtf import OldRtf
from . import open_for_read, open_for_write
"""
Here is an example script using the ParseRTF module directly
#!/usr/bin/env python
def Handle_Main():
# Handles options and creates a parse object
parse_obj =ParseRtf.ParseRtf(
in_file = 'in.rtf',
# All values from here on are optional
# determine the output file
out_file = 'out.xml',
# determine the run level. The default is 1.
run_level = 3,
# The name of a debug directory, if you are running at
# run level 3 or higher.
debug = 'debug_dir',
# Convert RTF caps to real caps.
# Default is 1.
convert_caps = 1,
# Indent resulting XML.
# Default is 0 (no indent).
indent = 1,
# Form lists from RTF. Default is 1.
form_lists = 1,
# Convert headings to sections. Default is 0.
headings_to_sections = 1,
# Group paragraphs with the same style name. Default is 1.
group_styles = 1,
# Group borders. Default is 1.
group_borders = 1,
# Write or do not write paragraphs. Default is 0.
empty_paragraphs = 0,
# Allow to use a custom default encoding as fallback
default_encoding = 'cp1252',
)
    try:
        parse_obj.parse_rtf()
    except ParseRtf.InvalidRtfException as msg:
        sys.stderr.write(str(msg))
    except ParseRtf.RtfInvalidCodeException as msg:
        sys.stderr.write(str(msg))
"""
class InvalidRtfException(Exception):
    """Signals that the input file is not valid RTF."""
class RtfInvalidCodeException(Exception):
    """Signals an internal bug in the conversion program itself."""
class ParseRtf:
"""
Main class for controlling the rest of the parsing.
"""
    def __init__(self,
                 in_file,
                 out_file='',
                 out_dir=None,
                 dtd='',
                 deb_dir=None,
                 convert_symbol=None,
                 convert_wingdings=None,
                 convert_zapf=None,
                 convert_caps=None,
                 run_level=1,
                 indent=None,
                 replace_illegals=1,
                 form_lists=1,
                 headings_to_sections=1,
                 group_styles=1,
                 group_borders=1,
                 empty_paragraphs=1,
                 no_dtd=0,
                 char_data='',
                 default_encoding='cp1252',
                 ):
        """
        Requires:
            'file' --file to parse
            'char_data' --file containing character maps
            'dtd' --path to dtd
        Possible parameters, but not necessary:
            'output' --a file to output the parsed file. (Default is standard
            output.)
            'temp_dir' --directory for temporary output (If not provided, the
            script tries to output to directory where is script is executed.)
            'deb_dir' --debug directory. If a debug_dir is provided, the script
            will copy each run through as a file to examine in the debug_dir
            'check_brackets' -- make sure the brackets match up after each run
            through a file. Only for debugging.
        Returns: Nothing
        """
        self.__file = in_file
        self.__out_file = out_file
        self.__out_dir = out_dir
        # temporary output goes to the same directory as the final output
        self.__temp_dir = out_dir
        self.__dtd_path = dtd
        # raises RtfInvalidCodeException if the input file is missing
        self.__check_file(in_file,"file_to_parse")
        self.__char_data = char_data
        self.__debug_dir = deb_dir
        self.__check_dir(self.__temp_dir)
        # truthy only when a (valid) debug directory was supplied
        self.__copy = self.__check_dir(self.__debug_dir)
        self.__convert_caps = convert_caps
        self.__convert_symbol = convert_symbol
        self.__convert_wingdings = convert_wingdings
        self.__convert_zapf = convert_zapf
        self.__run_level = run_level
        self.__exit_level = 0
        self.__indent = indent
        self.__replace_illegals = replace_illegals
        self.__form_lists = form_lists
        self.__headings_to_sections = headings_to_sections
        self.__group_styles = group_styles
        self.__group_borders = group_borders
        self.__empty_paragraphs = empty_paragraphs
        self.__no_dtd = no_dtd
        self.__default_encoding = default_encoding
    def __check_file(self, the_file, type):
        """Check to see if files exist; file-like objects (anything with a
        'read' attribute) are accepted as-is. Raises RtfInvalidCodeException
        when the path is None (for the file to parse) or does not exist."""
        if hasattr(the_file, 'read'):
            return
        if the_file is None:
            if type == "file_to_parse":
                msg = "\nYou must provide a file for the script to work"
                raise RtfInvalidCodeException(msg)
        elif os.path.exists(the_file):
            pass # do nothing
        else:
            msg = "\nThe file '%s' cannot be found" % the_file
            raise RtfInvalidCodeException(msg)
    def __check_dir(self, the_dir):
        """Check to see if directory exists. Returns None when no directory
        was given, 1 when it exists; raises RtfInvalidCodeException when the
        path is not a directory."""
        if not the_dir :
            return
        dir_exists = os.path.isdir(the_dir)
        if not dir_exists:
            msg = "\n%s is not a directory" % the_dir
            raise RtfInvalidCodeException(msg)
        return 1
    def parse_rtf(self):
        """
        Parse the file by calling on other classes.
        Requires:
            Nothing
        Returns:
            A parsed file in XML, either to standard output or to a file,
            depending on the value of 'output' when the instance was created.
        """
        # Each pass below rewrites self.__temp_file in place; the
        # __bracket_match() calls are debug-only brace sanity checks that
        # only do work when run_level > 2.
        self.__temp_file = self.__make_temp_file(self.__file)
        # if the self.__deb_dir is true, then create a copy object,
        # set the directory to write to, remove files, and copy
        # the new temporary file to this directory
        if self.__debug_dir:
            copy_obj = copy.Copy(
                bug_handler=RtfInvalidCodeException,
            )
            copy_obj.set_dir(self.__debug_dir)
            copy_obj.remove_files()
            copy_obj.copy_file(self.__temp_file, "original_file")
        # Function to check if bracket are well handled
        if self.__debug_dir or self.__run_level > 2:
            self.__check_brack_obj = check_brackets.CheckBrackets(
                file=self.__temp_file,
                bug_handler=RtfInvalidCodeException,
            )
        # convert Macintosh and Windows line endings to Unix line endings
        # why do this if you don't wb after?
        line_obj = line_endings.FixLineEndings(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
            replace_illegals=self.__replace_illegals,
        )
        return_value = line_obj.fix_endings() # calibre return what?
        self.__return_code(return_value)
        # split the raw RTF into one token per line
        tokenize_obj = tokenize.Tokenize(
            bug_handler=RtfInvalidCodeException,
            in_file=self.__temp_file,
            copy=self.__copy,
            run_level=self.__run_level)
        tokenize_obj.tokenize()
        # normalise tokens; a failure here is usually a bad/unknown encoding
        process_tokens_obj = process_tokens.ProcessTokens(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
            exception_handler=InvalidRtfException,
        )
        try:
            return_value = process_tokens_obj.process_tokens()
        except InvalidRtfException as msg:
            # Check to see if the file is correctly encoded
            encode_obj = default_encoding.DefaultEncoding(
                in_file=self.__temp_file,
                run_level=self.__run_level,
                bug_handler=RtfInvalidCodeException,
                check_raw=True,
                default_encoding=self.__default_encoding,
            )
            platform, code_page, default_font_num = encode_obj.find_default_encoding()
            check_encoding_obj = check_encoding.CheckEncoding(
                bug_handler=RtfInvalidCodeException,
            )
            enc = encode_obj.get_codepage()
            # TODO: to check if cp is a good idea or if I should use a dict to convert
            enc = 'cp' + enc
            msg = '%s\nException in token processing' % str(msg)
            if check_encoding_obj.check_encoding(self.__file, enc):
                file_name = self.__file if isinstance(self.__file, bytes) \
                    else self.__file.encode('utf-8')
                msg +='\nFile %s does not appear to be correctly encoded.\n' % file_name
            # best-effort cleanup of the temp file before re-raising
            try:
                os.remove(self.__temp_file)
            except OSError:
                pass
            raise InvalidRtfException(msg)
        delete_info_obj = delete_info.DeleteInfo(
            in_file=self.__temp_file,
            copy=self.__copy,
            bug_handler=RtfInvalidCodeException,
            run_level=self.__run_level,)
        # found destination means {\*\destination
        # if found, the RTF should be newer RTF
        found_destination = delete_info_obj.delete_info()
        self.__bracket_match('delete_data_info')
        # put picts in a separate file
        pict_obj = pict.Pict(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            orig_file=self.__file,
            out_file=self.__out_file,
            run_level=self.__run_level,
        )
        pict_obj.process_pict()
        self.__bracket_match('pict_data_info')
        combine_obj = combine_borders.CombineBorders(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,)
        combine_obj.combine_borders()
        self.__bracket_match('combine_borders_info')
        # footnotes and headers are moved out of the body and re-joined
        # further below after the body has been fully processed
        footnote_obj = footnote.Footnote(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
        )
        footnote_obj.separate_footnotes()
        self.__bracket_match('separate_footnotes_info')
        header_obj = header.Header(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
        )
        header_obj.separate_headers()
        self.__bracket_match('separate_headers_info')
        list_numbers_obj = list_numbers.ListNumbers(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
        )
        list_numbers_obj.fix_list_numbers()
        self.__bracket_match('list_number_info')
        preamble_div_obj = preamble_div.PreambleDiv(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
        )
        list_of_lists = preamble_div_obj.make_preamble_divisions()
        self.__bracket_match('make_preamble_divisions')
        # determine the document's default encoding and default font
        encode_obj = default_encoding.DefaultEncoding(
            in_file=self.__temp_file,
            run_level=self.__run_level,
            bug_handler=RtfInvalidCodeException,
            default_encoding=self.__default_encoding,
        )
        platform, code_page, default_font_num = encode_obj.find_default_encoding()
        hex2utf_obj = hex_2_utf8.Hex2Utf8(
            in_file=self.__temp_file,
            copy=self.__copy,
            area_to_convert='preamble',
            char_file=self.__char_data,
            default_char_map=code_page,
            run_level=self.__run_level,
            bug_handler=RtfInvalidCodeException,
            invalid_rtf_handler=InvalidRtfException,
        )
        hex2utf_obj.convert_hex_2_utf8()
        self.__bracket_match('hex_2_utf_preamble')
        fonts_obj = fonts.Fonts(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            default_font_num=default_font_num,
            run_level=self.__run_level,
        )
        special_font_dict = fonts_obj.convert_fonts()
        self.__bracket_match('fonts_info')
        color_obj = colors.Colors(
            in_file=self.__temp_file,
            copy=self.__copy,
            bug_handler=RtfInvalidCodeException,
            run_level=self.__run_level,
        )
        color_obj.convert_colors()
        self.__bracket_match('colors_info')
        style_obj = styles.Styles(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
        )
        style_obj.convert_styles()
        self.__bracket_match('styles_info')
        info_obj = info.Info(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,
        )
        info_obj.fix_info()
        default_font = special_font_dict.get('default-font')
        preamble_rest_obj = preamble_rest.Preamble(
            file=self.__temp_file, copy=self.__copy,
            bug_handler=RtfInvalidCodeException,
            platform=platform, default_font=default_font,
            code_page=code_page)
        preamble_rest_obj.fix_preamble()
        self.__bracket_match('preamble_rest_info')
        old_rtf_obj = OldRtf(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            run_level=self.__run_level,
        )
        # RTF can actually have destination groups and old RTF.
        # BAH!
        old_rtf = old_rtf_obj.check_if_old_rtf()
        if old_rtf:
            if self.__run_level > 5:
                msg = 'Older RTF\n' \
                    'self.__run_level is "%s"\n' % self.__run_level
                raise RtfInvalidCodeException(msg)
            if self.__run_level > 1:
                sys.stderr.write('File could be older RTF...\n')
            if found_destination:
                if self.__run_level > 1:
                    sys.stderr.write(
                        'File also has newer RTF.\n'
                        'Will do the best to convert...\n'
                    )
            # older RTF lacks some grouping braces; add them so the
            # later passes can rely on balanced groups
            add_brackets_obj = add_brackets.AddBrackets(
                in_file=self.__temp_file,
                bug_handler=RtfInvalidCodeException,
                copy=self.__copy,
                run_level=self.__run_level,
            )
            add_brackets_obj.add_brackets()
        fields_small_obj = fields_small.FieldsSmall(
            in_file=self.__temp_file,
            copy=self.__copy,
            bug_handler=RtfInvalidCodeException,
            run_level=self.__run_level,)
        fields_small_obj.fix_fields()
        self.__bracket_match('fix_small_fields_info')
        fields_large_obj = fields_large.FieldsLarge(
            in_file=self.__temp_file,
            copy=self.__copy,
            bug_handler=RtfInvalidCodeException,
            run_level=self.__run_level)
        fields_large_obj.fix_fields()
        self.__bracket_match('fix_large_fields_info')
        sections_obj = sections.Sections(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,)
        sections_obj.make_sections()
        self.__bracket_match('sections_info')
        paragraphs_obj = paragraphs.Paragraphs(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            write_empty_para=self.__empty_paragraphs,
            run_level=self.__run_level,)
        paragraphs_obj.make_paragraphs()
        self.__bracket_match('paragraphs_info')
        # NOTE(review): unlike the .get() above, this raises KeyError if
        # 'default-font' is missing -- presumably always present by now
        default_font = special_font_dict['default-font']
        paragraph_def_obj = paragraph_def.ParagraphDef(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            default_font=default_font,
            run_level=self.__run_level,)
        list_of_styles = paragraph_def_obj.make_paragraph_def()
        body_styles_obj = body_styles.BodyStyles(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            list_of_styles=list_of_styles,
            run_level=self.__run_level,)
        body_styles_obj.insert_info()
        self.__bracket_match('body_styles_info')
        self.__bracket_match('paragraph_def_info')
        table_obj = table.Table(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,)
        table_data = table_obj.make_table()
        self.__bracket_match('table_info')
        table_info_obj = table_info.TableInfo(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            table_data=table_data,
            run_level=self.__run_level,)
        table_info_obj.insert_info()
        self.__bracket_match('table__data_info')
        # optional passes, enabled by constructor flags
        if self.__form_lists:
            make_list_obj = make_lists.MakeLists(
                in_file=self.__temp_file,
                bug_handler=RtfInvalidCodeException,
                copy=self.__copy,
                headings_to_sections=self.__headings_to_sections,
                run_level=self.__run_level,
                list_of_lists=list_of_lists,
            )
            make_list_obj.make_lists()
            self.__bracket_match('form_lists_info')
        if self.__headings_to_sections:
            headings_to_sections_obj = headings_to_sections.HeadingsToSections(
                in_file=self.__temp_file,
                bug_handler=RtfInvalidCodeException,
                copy=self.__copy,
                run_level=self.__run_level,)
            headings_to_sections_obj.make_sections()
            self.__bracket_match('headings_to_sections_info')
        if self.__group_styles:
            group_styles_obj = group_styles.GroupStyles(
                in_file=self.__temp_file,
                bug_handler=RtfInvalidCodeException,
                copy=self.__copy,
                wrap=1,
                run_level=self.__run_level,)
            group_styles_obj.group_styles()
            self.__bracket_match('group_styles_info')
        if self.__group_borders:
            group_borders_obj = group_borders.GroupBorders(
                in_file=self.__temp_file,
                bug_handler=RtfInvalidCodeException,
                copy=self.__copy,
                wrap=1,
                run_level=self.__run_level,)
            group_borders_obj.group_borders()
            self.__bracket_match('group_borders_info')
        inline_obj = inline.Inline(
            in_file=self.__temp_file,
            bug_handler=RtfInvalidCodeException,
            copy=self.__copy,
            run_level=self.__run_level,)
        inline_obj.form_tags()
        self.__bracket_match('inline_info')
        # reuse the preamble converter for the body, with the caps/symbol
        # conversion options supplied by the caller
        hex2utf_obj.update_values(file=self.__temp_file,
            area_to_convert='body',
            copy=self.__copy,
            char_file=self.__char_data,
            convert_caps=self.__convert_caps,
            convert_symbol=self.__convert_symbol,
            convert_wingdings=self.__convert_wingdings,
            convert_zapf=self.__convert_zapf,
            symbol=1,
            wingdings=1,
            dingbats=1,
            )
        hex2utf_obj.convert_hex_2_utf8()
        # re-attach the headers and footnotes separated earlier
        header_obj.join_headers()
        footnote_obj.join_footnotes()
        tags_obj = convert_to_tags.ConvertToTags(
            in_file=self.__temp_file,
            copy=self.__copy,
            dtd_path=self.__dtd_path,
            indent=self.__indent,
            run_level=self.__run_level,
            no_dtd=self.__no_dtd,
            encoding=encode_obj.get_codepage(),
            bug_handler=RtfInvalidCodeException,
        )
        tags_obj.convert_to_tags()
        output_obj = output.Output(
            file=self.__temp_file,
            orig_file=self.__file,
            output_dir=self.__out_dir,
            out_file=self.__out_file,
        )
        output_obj.output()
        os.remove(self.__temp_file)
        return self.__exit_level
def __bracket_match(self, file_name):
if self.__run_level > 2:
good_br, msg = self.__check_brack_obj.check_brackets()
if good_br:
pass
# sys.stderr.write( msg + ' in ' + file_name + "\n")
else:
msg = f'{msg} in file {file_name}'
print(msg, file=sys.stderr)
def __return_code(self, num):
if num is None:
return
if int(num) > self.__exit_level:
self.__exit_level = num
def __make_temp_file(self,file):
"""Make a temporary file to parse"""
write_file="rtf_write_file"
read_obj = file if hasattr(file, 'read') else open_for_read(file)
with open_for_write(write_file) as write_obj:
for line in read_obj:
write_obj.write(line)
return write_file
| 23,251 | Python | .py | 582 | 28.245704 | 88 | 0.541433 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,307 | hex_2_utf8.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/hex_2_utf8.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import io
import os
import sys
from calibre.ebooks.rtf2xml import copy, get_char_map
from calibre.ebooks.rtf2xml.char_set import char_set
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Hex2Utf8:
    """
    Convert Microsoft hexadecimal numbers to utf-8
    """

    def __init__(self,
            in_file,
            area_to_convert,
            char_file,
            default_char_map,
            bug_handler,
            invalid_rtf_handler,
            copy=None,
            temp_dir=None,
            symbol=None,
            wingdings=None,
            caps=None,
            convert_caps=None,
            dingbats=None,
            run_level=1,
            ):
        """
        Required:
            'file'
            'area_to_convert'--the area of file to convert
            'char_file'--the file containing the character mappings
            'default_char_map'--name of default character map
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
            'symbol'--whether to load the symbol character map
            'wingdings'--whether to load the wingdings character map
            'caps'--whether to load the caps character map
            'convert_caps'--whether to convert caps to utf-8
        Returns:
            nothing
        """
        self.__file = in_file
        self.__copy = copy
        # Assign the handlers *before* validating area_to_convert: the
        # original assigned self.__bug_handler last, so an invalid flag
        # raised AttributeError instead of the intended handler.
        self.__bug_handler = bug_handler
        self.__invalid_rtf_handler = invalid_rtf_handler
        if area_to_convert not in ('preamble', 'body'):
            msg = (
                'Developer error! Wrong flag.\n'
                'in module "hex_2_utf8.py\n'
                '"area_to_convert" must be "body" or "preamble"\n'
            )
            raise self.__bug_handler(msg)
        self.__char_file = char_file
        self.__area_to_convert = area_to_convert
        self.__default_char_map = default_char_map
        self.__symbol = symbol
        self.__wingdings = wingdings
        self.__dingbats = dingbats
        self.__caps = caps
        # conversion switches default to off; update_values() turns them on
        self.__convert_caps = 0
        self.__convert_symbol = 0
        self.__convert_wingdings = 0
        self.__convert_zapf = 0
        self.__run_level = run_level
        self.__write_to = better_mktemp()

    def update_values(self,
            file,
            area_to_convert,
            char_file,
            convert_caps,
            convert_symbol,
            convert_wingdings,
            convert_zapf,
            copy=None,
            temp_dir=None,
            symbol=None,
            wingdings=None,
            caps=None,
            dingbats=None,
            ):
        """
        Reconfigure the instance so that it can be reused (typically to
        convert the body after the preamble).

        Required:
            'file'
            'area_to_convert'--the area of file to convert
            'char_file'--the file containing the character mappings
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
            'symbol'--whether to load the symbol character map
            'wingdings'--whether to load the wingdings character map
            'caps'--whether to load the caps character map
            'convert_caps'--whether to convert caps to utf-8
        Returns:
            nothing
        """
        self.__file=file
        self.__copy = copy
        if area_to_convert not in ('preamble', 'body'):
            msg = (
                'in module "hex_2_utf8.py\n'
                '"area_to_convert" must be "body" or "preamble"\n'
            )
            raise self.__bug_handler(msg)
        self.__area_to_convert = area_to_convert
        self.__symbol = symbol
        self.__wingdings = wingdings
        self.__dingbats = dingbats
        self.__caps = caps
        self.__convert_caps = convert_caps
        self.__convert_symbol = convert_symbol
        self.__convert_wingdings = convert_wingdings
        self.__convert_zapf = convert_zapf
        # new!
        # no longer try to convert these
        # self.__convert_symbol = 0
        # self.__convert_wingdings = 0
        # self.__convert_zapf = 0

    def __initiate_values(self):
        """
        Required:
            Nothing
        Set values, including those for the dictionaries.
        The file that contains the maps is broken down into many different
        sets. For example, for the Symbol font, there is the standard part for
        hexadecimal numbers, and the part for Microsoft characters. Read
        each part in, and then combine them.
        """
        # the default encoding system, the lower map for characters 0 through
        # 128, and the encoding system for Microsoft characters.
        # New on 2004-05-8: the self.__char_map is not in directory with other
        # modules
        self.__char_file = io.StringIO(char_set)
        char_map_obj = get_char_map.GetCharMap(
            char_file=self.__char_file,
            bug_handler=self.__bug_handler,
        )
        up_128_dict = char_map_obj.get_char_map(map=self.__default_char_map)
        bt_128_dict = char_map_obj.get_char_map(map='bottom_128')
        ms_standard_dict = char_map_obj.get_char_map(map='ms_standard')
        self.__def_dict = {}
        self.__def_dict.update(up_128_dict)
        self.__def_dict.update(bt_128_dict)
        self.__def_dict.update(ms_standard_dict)
        self.__current_dict = self.__def_dict
        self.__current_dict_name = 'default'
        self.__in_caps = 0
        self.__special_fonts_found = 0
        if self.__symbol:
            symbol_base_dict = char_map_obj.get_char_map(map='SYMBOL')
            ms_symbol_dict = char_map_obj.get_char_map(map='ms_symbol')
            self.__symbol_dict = {}
            self.__symbol_dict.update(symbol_base_dict)
            self.__symbol_dict.update(ms_symbol_dict)
        if self.__wingdings:
            wingdings_base_dict = char_map_obj.get_char_map(map='wingdings')
            ms_wingdings_dict = char_map_obj.get_char_map(map='ms_wingdings')
            self.__wingdings_dict = {}
            self.__wingdings_dict.update(wingdings_base_dict)
            self.__wingdings_dict.update(ms_wingdings_dict)
        if self.__dingbats:
            dingbats_base_dict = char_map_obj.get_char_map(map='dingbats')
            ms_dingbats_dict = char_map_obj.get_char_map(map='ms_dingbats')
            self.__dingbats_dict = {}
            self.__dingbats_dict.update(dingbats_base_dict)
            self.__dingbats_dict.update(ms_dingbats_dict)
        # load dictionary for caps, and make a string for the replacement
        self.__caps_uni_dict = char_map_obj.get_char_map(map='caps_uni')
        # state machine dispatch tables keyed by state / 16-char token info
        self.__preamble_state_dict = {
            'preamble'          : self.__preamble_func,
            'body'              : self.__body_func,
            'mi<mk<body-open_'  : self.__found_body_func,
            'tx<hx<__________'  : self.__hex_text_func,
        }
        self.__body_state_dict = {
            'preamble'  : self.__preamble_for_body_func,
            'body'      : self.__body_for_body_func,
        }
        self.__in_body_dict = {
            'mi<mk<body-open_'  : self.__found_body_func,
            'tx<ut<__________'  : self.__utf_to_caps_func,
            'tx<hx<__________'  : self.__hex_text_func,
            'tx<mc<__________'  : self.__hex_text_func,
            'tx<nu<__________'  : self.__text_func,
            'mi<mk<font______'  : self.__start_font_func,
            'mi<mk<caps______'  : self.__start_caps_func,
            'mi<mk<font-end__'  : self.__end_font_func,
            'mi<mk<caps-end__'  : self.__end_caps_func,
        }
        # stacks tracking the current caps state and font face
        self.__caps_list = ['false']
        self.__font_list = ['not-defined']

    def __hex_text_func(self, line):
        """
        Required:
            'line' -- the line
        Logic:
            get the hex_num and look it up in the default dictionary. If the
            token is in the dictionary, then check if the value starts with a
            "&". If it does, then tag the result as utf text. Otherwise, tag it
            as normal text.
            If the hex_num is not in the dictionary, then a mistake has been
            made.
        """
        hex_num = line[17:-1]
        converted = self.__current_dict.get(hex_num)
        if converted is not None:
            # tag as utf-8
            if converted[0:1] == "&":
                font = self.__current_dict_name
                if self.__convert_caps\
                    and self.__caps_list[-1] == 'true'\
                    and font not in ('Symbol', 'Wingdings', 'Zapf Dingbats'):
                    converted = self.__utf_token_to_caps_func(converted)
                self.__write_obj.write(
                    'tx<ut<__________<%s\n' % converted
                )
            # tag as normal text
            else:
                font = self.__current_dict_name
                if self.__convert_caps\
                    and self.__caps_list[-1] == 'true'\
                    and font not in ('Symbol', 'Wingdings', 'Zapf Dingbats'):
                    converted = converted.upper()
                self.__write_obj.write(
                    'tx<nu<__________<%s\n' % converted
                )
        # error
        else:
            token = hex_num.replace("'", '')
            the_num = 0
            if token:
                the_num = int(token, 16)
            if the_num > 10:
                self.__write_obj.write('mi<tg<empty-att_<udef_symbol<num>%s<description>not-in-table\n' %
                                hex_num)
            if self.__run_level > 4:
                msg = 'Character "&#x%s;" does not appear to be valid (or is a control character)\n' % token
                raise self.__bug_handler(msg)

    def __found_body_func(self, line):
        # the body marker has been reached: switch to body state
        self.__state = 'body'
        self.__write_obj.write(line)

    def __body_func(self, line):
        """
        When parsing preamble
        """
        self.__write_obj.write(line)

    def __preamble_func(self, line):
        action = self.__preamble_state_dict.get(self.__token_info)
        if action is not None:
            action(line)
        else:
            self.__write_obj.write(line)

    def __convert_preamble(self):
        self.__state = 'preamble'
        with open_for_write(self.__write_to) as self.__write_obj:
            with open_for_read(self.__file) as read_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__preamble_state_dict.get(self.__state)
                    if action is None:
                        # file.write() takes exactly one string; the original
                        # passed two arguments here, which raised TypeError
                        sys.stderr.write(
                            'error no state found in hex_2_utf8 %s\n' % self.__state)
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "preamble_utf_convert.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)

    def __preamble_for_body_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            Used when parsing the body.
        """
        if self.__token_info == 'mi<mk<body-open_':
            self.__found_body_func(line)
        self.__write_obj.write(line)

    def __body_for_body_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            Used when parsing the body.
        """
        action = self.__in_body_dict.get(self.__token_info)
        if action is not None:
            action(line)
        else:
            self.__write_obj.write(line)

    def __start_font_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            add font face to font_list and select the matching char map
        """
        face = line[17:-1]
        self.__font_list.append(face)
        if face == 'Symbol' and self.__convert_symbol:
            self.__current_dict_name = 'Symbol'
            self.__current_dict = self.__symbol_dict
        elif face == 'Wingdings' and self.__convert_wingdings:
            self.__current_dict_name = 'Wingdings'
            self.__current_dict = self.__wingdings_dict
        elif face == 'Zapf Dingbats' and self.__convert_zapf:
            self.__current_dict_name = 'Zapf Dingbats'
            self.__current_dict = self.__dingbats_dict
        else:
            self.__current_dict_name = 'default'
            self.__current_dict = self.__def_dict

    def __end_font_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            pop font_list and re-select the char map for the enclosing font
        """
        if len(self.__font_list) > 1:
            self.__font_list.pop()
        else:
            sys.stderr.write('module is hex_2_utf8\n')
            sys.stderr.write('method is end_font_func\n')
            sys.stderr.write('self.__font_list should be greater than one?\n')
        face = self.__font_list[-1]
        if face == 'Symbol' and self.__convert_symbol:
            self.__current_dict_name = 'Symbol'
            self.__current_dict = self.__symbol_dict
        elif face == 'Wingdings' and self.__convert_wingdings:
            self.__current_dict_name = 'Wingdings'
            self.__current_dict = self.__wingdings_dict
        elif face == 'Zapf Dingbats' and self.__convert_zapf:
            self.__current_dict_name = 'Zapf Dingbats'
            self.__current_dict = self.__dingbats_dict
        else:
            self.__current_dict_name = 'default'
            self.__current_dict = self.__def_dict

    # NOTE(review): the two *_old/special methods below appear to be legacy
    # code -- nothing in the dispatch tables references them, and they treat
    # self.__current_dict as a list.  Kept unchanged for history.
    def __start_special_font_func_old(self, line):
        """
        Required:
            line -- line
        Returns;
            nothing
        Logic:
            change the dictionary to use in conversion
        """
        # for error checking
        if self.__token_info == 'mi<mk<font-symbo':
            self.__current_dict.append(self.__symbol_dict)
            self.__special_fonts_found += 1
            self.__current_dict_name = 'Symbol'
        elif self.__token_info == 'mi<mk<font-wingd':
            self.__special_fonts_found += 1
            self.__current_dict.append(self.__wingdings_dict)
            self.__current_dict_name = 'Wingdings'
        elif self.__token_info == 'mi<mk<font-dingb':
            self.__current_dict.append(self.__dingbats_dict)
            self.__special_fonts_found += 1
            self.__current_dict_name = 'Zapf Dingbats'

    def __end_special_font_func(self, line):
        """
        Required:
            line --line to parse
        Returns:
            nothing
        Logic:
            pop the last dictionary, which should be a special font
        """
        if len(self.__current_dict) < 2:
            sys.stderr.write('module is hex_2_utf 8\n')
            sys.stderr.write('method is __end_special_font_func\n')
            sys.stderr.write('less than two dictionaries --can\'t pop\n')
            self.__special_fonts_found -= 1
        else:
            self.__current_dict.pop()
            self.__special_fonts_found -= 1
            self.__dict_name = 'default'

    def __start_caps_func_old(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            A marker that marks the start of caps has been found. Set
            self.__in_caps to 1
        """
        self.__in_caps = 1

    def __start_caps_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            A marker that marks the start of caps has been found. Set
            self.__in_caps to 1 and push the caps value on the stack.
        """
        self.__in_caps = 1
        value = line[17:-1]
        self.__caps_list.append(value)

    def __end_caps_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            A marker that marks the end of caps has been found:
            pop the caps stack.
        """
        if len(self.__caps_list) > 1:
            self.__caps_list.pop()
        else:
            sys.stderr.write('Module is hex_2_utf8\n'
                    'method is __end_caps_func\n'
                    'caps list should be more than one?\n')  # self.__in_caps not set

    def __text_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            if in caps, convert. Otherwise, print out.
        """
        text = line[17:-1]
        if self.__current_dict_name in ('Symbol', 'Wingdings', 'Zapf Dingbats'):
            # map each character of special-font text through the char map
            the_string = ''
            for letter in text:
                hex_num = hex(ord(letter))
                hex_num = str(hex_num)
                hex_num = hex_num.upper()
                hex_num = hex_num[2:]
                hex_num = '\'%s' % hex_num
                converted = self.__current_dict.get(hex_num)
                if converted is None:
                    sys.stderr.write('module is hex_2_ut8\nmethod is __text_func\n')
                    sys.stderr.write('no hex value for "%s"\n' % hex_num)
                else:
                    the_string += converted
            self.__write_obj.write('tx<nu<__________<%s\n' % the_string)
        else:
            if self.__caps_list[-1] == 'true' \
                and self.__convert_caps\
                and self.__current_dict_name not in ('Symbol', 'Wingdings', 'Zapf Dingbats'):
                text = text.upper()
            self.__write_obj.write('tx<nu<__________<%s\n' % text)

    def __utf_to_caps_func(self, line):
        """
        Required:
            line -- line to parse
        returns
            nothing
        Logic
            Get the text, and use another method to convert
        """
        utf_text = line[17:-1]
        if self.__caps_list[-1] == 'true' and self.__convert_caps:
            utf_text = self.__utf_token_to_caps_func(utf_text)
        self.__write_obj.write('tx<ut<__________<%s\n' % utf_text)

    def __utf_token_to_caps_func(self, char_entity):
        """
        Required:
            char_entity -- a utf token such as &#xE9;
        Returns:
            token converted to the capital equivalent
        Logic:
            RTF often stores text with the improper case. The capital
            equivalent is looked up in the caps dictionary; entities with
            no capital form (bullets etc.) are returned unchanged.
        """
        # pad the hex digits to 4 places to match the dictionary keys
        hex_num = char_entity[3:]
        length = len(hex_num)
        if length == 3:
            hex_num = '00%s' % hex_num
        elif length == 4:
            hex_num = '0%s' % hex_num
        new_char_entity = '&#x%s' % hex_num
        converted = self.__caps_uni_dict.get(new_char_entity)
        if not converted:
            # bullets and other entities don't have capital equivalents
            return char_entity
        else:
            return converted

    def __convert_body(self):
        self.__state = 'body'
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__body_state_dict.get(self.__state)
                    if action is None:
                        # file.write() takes exactly one string; the original
                        # passed two arguments here, which raised TypeError
                        sys.stderr.write(
                            'error no state found in hex_2_utf8 %s\n' % self.__state)
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "body_utf_convert.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)

    def convert_hex_2_utf8(self):
        """Run the conversion over the configured area of the file."""
        self.__initiate_values()
        if self.__area_to_convert == 'preamble':
            self.__convert_preamble()
        else:
            self.__convert_body()
"""
how to swap case for non-capitals
my_string.swapcase()
An example of how to use a hash for the caps function
(but I shouldn't need this, since utf text is separate
from regular text?)
sub_dict = {
"а" : "some other value"
}
def my_sub_func(matchobj):
info = matchobj.group(0)
value = sub_dict.get(info)
return value
return "f"
line = "а more text"
reg_exp = re.compile(r'(?P<name>а|б)')
line2 = re.sub(reg_exp, my_sub_func, line)
print line2
"""
| 22,374 | Python | .py | 561 | 28.627451 | 112 | 0.511636 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,308 | paragraph_def.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/paragraph_def.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import border_parse, copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class ParagraphDef:
"""
=================
Purpose
=================
Write paragraph definition tags.
States:
1. before_1st_para_def.
Before any para_def token is found. This means all the text in the preamble.
Look for the token 'cw<pf<par-def___'. This will changet the state to collect_tokens.
2. collect_tokens.
Found a paragraph_def. Need to get all tokens.
Change with start of a paragrph ('mi<mk<para-start'). State then becomes
in_paragraphs
If another paragraph definition is found, the state does not change.
But the dictionary is reset.
3. in_paragraphs
State changes when 'mi<mk<para-end__', or end of paragraph is found.
State then becomes 'self.__state = 'after_para_end'
4. after_para_end
If 'mi<mk<para-start' (the start of a paragraph) or 'mi<mk<para-end__' (the end of a paragraph--must be empty paragraph?) are found:
state changes to 'in_paragraphs'
If 'cw<pf<par-def___' (paragraph_definition) is found:
state changes to collect_tokens
if 'mi<mk<body-close', 'mi<mk<par-in-fld',
'cw<tb<cell______','cw<tb<row-def___','cw<tb<row_______',
'mi<mk<sect-close', 'mi<mk<header-beg', 'mi<mk<header-end'
are found. (All these tokens mark the start of a bigger element. para_def must
be closed:
state changes to 'after_para_def'
5. after_para_def
'mi<mk<para-start' changes state to in_paragraphs
if another paragraph_def is found, the state changes to collect_tokens.
"""
def __init__(self,
in_file,
bug_handler,
default_font,
copy=None,
run_level=1,):
"""
Required:
'file'--file to parse
'default_font' --document default font
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__default_font = default_font
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
def __initiate_values(self):
"""
Initiate all values.
"""
# Dictionary needed to convert shortened style names to readable names
self.__token_dict={
# paragraph formatting => pf
'par-end___' : 'para',
'par-def___' : 'paragraph-definition',
'keep-w-nex' : 'keep-with-next',
'widow-cntl' : 'widow-control',
'adjust-rgt' : 'adjust-right',
'language__' : 'language',
'right-inde' : 'right-indent',
'fir-ln-ind' : 'first-line-indent',
'left-inden' : 'left-indent',
'space-befo' : 'space-before',
'space-afte' : 'space-after',
'line-space' : 'line-spacing',
'default-ta' : 'default-tab',
'align_____' : 'align',
'widow-cntr' : 'widow-control',
# stylesheet = > ss
'style-shet' : 'stylesheet',
'based-on__' : 'based-on-style',
'next-style' : 'next-style',
'char-style' : 'character-style',
# this is changed to get a nice attribute
'para-style' : 'name',
# graphics => gr
'picture___' : 'pict',
'obj-class_' : 'obj_class',
'mac-pic___' : 'mac-pict',
# section => sc
'section___' : 'section-new',
'sect-defin' : 'section-reset',
'sect-note_' : 'endnotes-in-section',
# list=> ls
'list-text_' : 'list-text',
'list______' : 'list',
'list-lev-d' : 'list-level-definition',
'list-cardi' : 'list-cardinal-numbering',
'list-decim' : 'list-decimal-numbering',
'list-up-al' : 'list-uppercase-alphabetic-numbering',
'list-up-ro' : 'list-uppercae-roman-numbering',
'list-ord__' : 'list-ordinal-numbering',
'list-ordte' : 'list-ordinal-text-numbering',
'list-bulli' : 'list-bullet',
'list-simpi' : 'list-simple',
'list-conti' : 'list-continue',
'list-hang_' : 'list-hang',
# 'list-tebef' : 'list-text-before',
# 'list-level' : 'level',
'list-id___' : 'list-id',
'list-start' : 'list-start',
'nest-level' : 'nest-level',
# duplicate
'list-level' : 'list-level',
# notes => nt
'footnote__' : 'footnote',
'type______' : 'type',
# anchor => an
'toc_______' : 'anchor-toc',
'book-mk-st' : 'bookmark-start',
'book-mk-en' : 'bookmark-end',
'index-mark' : 'anchor-index',
'place_____' : 'place',
# field => fd
'field_____' : 'field',
'field-inst' : 'field-instruction',
'field-rslt' : 'field-result',
'datafield_' : 'data-field',
# info-tables => it
'font-table' : 'font-table',
'colr-table' : 'color-table',
'lovr-table' : 'list-override-table',
'listtable_' : 'list-table',
'revi-table' : 'revision-table',
# character info => ci
'hidden____' : 'hidden',
'italics___' : 'italics',
'bold______' : 'bold',
'strike-thr' : 'strike-through',
'shadow____' : 'shadow',
'outline___' : 'outline',
'small-caps' : 'small-caps',
'caps______' : 'caps',
'dbl-strike' : 'double-strike-through',
'emboss____' : 'emboss',
'engrave___' : 'engrave',
'subscript_' : 'subscript',
'superscrip' : 'superscipt',
'font-style' : 'font-style',
'font-color' : 'font-color',
'font-size_' : 'font-size',
'font-up___' : 'superscript',
'font-down_' : 'subscript',
'red_______' : 'red',
'blue______' : 'blue',
'green_____' : 'green',
# table => tb
'row-def___' : 'row-definition',
'cell______' : 'cell',
'row_______' : 'row',
'in-table__' : 'in-table',
'columns___' : 'columns',
'row-pos-le' : 'row-position-left',
'cell-posit' : 'cell-position',
# preamble => pr
# underline
'underlined' : 'underlined',
# border => bd
'bor-t-r-hi' : 'border-table-row-horizontal-inside',
'bor-t-r-vi' : 'border-table-row-vertical-inside',
'bor-t-r-to' : 'border-table-row-top',
'bor-t-r-le' : 'border-table-row-left',
'bor-t-r-bo' : 'border-table-row-bottom',
'bor-t-r-ri' : 'border-table-row-right',
'bor-cel-bo' : 'border-cell-bottom',
'bor-cel-to' : 'border-cell-top',
'bor-cel-le' : 'border-cell-left',
'bor-cel-ri' : 'border-cell-right',
# 'bor-par-bo' : 'border-paragraph-bottom',
'bor-par-to' : 'border-paragraph-top',
'bor-par-le' : 'border-paragraph-left',
'bor-par-ri' : 'border-paragraph-right',
'bor-par-bo' : 'border-paragraph-box',
'bor-for-ev' : 'border-for-every-paragraph',
'bor-outsid' : 'border-outisde',
'bor-none__' : 'border',
# border type => bt
'bdr-single' : 'single',
'bdr-doubtb' : 'double-thickness-border',
'bdr-shadow' : 'shadowed-border',
'bdr-double' : 'double-border',
'bdr-dotted' : 'dotted-border',
'bdr-dashed' : 'dashed',
'bdr-hair__' : 'hairline',
'bdr-inset_' : 'inset',
'bdr-das-sm' : 'dash-small',
'bdr-dot-sm' : 'dot-dash',
'bdr-dot-do' : 'dot-dot-dash',
'bdr-outset' : 'outset',
'bdr-trippl' : 'tripple',
'bdr-thsm__' : 'thick-thin-small',
'bdr-htsm__' : 'thin-thick-small',
'bdr-hthsm_' : 'thin-thick-thin-small',
'bdr-thm__' : 'thick-thin-medium',
'bdr-htm__' : 'thin-thick-medium',
'bdr-hthm_' : 'thin-thick-thin-medium',
'bdr-thl__' : 'thick-thin-large',
'bdr-hthl_' : 'think-thick-think-large',
'bdr-wavy_' : 'wavy',
'bdr-d-wav' : 'double-wavy',
'bdr-strip' : 'striped',
'bdr-embos' : 'emboss',
'bdr-engra' : 'engrave',
'bdr-frame' : 'frame',
'bdr-li-wid' : 'line-width',
}
self.__tabs_dict = {
'cw<pf<tab-stop__' : self.__tab_stop_func,
'cw<pf<tab-center' : self.__tab_type_func,
'cw<pf<tab-right_' : self.__tab_type_func,
'cw<pf<tab-dec___' : self.__tab_type_func,
'cw<pf<leader-dot' : self.__tab_leader_func,
'cw<pf<leader-hyp' : self.__tab_leader_func,
'cw<pf<leader-und' : self.__tab_leader_func,
'cw<pf<tab-bar-st' : self.__tab_bar_func,
}
self.__tab_type_dict = {
'cw<pf<tab-center' : 'center',
'cw<pf<tab-right_' : 'right',
'cw<pf<tab-dec___' : 'decimal',
'cw<pf<leader-dot' : 'leader-dot',
'cw<pf<leader-hyp' : 'leader-hyphen',
'cw<pf<leader-und' : 'leader-underline',
}
self.__border_obj = border_parse.BorderParse()
self.__style_num_strings = []
self.__body_style_strings = []
self.__state = 'before_1st_para_def'
self.__att_val_dict = {}
self.__start_marker = 'mi<mk<pard-start\n' # outside para tags
self.__start2_marker = 'mi<mk<pardstart_\n' # inside para tags
self.__end2_marker = 'mi<mk<pardend___\n' # inside para tags
self.__end_marker = 'mi<mk<pard-end__\n' # outside para tags
self.__text_string = ''
self.__state_dict = {
'before_1st_para_def' : self.__before_1st_para_def_func,
'collect_tokens' : self.__collect_tokens_func,
'after_para_def' : self.__after_para_def_func,
'in_paragraphs' : self.__in_paragraphs_func,
'after_para_end' : self.__after_para_end_func,
}
self.__collect_tokens_dict = {
'mi<mk<para-start' : self.__end_para_def_func,
'cw<pf<par-def___' : self.__para_def_in_para_def_func,
'cw<tb<cell______' : self.__empty_table_element_func,
'cw<tb<row_______' : self.__empty_table_element_func,
}
self.__after_para_def_dict = {
'mi<mk<para-start' : self.__start_para_after_def_func,
'cw<pf<par-def___' : self.__found_para_def_func,
'cw<tb<cell______' : self.__empty_table_element_func,
'cw<tb<row_______' : self.__empty_table_element_func,
}
self.__in_paragraphs_dict = {
'mi<mk<para-end__' : self.__found_para_end_func,
}
self.__after_para_end_dict = {
'mi<mk<para-start' : self.__continue_block_func,
'mi<mk<para-end__' : self.__continue_block_func,
'cw<pf<par-def___' : self.__new_para_def_func,
'mi<mk<body-close' : self.__stop_block_func,
'mi<mk<par-in-fld' : self.__stop_block_func,
'cw<tb<cell______' : self.__stop_block_func,
'cw<tb<row-def___' : self.__stop_block_func,
'cw<tb<row_______' : self.__stop_block_func,
'mi<mk<sect-close' : self.__stop_block_func,
'mi<mk<sect-start' : self.__stop_block_func,
'mi<mk<header-beg' : self.__stop_block_func,
'mi<mk<header-end' : self.__stop_block_func,
'mi<mk<head___clo' : self.__stop_block_func,
'mi<mk<fldbk-end_' : self.__stop_block_func,
'mi<mk<lst-txbeg_' : self.__stop_block_func,
}
def __before_1st_para_def_func(self, line):
"""
Required:
line -- line to parse
Returns:
nothing
Logic:
Look for the beginning of a paragraph definition
"""
# cw<pf<par-def___<nu<true
if self.__token_info == 'cw<pf<par-def___':
self.__found_para_def_func()
else:
self.__write_obj.write(line)
def __found_para_def_func(self):
self.__state = 'collect_tokens'
# not exactly right--have to reset the dictionary--give it default
# values
self.__reset_dict()
def __collect_tokens_func(self, line):
"""
Required:
line --line to parse
Returns:
nothing
Logic:
Check the collect_tokens_dict for either the beginning of a
paragraph or a new paragraph definition. Take the actions
according to the value in the dict.
Otherwise, check if the token is not a control word. If it is not,
change the state to after_para_def.
Otherwise, check if the token is a paragraph definition word; if
so, add it to the attributes and values dictionary.
"""
action = self.__collect_tokens_dict.get(self.__token_info)
if action:
action(line)
elif line[0:2] != 'cw':
self.__write_obj.write(line)
self.__state = 'after_para_def'
elif line[0:5] == 'cw<bd':
self.__parse_border(line)
else:
action = self.__tabs_dict.get(self.__token_info)
if action:
action(line)
else:
token = self.__token_dict.get(line[6:16])
if token:
self.__att_val_dict[token] = line[20:-1]
def __tab_stop_func(self, line):
"""
"""
self.__att_val_dict['tabs'] += '%s:' % self.__tab_type
self.__att_val_dict['tabs'] += '%s;' % line[20:-1]
self.__tab_type = 'left'
def __tab_type_func(self, line):
"""
"""
type = self.__tab_type_dict.get(self.__token_info)
if type is not None:
self.__tab_type = type
else:
if self.__run_level > 3:
msg = 'no entry for %s\n' % self.__token_info
raise self.__bug_handler(msg)
def __tab_leader_func(self, line):
"""
"""
leader = self.__tab_type_dict.get(self.__token_info)
if leader is not None:
self.__att_val_dict['tabs'] += '%s^' % leader
else:
if self.__run_level > 3:
msg = 'no entry for %s\n' % self.__token_info
raise self.__bug_handler(msg)
def __tab_bar_func(self, line):
"""
"""
# self.__att_val_dict['tabs-bar'] += '%s:' % line[20:-1]
self.__att_val_dict['tabs'] += 'bar:%s;' % (line[20:-1])
self.__tab_type = 'left'
def __parse_border(self, line):
"""
Requires:
line --line to parse
Returns:
nothing (updates dictionary)
Logic:
Uses the border_parse module to return a dictionary of attribute
value pairs for a border line.
"""
border_dict = self.__border_obj.parse_border(line)
self.__att_val_dict.update(border_dict)
def __para_def_in_para_def_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
I have found a \\pard while I am collecting tokens. I want to reset
the dectionary and do nothing else.
"""
# Change this
self.__state = 'collect_tokens'
self.__reset_dict()
def __end_para_def_func(self, line):
"""
Requires:
Nothing
Returns:
Nothing
Logic:
The previous state was collect tokens, and I have found the start
of a paragraph. I want to output the definition tag; output the line
itself (telling me of the beginning of a paragraph);change the
state to 'in_paragraphs';
"""
self.__write_para_def_beg()
self.__write_obj.write(line)
self.__state = 'in_paragraphs'
def __start_para_after_def_func(self, line):
"""
Requires:
Nothing
Returns:
Nothing
Logic:
The state was is after_para_def. and I have found the start of a
paragraph. I want to output the definition tag; output the line
itself (telling me of the beginning of a paragraph);change the
state to 'in_paragraphs'.
(I now realize that this is absolutely identical to the function above!)
"""
self.__write_para_def_beg()
self.__write_obj.write(line)
self.__state = 'in_paragraphs'
def __after_para_def_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing
Logic:
Check if the token info is the start of a paragraph. If so, call
on the function found in the value of the dictionary.
"""
action = self.__after_para_def_dict.get(self.__token_info)
if self.__token_info == 'cw<pf<par-def___':
self.__found_para_def_func()
elif action:
action(line)
else:
self.__write_obj.write(line)
def __in_paragraphs_func(self, line):
"""
Requires:
line --current line
Returns:
nothing
Logic:
Look for the end of a paragraph, the start of a cell or row.
"""
action = self.__in_paragraphs_dict.get(self.__token_info)
if action:
action(line)
else:
self.__write_obj.write(line)
def __found_para_end_func(self,line):
"""
Requires:
line -- line to print out
Returns:
Nothing
Logic:
State is in paragraphs. You have found the end of a paragraph. You
need to print out the line and change the state to after
paragraphs.
"""
self.__state = 'after_para_end'
self.__write_obj.write(line)
def __after_para_end_func(self, line):
"""
Requires:
line -- line to output
Returns:
nothing
Logic:
The state is after the end of a paragraph. You are collecting all
the lines in a string and waiting to see if you need to write
out the paragraph definition. If you find another paragraph
definition, then you write out the old paragraph dictionary and
print out the string. You change the state to collect tokens.
If you find any larger block elements, such as cell, row,
field-block, or section, you write out the paragraph definition and
then the text string.
If you find the beginning of a paragraph, then you don't need to
write out the paragraph definition. Write out the string, and
change the state to in paragraphs.
"""
self.__text_string += line
action = self.__after_para_end_dict.get(self.__token_info)
if action:
action(line)
def __continue_block_func(self, line):
"""
Requires:
line --line to print out
Returns:
Nothing
Logic:
The state is after the end of a paragraph. You have found the
start of a paragraph, so you don't need to print out the paragraph
definition. Print out the string, the line, and change the state
to in paragraphs.
"""
self.__state = 'in_paragraphs'
self.__write_obj.write(self.__text_string)
self.__text_string = ''
# found a new paragraph definition after an end of a paragraph
def __new_para_def_func(self, line):
"""
Requires:
line -- line to output
Returns:
Nothing
Logic:
You have found a new paragraph definition at the end of a
paragraph. Output the end of the old paragraph definition. Output
the text string. Output the line. Change the state to collect
tokens. (And don't forget to set the text string to ''!)
"""
self.__write_para_def_end_func()
self.__found_para_def_func()
# after a paragraph and found reason to stop this block
def __stop_block_func(self, line):
"""
Requires:
line --(shouldn't be here?)
Returns:
nothing
Logic:
The state is after a paragraph, and you have found a larger block
than paragraph-definition. You want to write the end tag of the
old definition and reset the text string (handled by other
methods).
"""
self.__write_para_def_end_func()
self.__state = 'after_para_def'
def __write_para_def_end_func(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
Print out the end of the pargraph definition tag, and the markers
that let me know when I have reached this tag. (These markers are
used for later parsing.)
"""
self.__write_obj.write(self.__end2_marker)
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_obj.write(self.__end_marker)
self.__write_obj.write(self.__text_string)
self.__text_string = ''
keys = self.__att_val_dict.keys()
if 'font-style' in keys:
self.__write_obj.write('mi<mk<font-end__\n')
if 'caps' in keys:
self.__write_obj.write('mi<mk<caps-end__\n')
def __get_num_of_style(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
Get a unique value for each style.
"""
my_string = ''
new_style = 0
# when determining uniqueness for a style, ingorne these values, since
# they don't tell us if the style is unique
ignore_values = ['style-num', 'nest-level', 'in-table']
for k in sorted(self.__att_val_dict):
if k not in ignore_values:
my_string += f'{k}:{self.__att_val_dict[k]}'
if my_string in self.__style_num_strings:
num = self.__style_num_strings.index(my_string)
num += 1 # since indexing starts at zero, rather than 1
else:
self.__style_num_strings.append(my_string)
num = len(self.__style_num_strings)
new_style = 1
num = '%04d' % num
self.__att_val_dict['style-num'] = 's' + str(num)
if new_style:
self.__write_body_styles()
def __write_body_styles(self):
style_string = ''
style_string += 'mi<tg<empty-att_<paragraph-style-in-body'
style_string += '<name>%s' % self.__att_val_dict['name']
style_string += '<style-number>%s' % self.__att_val_dict['style-num']
tabs_list = ['tabs-left', 'tabs-right', 'tabs-decimal', 'tabs-center',
'tabs-bar', 'tabs']
if self.__att_val_dict['tabs'] != '':
the_value = self.__att_val_dict['tabs']
# the_value = the_value[:-1]
style_string += ('<{}>{}'.format('tabs', the_value))
exclude = frozenset(['name', 'style-num', 'in-table'] + tabs_list)
for k in sorted(self.__att_val_dict):
if k not in exclude:
style_string += (f'<{k}>{self.__att_val_dict[k]}')
style_string += '\n'
self.__body_style_strings.append(style_string)
def __write_para_def_beg(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
Print out the beginning of the pargraph definition tag, and the markers
that let me know when I have reached this tag. (These markers are
used for later parsing.)
"""
self.__get_num_of_style()
table = self.__att_val_dict.get('in-table')
if table:
# del self.__att_val_dict['in-table']
self.__write_obj.write('mi<mk<in-table__\n')
else:
self.__write_obj.write('mi<mk<not-in-tbl\n')
left_indent = self.__att_val_dict.get('left-indent')
if left_indent:
self.__write_obj.write('mi<mk<left_inden<%s\n' % left_indent)
is_list = self.__att_val_dict.get('list-id')
if is_list:
self.__write_obj.write('mi<mk<list-id___<%s\n' % is_list)
else:
self.__write_obj.write('mi<mk<no-list___\n')
self.__write_obj.write('mi<mk<style-name<%s\n' % self.__att_val_dict['name'])
self.__write_obj.write(self.__start_marker)
self.__write_obj.write('mi<tg<open-att__<paragraph-definition')
self.__write_obj.write('<name>%s' % self.__att_val_dict['name'])
self.__write_obj.write('<style-number>%s' % self.__att_val_dict['style-num'])
tabs_list = ['tabs-left', 'tabs-right', 'tabs-decimal', 'tabs-center',
'tabs-bar', 'tabs']
"""
for tab_item in tabs_list:
if self.__att_val_dict[tab_item] != '':
the_value = self.__att_val_dict[tab_item]
the_value = the_value[:-1]
self.__write_obj.write('<%s>%s' % (tab_item, the_value))
"""
if self.__att_val_dict['tabs'] != '':
the_value = self.__att_val_dict['tabs']
# the_value = the_value[:-1]
self.__write_obj.write('<{}>{}'.format('tabs', the_value))
keys = sorted(self.__att_val_dict)
exclude = frozenset(['name', 'style-num', 'in-table'] + tabs_list)
for key in keys:
if key not in exclude:
self.__write_obj.write(f'<{key}>{self.__att_val_dict[key]}')
self.__write_obj.write('\n')
self.__write_obj.write(self.__start2_marker)
if 'font-style' in keys:
face = self.__att_val_dict['font-style']
self.__write_obj.write('mi<mk<font______<%s\n' % face)
if 'caps' in keys:
value = self.__att_val_dict['caps']
self.__write_obj.write('mi<mk<caps______<%s\n' % value)
def __empty_table_element_func(self, line):
self.__write_obj.write('mi<mk<in-table__\n')
self.__write_obj.write(line)
self.__state = 'after_para_def'
def __reset_dict(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
The dictionary containing values and attributes must be reset each
time a new paragraphs definition is found.
"""
self.__att_val_dict.clear()
self.__att_val_dict['name'] = 'Normal'
self.__att_val_dict['font-style'] = self.__default_font
self.__tab_type = 'left'
self.__att_val_dict['tabs-left'] = ''
self.__att_val_dict['tabs-right'] = ''
self.__att_val_dict['tabs-center'] = ''
self.__att_val_dict['tabs-decimal'] = ''
self.__att_val_dict['tabs-bar'] = ''
self.__att_val_dict['tabs'] = ''
def make_paragraph_def(self):
"""
Requires:
nothing
Returns:
nothing (changes the original file)
Logic:
Read one line in at a time. Determine what action to take based on
the state.
"""
self.__initiate_values()
read_obj = open_for_read(self.__file)
self.__write_obj = open_for_write(self.__write_to)
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state)
if action is None:
sys.stderr.write('no no matching state in module sections.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler=self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "paragraphs_def.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
return self.__body_style_strings
#########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from calibre.utils.mreplace import MReplace
from polyglot.builtins import codepoint_to_chr
from . import open_for_read, open_for_write
class Tokenize:
    """Tokenize RTF into one line per field. Each line will contain information useful for the rest of the script"""

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
            # out_file = None,
            ):
        """
        Required:
            'in_file' -- the RTF file to tokenize (it is rewritten in place)
            'bug_handler' -- exception class to raise on fatal errors
        Optional:
            'copy' -- whether to make a copy of the result for debugging
            'run_level' -- verbosity/strictness level (currently unused here)
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__write_to = better_mktemp()
        # self.__write_to = out_file
        self.__compile_expressions()
        # \uc / \u bookkeeping (see __unicode_process)
        self.__uc_char = 0
        self.__uc_bin = False
        self.__uc_value = [1]

    def __reini_utf8_counters(self):
        # Reset the \uN fallback-skipping state; called whenever a group
        # boundary or a new \u / \uc control word makes it stale.
        self.__uc_char = 0
        self.__uc_bin = False

    def __remove_uc_chars(self, startchar, token):
        """
        Drop up to self.__uc_char characters from token, starting at
        startchar.  These are the ANSI fallback characters that follow a
        \\uN control word and must not appear in the output.
        """
        for i in range(startchar, len(token)):
            if self.__uc_char:
                self.__uc_char -= 1
            else:
                return token[i:]
        # the whole token consisted of chars to skip
        return ''

    def __unicode_process(self, token):
        """
        Handle the RTF \\uc / \\u machinery for one token: track the
        per-group \\uc value, skip the ANSI fallback characters after a
        \\uN control word, and convert \\uN itself into an XML character
        reference.
        """
        # change scope in
        if token == r'\{':
            self.__uc_value.append(self.__uc_value[-1])
            # basic error handling
            self.__reini_utf8_counters()
            return token
        # change scope out
        elif token == r'\}':
            if self.__uc_value:
                self.__uc_value.pop()
            self.__reini_utf8_counters()
            return token
        # add a uc control
        elif token[:3] == '\\uc':
            self.__uc_value[-1] = int(token[3:])
            self.__reini_utf8_counters()
            return token
        # bin data to skip
        elif self.__uc_bin:
            self.__uc_bin = False
            return ''
        # uc char to remove
        elif self.__uc_char:
            # handle \bin tag in case of uc char to skip
            # NOTE: this must compare against '\\bin' (escaped backslash).
            # The previous code compared against the literal '\bin', whose
            # first character is a backspace (\x08), so the branch could
            # never match and binary data was not skipped here.
            if token[:4] == '\\bin':
                self.__uc_char -= 1
                self.__uc_bin = True
                return ''
            elif token[:1] == '\\':
                self.__uc_char -= 1
                return ''
            else:
                return self.__remove_uc_chars(0, token)
        # go for real \u token
        match_obj = self.__utf_exp.match(token)
        if match_obj is not None:
            self.__reini_utf8_counters()
            # get value and handle negative case
            uni_char = int(match_obj.group(1))
            uni_len = len(match_obj.group(0))
            if uni_char < 0:
                uni_char += 65536
            uni_char = codepoint_to_chr(uni_char).encode('ascii', 'xmlcharrefreplace').decode('ascii')
            self.__uc_char = self.__uc_value[-1]
            # there is only an unicode char
            if len(token) <= uni_len:
                return uni_char
            # an unicode char and something else
            # must be after as it is splited on \
            # necessary? maybe for \bin?
            elif not self.__uc_char:
                return uni_char + token[uni_len:]
            # if not uc0 and chars
            else:
                return uni_char + self.__remove_uc_chars(uni_len, token)
        # default
        return token

    def __sub_reg_split(self, input_file):
        """
        Apply the global substitutions to the whole file, then split it
        into tokens, dropping empty strings and bare newlines.
        """
        input_file = self.__replace_spchar.mreplace(input_file)
        # this is for older RTF
        input_file = self.__par_exp.sub(r'\n\\par \n', input_file)
        input_file = self.__cwdigit_exp.sub(r"\g<1>\n\g<2>", input_file)
        input_file = self.__cs_ast.sub(r"\g<1>", input_file)
        input_file = self.__ms_hex_exp.sub(r"\\mshex0\g<1> ", input_file)
        input_file = self.__utf_ud.sub(r"\\{\\uc0 \g<1>\\}", input_file)
        # remove \n in bin data
        input_file = self.__bin_exp.sub(lambda x:
                                        x.group().replace('\n', '') + '\n', input_file)
        # split
        tokens = re.split(self.__splitexp, input_file)
        # remove empty tokens and \n
        return list(filter(lambda x: len(x) > 0 and x != '\n', tokens))

    def __compile_expressions(self):
        """
        Pre-compile the simple replacement table and every regular
        expression used by __sub_reg_split.
        """
        SIMPLE_RPL = {
            "\\\\": "\\backslash ",
            "\\~": "\\~ ",
            "\\;": "\\; ",
            "&": "&amp;",
            "<": "&lt;",
            ">": "&gt;",
            "\\_": "\\_ ",
            "\\:": "\\: ",
            "\\-": "\\- ",
            # turn into a generic token to eliminate special
            # cases and make processing easier
            "\\{": "\\ob ",
            # turn into a generic token to eliminate special
            # cases and make processing easier
            "\\}": "\\cb ",
            # put a backslash in front of to eliminate special cases and
            # make processing easier
            "{": "\\{",
            # put a backslash in front of to eliminate special cases and
            # make processing easier
            "}": "\\}",
        }
        self.__replace_spchar = MReplace(SIMPLE_RPL)
        # add ;? in case of char following \u
        self.__ms_hex_exp = re.compile(r"\\\'([0-9a-fA-F]{2})")
        self.__utf_exp = re.compile(r"\\u(-?\d{3,6}) ?")
        self.__bin_exp = re.compile(r"(?:\\bin(-?\d{0,10})[\n ]+)[01\n]+")
        # manage upr/ud situations
        self.__utf_ud = re.compile(r"\\{[\n ]?\\upr[\n ]?(?:\\{.*?\\})[\n ]?" +
                            r"\\{[\n ]?\\*[\n ]?\\ud[\n ]?(\\{.*?\\})[\n ]?\\}[\n ]?\\}")
        # add \n in split for whole file reading
        # why keep backslash whereas \is replaced before?
        # remove \n from endline char
        self.__splitexp = re.compile(r"(\\[{}]|\n|\\[^\s\\{}&]+(?:[ \t\r\f\v])?)")
        # this is for old RTF
        self.__par_exp = re.compile(r'(\\\n+|\\ )')
        # handle improper cs char-style with \* before without {
        self.__cs_ast = re.compile(r'\\\*([\n ]*\\cs\d+[\n \\]+)')
        # handle cw using a digit as argument and without space as delimiter
        self.__cwdigit_exp = re.compile(r"(\\[a-zA-Z]+[\-0-9]+)([^0-9 \\]+)")

    def tokenize(self):
        """
        Main entry point.  Reads the whole file, applies the basic
        substitutions and splits into tokens (__sub_reg_split), resolves
        the \\uc / \\u unicode handling per token (__unicode_process),
        then writes the tokens back one per line, replacing the original
        file.
        """
        # read
        with open_for_read(self.__file) as read_obj:
            input_file = read_obj.read()
        # process simple replacements and split giving us a correct list
        # remove '' and \n in the process
        tokens = self.__sub_reg_split(input_file)
        # correct unicode
        tokens = map(self.__unicode_process, tokens)
        # remove empty items created by removing \uc
        tokens = list(filter(lambda x: len(x) > 0, tokens))
        # write
        with open_for_write(self.__write_to) as write_obj:
            write_obj.write('\n'.join(tokens))
        # Move and copy
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "tokenize.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
# self.__special_tokens = [ '_', '~', "'", '{', '}' ]
# import sys
# def main(args=sys.argv):
# if len(args) < 2:
# print 'No file'
# return
# file = 'data_tokens.txt'
# if len(args) == 3:
# file = args[2]
# to = Tokenize(args[1], Exception, out_file = file)
# to.tokenize()
# if __name__ == '__main__':
# sys.exit(main())
# calibre-debug -e src/calibre/ebooks/rtf2xml/tokenize.py
#########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Info:
"""
Make tags for document-information
"""
def __init__(self,
in_file,
bug_handler,
copy=None,
run_level=1,
):
"""
Required:
'file'--file to parse
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
def __initiate_values(self):
"""
Initiate all values.
"""
self.__text_string = ''
self.__state = 'before_info_table'
self.rmspace = re.compile(r'\s+')
self.__state_dict = {
'before_info_table': self.__before_info_table_func,
'after_info_table': self.__after_info_table_func,
'in_info_table' : self.__in_info_table_func,
'collect_text' : self.__collect_text_func,
'collect_tokens' : self.__collect_tokens_func,
}
self.__info_table_dict = {
'cw<di<title_____' : (self.__found_tag_with_text_func, 'title'),
'cw<di<author____' : (self.__found_tag_with_text_func, 'author'),
'cw<di<operator__' : (self.__found_tag_with_text_func, 'operator'),
'cw<di<manager___' : (self.__found_tag_with_text_func, 'manager'),
'cw<di<company___' : (self.__found_tag_with_text_func, 'company'),
'cw<di<keywords__' : (self.__found_tag_with_text_func, 'keywords'),
'cw<di<category__' : (self.__found_tag_with_text_func, 'category'),
'cw<di<doc-notes_' : (self.__found_tag_with_text_func, 'doc-notes'),
'cw<di<subject___' : (self.__found_tag_with_text_func, 'subject'),
'cw<di<linkbase__' : (self.__found_tag_with_text_func, 'hyperlink-base'),
'cw<di<create-tim' : (self.__found_tag_with_tokens_func, 'creation-time'),
'cw<di<revis-time' : (self.__found_tag_with_tokens_func, 'revision-time'),
'cw<di<print-time' : (self.__found_tag_with_tokens_func, 'printing-time'),
'cw<di<backuptime' : (self.__found_tag_with_tokens_func, 'backup-time'),
'cw<di<num-of-wor' : (self.__single_field_func, 'number-of-words'),
'cw<di<num-of-chr' : (self.__single_field_func, 'number-of-characters'),
'cw<di<numofchrws' : (self.__single_field_func, 'number-of-characters-without-space'),
'cw<di<num-of-pag' : (self.__single_field_func, 'number-of-pages'),
'cw<di<version___' : (self.__single_field_func, 'version'),
'cw<di<edit-time_' : (self.__single_field_func, 'editing-time'),
'cw<di<intern-ver' : (self.__single_field_func, 'internal-version-number'),
'cw<di<internalID' : (self.__single_field_func, 'internal-id-number'),
}
self.__token_dict = {
'year______' : 'year',
'month_____' : 'month',
'day_______' : 'day',
'minute____' : 'minute',
'second____' : 'second',
'revis-time' : 'revision-time',
'create-tim' : 'creation-time',
'edit-time_' : 'editing-time',
'print-time' : 'printing-time',
'backuptime' : 'backup-time',
'num-of-wor' : 'number-of-words',
'num-of-chr' : 'number-of-characters',
'numofchrws' : 'number-of-characters-without-space',
'num-of-pag' : 'number-of-pages',
'version___' : 'version',
'intern-ver' : 'internal-version-number',
'internalID' : 'internal-id-number',
}
def __before_info_table_func(self, line):
"""
Required:
line -- the line to parse
Returns:
nothing
Logic:
Check for the beginning of the information table. When found, set
the state to the information table. Always write the line.
"""
if self.__token_info == 'mi<mk<doc-in-beg':
self.__state = 'in_info_table'
self.__write_obj.write(line)
def __in_info_table_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing.
Logic:
Check for the end of information. If not found, check if the
token has a special value in the info table dictionary. If it
does, execute that function.
Otherwise, output the line to the file.
"""
if self.__token_info == 'mi<mk<doc-in-end':
self.__state = 'after_info_table'
else:
action, tag = self.__info_table_dict.get(self.__token_info, (None, None))
if action:
action(line, tag)
else:
self.__write_obj.write(line)
def __found_tag_with_text_func(self, line, tag):
"""
Requires:
line -- line to parse
tag --what kind of line
Returns:
nothing
Logic:
This function marks the beginning of information fields that have
text that must be collected. Set the type of information field
with the tag option. Set the state to collecting text
"""
self.__tag = tag
self.__state = 'collect_text'
def __collect_text_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing
Logic:
If the end of the information field is found, write the text
string to the file.
Otherwise, if the line contains text, add it to the text string.
"""
if self.__token_info == 'mi<mk<docinf-end':
self.__state = 'in_info_table'
# Don't print empty tags
if len(self.rmspace.sub('',self.__text_string)):
self.__write_obj.write(
'mi<tg<open______<%s\n'
'tx<nu<__________<%s\n'
'mi<tg<close_____<%s\n' % (self.__tag, self.__text_string, self.__tag)
)
self.__text_string = ''
elif line[0:2] == 'tx':
self.__text_string += line[17:-1]
def __found_tag_with_tokens_func(self, line, tag):
"""
Requires:
line -- line to parse
tag -- type of field
Returns:
nothing
Logic:
Some fields have a series of tokens (cw<di<year______<nu<2003)
that must be parsed as attributes for the element.
Set the state to collect tokesn, and set the text string to
start an empty element with attributes.
"""
self.__state = 'collect_tokens'
self.__text_string = 'mi<tg<empty-att_<%s' % tag
# mi<tg<empty-att_<page-definition<margin>33\n
def __collect_tokens_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing
Logic:
This function collects all the token information and adds it to
the text string until the end of the field is found.
First check of the end of the information field. If found, write
the text string to the file.
If not found, get the relevant information from the text string.
This information cannot be directly added to the text string,
because it exists in abbreviated form. (num-of-wor)
I want to check this information in a dictionary to convert it
to a longer, readable form. If the key does not exist in the
dictionary, print out an error message. Otherwise add the value
to the text string.
(num-of-wor => number-of-words)
"""
# cw<di<year______<nu<2003
if self.__token_info == 'mi<mk<docinf-end':
self.__state = 'in_info_table'
self.__write_obj.write(
'%s\n' % self.__text_string
)
self.__text_string = ''
else:
att = line[6:16]
value = line[20:-1]
att_changed = self.__token_dict.get(att)
if att_changed is None:
if self.__run_level > 3:
msg = 'No dictionary match for %s\n' % att
raise self.__bug_handler(msg)
else:
self.__text_string += f'<{att_changed}>{value}'
def __single_field_func(self, line, tag):
value = line[20:-1]
self.__write_obj.write(
f'mi<tg<empty-att_<{tag}<{tag}>{value}\n'
)
def __after_info_table_func(self, line):
"""
Requires:
line --line to write to file
Returns:
nothing
Logic:
After the end of the information table, simple write the line to
the file.
"""
self.__write_obj.write(line)
def fix_info(self):
"""
Requires:
nothing
Returns:
nothing (changes the original file)
Logic:
Read one line in at a time. Determine what action to take based on
the state. If the state is before the information table, look for the
beginning of the style table.
If the state is in the information table, use other methods to
parse the information
style table, look for lines with style info, and substitute the
number with the name of the style. If the state if after the
information table, simply write the line to the output file.
"""
self.__initiate_values()
with open_for_read(self.__file) as read_obj:
with open_for_write(self.__write_to) as self.__write_obj:
for line in read_obj:
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state)
if action is None:
sys.stderr.write('No matching state in module styles.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
copy_obj = copy.Copy(bug_handler=self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "info.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
| 11,684 | Python | .py | 270 | 32.937037 | 95 | 0.513732 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,311 | styles.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/styles.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import border_parse, copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Styles:
    """
    Change lines with style numbers to actual style names.
    """
    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file' --the intermediate token file to parse and rewrite
            'bug_handler' --exception class used to report parse problems
        Optional:
            'copy' --whether to make a copy of the result for debugging
            'run_level' --strictness level; levels above 3 raise on
            unrecognized tokens instead of skipping them
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        # temporary file the rewritten result is staged in before it
        # replaces the original (see convert_styles)
        self.__write_to = better_mktemp()
        self.__run_level = run_level
    def __initiate_values(self):
        """
        Initiate all values: the per-run parse state, the state-machine
        dispatch table, and the (static) lookup tables that translate
        abbreviated token names to readable ones.
        """
        self.__border_obj = border_parse.BorderParse()
        # styles_dict collects parsed styles: {'par': {num: {att: val}}, 'char': {...}}
        self.__styles_dict = {'par':{}, 'char':{}}
        self.__styles_num = '0'
        self.__type_of_style = 'par'
        self.__text_string = ''
        self.__state = 'before_styles_table'
        self.__state_dict = {
            'before_styles_table': self.__before_styles_func,
            'in_styles_table' : self.__in_styles_func,
            'in_individual_style' : self.__in_individual_style_func,
            'after_styles_table' : self.__after_styles_func,
            'mi<mk<styles-beg' : self.__found_styles_table_func,
            'mi<mk<styles-end' : self.__found_end_styles_table_func,
            'mi<mk<stylei-beg' : self.__found_beg_ind_style_func,
            'mi<mk<stylei-end' : self.__found_end_ind_style_func,
            'cw<ss<para-style' : self.__para_style_func,
            'cw<ss<char-style' : self.__char_style_func,
        }
        # A separate dictionary for parsing the body text
        self.__body_dict = {
            'cw<ss<para-style' : (self.__para_style_in_body_func, 'par'),
            'cw<ss<char-style' : (self.__para_style_in_body_func, 'char'),
        }
        # Dictionary needed to convert shortened style names to readable names
        self.__token_dict={
            # paragraph formatting => pf
            'par-end___' : 'para',
            'par-def___' : 'paragraph-definition',
            'keep-w-nex' : 'keep-with-next',
            'widow-cntl' : 'widow-control',
            'adjust-rgt' : 'adjust-right',
            'language__' : 'language',
            'right-inde' : 'right-indent',
            'fir-ln-ind' : 'first-line-indent',
            'left-inden' : 'left-indent',
            'space-befo' : 'space-before',
            'space-afte' : 'space-after',
            'line-space' : 'line-spacing',
            'default-ta' : 'default-tab',
            'align_____' : 'align',
            # NOTE(review): both 'widow-cntl' and 'widow-cntr' map to
            # 'widow-control' -- presumably two spellings of one token
            'widow-cntr' : 'widow-control',
            # page formatting mixed in! (Just in older RTF?)
            'margin-lef' : 'left-indent',
            'margin-rig' : 'right-indent',
            'margin-bot' : 'space-after',
            'margin-top' : 'space-before',
            # stylesheet = > ss
            'style-shet' : 'stylesheet',
            'based-on__' : 'based-on-style',
            'next-style' : 'next-style',
            'char-style' : 'character-style',
            'para-style' : 'paragraph-style',
            # graphics => gr
            'picture___' : 'pict',
            'obj-class_' : 'obj_class',
            'mac-pic___' : 'mac-pict',
            # section => sc
            'section___' : 'section-new',
            'sect-defin' : 'section-reset',
            'sect-note_' : 'endnotes-in-section',
            # list=> ls
            'list-text_' : 'list-text',
            'list______' : 'list',
            'list-lev-d' : 'list-level-definition',
            'list-cardi' : 'list-cardinal-numbering',
            'list-decim' : 'list-decimal-numbering',
            'list-up-al' : 'list-uppercase-alphabetic-numbering',
            'list-up-ro' : 'list-uppercae-roman-numbering',
            'list-ord__' : 'list-ordinal-numbering',
            'list-ordte' : 'list-ordinal-text-numbering',
            'list-bulli' : 'list-bullet',
            'list-simpi' : 'list-simple',
            'list-conti' : 'list-continue',
            'list-hang_' : 'list-hang',
            # 'list-tebef' : 'list-text-before',
            # 'list-level' : 'level',
            'list-id___' : 'list-id',
            'list-start' : 'list-start',
            'nest-level' : 'nest-level',
            # duplicate
            # NOTE(review): duplicate dict key -- the later entry wins,
            # which here maps 'list-level' to 'list-level'
            'list-level' : 'list-level',
            # notes => nt
            'footnote__' : 'footnote',
            'type______' : 'type',
            # anchor => an
            'toc_______' : 'anchor-toc',
            'book-mk-st' : 'bookmark-start',
            'book-mk-en' : 'bookmark-end',
            'index-mark' : 'anchor-index',
            'place_____' : 'place',
            # field => fd
            'field_____' : 'field',
            'field-inst' : 'field-instruction',
            'field-rslt' : 'field-result',
            'datafield_' : 'data-field',
            # info-tables => it
            'font-table' : 'font-table',
            'colr-table' : 'color-table',
            'lovr-table' : 'list-override-table',
            'listtable_' : 'list-table',
            'revi-table' : 'revision-table',
            # character info => ci
            'hidden____' : 'hidden',
            'italics___' : 'italics',
            'bold______' : 'bold',
            'strike-thr' : 'strike-through',
            'shadow____' : 'shadow',
            'outline___' : 'outline',
            'small-caps' : 'small-caps',
            'dbl-strike' : 'double-strike-through',
            'emboss____' : 'emboss',
            'engrave___' : 'engrave',
            'subscript_' : 'subscript',
            'superscrip' : 'superscript',
            'plain_____' : 'plain',
            'font-style' : 'font-style',
            'font-color' : 'font-color',
            'font-size_' : 'font-size',
            'font-up___' : 'superscript',
            'font-down_' : 'subscript',
            'red_______' : 'red',
            'blue______' : 'blue',
            'green_____' : 'green',
            'caps______' : 'caps',
            # table => tb
            'row-def___' : 'row-definition',
            'cell______' : 'cell',
            'row_______' : 'row',
            'in-table__' : 'in-table',
            'columns___' : 'columns',
            'row-pos-le' : 'row-position-left',
            'cell-posit' : 'cell-position',
            # preamble => pr
            # underline
            'underlined' : 'underlined',
            # border => bd
            'bor-t-r-hi' : 'border-table-row-horizontal-inside',
            'bor-t-r-vi' : 'border-table-row-vertical-inside',
            'bor-t-r-to' : 'border-table-row-top',
            'bor-t-r-le' : 'border-table-row-left',
            'bor-t-r-bo' : 'border-table-row-bottom',
            'bor-t-r-ri' : 'border-table-row-right',
            'bor-cel-bo' : 'border-cell-bottom',
            'bor-cel-to' : 'border-cell-top',
            'bor-cel-le' : 'border-cell-left',
            'bor-cel-ri' : 'border-cell-right',
            # 'bor-par-bo' : 'border-paragraph-bottom',
            'bor-par-to' : 'border-paragraph-top',
            'bor-par-le' : 'border-paragraph-left',
            'bor-par-ri' : 'border-paragraph-right',
            'bor-par-bo' : 'border-paragraph-box',
            'bor-for-ev' : 'border-for-every-paragraph',
            'bor-outsid' : 'border-outisde',
            'bor-none__' : 'border',
            # border type => bt
            'bdr-single' : 'single',
            'bdr-doubtb' : 'double-thickness-border',
            'bdr-shadow' : 'shadowed-border',
            'bdr-double' : 'double-border',
            'bdr-dotted' : 'dotted-border',
            'bdr-dashed' : 'dashed',
            'bdr-hair__' : 'hairline',
            'bdr-inset_' : 'inset',
            'bdr-das-sm' : 'dash-small',
            'bdr-dot-sm' : 'dot-dash',
            'bdr-dot-do' : 'dot-dot-dash',
            'bdr-outset' : 'outset',
            'bdr-trippl' : 'tripple',
            'bdr-thsm__' : 'thick-thin-small',
            'bdr-htsm__' : 'thin-thick-small',
            'bdr-hthsm_' : 'thin-thick-thin-small',
            'bdr-thm__' : 'thick-thin-medium',
            'bdr-htm__' : 'thin-thick-medium',
            'bdr-hthm_' : 'thin-thick-thin-medium',
            'bdr-thl__' : 'thick-thin-large',
            'bdr-hthl_' : 'think-thick-think-large',
            'bdr-wavy_' : 'wavy',
            'bdr-d-wav' : 'double-wavy',
            'bdr-strip' : 'striped',
            'bdr-embos' : 'emboss',
            'bdr-engra' : 'engrave',
            'bdr-frame' : 'frame',
            'bdr-li-wid' : 'line-width',
            # tabs
            'tab-center' : 'center',
            'tab-right_' : 'right',
            'tab-dec___' : 'decimal',
            'leader-dot' : 'leader-dot',
            'leader-hyp' : 'leader-hyphen',
            'leader-und' : 'leader-underline',
        }
        # dispatch table for tab-related tokens inside a style definition
        self.__tabs_dict = {
            'cw<pf<tab-stop__' : self.__tab_stop_func,
            'cw<pf<tab-center' : self.__tab_type_func,
            'cw<pf<tab-right_' : self.__tab_type_func,
            'cw<pf<tab-dec___' : self.__tab_type_func,
            'cw<pf<leader-dot' : self.__tab_leader_func,
            'cw<pf<leader-hyp' : self.__tab_leader_func,
            'cw<pf<leader-und' : self.__tab_leader_func,
            'cw<pf<tab-bar-st' : self.__tab_bar_func,
        }
        # maps a tab token to the readable tab type / leader name
        self.__tab_type_dict = {
            'cw<pf<tab-center' : 'center',
            'cw<pf<tab-right_' : 'right',
            'cw<pf<tab-dec___' : 'decimal',
            'cw<pf<leader-dot' : 'leader-dot',
            'cw<pf<leader-hyp' : 'leader-hyphen',
            'cw<pf<leader-und' : 'leader-underline',
        }
        # tokens that are deliberately skipped without an error
        self.__ignore_list = [
            'list-tebef',
        ]
        self.__tabs_list = self.__tabs_dict.keys()
        # current tab type; reset to the default 'left' after each stop
        self.__tab_type = 'left'
        self.__leader_found = 0
    def __in_individual_style_func(self, line):
        """
        Required:
            line
        Returns:
            nothing
        Logic:
            Check if the token marks the end of the individual style. (Action
            is the value of the state dictionary, and the only key that will
            match in this function is the end of the individual style.)
            If the end of the individual style is not found, check if the line
            is a control word. If it is, extract the relelvant info and look
            up this info in the tokens dictionary. I want to change
            abbreviated names for longer, more readable ones.
            Write an error message if no key is found for the info.
            If the line is text, add the text to a text string. The text
            string will be the name of the style.

            Note: the elif order below is significant -- border and tab
            tokens must be intercepted before the generic 'cw' branch.
        """
        action = self.__state_dict.get(self.__token_info)
        if action:
            action(line)
        # have to parse border lines with external module
        elif line[0:5] == 'cw<bd':
            border_dict = self.__border_obj.parse_border(line)
            keys = border_dict.keys()
            for key in keys:
                self.__enter_dict_entry(key, border_dict[key])
        elif self.__token_info in self.__tabs_list:
            action = self.__tabs_dict.get(self.__token_info)
            if action is not None:
                action(line)
        elif line[0:2] == 'cw':
            # generic control word, e.g.: cw<pf<widow-cntl<nu<true
            info = line[6:16]
            att = self.__token_dict.get(info)
            if att is None :
                # unknown token: skip silently unless strictness demands a report
                if info not in self.__ignore_list:
                    if self.__run_level > 3:
                        msg = 'no value for key %s\n' % info
                        raise self.__bug_handler(msg)
            else:
                value = line[20:-1]
                self.__enter_dict_entry(att, value)
        elif line[0:2] == 'tx':
            # text lines accumulate into the style's name
            self.__text_string += line[17:-1]
def __tab_stop_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing
Logic:
Try to add the number to dictionary entry tabs-left, or tabs-right, etc.
If the dictionary entry doesn't exist, create one.
"""
try:
if self.__leader_found:
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s:' % self.__tab_type
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s;' % line[20:-1]
else:
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s:' % self.__tab_type
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s;' % line[20:-1]
except KeyError:
self.__enter_dict_entry('tabs', '')
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s:' % self.__tab_type
self.__styles_dict['par'][self.__styles_num]['tabs'] += '%s;' % line[20:-1]
self.__tab_type = 'left'
self.__leader_found = 0
def __tab_type_func(self, line):
"""
"""
type = self.__tab_type_dict.get(self.__token_info)
if type is not None:
self.__tab_type = type
else:
if self.__run_level > 3:
msg = 'no entry for %s\n' % self.__token_info
raise self.__bug_handler(msg)
def __tab_leader_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
Try to add the string of the tab leader to dictionary entry
tabs-left, or tabs-right, etc. If the dictionary entry doesn't
exist, create one.
"""
self.__leader_found = 1
leader = self.__tab_type_dict.get(self.__token_info)
if leader is not None:
leader += '^'
try:
self.__styles_dict['par'][self.__styles_num]['tabs'] += ':%s;' % leader
except KeyError:
self.__enter_dict_entry('tabs', '')
self.__styles_dict['par'][self.__styles_num]['tabs'] += '%s;' % leader
else:
if self.__run_level > 3:
msg = 'no entry for %s\n' % self.__token_info
raise self.__bug_handler(msg)
def __tab_bar_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing
Logic:
Try to add the string of the tab bar to dictionary entry tabs-bar.
If the dictionary entry doesn't exist, create one.
"""
# self.__add_dict_entry('tabs-bar', line[20:-1])
try:
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s:' % 'bar'
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s;' % line[20:-1]
except KeyError:
self.__enter_dict_entry('tabs', '')
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s:' % 'bar'
self.__styles_dict['par'][self.__styles_num]['tabs']\
+= '%s;' % line[20:-1]
self.__tab_type = 'left'
def __enter_dict_entry(self, att, value):
"""
Required:
att -- the attribute
value -- the value
Returns:
nothing
Logic:
Try to add the attribute value directly to the styles dictionary.
If a keyerror is found, that means I have to build the "branches"
of the dictionary before I can add the key value pair.
"""
try:
self.__styles_dict[self.__type_of_style][self.__styles_num][att] = value
except KeyError:
self.__add_dict_entry(att, value)
    def __add_dict_entry(self, att, value):
        """
        Required:
            att --the attribute
            value --the value
        Returns:
            nothing
        Logic:
            I have to build the branches of the dictionary before I can add
            the leaves. (I am comparing a dictionary to a tree.) To achieve
            this, I first make a temporary dictionary by extracting either the
            inside dictionary of the keyword par or char. This temporary
            dictionary is called type_dict.
            Next, create a second, smaller dictionary with just the attribute and value.
            Add the small dictionary to the type dictionary.
            Add this type dictionary to the main styles dictionary.
        """
        if self.__type_of_style == 'par':
            type_dict =self.__styles_dict['par']
        elif self.__type_of_style == 'char':
            type_dict = self.__styles_dict['char']
        else:
            if self.__run_level > 3:
                msg = self.__type_of_style + 'error\n'
                raise self.__bug_handler(msg)
        # NOTE(review): if __type_of_style is neither 'par' nor 'char' and
        # run_level <= 3, type_dict is never bound and the assignment two
        # lines below raises NameError -- confirm whether that path is
        # reachable before changing it.
        smallest_dict = {}
        smallest_dict[att] = value
        type_dict[self.__styles_num] = smallest_dict
        self.__styles_dict[self.__type_of_style] = type_dict
def __para_style_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Set the type of style to paragraph.
Extract the number for a line such as "cw<ss<para-style<nu<15".
"""
self.__type_of_style = 'par'
self.__styles_num = line[20:-1]
"""
self.__enter_dict_entry('tabs-left', '')
self.__enter_dict_entry('tabs-right', '')
self.__enter_dict_entry('tabs-center', '')
self.__enter_dict_entry('tabs-decimal', '')
self.__enter_dict_entry('tabs-bar', '')
"""
def __char_style_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Set the type of style to character.
Extract the number for a line such as "cw<ss<char-style<nu<15".
"""
self.__type_of_style = 'char'
self.__styles_num = line[20:-1]
def __found_beg_ind_style_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Get rid of the last semicolon in the text string. Add the text
string as the value with 'name' as the key in the style
dictionary.
"""
self.__state = 'in_individual_style'
def __found_end_ind_style_func(self, line):
name = self.__text_string[:-1] # get rid of semicolon
# add 2005-04-29
# get rid of space before or after
name = name.strip()
self.__enter_dict_entry('name', name)
self.__text_string = ''
def __found_end_styles_table_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Set the state to after the styles table.
Fix the styles. (I explain this below.)
Print out the style table.
"""
self.__state = 'after_styles_table'
self.__fix_based_on()
self.__print_style_table()
def __fix_based_on(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
The styles dictionary may contain a pair of key values such as
'next-style' => '15'. I want to change the 15 to the name of the
style. I accomplish this by simply looking up the value of 15 in
the styles table.
Use two loops. First, check all the paragraph styles. Then check
all the character styles.
The inner loop: first check 'next-style', then check 'based-on-style'.
Make sure values exist for the keys to avoid the nasty keyerror message.
"""
types = ['par', 'char']
for type in types:
keys = self.__styles_dict[type].keys()
for key in keys:
styles = ['next-style', 'based-on-style']
for style in styles:
value = self.__styles_dict[type][key].get(style)
if value is not None:
temp_dict = self.__styles_dict[type].get(value)
if temp_dict:
changed_value = self.__styles_dict[type][value].get('name')
if changed_value:
self.__styles_dict[type][key][style] = \
changed_value
else:
if value == 0 or value == '0':
pass
else:
if self.__run_level > 4:
msg = f'{type} {key} is based on {value}\n'
msg = 'There is no style with %s\n' % value
raise self.__bug_handler(msg)
del self.__styles_dict[type][key][style]
def __print_style_table(self):
"""
Required:
nothing
Returns:
nothing
Logic:
This function prints out the style table.
I use three nested for loops. The outer loop prints out the
paragraphs styles, then the character styles.
The next loop iterates through the style numbers.
The most inside loop iterates over the pairs of attributes and
values, and prints them out.
"""
types = ['par', 'char']
for type in types:
if type == 'par':
prefix = 'paragraph'
else:
prefix = 'character'
self.__write_obj.write(
'mi<tg<open______<%s-styles\n' % prefix
)
style_numbers = self.__styles_dict[type].keys()
for num in style_numbers:
self.__write_obj.write(
f'mi<tg<empty-att_<{prefix}-style-in-table<num>{num}'
)
attributes = self.__styles_dict[type][num].keys()
for att in attributes:
this_value = self.__styles_dict[type][num][att]
self.__write_obj.write(
f'<{att}>{this_value}'
)
self.__write_obj.write('\n')
self.__write_obj.write(
'mi<tg<close_____<%s-styles\n' % prefix
)
def __found_styles_table_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Change the state to in the style table when the marker has been found.
"""
self.__state = 'in_styles_table'
def __before_styles_func(self, line):
"""
Required:
line
Returns:
nothing.
Logic:
Check the line info in the state dictionary. When the beginning of
the styles table is found, change the state to in the styles
table.
"""
action = self.__state_dict.get(self.__token_info)
if not action:
self.__write_obj.write(line)
else:
action(line)
def __in_styles_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Check the line for the beginning of an individual style. If it is
not found, simply print out the line.
"""
action = self.__state_dict.get(self.__token_info)
if action is None:
self.__write_obj.write(line)
else:
action(line)
def __para_style_in_body_func(self, line, type):
"""
Required:
line-- the line
type -- whether a character or paragraph
Returns:
nothing
Logic:
Determine the prefix by whether the type is "par" or "char".
Extract the number from a line such as "cw<ss<para-style<nu<15".
Look up that number in the styles dictionary and put a name for a number
"""
if type == 'par':
prefix = 'para'
else:
prefix = 'char'
num = line[20:-1]
# may be invalid RTF--a style down below not defined above!
try:
value = self.__styles_dict[type][num]['name']
except KeyError:
value = None
if value:
self.__write_obj.write(
f'cw<ss<{prefix}-style<nu<{value}\n'
)
else:
self.__write_obj.write(
'cw<ss<%s_style<nu<not-defined\n' % prefix
)
def __after_styles_func(self, line):
"""
Required:
line
Returns:
nothing
Logic:
Determine if a line with either character of paragraph style info
has been found. If so, then use the appropriate method to parse
the line. Otherwise, write the line to a file.
"""
action, type = self.__body_dict.get(self.__token_info, (None, None))
if action:
action(line, type)
else:
self.__write_obj.write(line)
def convert_styles(self):
"""
Requires:
nothing
Returns:
nothing (changes the original file)
Logic:
Read one line in at a time. Determine what action to take based on
the state. If the state is before the style table, look for the
beginning of the style table.
If the state is in the style table, create the style dictionary
and print out the tags.
If the state if after the style table, look for lines with style
info, and substitute the number with the name of the style.
"""
self.__initiate_values()
read_obj = open_for_read(self.__file)
self.__write_obj = open_for_write(self.__write_to)
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state)
if action is None:
sys.stderr.write('no matching state in module styles.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler=self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "styles.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
| 27,689 | Python | .py | 699 | 28.761087 | 88 | 0.492212 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,312 | char_set.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/char_set.py | char_set = """
<ms_standard>
NON-BREAKING HYPEHN:_:8290:‑
LEFT DOUBLE QUOTATION MARK:ldblquote:8220:“
RIGHT DOUBLE QUOTATION MARK:rdblquote:8221:”
LEFT SINGLE QUOTATION MARK:lquote:8216:‘
RIGHT SINGLE QUOTATION MARK:rquote:8217:’
EN DASH:endash:8211:–
EM DASH:emdash:8212:—
MIDDLE DOT:bullet:183:·
<control>:tab:9:	
NO-BREAK SPACE:~:160: 
SOFT-HYPHEN:-:173:­
</ms_standard>
<ms_symbol>
REGISTERED SIGN:ldblquote:174:®
COPYRIGHT SIGN:rdblquote:169:©
N-ARY PRODUCT:rquote:8719:∏
TRADE MARK SIGN:lquote:8482:™
ANGLE:emdash:8736:∠
WHITE DOWN-POINTING TRIANGLE:endash:9661:▽
INFINITY:bullet:8734:∞
<control>:tab:9:	
NO-BREAK SPACE:~:160: 
NON-BREAKING HYPEHN:_:8209:‑
</ms_symbol>
<ms_dingbats>
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT NINE:10130:ldblquote:➒
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TEN:rdblquote:10131:➓
HEAVY WIDE-HEADED RIGHTWARDS ARROW:lquote:10132:➔
RIGHTWARDS ARROW:rquote:8594:→
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SEVEN:endash:10128:➐
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT EIGHT:emdash:10129:➑
ROTATED HEAVY BLACK HEART BULLET:bullet:10085:❥
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE:~:10122:➊
WRITING HAND:_:9997:✍
</ms_dingbats>
<ms_wingdings>
<control>:tab:9:	
PROPOSE "HEDERA UPPER RIGHT":ldblquote:none:<udef_symbol num="?" description="hedera_upper_right"/>
PROPOSE "HEDERA LOWER LEFT":rdblquote:none:<udef_symbol num="?" description="hedera_lower_left"/>
PROPOSE "HEDERA LOWER RIGHT":lquote:none:<udef_symbol num="?" description="hedera_lower_right"/>
ERASE TO THE LEFT:rquote:9003:⌫
AMPERSAND:endash:38:&
PROPOSE "HEDERA UPPER LEFT":emdash:none:<udef_symbol num="?" description="hedera_upper_left"/>
SUN:bullet:9737:☉
PROPOSE "NOTCHED RIGHTWARDS DOUBLE ARROW WITH TIP DOWNWARDS":~:none:<udef_symbol description="notched_rightwards_double_arrow_with_tip_downwards"/>
PROPOSE "MAIL FLAG UP":_:none:<udef_symbol num="?" description="mail_flag_up"/>
</ms_wingdings>
<not_unicode>
NULL (NUL):'00:0:�
START OF HEADING (SOH):'01:1:
START OF TEXT (STX):'02:2:
END OF TEXT (ETX):'03:3:
END OF TRANSMISSION (EOT):'04:4:
ENQUIRY (ENQ):'05:5:
ACKNOWLEDGE (ACK):'06:6:
BELL (BEL):'07:7:
BACKSPACE (BS):'08:8:
LINE TABULATION (VT):'0B:11:
FORM FEED (FF):'0C:12:
SHIFT OUT (SO):'0E:14:
SHIFT IN (SI):'0F:15:
DATALINK ESCAPE (DLE):'10:16:
DEVICE CONTROL ONE (DC1):'11:17:
DEVICE CONTROL TWO (DC2):'12:18:
DEVICE CONTROL THREE (DC3):'13:19:
DEVICE CONTROL FOUR (DC4):'14:20:
NEGATIVE ACKNOWLEDGE (NAK):'15:21:
SYNCHRONOUS IDLE (SYN):'16:22:
END OF TRANSMISSION BLOCK (ETB):'17:23:
CANCEL (CAN):'18:24:
END OF MEDIUM (EM):'19:25:
SUBSTITUTE (SUB):'1A:26:
ESCAPE (ESC):'1B:27:
FILE SEPARATOR (IS4):'1C:28:
GROUP SEPARATOR (IS3):'1D:29:
RECORD SEPARATOR (IS2):'1E:30:
UNIT SEPARATOR (IS1):'1F:31:
</not_unicode>
<bottom_128>
CHARACTER TABULATION (HT):'09:9:	
LINE FEED (LF):'0A:10:

CARRIAGE RETURN (CR):'0D:13:
SPACE:'20:32:
EXCLAMATION MARK:'21:33:!
QUOTATION MARK:'22:34:"
NUMBER SIGN:'23:35:#
DOLLAR SIGN:'24:36:$
PERCENT SIGN:'25:37:%
AMPERSAND:'26:38:&
APOSTROPHE:'27:39:'
LEFT PARENTHESIS:'28:40:(
RIGHT PARENTHESIS:'29:41:)
ASTERISK:'2A:42:*
PLUS SIGN:'2B:43:+
COMMA:'2C:44:,
HYPHEN-MINUS:'2D:45:-
FULL STOP:'2E:46:.
SOLIDUS:'2F:47:/
DIGIT ZERO:'30:48:0
DIGIT ONE:'31:49:1
DIGIT TWO:'32:50:2
DIGIT THREE:'33:51:3
DIGIT FOUR:'34:52:4
DIGIT FIVE:'35:53:5
DIGIT SIX:'36:54:6
DIGIT SEVEN:'37:55:7
DIGIT EIGHT:'38:56:8
DIGIT NINE:'39:57:9
COLON:'3A:58:\\colon
SEMICOLON:'3B:59:;
LESS-THAN SIGN:'3C:60:<
EQUALS SIGN:'3D:61:=
GREATER-THAN SIGN:'3E:62:>
QUESTION MARK:'3F:63:?
COMMERCIAL AT:'40:64:@
LATIN CAPITAL LETTER A:'41:65:A
LATIN CAPITAL LETTER B:'42:66:B
LATIN CAPITAL LETTER C:'43:67:C
LATIN CAPITAL LETTER D:'44:68:D
LATIN CAPITAL LETTER E:'45:69:E
LATIN CAPITAL LETTER F:'46:70:F
LATIN CAPITAL LETTER G:'47:71:G
LATIN CAPITAL LETTER H:'48:72:H
LATIN CAPITAL LETTER I:'49:73:I
LATIN CAPITAL LETTER J:'4A:74:J
LATIN CAPITAL LETTER K:'4B:75:K
LATIN CAPITAL LETTER L:'4C:76:L
LATIN CAPITAL LETTER M:'4D:77:M
LATIN CAPITAL LETTER N:'4E:78:N
LATIN CAPITAL LETTER O:'4F:79:O
LATIN CAPITAL LETTER P:'50:80:P
LATIN CAPITAL LETTER Q:'51:81:Q
LATIN CAPITAL LETTER R:'52:82:R
LATIN CAPITAL LETTER S:'53:83:S
LATIN CAPITAL LETTER T:'54:84:T
LATIN CAPITAL LETTER U:'55:85:U
LATIN CAPITAL LETTER V:'56:86:V
LATIN CAPITAL LETTER W:'57:87:W
LATIN CAPITAL LETTER X:'58:88:X
LATIN CAPITAL LETTER Y:'59:89:Y
LATIN CAPITAL LETTER Z:'5A:90:Z
LEFT SQUARE BRACKET:'5B:91:[
REVERSE SOLIDUS:'5C:92:\
RIGHT SQUARE BRACKET:'5D:93:]
CIRCUMFLEX ACCENT:'5E:94:^
LOW LINE:'5F:95:_
GRAVE ACCENT:'60:96:`
LATIN SMALL LETTER A:'61:97:a
LATIN SMALL LETTER B:'62:98:b
LATIN SMALL LETTER C:'63:99:c
LATIN SMALL LETTER D:'64:100:d
LATIN SMALL LETTER E:'65:101:e
LATIN SMALL LETTER F:'66:102:f
LATIN SMALL LETTER G:'67:103:g
LATIN SMALL LETTER H:'68:104:h
LATIN SMALL LETTER I:'69:105:i
LATIN SMALL LETTER J:'6A:106:j
LATIN SMALL LETTER K:'6B:107:k
LATIN SMALL LETTER L:'6C:108:l
LATIN SMALL LETTER M:'6D:109:m
LATIN SMALL LETTER N:'6E:110:n
LATIN SMALL LETTER O:'6F:111:o
LATIN SMALL LETTER P:'70:112:p
LATIN SMALL LETTER Q:'71:113:q
LATIN SMALL LETTER R:'72:114:r
LATIN SMALL LETTER S:'73:115:s
LATIN SMALL LETTER T:'74:116:t
LATIN SMALL LETTER U:'75:117:u
LATIN SMALL LETTER V:'76:118:v
LATIN SMALL LETTER W:'77:119:w
LATIN SMALL LETTER X:'78:120:x
LATIN SMALL LETTER Y:'79:121:y
LATIN SMALL LETTER Z:'7A:122:z
LEFT CURLY BRACKET:'7B:123:{
VERTICAL LINE:'7C:124:|
RIGHT CURLY BRACKET:'7D:125:}
TILDE:'7E:126:~
DELETE (DEL):'7F:127:
</bottom_128>
<bottom_128_old>
NULL (NUL):'00:0:�
START OF HEADING (SOH):'01:1:
START OF TEXT (STX):'02:2:
END OF TEXT (ETX):'03:3:
END OF TRANSMISSION (EOT):'04:4:
ENQUIRY (ENQ):'05:5:
ACKNOWLEDGE (ACK):'06:6:
BELL (BEL):'07:7:
BACKSPACE (BS):'08:8:
CHARACTER TABULATION (HT):'09:9:	
LINE FEED (LF):'0A:10:

LINE TABULATION (VT):'0B:11:
FORM FEED (FF):'0C:12:
CARRIAGE RETURN (CR):'0D:13:
SHIFT OUT (SO):'0E:14:
SHIFT IN (SI):'0F:15:
DATALINK ESCAPE (DLE):'10:16:
DEVICE CONTROL ONE (DC1):'11:17:
DEVICE CONTROL TWO (DC2):'12:18:
DEVICE CONTROL THREE (DC3):'13:19:
DEVICE CONTROL FOUR (DC4):'14:20:
NEGATIVE ACKNOWLEDGE (NAK):'15:21:
SYNCHRONOUS IDLE (SYN):'16:22:
END OF TRANSMISSION BLOCK (ETB):'17:23:
CANCEL (CAN):'18:24:
END OF MEDIUM (EM):'19:25:
SUBSTITUTE (SUB):'1A:26:
ESCAPE (ESC):'1B:27:
FILE SEPARATOR (IS4):'1C:28:
GROUP SEPARATOR (IS3):'1D:29:
RECORD SEPARATOR (IS2):'1E:30:
UNIT SEPARATOR (IS1):'1F:31:
SPACE:'20:32: 
EXCLAMATION MARK:'21:33:!
QUOTATION MARK:'22:34:"
NUMBER SIGN:'23:35:#
DOLLAR SIGN:'24:36:$
PERCENT SIGN:'25:37:%
AMPERSAND:'26:38:&
APOSTROPHE:'27:39:'
LEFT PARENTHESIS:'28:40:(
RIGHT PARENTHESIS:'29:41:)
ASTERISK:'2A:42:*
PLUS SIGN:'2B:43:+
COMMA:'2C:44:,
HYPHEN-MINUS:'2D:45:-
FULL STOP:'2E:46:.
SOLIDUS:'2F:47:/
DIGIT ZERO:'30:48:0
DIGIT ONE:'31:49:1
DIGIT TWO:'32:50:2
DIGIT THREE:'33:51:3
DIGIT FOUR:'34:52:4
DIGIT FIVE:'35:53:5
DIGIT SIX:'36:54:6
DIGIT SEVEN:'37:55:7
DIGIT EIGHT:'38:56:8
DIGIT NINE:'39:57:9
COLON:'3A:58::
SEMICOLON:'3B:59:;
LESS-THAN SIGN:'3C:60:<
EQUALS SIGN:'3D:61:=
GREATER-THAN SIGN:'3E:62:>
QUESTION MARK:'3F:63:?
COMMERCIAL AT:'40:64:@
LATIN CAPITAL LETTER A:'41:65:A
LATIN CAPITAL LETTER B:'42:66:B
LATIN CAPITAL LETTER C:'43:67:C
LATIN CAPITAL LETTER D:'44:68:D
LATIN CAPITAL LETTER E:'45:69:E
LATIN CAPITAL LETTER F:'46:70:F
LATIN CAPITAL LETTER G:'47:71:G
LATIN CAPITAL LETTER H:'48:72:H
LATIN CAPITAL LETTER I:'49:73:I
LATIN CAPITAL LETTER J:'4A:74:J
LATIN CAPITAL LETTER K:'4B:75:K
LATIN CAPITAL LETTER L:'4C:76:L
LATIN CAPITAL LETTER M:'4D:77:M
LATIN CAPITAL LETTER N:'4E:78:N
LATIN CAPITAL LETTER O:'4F:79:O
LATIN CAPITAL LETTER P:'50:80:P
LATIN CAPITAL LETTER Q:'51:81:Q
LATIN CAPITAL LETTER R:'52:82:R
LATIN CAPITAL LETTER S:'53:83:S
LATIN CAPITAL LETTER T:'54:84:T
LATIN CAPITAL LETTER U:'55:85:U
LATIN CAPITAL LETTER V:'56:86:V
LATIN CAPITAL LETTER W:'57:87:W
LATIN CAPITAL LETTER X:'58:88:X
LATIN CAPITAL LETTER Y:'59:89:Y
LATIN CAPITAL LETTER Z:'5A:90:Z
LEFT SQUARE BRACKET:'5B:91:[
REVERSE SOLIDUS:'5C:92:\
RIGHT SQUARE BRACKET:'5D:93:]
CIRCUMFLEX ACCENT:'5E:94:^
LOW LINE:'5F:95:_
GRAVE ACCENT:'60:96:`
LATIN SMALL LETTER A:'61:97:a
LATIN SMALL LETTER B:'62:98:b
LATIN SMALL LETTER C:'63:99:c
LATIN SMALL LETTER D:'64:100:d
LATIN SMALL LETTER E:'65:101:e
LATIN SMALL LETTER F:'66:102:f
LATIN SMALL LETTER G:'67:103:g
LATIN SMALL LETTER H:'68:104:h
LATIN SMALL LETTER I:'69:105:i
LATIN SMALL LETTER J:'6A:106:j
LATIN SMALL LETTER K:'6B:107:k
LATIN SMALL LETTER L:'6C:108:l
LATIN SMALL LETTER M:'6D:109:m
LATIN SMALL LETTER N:'6E:110:n
LATIN SMALL LETTER O:'6F:111:o
LATIN SMALL LETTER P:'70:112:p
LATIN SMALL LETTER Q:'71:113:q
LATIN SMALL LETTER R:'72:114:r
LATIN SMALL LETTER S:'73:115:s
LATIN SMALL LETTER T:'74:116:t
LATIN SMALL LETTER U:'75:117:u
LATIN SMALL LETTER V:'76:118:v
LATIN SMALL LETTER W:'77:119:w
LATIN SMALL LETTER X:'78:120:x
LATIN SMALL LETTER Y:'79:121:y
LATIN SMALL LETTER Z:'7A:122:z
</bottom_128_old>
<ansicpg950>
DBCS LEAD BYTE:'81:129:<udef_symbol num="129"/>
DBCS LEAD BYTE:'82:130:<udef_symbol num="130"/>
DBCS LEAD BYTE:'83:131:<udef_symbol num="131"/>
DBCS LEAD BYTE:'84:132:<udef_symbol num="132"/>
DBCS LEAD BYTE:'85:133:<udef_symbol num="133"/>
DBCS LEAD BYTE:'86:134:<udef_symbol num="134"/>
DBCS LEAD BYTE:'87:135:<udef_symbol num="135"/>
DBCS LEAD BYTE:'88:136:<udef_symbol num="136"/>
DBCS LEAD BYTE:'89:137:<udef_symbol num="137"/>
DBCS LEAD BYTE:'8A:138:<udef_symbol num="138"/>
DBCS LEAD BYTE:'8B:139:<udef_symbol num="139"/>
DBCS LEAD BYTE:'8C:140:<udef_symbol num="140"/>
DBCS LEAD BYTE:'8D:141:<udef_symbol num="141"/>
DBCS LEAD BYTE:'8E:142:<udef_symbol num="142"/>
DBCS LEAD BYTE:'8F:143:<udef_symbol num="143"/>
DBCS LEAD BYTE:'90:144:<udef_symbol num="144"/>
DBCS LEAD BYTE:'91:145:<udef_symbol num="145"/>
DBCS LEAD BYTE:'92:146:<udef_symbol num="146"/>
DBCS LEAD BYTE:'93:147:<udef_symbol num="147"/>
DBCS LEAD BYTE:'94:148:<udef_symbol num="148"/>
DBCS LEAD BYTE:'95:149:<udef_symbol num="149"/>
DBCS LEAD BYTE:'96:150:<udef_symbol num="150"/>
DBCS LEAD BYTE:'97:151:<udef_symbol num="151"/>
DBCS LEAD BYTE:'98:152:<udef_symbol num="152"/>
DBCS LEAD BYTE:'99:153:<udef_symbol num="153"/>
DBCS LEAD BYTE:'9A:154:<udef_symbol num="154"/>
DBCS LEAD BYTE:'9B:155:<udef_symbol num="155"/>
DBCS LEAD BYTE:'9C:156:<udef_symbol num="156"/>
DBCS LEAD BYTE:'9D:157:<udef_symbol num="157"/>
DBCS LEAD BYTE:'9E:158:<udef_symbol num="158"/>
DBCS LEAD BYTE:'9F:159:<udef_symbol num="159"/>
DBCS LEAD BYTE:'A0:160:<udef_symbol num="160"/>
DBCS LEAD BYTE:'A1:161:<udef_symbol num="161"/>
DBCS LEAD BYTE:'A2:162:<udef_symbol num="162"/>
DBCS LEAD BYTE:'A3:163:<udef_symbol num="163"/>
DBCS LEAD BYTE:'A4:164:<udef_symbol num="164"/>
DBCS LEAD BYTE:'A5:165:<udef_symbol num="165"/>
DBCS LEAD BYTE:'A6:166:<udef_symbol num="166"/>
DBCS LEAD BYTE:'A7:167:<udef_symbol num="167"/>
DBCS LEAD BYTE:'A8:168:<udef_symbol num="168"/>
DBCS LEAD BYTE:'A9:169:<udef_symbol num="169"/>
DBCS LEAD BYTE:'AA:170:<udef_symbol num="170"/>
DBCS LEAD BYTE:'AB:171:<udef_symbol num="171"/>
DBCS LEAD BYTE:'AC:172:<udef_symbol num="172"/>
DBCS LEAD BYTE:'AD:173:<udef_symbol num="173"/>
DBCS LEAD BYTE:'AE:174:<udef_symbol num="174"/>
DBCS LEAD BYTE:'AF:175:<udef_symbol num="175"/>
DBCS LEAD BYTE:'B0:176:<udef_symbol num="176"/>
DBCS LEAD BYTE:'B1:177:<udef_symbol num="177"/>
DBCS LEAD BYTE:'B2:178:<udef_symbol num="178"/>
DBCS LEAD BYTE:'B3:179:<udef_symbol num="179"/>
DBCS LEAD BYTE:'B4:180:<udef_symbol num="180"/>
DBCS LEAD BYTE:'B5:181:<udef_symbol num="181"/>
DBCS LEAD BYTE:'B6:182:<udef_symbol num="182"/>
DBCS LEAD BYTE:'B7:183:<udef_symbol num="183"/>
DBCS LEAD BYTE:'B8:184:<udef_symbol num="184"/>
DBCS LEAD BYTE:'B9:185:<udef_symbol num="185"/>
DBCS LEAD BYTE:'BA:186:<udef_symbol num="186"/>
DBCS LEAD BYTE:'BB:187:<udef_symbol num="187"/>
DBCS LEAD BYTE:'BC:188:<udef_symbol num="188"/>
DBCS LEAD BYTE:'BD:189:<udef_symbol num="189"/>
DBCS LEAD BYTE:'BE:190:<udef_symbol num="190"/>
DBCS LEAD BYTE:'BF:191:<udef_symbol num="191"/>
DBCS LEAD BYTE:'C0:192:<udef_symbol num="192"/>
DBCS LEAD BYTE:'C1:193:<udef_symbol num="193"/>
DBCS LEAD BYTE:'C2:194:<udef_symbol num="194"/>
DBCS LEAD BYTE:'C3:195:<udef_symbol num="195"/>
DBCS LEAD BYTE:'C4:196:<udef_symbol num="196"/>
DBCS LEAD BYTE:'C5:197:<udef_symbol num="197"/>
DBCS LEAD BYTE:'C6:198:<udef_symbol num="198"/>
DBCS LEAD BYTE:'C7:199:<udef_symbol num="199"/>
DBCS LEAD BYTE:'C8:200:<udef_symbol num="200"/>
DBCS LEAD BYTE:'C9:201:<udef_symbol num="201"/>
DBCS LEAD BYTE:'CA:202:<udef_symbol num="202"/>
DBCS LEAD BYTE:'CB:203:<udef_symbol num="203"/>
DBCS LEAD BYTE:'CC:204:<udef_symbol num="204"/>
DBCS LEAD BYTE:'CD:205:<udef_symbol num="205"/>
DBCS LEAD BYTE:'CE:206:<udef_symbol num="206"/>
DBCS LEAD BYTE:'CF:207:<udef_symbol num="207"/>
DBCS LEAD BYTE:'D0:208:<udef_symbol num="208"/>
DBCS LEAD BYTE:'D1:209:<udef_symbol num="209"/>
DBCS LEAD BYTE:'D2:210:<udef_symbol num="210"/>
DBCS LEAD BYTE:'D3:211:<udef_symbol num="211"/>
DBCS LEAD BYTE:'D4:212:<udef_symbol num="212"/>
DBCS LEAD BYTE:'D5:213:<udef_symbol num="213"/>
DBCS LEAD BYTE:'D6:214:<udef_symbol num="214"/>
DBCS LEAD BYTE:'D7:215:<udef_symbol num="215"/>
DBCS LEAD BYTE:'D8:216:<udef_symbol num="216"/>
DBCS LEAD BYTE:'D9:217:<udef_symbol num="217"/>
DBCS LEAD BYTE:'DA:218:<udef_symbol num="218"/>
DBCS LEAD BYTE:'DB:219:<udef_symbol num="219"/>
DBCS LEAD BYTE:'DC:220:<udef_symbol num="220"/>
DBCS LEAD BYTE:'DD:221:<udef_symbol num="221"/>
DBCS LEAD BYTE:'DE:222:<udef_symbol num="222"/>
DBCS LEAD BYTE:'DF:223:<udef_symbol num="223"/>
DBCS LEAD BYTE:'E0:224:<udef_symbol num="224"/>
DBCS LEAD BYTE:'E1:225:<udef_symbol num="225"/>
DBCS LEAD BYTE:'E2:226:<udef_symbol num="226"/>
DBCS LEAD BYTE:'E3:227:<udef_symbol num="227"/>
DBCS LEAD BYTE:'E4:228:<udef_symbol num="228"/>
DBCS LEAD BYTE:'E5:229:<udef_symbol num="229"/>
DBCS LEAD BYTE:'E6:230:<udef_symbol num="230"/>
DBCS LEAD BYTE:'E7:231:<udef_symbol num="231"/>
DBCS LEAD BYTE:'E8:232:<udef_symbol num="232"/>
DBCS LEAD BYTE:'E9:233:<udef_symbol num="233"/>
DBCS LEAD BYTE:'EA:234:<udef_symbol num="234"/>
DBCS LEAD BYTE:'EB:235:<udef_symbol num="235"/>
DBCS LEAD BYTE:'EC:236:<udef_symbol num="236"/>
DBCS LEAD BYTE:'ED:237:<udef_symbol num="237"/>
DBCS LEAD BYTE:'EE:238:<udef_symbol num="238"/>
DBCS LEAD BYTE:'EF:239:<udef_symbol num="239"/>
DBCS LEAD BYTE:'F0:240:<udef_symbol num="240"/>
DBCS LEAD BYTE:'F1:241:<udef_symbol num="241"/>
DBCS LEAD BYTE:'F2:242:<udef_symbol num="242"/>
DBCS LEAD BYTE:'F3:243:<udef_symbol num="243"/>
DBCS LEAD BYTE:'F4:244:<udef_symbol num="244"/>
DBCS LEAD BYTE:'F5:245:<udef_symbol num="245"/>
DBCS LEAD BYTE:'F6:246:<udef_symbol num="246"/>
DBCS LEAD BYTE:'F7:247:<udef_symbol num="247"/>
DBCS LEAD BYTE:'F8:248:<udef_symbol num="248"/>
DBCS LEAD BYTE:'F9:249:<udef_symbol num="249"/>
DBCS LEAD BYTE:'FA:250:<udef_symbol num="250"/>
DBCS LEAD BYTE:'FB:251:<udef_symbol num="251"/>
DBCS LEAD BYTE:'FC:252:<udef_symbol num="252"/>
DBCS LEAD BYTE:'FD:253:<udef_symbol num="253"/>
DBCS LEAD BYTE:'FE:254:<udef_symbol num="254"/>
IDEOGRAPHIC SPACE:'A140:41280: 
FULLWIDTH COMMA:'A141:41281:,
IDEOGRAPHIC COMMA:'A142:41282:、
IDEOGRAPHIC FULL STOP:'A143:41283:。
FULLWIDTH FULL STOP:'A144:41284:.
HYPHENATION POINT:'A145:41285:‧
FULLWIDTH SEMICOLON:'A146:41286:;
FULLWIDTH COLON:'A147:41287::
FULLWIDTH QUESTION MARK:'A148:41288:?
FULLWIDTH EXCLAMATION MARK:'A149:41289:!
PRESENTATION FORM FOR VERTICAL TWO DOT LEADER:'A14A:41290:︰
HORIZONTAL ELLIPSIS:'A14B:41291:…
TWO DOT LEADER:'A14C:41292:‥
SMALL COMMA:'A14D:41293:﹐
SMALL IDEOGRAPHIC COMMA:'A14E:41294:﹑
SMALL FULL STOP:'A14F:41295:﹒
MIDDLE DOT:'A150:41296:·
SMALL SEMICOLON:'A151:41297:﹔
SMALL COLON:'A152:41298:﹕
SMALL QUESTION MARK:'A153:41299:﹖
SMALL EXCLAMATION MARK:'A154:41300:﹗
FULLWIDTH VERTICAL LINE:'A155:41301:|
EN DASH:'A156:41302:–
PRESENTATION FORM FOR VERTICAL EM DASH:'A157:41303:︱
EM DASH:'A158:41304:—
PRESENTATION FORM FOR VERTICAL LOW LINE:'A159:41305:︳
BOX DRAWINGS LIGHT LEFT:'A15A:41306:╴
PRESENTATION FORM FOR VERTICAL WAVY LOW LINE:'A15B:41307:︴
WAVY LOW LINE:'A15C:41308:﹏
FULLWIDTH LEFT PARENTHESIS:'A15D:41309:(
FULLWIDTH RIGHT PARENTHESIS:'A15E:41310:)
PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS:'A15F:41311:︵
PRESENTATION FORM FOR VERTICAL RIGHT PARENTHESIS:'A160:41312:︶
FULLWIDTH LEFT CURLY BRACKET:'A161:41313:{
FULLWIDTH RIGHT CURLY BRACKET:'A162:41314:}
PRESENTATION FORM FOR VERTICAL LEFT CURLY BRACKET:'A163:41315:︷
PRESENTATION FORM FOR VERTICAL RIGHT CURLY BRACKET:'A164:41316:︸
LEFT TORTOISE SHELL BRACKET:'A165:41317:〔
RIGHT TORTOISE SHELL BRACKET:'A166:41318:〕
PRESENTATION FORM FOR VERTICAL LEFT TORTOISE SHELL BRACKET:'A167:41319:︹
PRESENTATION FORM FOR VERTICAL RIGHT TORTOISE SHELL BRACKET:'A168:41320:︺
LEFT BLACK LENTICULAR BRACKET:'A169:41321:【
RIGHT BLACK LENTICULAR BRACKET:'A16A:41322:】
PRESENTATION FORM FOR VERTICAL LEFT BLACK LENTICULAR BRACKET:'A16B:41323:︻
PRESENTATION FORM FOR VERTICAL RIGHT BLACK LENTICULAR BRACKET:'A16C:41324:︼
LEFT DOUBLE ANGLE BRACKET:'A16D:41325:《
RIGHT DOUBLE ANGLE BRACKET:'A16E:41326:》
PRESENTATION FORM FOR VERTICAL LEFT DOUBLE ANGLE BRACKET:'A16F:41327:︽
PRESENTATION FORM FOR VERTICAL RIGHT DOUBLE ANGLE BRACKET:'A170:41328:︾
LEFT ANGLE BRACKET:'A171:41329:〈
RIGHT ANGLE BRACKET:'A172:41330:〉
PRESENTATION FORM FOR VERTICAL LEFT ANGLE BRACKET:'A173:41331:︿
PRESENTATION FORM FOR VERTICAL RIGHT ANGLE BRACKET:'A174:41332:﹀
LEFT CORNER BRACKET:'A175:41333:「
RIGHT CORNER BRACKET:'A176:41334:」
PRESENTATION FORM FOR VERTICAL LEFT CORNER BRACKET:'A177:41335:﹁
PRESENTATION FORM FOR VERTICAL RIGHT CORNER BRACKET:'A178:41336:﹂
LEFT WHITE CORNER BRACKET:'A179:41337:『
RIGHT WHITE CORNER BRACKET:'A17A:41338:』
PRESENTATION FORM FOR VERTICAL LEFT WHITE CORNER BRACKET:'A17B:41339:﹃
PRESENTATION FORM FOR VERTICAL RIGHT WHITE CORNER BRACKET:'A17C:41340:﹄
SMALL LEFT PARENTHESIS:'A17D:41341:﹙
SMALL RIGHT PARENTHESIS:'A17E:41342:﹚
SMALL LEFT CURLY BRACKET:'A1A1:41377:﹛
SMALL RIGHT CURLY BRACKET:'A1A2:41378:﹜
SMALL LEFT TORTOISE SHELL BRACKET:'A1A3:41379:﹝
SMALL RIGHT TORTOISE SHELL BRACKET:'A1A4:41380:﹞
LEFT SINGLE QUOTATION MARK:'A1A5:41381:‘
RIGHT SINGLE QUOTATION MARK:'A1A6:41382:’
LEFT DOUBLE QUOTATION MARK:'A1A7:41383:“
RIGHT DOUBLE QUOTATION MARK:'A1A8:41384:”
REVERSED DOUBLE PRIME QUOTATION MARK:'A1A9:41385:〝
DOUBLE PRIME QUOTATION MARK:'A1AA:41386:〞
REVERSED PRIME:'A1AB:41387:‵
PRIME:'A1AC:41388:′
FULLWIDTH NUMBER SIGN:'A1AD:41389:#
FULLWIDTH AMPERSAND:'A1AE:41390:&
FULLWIDTH ASTERISK:'A1AF:41391:*
REFERENCE MARK:'A1B0:41392:※
SECTION SIGN:'A1B1:41393:§
DITTO MARK:'A1B2:41394:〃
WHITE CIRCLE:'A1B3:41395:○
BLACK CIRCLE:'A1B4:41396:●
WHITE UP-POINTING TRIANGLE:'A1B5:41397:△
BLACK UP-POINTING TRIANGLE:'A1B6:41398:▲
BULLSEYE:'A1B7:41399:◎
WHITE STAR:'A1B8:41400:☆
BLACK STAR:'A1B9:41401:★
WHITE DIAMOND:'A1BA:41402:◇
BLACK DIAMOND:'A1BB:41403:◆
WHITE SQUARE:'A1BC:41404:□
BLACK SQUARE:'A1BD:41405:■
WHITE DOWN-POINTING TRIANGLE:'A1BE:41406:▽
BLACK DOWN-POINTING TRIANGLE:'A1BF:41407:▼
CIRCLED IDEOGRAPH CORRECT:'A1C0:41408:㊣
CARE OF:'A1C1:41409:℅
MACRON:'A1C2:41410:¯
FULLWIDTH MACRON:'A1C3:41411: ̄
FULLWIDTH LOW LINE:'A1C4:41412:_
MODIFIER LETTER LOW MACRON:'A1C5:41413:ˍ
DASHED OVERLINE:'A1C6:41414:﹉
CENTRELINE OVERLINE:'A1C7:41415:﹊
DASHED LOW LINE:'A1C8:41416:﹍
CENTRELINE LOW LINE:'A1C9:41417:﹎
WAVY OVERLINE:'A1CA:41418:﹋
DOUBLE WAVY OVERLINE:'A1CB:41419:﹌
SMALL NUMBER SIGN:'A1CC:41420:﹟
SMALL AMPERSAND:'A1CD:41421:﹠
SMALL ASTERISK:'A1CE:41422:﹡
FULLWIDTH PLUS SIGN:'A1CF:41423:+
FULLWIDTH HYPHEN-MINUS:'A1D0:41424:-
MULTIPLICATION SIGN:'A1D1:41425:×
DIVISION SIGN:'A1D2:41426:÷
PLUS-MINUS SIGN:'A1D3:41427:±
SQUARE ROOT:'A1D4:41428:√
FULLWIDTH LESS-THAN SIGN:'A1D5:41429:<
FULLWIDTH GREATER-THAN SIGN:'A1D6:41430:>
FULLWIDTH EQUALS SIGN:'A1D7:41431:=
LESS-THAN OVER EQUAL TO:'A1D8:41432:≦
GREATER-THAN OVER EQUAL TO:'A1D9:41433:≧
NOT EQUAL TO:'A1DA:41434:≠
INFINITY:'A1DB:41435:∞
APPROXIMATELY EQUAL TO OR THE IMAGE OF:'A1DC:41436:≒
IDENTICAL TO:'A1DD:41437:≡
SMALL PLUS SIGN:'A1DE:41438:﹢
SMALL HYPHEN-MINUS:'A1DF:41439:﹣
SMALL LESS-THAN SIGN:'A1E0:41440:﹤
SMALL GREATER-THAN SIGN:'A1E1:41441:﹥
SMALL EQUALS SIGN:'A1E2:41442:﹦
FULLWIDTH TILDE:'A1E3:41443:~
INTERSECTION:'A1E4:41444:∩
UNION:'A1E5:41445:∪
UP TACK:'A1E6:41446:⊥
ANGLE:'A1E7:41447:∠
RIGHT ANGLE:'A1E8:41448:∟
RIGHT TRIANGLE:'A1E9:41449:⊿
SQUARE LOG:'A1EA:41450:㏒
SQUARE LN:'A1EB:41451:㏑
INTEGRAL:'A1EC:41452:∫
CONTOUR INTEGRAL:'A1ED:41453:∮
BECAUSE:'A1EE:41454:∵
THEREFORE:'A1EF:41455:∴
FEMALE SIGN:'A1F0:41456:♀
MALE SIGN:'A1F1:41457:♂
CIRCLED PLUS:'A1F2:41458:⊕
CIRCLED DOT OPERATOR:'A1F3:41459:⊙
UPWARDS ARROW:'A1F4:41460:↑
DOWNWARDS ARROW:'A1F5:41461:↓
LEFTWARDS ARROW:'A1F6:41462:←
RIGHTWARDS ARROW:'A1F7:41463:→
NORTH WEST ARROW:'A1F8:41464:↖
NORTH EAST ARROW:'A1F9:41465:↗
SOUTH WEST ARROW:'A1FA:41466:↙
SOUTH EAST ARROW:'A1FB:41467:↘
PARALLEL TO:'A1FC:41468:∥
DIVIDES:'A1FD:41469:∣
FULLWIDTH SOLIDUS:'A1FE:41470:/
FULLWIDTH REVERSE SOLIDUS:'A240:41536:\
DIVISION SLASH:'A241:41537:∕
SMALL REVERSE SOLIDUS:'A242:41538:﹨
FULLWIDTH DOLLAR SIGN:'A243:41539:$
FULLWIDTH YEN SIGN:'A244:41540:¥
POSTAL MARK:'A245:41541:〒
FULLWIDTH CENT SIGN:'A246:41542:¢
FULLWIDTH POUND SIGN:'A247:41543:£
FULLWIDTH PERCENT SIGN:'A248:41544:%
FULLWIDTH COMMERCIAL AT:'A249:41545:@
DEGREE CELSIUS:'A24A:41546:℃
DEGREE FAHRENHEIT:'A24B:41547:℉
SMALL DOLLAR SIGN:'A24C:41548:﹩
SMALL PERCENT SIGN:'A24D:41549:﹪
SMALL COMMERCIAL AT:'A24E:41550:﹫
SQUARE MIL:'A24F:41551:㏕
SQUARE MM:'A250:41552:㎜
SQUARE CM:'A251:41553:㎝
SQUARE KM:'A252:41554:㎞
SQUARE KM CAPITAL:'A253:41555:㏎
SQUARE M SQUARED:'A254:41556:㎡
SQUARE MG:'A255:41557:㎎
SQUARE KG:'A256:41558:㎏
SQUARE CC:'A257:41559:㏄
DEGREE SIGN:'A258:41560:°
CJK UNIFIED IDEOGRAPH:'A259:41561:兙
CJK UNIFIED IDEOGRAPH:'A25A:41562:兛
CJK UNIFIED IDEOGRAPH:'A25B:41563:兞
CJK UNIFIED IDEOGRAPH:'A25C:41564:兝
CJK UNIFIED IDEOGRAPH:'A25D:41565:兡
CJK UNIFIED IDEOGRAPH:'A25E:41566:兣
CJK UNIFIED IDEOGRAPH:'A25F:41567:嗧
CJK UNIFIED IDEOGRAPH:'A260:41568:瓩
CJK UNIFIED IDEOGRAPH:'A261:41569:糎
LOWER ONE EIGHTH BLOCK:'A262:41570:▁
LOWER ONE QUARTER BLOCK:'A263:41571:▂
LOWER THREE EIGHTHS BLOCK:'A264:41572:▃
LOWER HALF BLOCK:'A265:41573:▄
LOWER FIVE EIGHTHS BLOCK:'A266:41574:▅
LOWER THREE QUARTERS BLOCK:'A267:41575:▆
LOWER SEVEN EIGHTHS BLOCK:'A268:41576:▇
FULL BLOCK:'A269:41577:█
LEFT ONE EIGHTH BLOCK:'A26A:41578:▏
LEFT ONE QUARTER BLOCK:'A26B:41579:▎
LEFT THREE EIGHTHS BLOCK:'A26C:41580:▍
LEFT HALF BLOCK:'A26D:41581:▌
LEFT FIVE EIGHTHS BLOCK:'A26E:41582:▋
LEFT THREE QUARTERS BLOCK:'A26F:41583:▊
LEFT SEVEN EIGHTHS BLOCK:'A270:41584:▉
BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL:'A271:41585:┼
BOX DRAWINGS LIGHT UP AND HORIZONTAL:'A272:41586:┴
BOX DRAWINGS LIGHT DOWN AND HORIZONTAL:'A273:41587:┬
BOX DRAWINGS LIGHT VERTICAL AND LEFT:'A274:41588:┤
BOX DRAWINGS LIGHT VERTICAL AND RIGHT:'A275:41589:├
UPPER ONE EIGHTH BLOCK:'A276:41590:▔
BOX DRAWINGS LIGHT HORIZONTAL:'A277:41591:─
BOX DRAWINGS LIGHT VERTICAL:'A278:41592:│
RIGHT ONE EIGHTH BLOCK:'A279:41593:▕
BOX DRAWINGS LIGHT DOWN AND RIGHT:'A27A:41594:┌
BOX DRAWINGS LIGHT DOWN AND LEFT:'A27B:41595:┐
BOX DRAWINGS LIGHT UP AND RIGHT:'A27C:41596:└
BOX DRAWINGS LIGHT UP AND LEFT:'A27D:41597:┘
BOX DRAWINGS LIGHT ARC DOWN AND RIGHT:'A27E:41598:╭
BOX DRAWINGS LIGHT ARC DOWN AND LEFT:'A2A1:41633:╮
BOX DRAWINGS LIGHT ARC UP AND RIGHT:'A2A2:41634:╰
BOX DRAWINGS LIGHT ARC UP AND LEFT:'A2A3:41635:╯
BOX DRAWINGS DOUBLE HORIZONTAL:'A2A4:41636:═
BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE:'A2A5:41637:╞
BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE:'A2A6:41638:╪
BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE:'A2A7:41639:╡
BLACK LOWER RIGHT TRIANGLE:'A2A8:41640:◢
BLACK LOWER LEFT TRIANGLE:'A2A9:41641:◣
BLACK UPPER RIGHT TRIANGLE:'A2AA:41642:◥
BLACK UPPER LEFT TRIANGLE:'A2AB:41643:◤
BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT:'A2AC:41644:╱
BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT:'A2AD:41645:╲
BOX DRAWINGS LIGHT DIAGONAL CROSS:'A2AE:41646:╳
FULLWIDTH DIGIT ZERO:'A2AF:41647:0
FULLWIDTH DIGIT ONE:'A2B0:41648:1
FULLWIDTH DIGIT TWO:'A2B1:41649:2
FULLWIDTH DIGIT THREE:'A2B2:41650:3
FULLWIDTH DIGIT FOUR:'A2B3:41651:4
FULLWIDTH DIGIT FIVE:'A2B4:41652:5
FULLWIDTH DIGIT SIX:'A2B5:41653:6
FULLWIDTH DIGIT SEVEN:'A2B6:41654:7
FULLWIDTH DIGIT EIGHT:'A2B7:41655:8
FULLWIDTH DIGIT NINE:'A2B8:41656:9
ROMAN NUMERAL ONE:'A2B9:41657:Ⅰ
ROMAN NUMERAL TWO:'A2BA:41658:Ⅱ
ROMAN NUMERAL THREE:'A2BB:41659:Ⅲ
ROMAN NUMERAL FOUR:'A2BC:41660:Ⅳ
ROMAN NUMERAL FIVE:'A2BD:41661:Ⅴ
ROMAN NUMERAL SIX:'A2BE:41662:Ⅵ
ROMAN NUMERAL SEVEN:'A2BF:41663:Ⅶ
ROMAN NUMERAL EIGHT:'A2C0:41664:Ⅷ
ROMAN NUMERAL NINE:'A2C1:41665:Ⅸ
ROMAN NUMERAL TEN:'A2C2:41666:Ⅹ
HANGZHOU NUMERAL ONE:'A2C3:41667:〡
HANGZHOU NUMERAL TWO:'A2C4:41668:〢
HANGZHOU NUMERAL THREE:'A2C5:41669:〣
HANGZHOU NUMERAL FOUR:'A2C6:41670:〤
HANGZHOU NUMERAL FIVE:'A2C7:41671:〥
HANGZHOU NUMERAL SIX:'A2C8:41672:〦
HANGZHOU NUMERAL SEVEN:'A2C9:41673:〧
HANGZHOU NUMERAL EIGHT:'A2CA:41674:〨
HANGZHOU NUMERAL NINE:'A2CB:41675:〩
CJK UNIFIED IDEOGRAPH:'A2CC:41676:十
CJK UNIFIED IDEOGRAPH:'A2CD:41677:卄
CJK UNIFIED IDEOGRAPH:'A2CE:41678:卅
FULLWIDTH LATIN CAPITAL LETTER A:'A2CF:41679:A
FULLWIDTH LATIN CAPITAL LETTER B:'A2D0:41680:B
FULLWIDTH LATIN CAPITAL LETTER C:'A2D1:41681:C
FULLWIDTH LATIN CAPITAL LETTER D:'A2D2:41682:D
FULLWIDTH LATIN CAPITAL LETTER E:'A2D3:41683:E
FULLWIDTH LATIN CAPITAL LETTER F:'A2D4:41684:F
FULLWIDTH LATIN CAPITAL LETTER G:'A2D5:41685:G
FULLWIDTH LATIN CAPITAL LETTER H:'A2D6:41686:H
FULLWIDTH LATIN CAPITAL LETTER I:'A2D7:41687:I
FULLWIDTH LATIN CAPITAL LETTER J:'A2D8:41688:J
FULLWIDTH LATIN CAPITAL LETTER K:'A2D9:41689:K
FULLWIDTH LATIN CAPITAL LETTER L:'A2DA:41690:L
FULLWIDTH LATIN CAPITAL LETTER M:'A2DB:41691:M
FULLWIDTH LATIN CAPITAL LETTER N:'A2DC:41692:N
FULLWIDTH LATIN CAPITAL LETTER O:'A2DD:41693:O
FULLWIDTH LATIN CAPITAL LETTER P:'A2DE:41694:P
FULLWIDTH LATIN CAPITAL LETTER Q:'A2DF:41695:Q
FULLWIDTH LATIN CAPITAL LETTER R:'A2E0:41696:R
FULLWIDTH LATIN CAPITAL LETTER S:'A2E1:41697:S
FULLWIDTH LATIN CAPITAL LETTER T:'A2E2:41698:T
FULLWIDTH LATIN CAPITAL LETTER U:'A2E3:41699:U
FULLWIDTH LATIN CAPITAL LETTER V:'A2E4:41700:V
FULLWIDTH LATIN CAPITAL LETTER W:'A2E5:41701:W
FULLWIDTH LATIN CAPITAL LETTER X:'A2E6:41702:X
FULLWIDTH LATIN CAPITAL LETTER Y:'A2E7:41703:Y
FULLWIDTH LATIN CAPITAL LETTER Z:'A2E8:41704:Z
FULLWIDTH LATIN SMALL LETTER A:'A2E9:41705:a
FULLWIDTH LATIN SMALL LETTER B:'A2EA:41706:b
FULLWIDTH LATIN SMALL LETTER C:'A2EB:41707:c
FULLWIDTH LATIN SMALL LETTER D:'A2EC:41708:d
FULLWIDTH LATIN SMALL LETTER E:'A2ED:41709:e
FULLWIDTH LATIN SMALL LETTER F:'A2EE:41710:f
FULLWIDTH LATIN SMALL LETTER G:'A2EF:41711:g
FULLWIDTH LATIN SMALL LETTER H:'A2F0:41712:h
FULLWIDTH LATIN SMALL LETTER I:'A2F1:41713:i
FULLWIDTH LATIN SMALL LETTER J:'A2F2:41714:j
FULLWIDTH LATIN SMALL LETTER K:'A2F3:41715:k
FULLWIDTH LATIN SMALL LETTER L:'A2F4:41716:l
FULLWIDTH LATIN SMALL LETTER M:'A2F5:41717:m
FULLWIDTH LATIN SMALL LETTER N:'A2F6:41718:n
FULLWIDTH LATIN SMALL LETTER O:'A2F7:41719:o
FULLWIDTH LATIN SMALL LETTER P:'A2F8:41720:p
FULLWIDTH LATIN SMALL LETTER Q:'A2F9:41721:q
FULLWIDTH LATIN SMALL LETTER R:'A2FA:41722:r
FULLWIDTH LATIN SMALL LETTER S:'A2FB:41723:s
FULLWIDTH LATIN SMALL LETTER T:'A2FC:41724:t
FULLWIDTH LATIN SMALL LETTER U:'A2FD:41725:u
FULLWIDTH LATIN SMALL LETTER V:'A2FE:41726:v
FULLWIDTH LATIN SMALL LETTER W:'A340:41792:w
FULLWIDTH LATIN SMALL LETTER X:'A341:41793:x
FULLWIDTH LATIN SMALL LETTER Y:'A342:41794:y
FULLWIDTH LATIN SMALL LETTER Z:'A343:41795:z
GREEK CAPITAL LETTER ALPHA:'A344:41796:Α
GREEK CAPITAL LETTER BETA:'A345:41797:Β
GREEK CAPITAL LETTER GAMMA:'A346:41798:Γ
GREEK CAPITAL LETTER DELTA:'A347:41799:Δ
GREEK CAPITAL LETTER EPSILON:'A348:41800:Ε
GREEK CAPITAL LETTER ZETA:'A349:41801:Ζ
GREEK CAPITAL LETTER ETA:'A34A:41802:Η
GREEK CAPITAL LETTER THETA:'A34B:41803:Θ
GREEK CAPITAL LETTER IOTA:'A34C:41804:Ι
GREEK CAPITAL LETTER KAPPA:'A34D:41805:Κ
GREEK CAPITAL LETTER LAMDA:'A34E:41806:Λ
GREEK CAPITAL LETTER MU:'A34F:41807:Μ
GREEK CAPITAL LETTER NU:'A350:41808:Ν
GREEK CAPITAL LETTER XI:'A351:41809:Ξ
GREEK CAPITAL LETTER OMICRON:'A352:41810:Ο
GREEK CAPITAL LETTER PI:'A353:41811:Π
GREEK CAPITAL LETTER RHO:'A354:41812:Ρ
GREEK CAPITAL LETTER SIGMA:'A355:41813:Σ
GREEK CAPITAL LETTER TAU:'A356:41814:Τ
GREEK CAPITAL LETTER UPSILON:'A357:41815:Υ
GREEK CAPITAL LETTER PHI:'A358:41816:Φ
GREEK CAPITAL LETTER CHI:'A359:41817:Χ
GREEK CAPITAL LETTER PSI:'A35A:41818:Ψ
GREEK CAPITAL LETTER OMEGA:'A35B:41819:Ω
GREEK SMALL LETTER ALPHA:'A35C:41820:α
GREEK SMALL LETTER BETA:'A35D:41821:β
GREEK SMALL LETTER GAMMA:'A35E:41822:γ
GREEK SMALL LETTER DELTA:'A35F:41823:δ
GREEK SMALL LETTER EPSILON:'A360:41824:ε
GREEK SMALL LETTER ZETA:'A361:41825:ζ
GREEK SMALL LETTER ETA:'A362:41826:η
GREEK SMALL LETTER THETA:'A363:41827:θ
GREEK SMALL LETTER IOTA:'A364:41828:ι
GREEK SMALL LETTER KAPPA:'A365:41829:κ
GREEK SMALL LETTER LAMDA:'A366:41830:λ
GREEK SMALL LETTER MU:'A367:41831:μ
GREEK SMALL LETTER NU:'A368:41832:ν
GREEK SMALL LETTER XI:'A369:41833:ξ
GREEK SMALL LETTER OMICRON:'A36A:41834:ο
GREEK SMALL LETTER PI:'A36B:41835:π
GREEK SMALL LETTER RHO:'A36C:41836:ρ
GREEK SMALL LETTER SIGMA:'A36D:41837:σ
GREEK SMALL LETTER TAU:'A36E:41838:τ
GREEK SMALL LETTER UPSILON:'A36F:41839:υ
GREEK SMALL LETTER PHI:'A370:41840:φ
GREEK SMALL LETTER CHI:'A371:41841:χ
GREEK SMALL LETTER PSI:'A372:41842:ψ
GREEK SMALL LETTER OMEGA:'A373:41843:ω
BOPOMOFO LETTER B:'A374:41844:ㄅ
BOPOMOFO LETTER P:'A375:41845:ㄆ
BOPOMOFO LETTER M:'A376:41846:ㄇ
BOPOMOFO LETTER F:'A377:41847:ㄈ
BOPOMOFO LETTER D:'A378:41848:ㄉ
BOPOMOFO LETTER T:'A379:41849:ㄊ
BOPOMOFO LETTER N:'A37A:41850:ㄋ
BOPOMOFO LETTER L:'A37B:41851:ㄌ
BOPOMOFO LETTER G:'A37C:41852:ㄍ
BOPOMOFO LETTER K:'A37D:41853:ㄎ
BOPOMOFO LETTER H:'A37E:41854:ㄏ
BOPOMOFO LETTER J:'A3A1:41889:ㄐ
BOPOMOFO LETTER Q:'A3A2:41890:ㄑ
BOPOMOFO LETTER X:'A3A3:41891:ㄒ
BOPOMOFO LETTER ZH:'A3A4:41892:ㄓ
BOPOMOFO LETTER CH:'A3A5:41893:ㄔ
BOPOMOFO LETTER SH:'A3A6:41894:ㄕ
BOPOMOFO LETTER R:'A3A7:41895:ㄖ
BOPOMOFO LETTER Z:'A3A8:41896:ㄗ
BOPOMOFO LETTER C:'A3A9:41897:ㄘ
BOPOMOFO LETTER S:'A3AA:41898:ㄙ
BOPOMOFO LETTER A:'A3AB:41899:ㄚ
BOPOMOFO LETTER O:'A3AC:41900:ㄛ
BOPOMOFO LETTER E:'A3AD:41901:ㄜ
BOPOMOFO LETTER EH:'A3AE:41902:ㄝ
BOPOMOFO LETTER AI:'A3AF:41903:ㄞ
BOPOMOFO LETTER EI:'A3B0:41904:ㄟ
BOPOMOFO LETTER AU:'A3B1:41905:ㄠ
BOPOMOFO LETTER OU:'A3B2:41906:ㄡ
BOPOMOFO LETTER AN:'A3B3:41907:ㄢ
BOPOMOFO LETTER EN:'A3B4:41908:ㄣ
BOPOMOFO LETTER ANG:'A3B5:41909:ㄤ
BOPOMOFO LETTER ENG:'A3B6:41910:ㄥ
BOPOMOFO LETTER ER:'A3B7:41911:ㄦ
BOPOMOFO LETTER I:'A3B8:41912:ㄧ
BOPOMOFO LETTER U:'A3B9:41913:ㄨ
BOPOMOFO LETTER IU:'A3BA:41914:ㄩ
DOT ABOVE:'A3BB:41915:˙
MODIFIER LETTER MACRON:'A3BC:41916:ˉ
MODIFIER LETTER ACUTE ACCENT:'A3BD:41917:ˊ
CARON:'A3BE:41918:ˇ
MODIFIER LETTER GRAVE ACCENT:'A3BF:41919:ˋ
EURO SIGN:'A3E1:41953:€
CJK UNIFIED IDEOGRAPH:'A440:42048:一
CJK UNIFIED IDEOGRAPH:'A441:42049:乙
CJK UNIFIED IDEOGRAPH:'A442:42050:丁
CJK UNIFIED IDEOGRAPH:'A443:42051:七
CJK UNIFIED IDEOGRAPH:'A444:42052:乃
CJK UNIFIED IDEOGRAPH:'A445:42053:九
CJK UNIFIED IDEOGRAPH:'A446:42054:了
CJK UNIFIED IDEOGRAPH:'A447:42055:二
CJK UNIFIED IDEOGRAPH:'A448:42056:人
CJK UNIFIED IDEOGRAPH:'A449:42057:儿
CJK UNIFIED IDEOGRAPH:'A44A:42058:入
CJK UNIFIED IDEOGRAPH:'A44B:42059:八
CJK UNIFIED IDEOGRAPH:'A44C:42060:几
CJK UNIFIED IDEOGRAPH:'A44D:42061:刀
CJK UNIFIED IDEOGRAPH:'A44E:42062:刁
CJK UNIFIED IDEOGRAPH:'A44F:42063:力
CJK UNIFIED IDEOGRAPH:'A450:42064:匕
CJK UNIFIED IDEOGRAPH:'A451:42065:十
CJK UNIFIED IDEOGRAPH:'A452:42066:卜
CJK UNIFIED IDEOGRAPH:'A453:42067:又
CJK UNIFIED IDEOGRAPH:'A454:42068:三
CJK UNIFIED IDEOGRAPH:'A455:42069:下
CJK UNIFIED IDEOGRAPH:'A456:42070:丈
CJK UNIFIED IDEOGRAPH:'A457:42071:上
CJK UNIFIED IDEOGRAPH:'A458:42072:丫
CJK UNIFIED IDEOGRAPH:'A459:42073:丸
CJK UNIFIED IDEOGRAPH:'A45A:42074:凡
CJK UNIFIED IDEOGRAPH:'A45B:42075:久
CJK UNIFIED IDEOGRAPH:'A45C:42076:么
CJK UNIFIED IDEOGRAPH:'A45D:42077:也
CJK UNIFIED IDEOGRAPH:'A45E:42078:乞
CJK UNIFIED IDEOGRAPH:'A45F:42079:于
CJK UNIFIED IDEOGRAPH:'A460:42080:亡
CJK UNIFIED IDEOGRAPH:'A461:42081:兀
CJK UNIFIED IDEOGRAPH:'A462:42082:刃
CJK UNIFIED IDEOGRAPH:'A463:42083:勺
CJK UNIFIED IDEOGRAPH:'A464:42084:千
CJK UNIFIED IDEOGRAPH:'A465:42085:叉
CJK UNIFIED IDEOGRAPH:'A466:42086:口
CJK UNIFIED IDEOGRAPH:'A467:42087:土
CJK UNIFIED IDEOGRAPH:'A468:42088:士
CJK UNIFIED IDEOGRAPH:'A469:42089:夕
CJK UNIFIED IDEOGRAPH:'A46A:42090:大
CJK UNIFIED IDEOGRAPH:'A46B:42091:女
CJK UNIFIED IDEOGRAPH:'A46C:42092:子
CJK UNIFIED IDEOGRAPH:'A46D:42093:孑
CJK UNIFIED IDEOGRAPH:'A46E:42094:孓
CJK UNIFIED IDEOGRAPH:'A46F:42095:寸
CJK UNIFIED IDEOGRAPH:'A470:42096:小
CJK UNIFIED IDEOGRAPH:'A471:42097:尢
CJK UNIFIED IDEOGRAPH:'A472:42098:尸
CJK UNIFIED IDEOGRAPH:'A473:42099:山
CJK UNIFIED IDEOGRAPH:'A474:42100:川
CJK UNIFIED IDEOGRAPH:'A475:42101:工
CJK UNIFIED IDEOGRAPH:'A476:42102:己
CJK UNIFIED IDEOGRAPH:'A477:42103:已
CJK UNIFIED IDEOGRAPH:'A478:42104:巳
CJK UNIFIED IDEOGRAPH:'A479:42105:巾
CJK UNIFIED IDEOGRAPH:'A47A:42106:干
CJK UNIFIED IDEOGRAPH:'A47B:42107:廾
CJK UNIFIED IDEOGRAPH:'A47C:42108:弋
CJK UNIFIED IDEOGRAPH:'A47D:42109:弓
CJK UNIFIED IDEOGRAPH:'A47E:42110:才
CJK UNIFIED IDEOGRAPH:'A4A1:42145:丑
CJK UNIFIED IDEOGRAPH:'A4A2:42146:丐
CJK UNIFIED IDEOGRAPH:'A4A3:42147:不
CJK UNIFIED IDEOGRAPH:'A4A4:42148:中
CJK UNIFIED IDEOGRAPH:'A4A5:42149:丰
CJK UNIFIED IDEOGRAPH:'A4A6:42150:丹
CJK UNIFIED IDEOGRAPH:'A4A7:42151:之
CJK UNIFIED IDEOGRAPH:'A4A8:42152:尹
CJK UNIFIED IDEOGRAPH:'A4A9:42153:予
CJK UNIFIED IDEOGRAPH:'A4AA:42154:云
CJK UNIFIED IDEOGRAPH:'A4AB:42155:井
CJK UNIFIED IDEOGRAPH:'A4AC:42156:互
CJK UNIFIED IDEOGRAPH:'A4AD:42157:五
CJK UNIFIED IDEOGRAPH:'A4AE:42158:亢
CJK UNIFIED IDEOGRAPH:'A4AF:42159:仁
CJK UNIFIED IDEOGRAPH:'A4B0:42160:什
CJK UNIFIED IDEOGRAPH:'A4B1:42161:仃
CJK UNIFIED IDEOGRAPH:'A4B2:42162:仆
CJK UNIFIED IDEOGRAPH:'A4B3:42163:仇
CJK UNIFIED IDEOGRAPH:'A4B4:42164:仍
CJK UNIFIED IDEOGRAPH:'A4B5:42165:今
CJK UNIFIED IDEOGRAPH:'A4B6:42166:介
CJK UNIFIED IDEOGRAPH:'A4B7:42167:仄
CJK UNIFIED IDEOGRAPH:'A4B8:42168:元
CJK UNIFIED IDEOGRAPH:'A4B9:42169:允
CJK UNIFIED IDEOGRAPH:'A4BA:42170:內
CJK UNIFIED IDEOGRAPH:'A4BB:42171:六
CJK UNIFIED IDEOGRAPH:'A4BC:42172:兮
CJK UNIFIED IDEOGRAPH:'A4BD:42173:公
CJK UNIFIED IDEOGRAPH:'A4BE:42174:冗
CJK UNIFIED IDEOGRAPH:'A4BF:42175:凶
CJK UNIFIED IDEOGRAPH:'A4C0:42176:分
CJK UNIFIED IDEOGRAPH:'A4C1:42177:切
CJK UNIFIED IDEOGRAPH:'A4C2:42178:刈
CJK UNIFIED IDEOGRAPH:'A4C3:42179:勻
CJK UNIFIED IDEOGRAPH:'A4C4:42180:勾
CJK UNIFIED IDEOGRAPH:'A4C5:42181:勿
CJK UNIFIED IDEOGRAPH:'A4C6:42182:化
CJK UNIFIED IDEOGRAPH:'A4C7:42183:匹
CJK UNIFIED IDEOGRAPH:'A4C8:42184:午
CJK UNIFIED IDEOGRAPH:'A4C9:42185:升
CJK UNIFIED IDEOGRAPH:'A4CA:42186:卅
CJK UNIFIED IDEOGRAPH:'A4CB:42187:卞
CJK UNIFIED IDEOGRAPH:'A4CC:42188:厄
CJK UNIFIED IDEOGRAPH:'A4CD:42189:友
CJK UNIFIED IDEOGRAPH:'A4CE:42190:及
CJK UNIFIED IDEOGRAPH:'A4CF:42191:反
CJK UNIFIED IDEOGRAPH:'A4D0:42192:壬
CJK UNIFIED IDEOGRAPH:'A4D1:42193:天
CJK UNIFIED IDEOGRAPH:'A4D2:42194:夫
CJK UNIFIED IDEOGRAPH:'A4D3:42195:太
CJK UNIFIED IDEOGRAPH:'A4D4:42196:夭
CJK UNIFIED IDEOGRAPH:'A4D5:42197:孔
CJK UNIFIED IDEOGRAPH:'A4D6:42198:少
CJK UNIFIED IDEOGRAPH:'A4D7:42199:尤
CJK UNIFIED IDEOGRAPH:'A4D8:42200:尺
CJK UNIFIED IDEOGRAPH:'A4D9:42201:屯
CJK UNIFIED IDEOGRAPH:'A4DA:42202:巴
CJK UNIFIED IDEOGRAPH:'A4DB:42203:幻
CJK UNIFIED IDEOGRAPH:'A4DC:42204:廿
CJK UNIFIED IDEOGRAPH:'A4DD:42205:弔
CJK UNIFIED IDEOGRAPH:'A4DE:42206:引
CJK UNIFIED IDEOGRAPH:'A4DF:42207:心
CJK UNIFIED IDEOGRAPH:'A4E0:42208:戈
CJK UNIFIED IDEOGRAPH:'A4E1:42209:戶
CJK UNIFIED IDEOGRAPH:'A4E2:42210:手
CJK UNIFIED IDEOGRAPH:'A4E3:42211:扎
CJK UNIFIED IDEOGRAPH:'A4E4:42212:支
CJK UNIFIED IDEOGRAPH:'A4E5:42213:文
CJK UNIFIED IDEOGRAPH:'A4E6:42214:斗
CJK UNIFIED IDEOGRAPH:'A4E7:42215:斤
CJK UNIFIED IDEOGRAPH:'A4E8:42216:方
CJK UNIFIED IDEOGRAPH:'A4E9:42217:日
CJK UNIFIED IDEOGRAPH:'A4EA:42218:曰
CJK UNIFIED IDEOGRAPH:'A4EB:42219:月
CJK UNIFIED IDEOGRAPH:'A4EC:42220:木
CJK UNIFIED IDEOGRAPH:'A4ED:42221:欠
CJK UNIFIED IDEOGRAPH:'A4EE:42222:止
CJK UNIFIED IDEOGRAPH:'A4EF:42223:歹
CJK UNIFIED IDEOGRAPH:'A4F0:42224:毋
CJK UNIFIED IDEOGRAPH:'A4F1:42225:比
CJK UNIFIED IDEOGRAPH:'A4F2:42226:毛
CJK UNIFIED IDEOGRAPH:'A4F3:42227:氏
CJK UNIFIED IDEOGRAPH:'A4F4:42228:水
CJK UNIFIED IDEOGRAPH:'A4F5:42229:火
CJK UNIFIED IDEOGRAPH:'A4F6:42230:爪
CJK UNIFIED IDEOGRAPH:'A4F7:42231:父
CJK UNIFIED IDEOGRAPH:'A4F8:42232:爻
CJK UNIFIED IDEOGRAPH:'A4F9:42233:片
CJK UNIFIED IDEOGRAPH:'A4FA:42234:牙
CJK UNIFIED IDEOGRAPH:'A4FB:42235:牛
CJK UNIFIED IDEOGRAPH:'A4FC:42236:犬
CJK UNIFIED IDEOGRAPH:'A4FD:42237:王
CJK UNIFIED IDEOGRAPH:'A4FE:42238:丙
CJK UNIFIED IDEOGRAPH:'A540:42304:世
CJK UNIFIED IDEOGRAPH:'A541:42305:丕
CJK UNIFIED IDEOGRAPH:'A542:42306:且
CJK UNIFIED IDEOGRAPH:'A543:42307:丘
CJK UNIFIED IDEOGRAPH:'A544:42308:主
CJK UNIFIED IDEOGRAPH:'A545:42309:乍
CJK UNIFIED IDEOGRAPH:'A546:42310:乏
CJK UNIFIED IDEOGRAPH:'A547:42311:乎
CJK UNIFIED IDEOGRAPH:'A548:42312:以
CJK UNIFIED IDEOGRAPH:'A549:42313:付
CJK UNIFIED IDEOGRAPH:'A54A:42314:仔
CJK UNIFIED IDEOGRAPH:'A54B:42315:仕
CJK UNIFIED IDEOGRAPH:'A54C:42316:他
CJK UNIFIED IDEOGRAPH:'A54D:42317:仗
CJK UNIFIED IDEOGRAPH:'A54E:42318:代
CJK UNIFIED IDEOGRAPH:'A54F:42319:令
CJK UNIFIED IDEOGRAPH:'A550:42320:仙
CJK UNIFIED IDEOGRAPH:'A551:42321:仞
CJK UNIFIED IDEOGRAPH:'A552:42322:充
CJK UNIFIED IDEOGRAPH:'A553:42323:兄
CJK UNIFIED IDEOGRAPH:'A554:42324:冉
CJK UNIFIED IDEOGRAPH:'A555:42325:冊
CJK UNIFIED IDEOGRAPH:'A556:42326:冬
CJK UNIFIED IDEOGRAPH:'A557:42327:凹
CJK UNIFIED IDEOGRAPH:'A558:42328:出
CJK UNIFIED IDEOGRAPH:'A559:42329:凸
CJK UNIFIED IDEOGRAPH:'A55A:42330:刊
CJK UNIFIED IDEOGRAPH:'A55B:42331:加
CJK UNIFIED IDEOGRAPH:'A55C:42332:功
CJK UNIFIED IDEOGRAPH:'A55D:42333:包
CJK UNIFIED IDEOGRAPH:'A55E:42334:匆
CJK UNIFIED IDEOGRAPH:'A55F:42335:北
CJK UNIFIED IDEOGRAPH:'A560:42336:匝
CJK UNIFIED IDEOGRAPH:'A561:42337:仟
CJK UNIFIED IDEOGRAPH:'A562:42338:半
CJK UNIFIED IDEOGRAPH:'A563:42339:卉
CJK UNIFIED IDEOGRAPH:'A564:42340:卡
CJK UNIFIED IDEOGRAPH:'A565:42341:占
CJK UNIFIED IDEOGRAPH:'A566:42342:卯
CJK UNIFIED IDEOGRAPH:'A567:42343:卮
CJK UNIFIED IDEOGRAPH:'A568:42344:去
CJK UNIFIED IDEOGRAPH:'A569:42345:可
CJK UNIFIED IDEOGRAPH:'A56A:42346:古
CJK UNIFIED IDEOGRAPH:'A56B:42347:右
CJK UNIFIED IDEOGRAPH:'A56C:42348:召
CJK UNIFIED IDEOGRAPH:'A56D:42349:叮
CJK UNIFIED IDEOGRAPH:'A56E:42350:叩
CJK UNIFIED IDEOGRAPH:'A56F:42351:叨
CJK UNIFIED IDEOGRAPH:'A570:42352:叼
CJK UNIFIED IDEOGRAPH:'A571:42353:司
CJK UNIFIED IDEOGRAPH:'A572:42354:叵
CJK UNIFIED IDEOGRAPH:'A573:42355:叫
CJK UNIFIED IDEOGRAPH:'A574:42356:另
CJK UNIFIED IDEOGRAPH:'A575:42357:只
CJK UNIFIED IDEOGRAPH:'A576:42358:史
CJK UNIFIED IDEOGRAPH:'A577:42359:叱
CJK UNIFIED IDEOGRAPH:'A578:42360:台
CJK UNIFIED IDEOGRAPH:'A579:42361:句
CJK UNIFIED IDEOGRAPH:'A57A:42362:叭
CJK UNIFIED IDEOGRAPH:'A57B:42363:叻
CJK UNIFIED IDEOGRAPH:'A57C:42364:四
CJK UNIFIED IDEOGRAPH:'A57D:42365:囚
CJK UNIFIED IDEOGRAPH:'A57E:42366:外
CJK UNIFIED IDEOGRAPH:'A5A1:42401:央
CJK UNIFIED IDEOGRAPH:'A5A2:42402:失
CJK UNIFIED IDEOGRAPH:'A5A3:42403:奴
CJK UNIFIED IDEOGRAPH:'A5A4:42404:奶
CJK UNIFIED IDEOGRAPH:'A5A5:42405:孕
CJK UNIFIED IDEOGRAPH:'A5A6:42406:它
CJK UNIFIED IDEOGRAPH:'A5A7:42407:尼
CJK UNIFIED IDEOGRAPH:'A5A8:42408:巨
CJK UNIFIED IDEOGRAPH:'A5A9:42409:巧
CJK UNIFIED IDEOGRAPH:'A5AA:42410:左
CJK UNIFIED IDEOGRAPH:'A5AB:42411:市
CJK UNIFIED IDEOGRAPH:'A5AC:42412:布
CJK UNIFIED IDEOGRAPH:'A5AD:42413:平
CJK UNIFIED IDEOGRAPH:'A5AE:42414:幼
CJK UNIFIED IDEOGRAPH:'A5AF:42415:弁
CJK UNIFIED IDEOGRAPH:'A5B0:42416:弘
CJK UNIFIED IDEOGRAPH:'A5B1:42417:弗
CJK UNIFIED IDEOGRAPH:'A5B2:42418:必
CJK UNIFIED IDEOGRAPH:'A5B3:42419:戊
CJK UNIFIED IDEOGRAPH:'A5B4:42420:打
CJK UNIFIED IDEOGRAPH:'A5B5:42421:扔
CJK UNIFIED IDEOGRAPH:'A5B6:42422:扒
CJK UNIFIED IDEOGRAPH:'A5B7:42423:扑
CJK UNIFIED IDEOGRAPH:'A5B8:42424:斥
CJK UNIFIED IDEOGRAPH:'A5B9:42425:旦
CJK UNIFIED IDEOGRAPH:'A5BA:42426:朮
CJK UNIFIED IDEOGRAPH:'A5BB:42427:本
CJK UNIFIED IDEOGRAPH:'A5BC:42428:未
CJK UNIFIED IDEOGRAPH:'A5BD:42429:末
CJK UNIFIED IDEOGRAPH:'A5BE:42430:札
CJK UNIFIED IDEOGRAPH:'A5BF:42431:正
CJK UNIFIED IDEOGRAPH:'A5C0:42432:母
CJK UNIFIED IDEOGRAPH:'A5C1:42433:民
CJK UNIFIED IDEOGRAPH:'A5C2:42434:氐
CJK UNIFIED IDEOGRAPH:'A5C3:42435:永
CJK UNIFIED IDEOGRAPH:'A5C4:42436:汁
CJK UNIFIED IDEOGRAPH:'A5C5:42437:汀
CJK UNIFIED IDEOGRAPH:'A5C6:42438:氾
CJK UNIFIED IDEOGRAPH:'A5C7:42439:犯
CJK UNIFIED IDEOGRAPH:'A5C8:42440:玄
CJK UNIFIED IDEOGRAPH:'A5C9:42441:玉
CJK UNIFIED IDEOGRAPH:'A5CA:42442:瓜
CJK UNIFIED IDEOGRAPH:'A5CB:42443:瓦
CJK UNIFIED IDEOGRAPH:'A5CC:42444:甘
CJK UNIFIED IDEOGRAPH:'A5CD:42445:生
CJK UNIFIED IDEOGRAPH:'A5CE:42446:用
CJK UNIFIED IDEOGRAPH:'A5CF:42447:甩
CJK UNIFIED IDEOGRAPH:'A5D0:42448:田
CJK UNIFIED IDEOGRAPH:'A5D1:42449:由
CJK UNIFIED IDEOGRAPH:'A5D2:42450:甲
CJK UNIFIED IDEOGRAPH:'A5D3:42451:申
CJK UNIFIED IDEOGRAPH:'A5D4:42452:疋
CJK UNIFIED IDEOGRAPH:'A5D5:42453:白
CJK UNIFIED IDEOGRAPH:'A5D6:42454:皮
CJK UNIFIED IDEOGRAPH:'A5D7:42455:皿
CJK UNIFIED IDEOGRAPH:'A5D8:42456:目
CJK UNIFIED IDEOGRAPH:'A5D9:42457:矛
CJK UNIFIED IDEOGRAPH:'A5DA:42458:矢
CJK UNIFIED IDEOGRAPH:'A5DB:42459:石
CJK UNIFIED IDEOGRAPH:'A5DC:42460:示
CJK UNIFIED IDEOGRAPH:'A5DD:42461:禾
CJK UNIFIED IDEOGRAPH:'A5DE:42462:穴
CJK UNIFIED IDEOGRAPH:'A5DF:42463:立
CJK UNIFIED IDEOGRAPH:'A5E0:42464:丞
CJK UNIFIED IDEOGRAPH:'A5E1:42465:丟
CJK UNIFIED IDEOGRAPH:'A5E2:42466:乒
CJK UNIFIED IDEOGRAPH:'A5E3:42467:乓
CJK UNIFIED IDEOGRAPH:'A5E4:42468:乩
CJK UNIFIED IDEOGRAPH:'A5E5:42469:亙
CJK UNIFIED IDEOGRAPH:'A5E6:42470:交
CJK UNIFIED IDEOGRAPH:'A5E7:42471:亦
CJK UNIFIED IDEOGRAPH:'A5E8:42472:亥
CJK UNIFIED IDEOGRAPH:'A5E9:42473:仿
CJK UNIFIED IDEOGRAPH:'A5EA:42474:伉
CJK UNIFIED IDEOGRAPH:'A5EB:42475:伙
CJK UNIFIED IDEOGRAPH:'A5EC:42476:伊
CJK UNIFIED IDEOGRAPH:'A5ED:42477:伕
CJK UNIFIED IDEOGRAPH:'A5EE:42478:伍
CJK UNIFIED IDEOGRAPH:'A5EF:42479:伐
CJK UNIFIED IDEOGRAPH:'A5F0:42480:休
CJK UNIFIED IDEOGRAPH:'A5F1:42481:伏
CJK UNIFIED IDEOGRAPH:'A5F2:42482:仲
CJK UNIFIED IDEOGRAPH:'A5F3:42483:件
CJK UNIFIED IDEOGRAPH:'A5F4:42484:任
CJK UNIFIED IDEOGRAPH:'A5F5:42485:仰
CJK UNIFIED IDEOGRAPH:'A5F6:42486:仳
CJK UNIFIED IDEOGRAPH:'A5F7:42487:份
CJK UNIFIED IDEOGRAPH:'A5F8:42488:企
CJK UNIFIED IDEOGRAPH:'A5F9:42489:伋
CJK UNIFIED IDEOGRAPH:'A5FA:42490:光
CJK UNIFIED IDEOGRAPH:'A5FB:42491:兇
CJK UNIFIED IDEOGRAPH:'A5FC:42492:兆
CJK UNIFIED IDEOGRAPH:'A5FD:42493:先
CJK UNIFIED IDEOGRAPH:'A5FE:42494:全
CJK UNIFIED IDEOGRAPH:'A640:42560:共
CJK UNIFIED IDEOGRAPH:'A641:42561:再
CJK UNIFIED IDEOGRAPH:'A642:42562:冰
CJK UNIFIED IDEOGRAPH:'A643:42563:列
CJK UNIFIED IDEOGRAPH:'A644:42564:刑
CJK UNIFIED IDEOGRAPH:'A645:42565:划
CJK UNIFIED IDEOGRAPH:'A646:42566:刎
CJK UNIFIED IDEOGRAPH:'A647:42567:刖
CJK UNIFIED IDEOGRAPH:'A648:42568:劣
CJK UNIFIED IDEOGRAPH:'A649:42569:匈
CJK UNIFIED IDEOGRAPH:'A64A:42570:匡
CJK UNIFIED IDEOGRAPH:'A64B:42571:匠
CJK UNIFIED IDEOGRAPH:'A64C:42572:印
CJK UNIFIED IDEOGRAPH:'A64D:42573:危
CJK UNIFIED IDEOGRAPH:'A64E:42574:吉
CJK UNIFIED IDEOGRAPH:'A64F:42575:吏
CJK UNIFIED IDEOGRAPH:'A650:42576:同
CJK UNIFIED IDEOGRAPH:'A651:42577:吊
CJK UNIFIED IDEOGRAPH:'A652:42578:吐
CJK UNIFIED IDEOGRAPH:'A653:42579:吁
CJK UNIFIED IDEOGRAPH:'A654:42580:吋
CJK UNIFIED IDEOGRAPH:'A655:42581:各
CJK UNIFIED IDEOGRAPH:'A656:42582:向
CJK UNIFIED IDEOGRAPH:'A657:42583:名
CJK UNIFIED IDEOGRAPH:'A658:42584:合
CJK UNIFIED IDEOGRAPH:'A659:42585:吃
CJK UNIFIED IDEOGRAPH:'A65A:42586:后
CJK UNIFIED IDEOGRAPH:'A65B:42587:吆
CJK UNIFIED IDEOGRAPH:'A65C:42588:吒
CJK UNIFIED IDEOGRAPH:'A65D:42589:因
CJK UNIFIED IDEOGRAPH:'A65E:42590:回
CJK UNIFIED IDEOGRAPH:'A65F:42591:囝
CJK UNIFIED IDEOGRAPH:'A660:42592:圳
CJK UNIFIED IDEOGRAPH:'A661:42593:地
CJK UNIFIED IDEOGRAPH:'A662:42594:在
CJK UNIFIED IDEOGRAPH:'A663:42595:圭
CJK UNIFIED IDEOGRAPH:'A664:42596:圬
CJK UNIFIED IDEOGRAPH:'A665:42597:圯
CJK UNIFIED IDEOGRAPH:'A666:42598:圩
CJK UNIFIED IDEOGRAPH:'A667:42599:夙
CJK UNIFIED IDEOGRAPH:'A668:42600:多
CJK UNIFIED IDEOGRAPH:'A669:42601:夷
CJK UNIFIED IDEOGRAPH:'A66A:42602:夸
CJK UNIFIED IDEOGRAPH:'A66B:42603:妄
CJK UNIFIED IDEOGRAPH:'A66C:42604:奸
CJK UNIFIED IDEOGRAPH:'A66D:42605:妃
CJK UNIFIED IDEOGRAPH:'A66E:42606:好
CJK UNIFIED IDEOGRAPH:'A66F:42607:她
CJK UNIFIED IDEOGRAPH:'A670:42608:如
CJK UNIFIED IDEOGRAPH:'A671:42609:妁
CJK UNIFIED IDEOGRAPH:'A672:42610:字
CJK UNIFIED IDEOGRAPH:'A673:42611:存
CJK UNIFIED IDEOGRAPH:'A674:42612:宇
CJK UNIFIED IDEOGRAPH:'A675:42613:守
CJK UNIFIED IDEOGRAPH:'A676:42614:宅
CJK UNIFIED IDEOGRAPH:'A677:42615:安
CJK UNIFIED IDEOGRAPH:'A678:42616:寺
CJK UNIFIED IDEOGRAPH:'A679:42617:尖
CJK UNIFIED IDEOGRAPH:'A67A:42618:屹
CJK UNIFIED IDEOGRAPH:'A67B:42619:州
CJK UNIFIED IDEOGRAPH:'A67C:42620:帆
CJK UNIFIED IDEOGRAPH:'A67D:42621:并
CJK UNIFIED IDEOGRAPH:'A67E:42622:年
CJK UNIFIED IDEOGRAPH:'A6A1:42657:式
CJK UNIFIED IDEOGRAPH:'A6A2:42658:弛
CJK UNIFIED IDEOGRAPH:'A6A3:42659:忙
CJK UNIFIED IDEOGRAPH:'A6A4:42660:忖
CJK UNIFIED IDEOGRAPH:'A6A5:42661:戎
CJK UNIFIED IDEOGRAPH:'A6A6:42662:戌
CJK UNIFIED IDEOGRAPH:'A6A7:42663:戍
CJK UNIFIED IDEOGRAPH:'A6A8:42664:成
CJK UNIFIED IDEOGRAPH:'A6A9:42665:扣
CJK UNIFIED IDEOGRAPH:'A6AA:42666:扛
CJK UNIFIED IDEOGRAPH:'A6AB:42667:托
CJK UNIFIED IDEOGRAPH:'A6AC:42668:收
CJK UNIFIED IDEOGRAPH:'A6AD:42669:早
CJK UNIFIED IDEOGRAPH:'A6AE:42670:旨
CJK UNIFIED IDEOGRAPH:'A6AF:42671:旬
CJK UNIFIED IDEOGRAPH:'A6B0:42672:旭
CJK UNIFIED IDEOGRAPH:'A6B1:42673:曲
CJK UNIFIED IDEOGRAPH:'A6B2:42674:曳
CJK UNIFIED IDEOGRAPH:'A6B3:42675:有
CJK UNIFIED IDEOGRAPH:'A6B4:42676:朽
CJK UNIFIED IDEOGRAPH:'A6B5:42677:朴
CJK UNIFIED IDEOGRAPH:'A6B6:42678:朱
CJK UNIFIED IDEOGRAPH:'A6B7:42679:朵
CJK UNIFIED IDEOGRAPH:'A6B8:42680:次
CJK UNIFIED IDEOGRAPH:'A6B9:42681:此
CJK UNIFIED IDEOGRAPH:'A6BA:42682:死
CJK UNIFIED IDEOGRAPH:'A6BB:42683:氖
CJK UNIFIED IDEOGRAPH:'A6BC:42684:汝
CJK UNIFIED IDEOGRAPH:'A6BD:42685:汗
CJK UNIFIED IDEOGRAPH:'A6BE:42686:汙
CJK UNIFIED IDEOGRAPH:'A6BF:42687:江
CJK UNIFIED IDEOGRAPH:'A6C0:42688:池
CJK UNIFIED IDEOGRAPH:'A6C1:42689:汐
CJK UNIFIED IDEOGRAPH:'A6C2:42690:汕
CJK UNIFIED IDEOGRAPH:'A6C3:42691:污
CJK UNIFIED IDEOGRAPH:'A6C4:42692:汛
CJK UNIFIED IDEOGRAPH:'A6C5:42693:汍
CJK UNIFIED IDEOGRAPH:'A6C6:42694:汎
CJK UNIFIED IDEOGRAPH:'A6C7:42695:灰
CJK UNIFIED IDEOGRAPH:'A6C8:42696:牟
CJK UNIFIED IDEOGRAPH:'A6C9:42697:牝
CJK UNIFIED IDEOGRAPH:'A6CA:42698:百
CJK UNIFIED IDEOGRAPH:'A6CB:42699:竹
CJK UNIFIED IDEOGRAPH:'A6CC:42700:米
CJK UNIFIED IDEOGRAPH:'A6CD:42701:糸
CJK UNIFIED IDEOGRAPH:'A6CE:42702:缶
CJK UNIFIED IDEOGRAPH:'A6CF:42703:羊
CJK UNIFIED IDEOGRAPH:'A6D0:42704:羽
CJK UNIFIED IDEOGRAPH:'A6D1:42705:老
CJK UNIFIED IDEOGRAPH:'A6D2:42706:考
CJK UNIFIED IDEOGRAPH:'A6D3:42707:而
CJK UNIFIED IDEOGRAPH:'A6D4:42708:耒
CJK UNIFIED IDEOGRAPH:'A6D5:42709:耳
CJK UNIFIED IDEOGRAPH:'A6D6:42710:聿
CJK UNIFIED IDEOGRAPH:'A6D7:42711:肉
CJK UNIFIED IDEOGRAPH:'A6D8:42712:肋
CJK UNIFIED IDEOGRAPH:'A6D9:42713:肌
CJK UNIFIED IDEOGRAPH:'A6DA:42714:臣
CJK UNIFIED IDEOGRAPH:'A6DB:42715:自
CJK UNIFIED IDEOGRAPH:'A6DC:42716:至
CJK UNIFIED IDEOGRAPH:'A6DD:42717:臼
CJK UNIFIED IDEOGRAPH:'A6DE:42718:舌
CJK UNIFIED IDEOGRAPH:'A6DF:42719:舛
CJK UNIFIED IDEOGRAPH:'A6E0:42720:舟
CJK UNIFIED IDEOGRAPH:'A6E1:42721:艮
CJK UNIFIED IDEOGRAPH:'A6E2:42722:色
CJK UNIFIED IDEOGRAPH:'A6E3:42723:艾
CJK UNIFIED IDEOGRAPH:'A6E4:42724:虫
CJK UNIFIED IDEOGRAPH:'A6E5:42725:血
CJK UNIFIED IDEOGRAPH:'A6E6:42726:行
CJK UNIFIED IDEOGRAPH:'A6E7:42727:衣
CJK UNIFIED IDEOGRAPH:'A6E8:42728:西
CJK UNIFIED IDEOGRAPH:'A6E9:42729:阡
CJK UNIFIED IDEOGRAPH:'A6EA:42730:串
CJK UNIFIED IDEOGRAPH:'A6EB:42731:亨
CJK UNIFIED IDEOGRAPH:'A6EC:42732:位
CJK UNIFIED IDEOGRAPH:'A6ED:42733:住
CJK UNIFIED IDEOGRAPH:'A6EE:42734:佇
CJK UNIFIED IDEOGRAPH:'A6EF:42735:佗
CJK UNIFIED IDEOGRAPH:'A6F0:42736:佞
CJK UNIFIED IDEOGRAPH:'A6F1:42737:伴
CJK UNIFIED IDEOGRAPH:'A6F2:42738:佛
CJK UNIFIED IDEOGRAPH:'A6F3:42739:何
CJK UNIFIED IDEOGRAPH:'A6F4:42740:估
CJK UNIFIED IDEOGRAPH:'A6F5:42741:佐
CJK UNIFIED IDEOGRAPH:'A6F6:42742:佑
CJK UNIFIED IDEOGRAPH:'A6F7:42743:伽
CJK UNIFIED IDEOGRAPH:'A6F8:42744:伺
CJK UNIFIED IDEOGRAPH:'A6F9:42745:伸
CJK UNIFIED IDEOGRAPH:'A6FA:42746:佃
CJK UNIFIED IDEOGRAPH:'A6FB:42747:佔
CJK UNIFIED IDEOGRAPH:'A6FC:42748:似
CJK UNIFIED IDEOGRAPH:'A6FD:42749:但
CJK UNIFIED IDEOGRAPH:'A6FE:42750:佣
CJK UNIFIED IDEOGRAPH:'A740:42816:作
CJK UNIFIED IDEOGRAPH:'A741:42817:你
CJK UNIFIED IDEOGRAPH:'A742:42818:伯
CJK UNIFIED IDEOGRAPH:'A743:42819:低
CJK UNIFIED IDEOGRAPH:'A744:42820:伶
CJK UNIFIED IDEOGRAPH:'A745:42821:余
CJK UNIFIED IDEOGRAPH:'A746:42822:佝
CJK UNIFIED IDEOGRAPH:'A747:42823:佈
CJK UNIFIED IDEOGRAPH:'A748:42824:佚
CJK UNIFIED IDEOGRAPH:'A749:42825:兌
CJK UNIFIED IDEOGRAPH:'A74A:42826:克
CJK UNIFIED IDEOGRAPH:'A74B:42827:免
CJK UNIFIED IDEOGRAPH:'A74C:42828:兵
CJK UNIFIED IDEOGRAPH:'A74D:42829:冶
CJK UNIFIED IDEOGRAPH:'A74E:42830:冷
CJK UNIFIED IDEOGRAPH:'A74F:42831:別
CJK UNIFIED IDEOGRAPH:'A750:42832:判
CJK UNIFIED IDEOGRAPH:'A751:42833:利
CJK UNIFIED IDEOGRAPH:'A752:42834:刪
CJK UNIFIED IDEOGRAPH:'A753:42835:刨
CJK UNIFIED IDEOGRAPH:'A754:42836:劫
CJK UNIFIED IDEOGRAPH:'A755:42837:助
CJK UNIFIED IDEOGRAPH:'A756:42838:努
CJK UNIFIED IDEOGRAPH:'A757:42839:劬
CJK UNIFIED IDEOGRAPH:'A758:42840:匣
CJK UNIFIED IDEOGRAPH:'A759:42841:即
CJK UNIFIED IDEOGRAPH:'A75A:42842:卵
CJK UNIFIED IDEOGRAPH:'A75B:42843:吝
CJK UNIFIED IDEOGRAPH:'A75C:42844:吭
CJK UNIFIED IDEOGRAPH:'A75D:42845:吞
CJK UNIFIED IDEOGRAPH:'A75E:42846:吾
CJK UNIFIED IDEOGRAPH:'A75F:42847:否
CJK UNIFIED IDEOGRAPH:'A760:42848:呎
CJK UNIFIED IDEOGRAPH:'A761:42849:吧
CJK UNIFIED IDEOGRAPH:'A762:42850:呆
CJK UNIFIED IDEOGRAPH:'A763:42851:呃
CJK UNIFIED IDEOGRAPH:'A764:42852:吳
CJK UNIFIED IDEOGRAPH:'A765:42853:呈
CJK UNIFIED IDEOGRAPH:'A766:42854:呂
CJK UNIFIED IDEOGRAPH:'A767:42855:君
CJK UNIFIED IDEOGRAPH:'A768:42856:吩
CJK UNIFIED IDEOGRAPH:'A769:42857:告
CJK UNIFIED IDEOGRAPH:'A76A:42858:吹
CJK UNIFIED IDEOGRAPH:'A76B:42859:吻
CJK UNIFIED IDEOGRAPH:'A76C:42860:吸
CJK UNIFIED IDEOGRAPH:'A76D:42861:吮
CJK UNIFIED IDEOGRAPH:'A76E:42862:吵
CJK UNIFIED IDEOGRAPH:'A76F:42863:吶
CJK UNIFIED IDEOGRAPH:'A770:42864:吠
CJK UNIFIED IDEOGRAPH:'A771:42865:吼
CJK UNIFIED IDEOGRAPH:'A772:42866:呀
CJK UNIFIED IDEOGRAPH:'A773:42867:吱
CJK UNIFIED IDEOGRAPH:'A774:42868:含
CJK UNIFIED IDEOGRAPH:'A775:42869:吟
CJK UNIFIED IDEOGRAPH:'A776:42870:听
CJK UNIFIED IDEOGRAPH:'A777:42871:囪
CJK UNIFIED IDEOGRAPH:'A778:42872:困
CJK UNIFIED IDEOGRAPH:'A779:42873:囤
CJK UNIFIED IDEOGRAPH:'A77A:42874:囫
CJK UNIFIED IDEOGRAPH:'A77B:42875:坊
CJK UNIFIED IDEOGRAPH:'A77C:42876:坑
CJK UNIFIED IDEOGRAPH:'A77D:42877:址
CJK UNIFIED IDEOGRAPH:'A77E:42878:坍
CJK UNIFIED IDEOGRAPH:'A7A1:42913:均
CJK UNIFIED IDEOGRAPH:'A7A2:42914:坎
CJK UNIFIED IDEOGRAPH:'A7A3:42915:圾
CJK UNIFIED IDEOGRAPH:'A7A4:42916:坐
CJK UNIFIED IDEOGRAPH:'A7A5:42917:坏
CJK UNIFIED IDEOGRAPH:'A7A6:42918:圻
CJK UNIFIED IDEOGRAPH:'A7A7:42919:壯
CJK UNIFIED IDEOGRAPH:'A7A8:42920:夾
CJK UNIFIED IDEOGRAPH:'A7A9:42921:妝
CJK UNIFIED IDEOGRAPH:'A7AA:42922:妒
CJK UNIFIED IDEOGRAPH:'A7AB:42923:妨
CJK UNIFIED IDEOGRAPH:'A7AC:42924:妞
CJK UNIFIED IDEOGRAPH:'A7AD:42925:妣
CJK UNIFIED IDEOGRAPH:'A7AE:42926:妙
CJK UNIFIED IDEOGRAPH:'A7AF:42927:妖
CJK UNIFIED IDEOGRAPH:'A7B0:42928:妍
CJK UNIFIED IDEOGRAPH:'A7B1:42929:妤
CJK UNIFIED IDEOGRAPH:'A7B2:42930:妓
CJK UNIFIED IDEOGRAPH:'A7B3:42931:妊
CJK UNIFIED IDEOGRAPH:'A7B4:42932:妥
CJK UNIFIED IDEOGRAPH:'A7B5:42933:孝
CJK UNIFIED IDEOGRAPH:'A7B6:42934:孜
CJK UNIFIED IDEOGRAPH:'A7B7:42935:孚
CJK UNIFIED IDEOGRAPH:'A7B8:42936:孛
CJK UNIFIED IDEOGRAPH:'A7B9:42937:完
CJK UNIFIED IDEOGRAPH:'A7BA:42938:宋
CJK UNIFIED IDEOGRAPH:'A7BB:42939:宏
CJK UNIFIED IDEOGRAPH:'A7BC:42940:尬
CJK UNIFIED IDEOGRAPH:'A7BD:42941:局
CJK UNIFIED IDEOGRAPH:'A7BE:42942:屁
CJK UNIFIED IDEOGRAPH:'A7BF:42943:尿
CJK UNIFIED IDEOGRAPH:'A7C0:42944:尾
CJK UNIFIED IDEOGRAPH:'A7C1:42945:岐
CJK UNIFIED IDEOGRAPH:'A7C2:42946:岑
CJK UNIFIED IDEOGRAPH:'A7C3:42947:岔
CJK UNIFIED IDEOGRAPH:'A7C4:42948:岌
CJK UNIFIED IDEOGRAPH:'A7C5:42949:巫
CJK UNIFIED IDEOGRAPH:'A7C6:42950:希
CJK UNIFIED IDEOGRAPH:'A7C7:42951:序
CJK UNIFIED IDEOGRAPH:'A7C8:42952:庇
CJK UNIFIED IDEOGRAPH:'A7C9:42953:床
CJK UNIFIED IDEOGRAPH:'A7CA:42954:廷
CJK UNIFIED IDEOGRAPH:'A7CB:42955:弄
CJK UNIFIED IDEOGRAPH:'A7CC:42956:弟
CJK UNIFIED IDEOGRAPH:'A7CD:42957:彤
CJK UNIFIED IDEOGRAPH:'A7CE:42958:形
CJK UNIFIED IDEOGRAPH:'A7CF:42959:彷
CJK UNIFIED IDEOGRAPH:'A7D0:42960:役
CJK UNIFIED IDEOGRAPH:'A7D1:42961:忘
CJK UNIFIED IDEOGRAPH:'A7D2:42962:忌
CJK UNIFIED IDEOGRAPH:'A7D3:42963:志
CJK UNIFIED IDEOGRAPH:'A7D4:42964:忍
CJK UNIFIED IDEOGRAPH:'A7D5:42965:忱
CJK UNIFIED IDEOGRAPH:'A7D6:42966:快
CJK UNIFIED IDEOGRAPH:'A7D7:42967:忸
CJK UNIFIED IDEOGRAPH:'A7D8:42968:忪
CJK UNIFIED IDEOGRAPH:'A7D9:42969:戒
CJK UNIFIED IDEOGRAPH:'A7DA:42970:我
CJK UNIFIED IDEOGRAPH:'A7DB:42971:抄
CJK UNIFIED IDEOGRAPH:'A7DC:42972:抗
CJK UNIFIED IDEOGRAPH:'A7DD:42973:抖
CJK UNIFIED IDEOGRAPH:'A7DE:42974:技
CJK UNIFIED IDEOGRAPH:'A7DF:42975:扶
CJK UNIFIED IDEOGRAPH:'A7E0:42976:抉
CJK UNIFIED IDEOGRAPH:'A7E1:42977:扭
CJK UNIFIED IDEOGRAPH:'A7E2:42978:把
CJK UNIFIED IDEOGRAPH:'A7E3:42979:扼
CJK UNIFIED IDEOGRAPH:'A7E4:42980:找
CJK UNIFIED IDEOGRAPH:'A7E5:42981:批
CJK UNIFIED IDEOGRAPH:'A7E6:42982:扳
CJK UNIFIED IDEOGRAPH:'A7E7:42983:抒
CJK UNIFIED IDEOGRAPH:'A7E8:42984:扯
CJK UNIFIED IDEOGRAPH:'A7E9:42985:折
CJK UNIFIED IDEOGRAPH:'A7EA:42986:扮
CJK UNIFIED IDEOGRAPH:'A7EB:42987:投
CJK UNIFIED IDEOGRAPH:'A7EC:42988:抓
CJK UNIFIED IDEOGRAPH:'A7ED:42989:抑
CJK UNIFIED IDEOGRAPH:'A7EE:42990:抆
CJK UNIFIED IDEOGRAPH:'A7EF:42991:改
CJK UNIFIED IDEOGRAPH:'A7F0:42992:攻
CJK UNIFIED IDEOGRAPH:'A7F1:42993:攸
CJK UNIFIED IDEOGRAPH:'A7F2:42994:旱
CJK UNIFIED IDEOGRAPH:'A7F3:42995:更
CJK UNIFIED IDEOGRAPH:'A7F4:42996:束
CJK UNIFIED IDEOGRAPH:'A7F5:42997:李
CJK UNIFIED IDEOGRAPH:'A7F6:42998:杏
CJK UNIFIED IDEOGRAPH:'A7F7:42999:材
CJK UNIFIED IDEOGRAPH:'A7F8:43000:村
CJK UNIFIED IDEOGRAPH:'A7F9:43001:杜
CJK UNIFIED IDEOGRAPH:'A7FA:43002:杖
CJK UNIFIED IDEOGRAPH:'A7FB:43003:杞
CJK UNIFIED IDEOGRAPH:'A7FC:43004:杉
CJK UNIFIED IDEOGRAPH:'A7FD:43005:杆
CJK UNIFIED IDEOGRAPH:'A7FE:43006:杠
CJK UNIFIED IDEOGRAPH:'A840:43072:杓
CJK UNIFIED IDEOGRAPH:'A841:43073:杗
CJK UNIFIED IDEOGRAPH:'A842:43074:步
CJK UNIFIED IDEOGRAPH:'A843:43075:每
CJK UNIFIED IDEOGRAPH:'A844:43076:求
CJK UNIFIED IDEOGRAPH:'A845:43077:汞
CJK UNIFIED IDEOGRAPH:'A846:43078:沙
CJK UNIFIED IDEOGRAPH:'A847:43079:沁
CJK UNIFIED IDEOGRAPH:'A848:43080:沈
CJK UNIFIED IDEOGRAPH:'A849:43081:沉
CJK UNIFIED IDEOGRAPH:'A84A:43082:沅
CJK UNIFIED IDEOGRAPH:'A84B:43083:沛
CJK UNIFIED IDEOGRAPH:'A84C:43084:汪
CJK UNIFIED IDEOGRAPH:'A84D:43085:決
CJK UNIFIED IDEOGRAPH:'A84E:43086:沐
CJK UNIFIED IDEOGRAPH:'A84F:43087:汰
CJK UNIFIED IDEOGRAPH:'A850:43088:沌
CJK UNIFIED IDEOGRAPH:'A851:43089:汨
CJK UNIFIED IDEOGRAPH:'A852:43090:沖
CJK UNIFIED IDEOGRAPH:'A853:43091:沒
CJK UNIFIED IDEOGRAPH:'A854:43092:汽
CJK UNIFIED IDEOGRAPH:'A855:43093:沃
CJK UNIFIED IDEOGRAPH:'A856:43094:汲
CJK UNIFIED IDEOGRAPH:'A857:43095:汾
CJK UNIFIED IDEOGRAPH:'A858:43096:汴
CJK UNIFIED IDEOGRAPH:'A859:43097:沆
CJK UNIFIED IDEOGRAPH:'A85A:43098:汶
CJK UNIFIED IDEOGRAPH:'A85B:43099:沍
CJK UNIFIED IDEOGRAPH:'A85C:43100:沔
CJK UNIFIED IDEOGRAPH:'A85D:43101:沘
CJK UNIFIED IDEOGRAPH:'A85E:43102:沂
CJK UNIFIED IDEOGRAPH:'A85F:43103:灶
CJK UNIFIED IDEOGRAPH:'A860:43104:灼
CJK UNIFIED IDEOGRAPH:'A861:43105:災
CJK UNIFIED IDEOGRAPH:'A862:43106:灸
CJK UNIFIED IDEOGRAPH:'A863:43107:牢
CJK UNIFIED IDEOGRAPH:'A864:43108:牡
CJK UNIFIED IDEOGRAPH:'A865:43109:牠
CJK UNIFIED IDEOGRAPH:'A866:43110:狄
CJK UNIFIED IDEOGRAPH:'A867:43111:狂
CJK UNIFIED IDEOGRAPH:'A868:43112:玖
CJK UNIFIED IDEOGRAPH:'A869:43113:甬
CJK UNIFIED IDEOGRAPH:'A86A:43114:甫
CJK UNIFIED IDEOGRAPH:'A86B:43115:男
CJK UNIFIED IDEOGRAPH:'A86C:43116:甸
CJK UNIFIED IDEOGRAPH:'A86D:43117:皂
CJK UNIFIED IDEOGRAPH:'A86E:43118:盯
CJK UNIFIED IDEOGRAPH:'A86F:43119:矣
CJK UNIFIED IDEOGRAPH:'A870:43120:私
CJK UNIFIED IDEOGRAPH:'A871:43121:秀
CJK UNIFIED IDEOGRAPH:'A872:43122:禿
CJK UNIFIED IDEOGRAPH:'A873:43123:究
CJK UNIFIED IDEOGRAPH:'A874:43124:系
CJK UNIFIED IDEOGRAPH:'A875:43125:罕
CJK UNIFIED IDEOGRAPH:'A876:43126:肖
CJK UNIFIED IDEOGRAPH:'A877:43127:肓
CJK UNIFIED IDEOGRAPH:'A878:43128:肝
CJK UNIFIED IDEOGRAPH:'A879:43129:肘
CJK UNIFIED IDEOGRAPH:'A87A:43130:肛
CJK UNIFIED IDEOGRAPH:'A87B:43131:肚
CJK UNIFIED IDEOGRAPH:'A87C:43132:育
CJK UNIFIED IDEOGRAPH:'A87D:43133:良
CJK UNIFIED IDEOGRAPH:'A87E:43134:芒
CJK UNIFIED IDEOGRAPH:'A8A1:43169:芋
CJK UNIFIED IDEOGRAPH:'A8A2:43170:芍
CJK UNIFIED IDEOGRAPH:'A8A3:43171:見
CJK UNIFIED IDEOGRAPH:'A8A4:43172:角
CJK UNIFIED IDEOGRAPH:'A8A5:43173:言
CJK UNIFIED IDEOGRAPH:'A8A6:43174:谷
CJK UNIFIED IDEOGRAPH:'A8A7:43175:豆
CJK UNIFIED IDEOGRAPH:'A8A8:43176:豕
CJK UNIFIED IDEOGRAPH:'A8A9:43177:貝
CJK UNIFIED IDEOGRAPH:'A8AA:43178:赤
CJK UNIFIED IDEOGRAPH:'A8AB:43179:走
CJK UNIFIED IDEOGRAPH:'A8AC:43180:足
CJK UNIFIED IDEOGRAPH:'A8AD:43181:身
CJK UNIFIED IDEOGRAPH:'A8AE:43182:車
CJK UNIFIED IDEOGRAPH:'A8AF:43183:辛
CJK UNIFIED IDEOGRAPH:'A8B0:43184:辰
CJK UNIFIED IDEOGRAPH:'A8B1:43185:迂
CJK UNIFIED IDEOGRAPH:'A8B2:43186:迆
CJK UNIFIED IDEOGRAPH:'A8B3:43187:迅
CJK UNIFIED IDEOGRAPH:'A8B4:43188:迄
CJK UNIFIED IDEOGRAPH:'A8B5:43189:巡
CJK UNIFIED IDEOGRAPH:'A8B6:43190:邑
CJK UNIFIED IDEOGRAPH:'A8B7:43191:邢
CJK UNIFIED IDEOGRAPH:'A8B8:43192:邪
CJK UNIFIED IDEOGRAPH:'A8B9:43193:邦
CJK UNIFIED IDEOGRAPH:'A8BA:43194:那
CJK UNIFIED IDEOGRAPH:'A8BB:43195:酉
CJK UNIFIED IDEOGRAPH:'A8BC:43196:釆
CJK UNIFIED IDEOGRAPH:'A8BD:43197:里
CJK UNIFIED IDEOGRAPH:'A8BE:43198:防
CJK UNIFIED IDEOGRAPH:'A8BF:43199:阮
CJK UNIFIED IDEOGRAPH:'A8C0:43200:阱
CJK UNIFIED IDEOGRAPH:'A8C1:43201:阪
CJK UNIFIED IDEOGRAPH:'A8C2:43202:阬
CJK UNIFIED IDEOGRAPH:'A8C3:43203:並
CJK UNIFIED IDEOGRAPH:'A8C4:43204:乖
CJK UNIFIED IDEOGRAPH:'A8C5:43205:乳
CJK UNIFIED IDEOGRAPH:'A8C6:43206:事
CJK UNIFIED IDEOGRAPH:'A8C7:43207:些
CJK UNIFIED IDEOGRAPH:'A8C8:43208:亞
CJK UNIFIED IDEOGRAPH:'A8C9:43209:享
CJK UNIFIED IDEOGRAPH:'A8CA:43210:京
CJK UNIFIED IDEOGRAPH:'A8CB:43211:佯
CJK UNIFIED IDEOGRAPH:'A8CC:43212:依
CJK UNIFIED IDEOGRAPH:'A8CD:43213:侍
CJK UNIFIED IDEOGRAPH:'A8CE:43214:佳
CJK UNIFIED IDEOGRAPH:'A8CF:43215:使
CJK UNIFIED IDEOGRAPH:'A8D0:43216:佬
CJK UNIFIED IDEOGRAPH:'A8D1:43217:供
CJK UNIFIED IDEOGRAPH:'A8D2:43218:例
CJK UNIFIED IDEOGRAPH:'A8D3:43219:來
CJK UNIFIED IDEOGRAPH:'A8D4:43220:侃
CJK UNIFIED IDEOGRAPH:'A8D5:43221:佰
CJK UNIFIED IDEOGRAPH:'A8D6:43222:併
CJK UNIFIED IDEOGRAPH:'A8D7:43223:侈
CJK UNIFIED IDEOGRAPH:'A8D8:43224:佩
CJK UNIFIED IDEOGRAPH:'A8D9:43225:佻
CJK UNIFIED IDEOGRAPH:'A8DA:43226:侖
CJK UNIFIED IDEOGRAPH:'A8DB:43227:佾
CJK UNIFIED IDEOGRAPH:'A8DC:43228:侏
CJK UNIFIED IDEOGRAPH:'A8DD:43229:侑
CJK UNIFIED IDEOGRAPH:'A8DE:43230:佺
CJK UNIFIED IDEOGRAPH:'A8DF:43231:兔
CJK UNIFIED IDEOGRAPH:'A8E0:43232:兒
CJK UNIFIED IDEOGRAPH:'A8E1:43233:兕
CJK UNIFIED IDEOGRAPH:'A8E2:43234:兩
CJK UNIFIED IDEOGRAPH:'A8E3:43235:具
CJK UNIFIED IDEOGRAPH:'A8E4:43236:其
CJK UNIFIED IDEOGRAPH:'A8E5:43237:典
CJK UNIFIED IDEOGRAPH:'A8E6:43238:冽
CJK UNIFIED IDEOGRAPH:'A8E7:43239:函
CJK UNIFIED IDEOGRAPH:'A8E8:43240:刻
CJK UNIFIED IDEOGRAPH:'A8E9:43241:券
CJK UNIFIED IDEOGRAPH:'A8EA:43242:刷
CJK UNIFIED IDEOGRAPH:'A8EB:43243:刺
CJK UNIFIED IDEOGRAPH:'A8EC:43244:到
CJK UNIFIED IDEOGRAPH:'A8ED:43245:刮
CJK UNIFIED IDEOGRAPH:'A8EE:43246:制
CJK UNIFIED IDEOGRAPH:'A8EF:43247:剁
CJK UNIFIED IDEOGRAPH:'A8F0:43248:劾
CJK UNIFIED IDEOGRAPH:'A8F1:43249:劻
CJK UNIFIED IDEOGRAPH:'A8F2:43250:卒
CJK UNIFIED IDEOGRAPH:'A8F3:43251:協
CJK UNIFIED IDEOGRAPH:'A8F4:43252:卓
CJK UNIFIED IDEOGRAPH:'A8F5:43253:卑
CJK UNIFIED IDEOGRAPH:'A8F6:43254:卦
CJK UNIFIED IDEOGRAPH:'A8F7:43255:卷
CJK UNIFIED IDEOGRAPH:'A8F8:43256:卸
CJK UNIFIED IDEOGRAPH:'A8F9:43257:卹
CJK UNIFIED IDEOGRAPH:'A8FA:43258:取
CJK UNIFIED IDEOGRAPH:'A8FB:43259:叔
CJK UNIFIED IDEOGRAPH:'A8FC:43260:受
CJK UNIFIED IDEOGRAPH:'A8FD:43261:味
CJK UNIFIED IDEOGRAPH:'A8FE:43262:呵
CJK UNIFIED IDEOGRAPH:'A940:43328:咖
CJK UNIFIED IDEOGRAPH:'A941:43329:呸
CJK UNIFIED IDEOGRAPH:'A942:43330:咕
CJK UNIFIED IDEOGRAPH:'A943:43331:咀
CJK UNIFIED IDEOGRAPH:'A944:43332:呻
CJK UNIFIED IDEOGRAPH:'A945:43333:呷
CJK UNIFIED IDEOGRAPH:'A946:43334:咄
CJK UNIFIED IDEOGRAPH:'A947:43335:咒
CJK UNIFIED IDEOGRAPH:'A948:43336:咆
CJK UNIFIED IDEOGRAPH:'A949:43337:呼
CJK UNIFIED IDEOGRAPH:'A94A:43338:咐
CJK UNIFIED IDEOGRAPH:'A94B:43339:呱
CJK UNIFIED IDEOGRAPH:'A94C:43340:呶
CJK UNIFIED IDEOGRAPH:'A94D:43341:和
CJK UNIFIED IDEOGRAPH:'A94E:43342:咚
CJK UNIFIED IDEOGRAPH:'A94F:43343:呢
CJK UNIFIED IDEOGRAPH:'A950:43344:周
CJK UNIFIED IDEOGRAPH:'A951:43345:咋
CJK UNIFIED IDEOGRAPH:'A952:43346:命
CJK UNIFIED IDEOGRAPH:'A953:43347:咎
CJK UNIFIED IDEOGRAPH:'A954:43348:固
CJK UNIFIED IDEOGRAPH:'A955:43349:垃
CJK UNIFIED IDEOGRAPH:'A956:43350:坷
CJK UNIFIED IDEOGRAPH:'A957:43351:坪
CJK UNIFIED IDEOGRAPH:'A958:43352:坩
CJK UNIFIED IDEOGRAPH:'A959:43353:坡
CJK UNIFIED IDEOGRAPH:'A95A:43354:坦
CJK UNIFIED IDEOGRAPH:'A95B:43355:坤
CJK UNIFIED IDEOGRAPH:'A95C:43356:坼
CJK UNIFIED IDEOGRAPH:'A95D:43357:夜
CJK UNIFIED IDEOGRAPH:'A95E:43358:奉
CJK UNIFIED IDEOGRAPH:'A95F:43359:奇
CJK UNIFIED IDEOGRAPH:'A960:43360:奈
CJK UNIFIED IDEOGRAPH:'A961:43361:奄
CJK UNIFIED IDEOGRAPH:'A962:43362:奔
CJK UNIFIED IDEOGRAPH:'A963:43363:妾
CJK UNIFIED IDEOGRAPH:'A964:43364:妻
CJK UNIFIED IDEOGRAPH:'A965:43365:委
CJK UNIFIED IDEOGRAPH:'A966:43366:妹
CJK UNIFIED IDEOGRAPH:'A967:43367:妮
CJK UNIFIED IDEOGRAPH:'A968:43368:姑
CJK UNIFIED IDEOGRAPH:'A969:43369:姆
CJK UNIFIED IDEOGRAPH:'A96A:43370:姐
CJK UNIFIED IDEOGRAPH:'A96B:43371:姍
CJK UNIFIED IDEOGRAPH:'A96C:43372:始
CJK UNIFIED IDEOGRAPH:'A96D:43373:姓
CJK UNIFIED IDEOGRAPH:'A96E:43374:姊
CJK UNIFIED IDEOGRAPH:'A96F:43375:妯
CJK UNIFIED IDEOGRAPH:'A970:43376:妳
CJK UNIFIED IDEOGRAPH:'A971:43377:姒
CJK UNIFIED IDEOGRAPH:'A972:43378:姅
CJK UNIFIED IDEOGRAPH:'A973:43379:孟
CJK UNIFIED IDEOGRAPH:'A974:43380:孤
CJK UNIFIED IDEOGRAPH:'A975:43381:季
CJK UNIFIED IDEOGRAPH:'A976:43382:宗
CJK UNIFIED IDEOGRAPH:'A977:43383:定
CJK UNIFIED IDEOGRAPH:'A978:43384:官
CJK UNIFIED IDEOGRAPH:'A979:43385:宜
CJK UNIFIED IDEOGRAPH:'A97A:43386:宙
CJK UNIFIED IDEOGRAPH:'A97B:43387:宛
CJK UNIFIED IDEOGRAPH:'A97C:43388:尚
CJK UNIFIED IDEOGRAPH:'A97D:43389:屈
CJK UNIFIED IDEOGRAPH:'A97E:43390:居
CJK UNIFIED IDEOGRAPH:'A9A1:43425:屆
CJK UNIFIED IDEOGRAPH:'A9A2:43426:岷
CJK UNIFIED IDEOGRAPH:'A9A3:43427:岡
CJK UNIFIED IDEOGRAPH:'A9A4:43428:岸
CJK UNIFIED IDEOGRAPH:'A9A5:43429:岩
CJK UNIFIED IDEOGRAPH:'A9A6:43430:岫
CJK UNIFIED IDEOGRAPH:'A9A7:43431:岱
CJK UNIFIED IDEOGRAPH:'A9A8:43432:岳
CJK UNIFIED IDEOGRAPH:'A9A9:43433:帘
CJK UNIFIED IDEOGRAPH:'A9AA:43434:帚
CJK UNIFIED IDEOGRAPH:'A9AB:43435:帖
CJK UNIFIED IDEOGRAPH:'A9AC:43436:帕
CJK UNIFIED IDEOGRAPH:'A9AD:43437:帛
CJK UNIFIED IDEOGRAPH:'A9AE:43438:帑
CJK UNIFIED IDEOGRAPH:'A9AF:43439:幸
CJK UNIFIED IDEOGRAPH:'A9B0:43440:庚
CJK UNIFIED IDEOGRAPH:'A9B1:43441:店
CJK UNIFIED IDEOGRAPH:'A9B2:43442:府
CJK UNIFIED IDEOGRAPH:'A9B3:43443:底
CJK UNIFIED IDEOGRAPH:'A9B4:43444:庖
CJK UNIFIED IDEOGRAPH:'A9B5:43445:延
CJK UNIFIED IDEOGRAPH:'A9B6:43446:弦
CJK UNIFIED IDEOGRAPH:'A9B7:43447:弧
CJK UNIFIED IDEOGRAPH:'A9B8:43448:弩
CJK UNIFIED IDEOGRAPH:'A9B9:43449:往
CJK UNIFIED IDEOGRAPH:'A9BA:43450:征
CJK UNIFIED IDEOGRAPH:'A9BB:43451:彿
CJK UNIFIED IDEOGRAPH:'A9BC:43452:彼
CJK UNIFIED IDEOGRAPH:'A9BD:43453:忝
CJK UNIFIED IDEOGRAPH:'A9BE:43454:忠
CJK UNIFIED IDEOGRAPH:'A9BF:43455:忽
CJK UNIFIED IDEOGRAPH:'A9C0:43456:念
CJK UNIFIED IDEOGRAPH:'A9C1:43457:忿
CJK UNIFIED IDEOGRAPH:'A9C2:43458:怏
CJK UNIFIED IDEOGRAPH:'A9C3:43459:怔
CJK UNIFIED IDEOGRAPH:'A9C4:43460:怯
CJK UNIFIED IDEOGRAPH:'A9C5:43461:怵
CJK UNIFIED IDEOGRAPH:'A9C6:43462:怖
CJK UNIFIED IDEOGRAPH:'A9C7:43463:怪
CJK UNIFIED IDEOGRAPH:'A9C8:43464:怕
CJK UNIFIED IDEOGRAPH:'A9C9:43465:怡
CJK UNIFIED IDEOGRAPH:'A9CA:43466:性
CJK UNIFIED IDEOGRAPH:'A9CB:43467:怩
CJK UNIFIED IDEOGRAPH:'A9CC:43468:怫
CJK UNIFIED IDEOGRAPH:'A9CD:43469:怛
CJK UNIFIED IDEOGRAPH:'A9CE:43470:或
CJK UNIFIED IDEOGRAPH:'A9CF:43471:戕
CJK UNIFIED IDEOGRAPH:'A9D0:43472:房
CJK UNIFIED IDEOGRAPH:'A9D1:43473:戾
CJK UNIFIED IDEOGRAPH:'A9D2:43474:所
CJK UNIFIED IDEOGRAPH:'A9D3:43475:承
CJK UNIFIED IDEOGRAPH:'A9D4:43476:拉
CJK UNIFIED IDEOGRAPH:'A9D5:43477:拌
CJK UNIFIED IDEOGRAPH:'A9D6:43478:拄
CJK UNIFIED IDEOGRAPH:'A9D7:43479:抿
CJK UNIFIED IDEOGRAPH:'A9D8:43480:拂
CJK UNIFIED IDEOGRAPH:'A9D9:43481:抹
CJK UNIFIED IDEOGRAPH:'A9DA:43482:拒
CJK UNIFIED IDEOGRAPH:'A9DB:43483:招
CJK UNIFIED IDEOGRAPH:'A9DC:43484:披
CJK UNIFIED IDEOGRAPH:'A9DD:43485:拓
CJK UNIFIED IDEOGRAPH:'A9DE:43486:拔
CJK UNIFIED IDEOGRAPH:'A9DF:43487:拋
CJK UNIFIED IDEOGRAPH:'A9E0:43488:拈
CJK UNIFIED IDEOGRAPH:'A9E1:43489:抨
CJK UNIFIED IDEOGRAPH:'A9E2:43490:抽
CJK UNIFIED IDEOGRAPH:'A9E3:43491:押
CJK UNIFIED IDEOGRAPH:'A9E4:43492:拐
CJK UNIFIED IDEOGRAPH:'A9E5:43493:拙
CJK UNIFIED IDEOGRAPH:'A9E6:43494:拇
CJK UNIFIED IDEOGRAPH:'A9E7:43495:拍
CJK UNIFIED IDEOGRAPH:'A9E8:43496:抵
CJK UNIFIED IDEOGRAPH:'A9E9:43497:拚
CJK UNIFIED IDEOGRAPH:'A9EA:43498:抱
CJK UNIFIED IDEOGRAPH:'A9EB:43499:拘
CJK UNIFIED IDEOGRAPH:'A9EC:43500:拖
CJK UNIFIED IDEOGRAPH:'A9ED:43501:拗
CJK UNIFIED IDEOGRAPH:'A9EE:43502:拆
CJK UNIFIED IDEOGRAPH:'A9EF:43503:抬
CJK UNIFIED IDEOGRAPH:'A9F0:43504:拎
CJK UNIFIED IDEOGRAPH:'A9F1:43505:放
CJK UNIFIED IDEOGRAPH:'A9F2:43506:斧
CJK UNIFIED IDEOGRAPH:'A9F3:43507:於
CJK UNIFIED IDEOGRAPH:'A9F4:43508:旺
CJK UNIFIED IDEOGRAPH:'A9F5:43509:昔
CJK UNIFIED IDEOGRAPH:'A9F6:43510:易
CJK UNIFIED IDEOGRAPH:'A9F7:43511:昌
CJK UNIFIED IDEOGRAPH:'A9F8:43512:昆
CJK UNIFIED IDEOGRAPH:'A9F9:43513:昂
CJK UNIFIED IDEOGRAPH:'A9FA:43514:明
CJK UNIFIED IDEOGRAPH:'A9FB:43515:昀
CJK UNIFIED IDEOGRAPH:'A9FC:43516:昏
CJK UNIFIED IDEOGRAPH:'A9FD:43517:昕
CJK UNIFIED IDEOGRAPH:'A9FE:43518:昊
CJK UNIFIED IDEOGRAPH:'AA40:43584:昇
CJK UNIFIED IDEOGRAPH:'AA41:43585:服
CJK UNIFIED IDEOGRAPH:'AA42:43586:朋
CJK UNIFIED IDEOGRAPH:'AA43:43587:杭
CJK UNIFIED IDEOGRAPH:'AA44:43588:枋
CJK UNIFIED IDEOGRAPH:'AA45:43589:枕
CJK UNIFIED IDEOGRAPH:'AA46:43590:東
CJK UNIFIED IDEOGRAPH:'AA47:43591:果
CJK UNIFIED IDEOGRAPH:'AA48:43592:杳
CJK UNIFIED IDEOGRAPH:'AA49:43593:杷
CJK UNIFIED IDEOGRAPH:'AA4A:43594:枇
CJK UNIFIED IDEOGRAPH:'AA4B:43595:枝
CJK UNIFIED IDEOGRAPH:'AA4C:43596:林
CJK UNIFIED IDEOGRAPH:'AA4D:43597:杯
CJK UNIFIED IDEOGRAPH:'AA4E:43598:杰
CJK UNIFIED IDEOGRAPH:'AA4F:43599:板
CJK UNIFIED IDEOGRAPH:'AA50:43600:枉
CJK UNIFIED IDEOGRAPH:'AA51:43601:松
CJK UNIFIED IDEOGRAPH:'AA52:43602:析
CJK UNIFIED IDEOGRAPH:'AA53:43603:杵
CJK UNIFIED IDEOGRAPH:'AA54:43604:枚
CJK UNIFIED IDEOGRAPH:'AA55:43605:枓
CJK UNIFIED IDEOGRAPH:'AA56:43606:杼
CJK UNIFIED IDEOGRAPH:'AA57:43607:杪
CJK UNIFIED IDEOGRAPH:'AA58:43608:杲
CJK UNIFIED IDEOGRAPH:'AA59:43609:欣
CJK UNIFIED IDEOGRAPH:'AA5A:43610:武
CJK UNIFIED IDEOGRAPH:'AA5B:43611:歧
CJK UNIFIED IDEOGRAPH:'AA5C:43612:歿
CJK UNIFIED IDEOGRAPH:'AA5D:43613:氓
CJK UNIFIED IDEOGRAPH:'AA5E:43614:氛
CJK UNIFIED IDEOGRAPH:'AA5F:43615:泣
CJK UNIFIED IDEOGRAPH:'AA60:43616:注
CJK UNIFIED IDEOGRAPH:'AA61:43617:泳
CJK UNIFIED IDEOGRAPH:'AA62:43618:沱
CJK UNIFIED IDEOGRAPH:'AA63:43619:泌
CJK UNIFIED IDEOGRAPH:'AA64:43620:泥
CJK UNIFIED IDEOGRAPH:'AA65:43621:河
CJK UNIFIED IDEOGRAPH:'AA66:43622:沽
CJK UNIFIED IDEOGRAPH:'AA67:43623:沾
CJK UNIFIED IDEOGRAPH:'AA68:43624:沼
CJK UNIFIED IDEOGRAPH:'AA69:43625:波
CJK UNIFIED IDEOGRAPH:'AA6A:43626:沫
CJK UNIFIED IDEOGRAPH:'AA6B:43627:法
CJK UNIFIED IDEOGRAPH:'AA6C:43628:泓
CJK UNIFIED IDEOGRAPH:'AA6D:43629:沸
CJK UNIFIED IDEOGRAPH:'AA6E:43630:泄
CJK UNIFIED IDEOGRAPH:'AA6F:43631:油
CJK UNIFIED IDEOGRAPH:'AA70:43632:況
CJK UNIFIED IDEOGRAPH:'AA71:43633:沮
CJK UNIFIED IDEOGRAPH:'AA72:43634:泗
CJK UNIFIED IDEOGRAPH:'AA73:43635:泅
CJK UNIFIED IDEOGRAPH:'AA74:43636:泱
CJK UNIFIED IDEOGRAPH:'AA75:43637:沿
CJK UNIFIED IDEOGRAPH:'AA76:43638:治
CJK UNIFIED IDEOGRAPH:'AA77:43639:泡
CJK UNIFIED IDEOGRAPH:'AA78:43640:泛
CJK UNIFIED IDEOGRAPH:'AA79:43641:泊
CJK UNIFIED IDEOGRAPH:'AA7A:43642:沬
CJK UNIFIED IDEOGRAPH:'AA7B:43643:泯
CJK UNIFIED IDEOGRAPH:'AA7C:43644:泜
CJK UNIFIED IDEOGRAPH:'AA7D:43645:泖
CJK UNIFIED IDEOGRAPH:'AA7E:43646:泠
CJK UNIFIED IDEOGRAPH:'AAA1:43681:炕
CJK UNIFIED IDEOGRAPH:'AAA2:43682:炎
CJK UNIFIED IDEOGRAPH:'AAA3:43683:炒
CJK UNIFIED IDEOGRAPH:'AAA4:43684:炊
CJK UNIFIED IDEOGRAPH:'AAA5:43685:炙
CJK UNIFIED IDEOGRAPH:'AAA6:43686:爬
CJK UNIFIED IDEOGRAPH:'AAA7:43687:爭
CJK UNIFIED IDEOGRAPH:'AAA8:43688:爸
CJK UNIFIED IDEOGRAPH:'AAA9:43689:版
CJK UNIFIED IDEOGRAPH:'AAAA:43690:牧
CJK UNIFIED IDEOGRAPH:'AAAB:43691:物
CJK UNIFIED IDEOGRAPH:'AAAC:43692:狀
CJK UNIFIED IDEOGRAPH:'AAAD:43693:狎
CJK UNIFIED IDEOGRAPH:'AAAE:43694:狙
CJK UNIFIED IDEOGRAPH:'AAAF:43695:狗
CJK UNIFIED IDEOGRAPH:'AAB0:43696:狐
CJK UNIFIED IDEOGRAPH:'AAB1:43697:玩
CJK UNIFIED IDEOGRAPH:'AAB2:43698:玨
CJK UNIFIED IDEOGRAPH:'AAB3:43699:玟
CJK UNIFIED IDEOGRAPH:'AAB4:43700:玫
CJK UNIFIED IDEOGRAPH:'AAB5:43701:玥
CJK UNIFIED IDEOGRAPH:'AAB6:43702:甽
CJK UNIFIED IDEOGRAPH:'AAB7:43703:疝
CJK UNIFIED IDEOGRAPH:'AAB8:43704:疙
CJK UNIFIED IDEOGRAPH:'AAB9:43705:疚
CJK UNIFIED IDEOGRAPH:'AABA:43706:的
CJK UNIFIED IDEOGRAPH:'AABB:43707:盂
CJK UNIFIED IDEOGRAPH:'AABC:43708:盲
CJK UNIFIED IDEOGRAPH:'AABD:43709:直
CJK UNIFIED IDEOGRAPH:'AABE:43710:知
CJK UNIFIED IDEOGRAPH:'AABF:43711:矽
CJK UNIFIED IDEOGRAPH:'AAC0:43712:社
CJK UNIFIED IDEOGRAPH:'AAC1:43713:祀
CJK UNIFIED IDEOGRAPH:'AAC2:43714:祁
CJK UNIFIED IDEOGRAPH:'AAC3:43715:秉
CJK UNIFIED IDEOGRAPH:'AAC4:43716:秈
CJK UNIFIED IDEOGRAPH:'AAC5:43717:空
CJK UNIFIED IDEOGRAPH:'AAC6:43718:穹
CJK UNIFIED IDEOGRAPH:'AAC7:43719:竺
CJK UNIFIED IDEOGRAPH:'AAC8:43720:糾
CJK UNIFIED IDEOGRAPH:'AAC9:43721:罔
CJK UNIFIED IDEOGRAPH:'AACA:43722:羌
CJK UNIFIED IDEOGRAPH:'AACB:43723:羋
CJK UNIFIED IDEOGRAPH:'AACC:43724:者
CJK UNIFIED IDEOGRAPH:'AACD:43725:肺
CJK UNIFIED IDEOGRAPH:'AACE:43726:肥
CJK UNIFIED IDEOGRAPH:'AACF:43727:肢
CJK UNIFIED IDEOGRAPH:'AAD0:43728:肱
CJK UNIFIED IDEOGRAPH:'AAD1:43729:股
CJK UNIFIED IDEOGRAPH:'AAD2:43730:肫
CJK UNIFIED IDEOGRAPH:'AAD3:43731:肩
CJK UNIFIED IDEOGRAPH:'AAD4:43732:肴
CJK UNIFIED IDEOGRAPH:'AAD5:43733:肪
CJK UNIFIED IDEOGRAPH:'AAD6:43734:肯
CJK UNIFIED IDEOGRAPH:'AAD7:43735:臥
CJK UNIFIED IDEOGRAPH:'AAD8:43736:臾
CJK UNIFIED IDEOGRAPH:'AAD9:43737:舍
CJK UNIFIED IDEOGRAPH:'AADA:43738:芳
CJK UNIFIED IDEOGRAPH:'AADB:43739:芝
CJK UNIFIED IDEOGRAPH:'AADC:43740:芙
CJK UNIFIED IDEOGRAPH:'AADD:43741:芭
CJK UNIFIED IDEOGRAPH:'AADE:43742:芽
CJK UNIFIED IDEOGRAPH:'AADF:43743:芟
CJK UNIFIED IDEOGRAPH:'AAE0:43744:芹
CJK UNIFIED IDEOGRAPH:'AAE1:43745:花
CJK UNIFIED IDEOGRAPH:'AAE2:43746:芬
CJK UNIFIED IDEOGRAPH:'AAE3:43747:芥
CJK UNIFIED IDEOGRAPH:'AAE4:43748:芯
CJK UNIFIED IDEOGRAPH:'AAE5:43749:芸
CJK UNIFIED IDEOGRAPH:'AAE6:43750:芣
CJK UNIFIED IDEOGRAPH:'AAE7:43751:芰
CJK UNIFIED IDEOGRAPH:'AAE8:43752:芾
CJK UNIFIED IDEOGRAPH:'AAE9:43753:芷
CJK UNIFIED IDEOGRAPH:'AAEA:43754:虎
CJK UNIFIED IDEOGRAPH:'AAEB:43755:虱
CJK UNIFIED IDEOGRAPH:'AAEC:43756:初
CJK UNIFIED IDEOGRAPH:'AAED:43757:表
CJK UNIFIED IDEOGRAPH:'AAEE:43758:軋
CJK UNIFIED IDEOGRAPH:'AAEF:43759:迎
CJK UNIFIED IDEOGRAPH:'AAF0:43760:返
CJK UNIFIED IDEOGRAPH:'AAF1:43761:近
CJK UNIFIED IDEOGRAPH:'AAF2:43762:邵
CJK UNIFIED IDEOGRAPH:'AAF3:43763:邸
CJK UNIFIED IDEOGRAPH:'AAF4:43764:邱
CJK UNIFIED IDEOGRAPH:'AAF5:43765:邶
CJK UNIFIED IDEOGRAPH:'AAF6:43766:采
CJK UNIFIED IDEOGRAPH:'AAF7:43767:金
CJK UNIFIED IDEOGRAPH:'AAF8:43768:長
CJK UNIFIED IDEOGRAPH:'AAF9:43769:門
CJK UNIFIED IDEOGRAPH:'AAFA:43770:阜
CJK UNIFIED IDEOGRAPH:'AAFB:43771:陀
CJK UNIFIED IDEOGRAPH:'AAFC:43772:阿
CJK UNIFIED IDEOGRAPH:'AAFD:43773:阻
CJK UNIFIED IDEOGRAPH:'AAFE:43774:附
CJK UNIFIED IDEOGRAPH:'AB40:43840:陂
CJK UNIFIED IDEOGRAPH:'AB41:43841:隹
CJK UNIFIED IDEOGRAPH:'AB42:43842:雨
CJK UNIFIED IDEOGRAPH:'AB43:43843:青
CJK UNIFIED IDEOGRAPH:'AB44:43844:非
CJK UNIFIED IDEOGRAPH:'AB45:43845:亟
CJK UNIFIED IDEOGRAPH:'AB46:43846:亭
CJK UNIFIED IDEOGRAPH:'AB47:43847:亮
CJK UNIFIED IDEOGRAPH:'AB48:43848:信
CJK UNIFIED IDEOGRAPH:'AB49:43849:侵
CJK UNIFIED IDEOGRAPH:'AB4A:43850:侯
CJK UNIFIED IDEOGRAPH:'AB4B:43851:便
CJK UNIFIED IDEOGRAPH:'AB4C:43852:俠
CJK UNIFIED IDEOGRAPH:'AB4D:43853:俑
CJK UNIFIED IDEOGRAPH:'AB4E:43854:俏
CJK UNIFIED IDEOGRAPH:'AB4F:43855:保
CJK UNIFIED IDEOGRAPH:'AB50:43856:促
CJK UNIFIED IDEOGRAPH:'AB51:43857:侶
CJK UNIFIED IDEOGRAPH:'AB52:43858:俘
CJK UNIFIED IDEOGRAPH:'AB53:43859:俟
CJK UNIFIED IDEOGRAPH:'AB54:43860:俊
CJK UNIFIED IDEOGRAPH:'AB55:43861:俗
CJK UNIFIED IDEOGRAPH:'AB56:43862:侮
CJK UNIFIED IDEOGRAPH:'AB57:43863:俐
CJK UNIFIED IDEOGRAPH:'AB58:43864:俄
CJK UNIFIED IDEOGRAPH:'AB59:43865:係
CJK UNIFIED IDEOGRAPH:'AB5A:43866:俚
CJK UNIFIED IDEOGRAPH:'AB5B:43867:俎
CJK UNIFIED IDEOGRAPH:'AB5C:43868:俞
CJK UNIFIED IDEOGRAPH:'AB5D:43869:侷
CJK UNIFIED IDEOGRAPH:'AB5E:43870:兗
CJK UNIFIED IDEOGRAPH:'AB5F:43871:冒
CJK UNIFIED IDEOGRAPH:'AB60:43872:冑
CJK UNIFIED IDEOGRAPH:'AB61:43873:冠
CJK UNIFIED IDEOGRAPH:'AB62:43874:剎
CJK UNIFIED IDEOGRAPH:'AB63:43875:剃
CJK UNIFIED IDEOGRAPH:'AB64:43876:削
CJK UNIFIED IDEOGRAPH:'AB65:43877:前
CJK UNIFIED IDEOGRAPH:'AB66:43878:剌
CJK UNIFIED IDEOGRAPH:'AB67:43879:剋
CJK UNIFIED IDEOGRAPH:'AB68:43880:則
CJK UNIFIED IDEOGRAPH:'AB69:43881:勇
CJK UNIFIED IDEOGRAPH:'AB6A:43882:勉
CJK UNIFIED IDEOGRAPH:'AB6B:43883:勃
CJK UNIFIED IDEOGRAPH:'AB6C:43884:勁
CJK UNIFIED IDEOGRAPH:'AB6D:43885:匍
CJK UNIFIED IDEOGRAPH:'AB6E:43886:南
CJK UNIFIED IDEOGRAPH:'AB6F:43887:卻
CJK UNIFIED IDEOGRAPH:'AB70:43888:厚
CJK UNIFIED IDEOGRAPH:'AB71:43889:叛
CJK UNIFIED IDEOGRAPH:'AB72:43890:咬
CJK UNIFIED IDEOGRAPH:'AB73:43891:哀
CJK UNIFIED IDEOGRAPH:'AB74:43892:咨
CJK UNIFIED IDEOGRAPH:'AB75:43893:哎
CJK UNIFIED IDEOGRAPH:'AB76:43894:哉
CJK UNIFIED IDEOGRAPH:'AB77:43895:咸
CJK UNIFIED IDEOGRAPH:'AB78:43896:咦
CJK UNIFIED IDEOGRAPH:'AB79:43897:咳
CJK UNIFIED IDEOGRAPH:'AB7A:43898:哇
CJK UNIFIED IDEOGRAPH:'AB7B:43899:哂
CJK UNIFIED IDEOGRAPH:'AB7C:43900:咽
CJK UNIFIED IDEOGRAPH:'AB7D:43901:咪
CJK UNIFIED IDEOGRAPH:'AB7E:43902:品
CJK UNIFIED IDEOGRAPH:'ABA1:43937:哄
CJK UNIFIED IDEOGRAPH:'ABA2:43938:哈
CJK UNIFIED IDEOGRAPH:'ABA3:43939:咯
CJK UNIFIED IDEOGRAPH:'ABA4:43940:咫
CJK UNIFIED IDEOGRAPH:'ABA5:43941:咱
CJK UNIFIED IDEOGRAPH:'ABA6:43942:咻
CJK UNIFIED IDEOGRAPH:'ABA7:43943:咩
CJK UNIFIED IDEOGRAPH:'ABA8:43944:咧
CJK UNIFIED IDEOGRAPH:'ABA9:43945:咿
CJK UNIFIED IDEOGRAPH:'ABAA:43946:囿
CJK UNIFIED IDEOGRAPH:'ABAB:43947:垂
CJK UNIFIED IDEOGRAPH:'ABAC:43948:型
CJK UNIFIED IDEOGRAPH:'ABAD:43949:垠
CJK UNIFIED IDEOGRAPH:'ABAE:43950:垣
CJK UNIFIED IDEOGRAPH:'ABAF:43951:垢
CJK UNIFIED IDEOGRAPH:'ABB0:43952:城
CJK UNIFIED IDEOGRAPH:'ABB1:43953:垮
CJK UNIFIED IDEOGRAPH:'ABB2:43954:垓
CJK UNIFIED IDEOGRAPH:'ABB3:43955:奕
CJK UNIFIED IDEOGRAPH:'ABB4:43956:契
CJK UNIFIED IDEOGRAPH:'ABB5:43957:奏
CJK UNIFIED IDEOGRAPH:'ABB6:43958:奎
CJK UNIFIED IDEOGRAPH:'ABB7:43959:奐
CJK UNIFIED IDEOGRAPH:'ABB8:43960:姜
CJK UNIFIED IDEOGRAPH:'ABB9:43961:姘
CJK UNIFIED IDEOGRAPH:'ABBA:43962:姿
CJK UNIFIED IDEOGRAPH:'ABBB:43963:姣
CJK UNIFIED IDEOGRAPH:'ABBC:43964:姨
CJK UNIFIED IDEOGRAPH:'ABBD:43965:娃
CJK UNIFIED IDEOGRAPH:'ABBE:43966:姥
CJK UNIFIED IDEOGRAPH:'ABBF:43967:姪
CJK UNIFIED IDEOGRAPH:'ABC0:43968:姚
CJK UNIFIED IDEOGRAPH:'ABC1:43969:姦
CJK UNIFIED IDEOGRAPH:'ABC2:43970:威
CJK UNIFIED IDEOGRAPH:'ABC3:43971:姻
CJK UNIFIED IDEOGRAPH:'ABC4:43972:孩
CJK UNIFIED IDEOGRAPH:'ABC5:43973:宣
CJK UNIFIED IDEOGRAPH:'ABC6:43974:宦
CJK UNIFIED IDEOGRAPH:'ABC7:43975:室
CJK UNIFIED IDEOGRAPH:'ABC8:43976:客
CJK UNIFIED IDEOGRAPH:'ABC9:43977:宥
CJK UNIFIED IDEOGRAPH:'ABCA:43978:封
CJK UNIFIED IDEOGRAPH:'ABCB:43979:屎
CJK UNIFIED IDEOGRAPH:'ABCC:43980:屏
CJK UNIFIED IDEOGRAPH:'ABCD:43981:屍
CJK UNIFIED IDEOGRAPH:'ABCE:43982:屋
CJK UNIFIED IDEOGRAPH:'ABCF:43983:峙
CJK UNIFIED IDEOGRAPH:'ABD0:43984:峒
CJK UNIFIED IDEOGRAPH:'ABD1:43985:巷
CJK UNIFIED IDEOGRAPH:'ABD2:43986:帝
CJK UNIFIED IDEOGRAPH:'ABD3:43987:帥
CJK UNIFIED IDEOGRAPH:'ABD4:43988:帟
CJK UNIFIED IDEOGRAPH:'ABD5:43989:幽
CJK UNIFIED IDEOGRAPH:'ABD6:43990:庠
CJK UNIFIED IDEOGRAPH:'ABD7:43991:度
CJK UNIFIED IDEOGRAPH:'ABD8:43992:建
CJK UNIFIED IDEOGRAPH:'ABD9:43993:弈
CJK UNIFIED IDEOGRAPH:'ABDA:43994:弭
CJK UNIFIED IDEOGRAPH:'ABDB:43995:彥
CJK UNIFIED IDEOGRAPH:'ABDC:43996:很
CJK UNIFIED IDEOGRAPH:'ABDD:43997:待
CJK UNIFIED IDEOGRAPH:'ABDE:43998:徊
CJK UNIFIED IDEOGRAPH:'ABDF:43999:律
CJK UNIFIED IDEOGRAPH:'ABE0:44000:徇
CJK UNIFIED IDEOGRAPH:'ABE1:44001:後
CJK UNIFIED IDEOGRAPH:'ABE2:44002:徉
CJK UNIFIED IDEOGRAPH:'ABE3:44003:怒
CJK UNIFIED IDEOGRAPH:'ABE4:44004:思
CJK UNIFIED IDEOGRAPH:'ABE5:44005:怠
CJK UNIFIED IDEOGRAPH:'ABE6:44006:急
CJK UNIFIED IDEOGRAPH:'ABE7:44007:怎
CJK UNIFIED IDEOGRAPH:'ABE8:44008:怨
CJK UNIFIED IDEOGRAPH:'ABE9:44009:恍
CJK UNIFIED IDEOGRAPH:'ABEA:44010:恰
CJK UNIFIED IDEOGRAPH:'ABEB:44011:恨
CJK UNIFIED IDEOGRAPH:'ABEC:44012:恢
CJK UNIFIED IDEOGRAPH:'ABED:44013:恆
CJK UNIFIED IDEOGRAPH:'ABEE:44014:恃
CJK UNIFIED IDEOGRAPH:'ABEF:44015:恬
CJK UNIFIED IDEOGRAPH:'ABF0:44016:恫
CJK UNIFIED IDEOGRAPH:'ABF1:44017:恪
CJK UNIFIED IDEOGRAPH:'ABF2:44018:恤
CJK UNIFIED IDEOGRAPH:'ABF3:44019:扁
CJK UNIFIED IDEOGRAPH:'ABF4:44020:拜
CJK UNIFIED IDEOGRAPH:'ABF5:44021:挖
CJK UNIFIED IDEOGRAPH:'ABF6:44022:按
CJK UNIFIED IDEOGRAPH:'ABF7:44023:拼
CJK UNIFIED IDEOGRAPH:'ABF8:44024:拭
CJK UNIFIED IDEOGRAPH:'ABF9:44025:持
CJK UNIFIED IDEOGRAPH:'ABFA:44026:拮
CJK UNIFIED IDEOGRAPH:'ABFB:44027:拽
CJK UNIFIED IDEOGRAPH:'ABFC:44028:指
CJK UNIFIED IDEOGRAPH:'ABFD:44029:拱
CJK UNIFIED IDEOGRAPH:'ABFE:44030:拷
CJK UNIFIED IDEOGRAPH:'AC40:44096:拯
CJK UNIFIED IDEOGRAPH:'AC41:44097:括
CJK UNIFIED IDEOGRAPH:'AC42:44098:拾
CJK UNIFIED IDEOGRAPH:'AC43:44099:拴
CJK UNIFIED IDEOGRAPH:'AC44:44100:挑
CJK UNIFIED IDEOGRAPH:'AC45:44101:挂
CJK UNIFIED IDEOGRAPH:'AC46:44102:政
CJK UNIFIED IDEOGRAPH:'AC47:44103:故
CJK UNIFIED IDEOGRAPH:'AC48:44104:斫
CJK UNIFIED IDEOGRAPH:'AC49:44105:施
CJK UNIFIED IDEOGRAPH:'AC4A:44106:既
CJK UNIFIED IDEOGRAPH:'AC4B:44107:春
CJK UNIFIED IDEOGRAPH:'AC4C:44108:昭
CJK UNIFIED IDEOGRAPH:'AC4D:44109:映
CJK UNIFIED IDEOGRAPH:'AC4E:44110:昧
CJK UNIFIED IDEOGRAPH:'AC4F:44111:是
CJK UNIFIED IDEOGRAPH:'AC50:44112:星
CJK UNIFIED IDEOGRAPH:'AC51:44113:昨
CJK UNIFIED IDEOGRAPH:'AC52:44114:昱
CJK UNIFIED IDEOGRAPH:'AC53:44115:昤
CJK UNIFIED IDEOGRAPH:'AC54:44116:曷
CJK UNIFIED IDEOGRAPH:'AC55:44117:柿
CJK UNIFIED IDEOGRAPH:'AC56:44118:染
CJK UNIFIED IDEOGRAPH:'AC57:44119:柱
CJK UNIFIED IDEOGRAPH:'AC58:44120:柔
CJK UNIFIED IDEOGRAPH:'AC59:44121:某
CJK UNIFIED IDEOGRAPH:'AC5A:44122:柬
CJK UNIFIED IDEOGRAPH:'AC5B:44123:架
CJK UNIFIED IDEOGRAPH:'AC5C:44124:枯
CJK UNIFIED IDEOGRAPH:'AC5D:44125:柵
CJK UNIFIED IDEOGRAPH:'AC5E:44126:柩
CJK UNIFIED IDEOGRAPH:'AC5F:44127:柯
CJK UNIFIED IDEOGRAPH:'AC60:44128:柄
CJK UNIFIED IDEOGRAPH:'AC61:44129:柑
CJK UNIFIED IDEOGRAPH:'AC62:44130:枴
CJK UNIFIED IDEOGRAPH:'AC63:44131:柚
CJK UNIFIED IDEOGRAPH:'AC64:44132:查
CJK UNIFIED IDEOGRAPH:'AC65:44133:枸
CJK UNIFIED IDEOGRAPH:'AC66:44134:柏
CJK UNIFIED IDEOGRAPH:'AC67:44135:柞
CJK UNIFIED IDEOGRAPH:'AC68:44136:柳
CJK UNIFIED IDEOGRAPH:'AC69:44137:枰
CJK UNIFIED IDEOGRAPH:'AC6A:44138:柙
CJK UNIFIED IDEOGRAPH:'AC6B:44139:柢
CJK UNIFIED IDEOGRAPH:'AC6C:44140:柝
CJK UNIFIED IDEOGRAPH:'AC6D:44141:柒
CJK UNIFIED IDEOGRAPH:'AC6E:44142:歪
CJK UNIFIED IDEOGRAPH:'AC6F:44143:殃
CJK UNIFIED IDEOGRAPH:'AC70:44144:殆
CJK UNIFIED IDEOGRAPH:'AC71:44145:段
CJK UNIFIED IDEOGRAPH:'AC72:44146:毒
CJK UNIFIED IDEOGRAPH:'AC73:44147:毗
CJK UNIFIED IDEOGRAPH:'AC74:44148:氟
CJK UNIFIED IDEOGRAPH:'AC75:44149:泉
CJK UNIFIED IDEOGRAPH:'AC76:44150:洋
CJK UNIFIED IDEOGRAPH:'AC77:44151:洲
CJK UNIFIED IDEOGRAPH:'AC78:44152:洪
CJK UNIFIED IDEOGRAPH:'AC79:44153:流
CJK UNIFIED IDEOGRAPH:'AC7A:44154:津
CJK UNIFIED IDEOGRAPH:'AC7B:44155:洌
CJK UNIFIED IDEOGRAPH:'AC7C:44156:洱
CJK UNIFIED IDEOGRAPH:'AC7D:44157:洞
CJK UNIFIED IDEOGRAPH:'AC7E:44158:洗
CJK UNIFIED IDEOGRAPH:'ACA1:44193:活
CJK UNIFIED IDEOGRAPH:'ACA2:44194:洽
CJK UNIFIED IDEOGRAPH:'ACA3:44195:派
CJK UNIFIED IDEOGRAPH:'ACA4:44196:洶
CJK UNIFIED IDEOGRAPH:'ACA5:44197:洛
CJK UNIFIED IDEOGRAPH:'ACA6:44198:泵
CJK UNIFIED IDEOGRAPH:'ACA7:44199:洹
CJK UNIFIED IDEOGRAPH:'ACA8:44200:洧
CJK UNIFIED IDEOGRAPH:'ACA9:44201:洸
CJK UNIFIED IDEOGRAPH:'ACAA:44202:洩
CJK UNIFIED IDEOGRAPH:'ACAB:44203:洮
CJK UNIFIED IDEOGRAPH:'ACAC:44204:洵
CJK UNIFIED IDEOGRAPH:'ACAD:44205:洎
CJK UNIFIED IDEOGRAPH:'ACAE:44206:洫
CJK UNIFIED IDEOGRAPH:'ACAF:44207:炫
CJK UNIFIED IDEOGRAPH:'ACB0:44208:為
CJK UNIFIED IDEOGRAPH:'ACB1:44209:炳
CJK UNIFIED IDEOGRAPH:'ACB2:44210:炬
CJK UNIFIED IDEOGRAPH:'ACB3:44211:炯
CJK UNIFIED IDEOGRAPH:'ACB4:44212:炭
CJK UNIFIED IDEOGRAPH:'ACB5:44213:炸
CJK UNIFIED IDEOGRAPH:'ACB6:44214:炮
CJK UNIFIED IDEOGRAPH:'ACB7:44215:炤
CJK UNIFIED IDEOGRAPH:'ACB8:44216:爰
CJK UNIFIED IDEOGRAPH:'ACB9:44217:牲
CJK UNIFIED IDEOGRAPH:'ACBA:44218:牯
CJK UNIFIED IDEOGRAPH:'ACBB:44219:牴
CJK UNIFIED IDEOGRAPH:'ACBC:44220:狩
CJK UNIFIED IDEOGRAPH:'ACBD:44221:狠
CJK UNIFIED IDEOGRAPH:'ACBE:44222:狡
CJK UNIFIED IDEOGRAPH:'ACBF:44223:玷
CJK UNIFIED IDEOGRAPH:'ACC0:44224:珊
CJK UNIFIED IDEOGRAPH:'ACC1:44225:玻
CJK UNIFIED IDEOGRAPH:'ACC2:44226:玲
CJK UNIFIED IDEOGRAPH:'ACC3:44227:珍
CJK UNIFIED IDEOGRAPH:'ACC4:44228:珀
CJK UNIFIED IDEOGRAPH:'ACC5:44229:玳
CJK UNIFIED IDEOGRAPH:'ACC6:44230:甚
CJK UNIFIED IDEOGRAPH:'ACC7:44231:甭
CJK UNIFIED IDEOGRAPH:'ACC8:44232:畏
CJK UNIFIED IDEOGRAPH:'ACC9:44233:界
CJK UNIFIED IDEOGRAPH:'ACCA:44234:畎
CJK UNIFIED IDEOGRAPH:'ACCB:44235:畋
CJK UNIFIED IDEOGRAPH:'ACCC:44236:疫
CJK UNIFIED IDEOGRAPH:'ACCD:44237:疤
CJK UNIFIED IDEOGRAPH:'ACCE:44238:疥
CJK UNIFIED IDEOGRAPH:'ACCF:44239:疢
CJK UNIFIED IDEOGRAPH:'ACD0:44240:疣
CJK UNIFIED IDEOGRAPH:'ACD1:44241:癸
CJK UNIFIED IDEOGRAPH:'ACD2:44242:皆
CJK UNIFIED IDEOGRAPH:'ACD3:44243:皇
CJK UNIFIED IDEOGRAPH:'ACD4:44244:皈
CJK UNIFIED IDEOGRAPH:'ACD5:44245:盈
CJK UNIFIED IDEOGRAPH:'ACD6:44246:盆
CJK UNIFIED IDEOGRAPH:'ACD7:44247:盃
CJK UNIFIED IDEOGRAPH:'ACD8:44248:盅
CJK UNIFIED IDEOGRAPH:'ACD9:44249:省
CJK UNIFIED IDEOGRAPH:'ACDA:44250:盹
CJK UNIFIED IDEOGRAPH:'ACDB:44251:相
CJK UNIFIED IDEOGRAPH:'ACDC:44252:眉
CJK UNIFIED IDEOGRAPH:'ACDD:44253:看
CJK UNIFIED IDEOGRAPH:'ACDE:44254:盾
CJK UNIFIED IDEOGRAPH:'ACDF:44255:盼
CJK UNIFIED IDEOGRAPH:'ACE0:44256:眇
CJK UNIFIED IDEOGRAPH:'ACE1:44257:矜
CJK UNIFIED IDEOGRAPH:'ACE2:44258:砂
CJK UNIFIED IDEOGRAPH:'ACE3:44259:研
CJK UNIFIED IDEOGRAPH:'ACE4:44260:砌
CJK UNIFIED IDEOGRAPH:'ACE5:44261:砍
CJK UNIFIED IDEOGRAPH:'ACE6:44262:祆
CJK UNIFIED IDEOGRAPH:'ACE7:44263:祉
CJK UNIFIED IDEOGRAPH:'ACE8:44264:祈
CJK UNIFIED IDEOGRAPH:'ACE9:44265:祇
CJK UNIFIED IDEOGRAPH:'ACEA:44266:禹
CJK UNIFIED IDEOGRAPH:'ACEB:44267:禺
CJK UNIFIED IDEOGRAPH:'ACEC:44268:科
CJK UNIFIED IDEOGRAPH:'ACED:44269:秒
CJK UNIFIED IDEOGRAPH:'ACEE:44270:秋
CJK UNIFIED IDEOGRAPH:'ACEF:44271:穿
CJK UNIFIED IDEOGRAPH:'ACF0:44272:突
CJK UNIFIED IDEOGRAPH:'ACF1:44273:竿
CJK UNIFIED IDEOGRAPH:'ACF2:44274:竽
CJK UNIFIED IDEOGRAPH:'ACF3:44275:籽
CJK UNIFIED IDEOGRAPH:'ACF4:44276:紂
CJK UNIFIED IDEOGRAPH:'ACF5:44277:紅
CJK UNIFIED IDEOGRAPH:'ACF6:44278:紀
CJK UNIFIED IDEOGRAPH:'ACF7:44279:紉
CJK UNIFIED IDEOGRAPH:'ACF8:44280:紇
CJK UNIFIED IDEOGRAPH:'ACF9:44281:約
CJK UNIFIED IDEOGRAPH:'ACFA:44282:紆
CJK UNIFIED IDEOGRAPH:'ACFB:44283:缸
CJK UNIFIED IDEOGRAPH:'ACFC:44284:美
CJK UNIFIED IDEOGRAPH:'ACFD:44285:羿
CJK UNIFIED IDEOGRAPH:'ACFE:44286:耄
CJK UNIFIED IDEOGRAPH:'AD40:44352:耐
CJK UNIFIED IDEOGRAPH:'AD41:44353:耍
CJK UNIFIED IDEOGRAPH:'AD42:44354:耑
CJK UNIFIED IDEOGRAPH:'AD43:44355:耶
CJK UNIFIED IDEOGRAPH:'AD44:44356:胖
CJK UNIFIED IDEOGRAPH:'AD45:44357:胥
CJK UNIFIED IDEOGRAPH:'AD46:44358:胚
CJK UNIFIED IDEOGRAPH:'AD47:44359:胃
CJK UNIFIED IDEOGRAPH:'AD48:44360:胄
CJK UNIFIED IDEOGRAPH:'AD49:44361:背
CJK UNIFIED IDEOGRAPH:'AD4A:44362:胡
CJK UNIFIED IDEOGRAPH:'AD4B:44363:胛
CJK UNIFIED IDEOGRAPH:'AD4C:44364:胎
CJK UNIFIED IDEOGRAPH:'AD4D:44365:胞
CJK UNIFIED IDEOGRAPH:'AD4E:44366:胤
CJK UNIFIED IDEOGRAPH:'AD4F:44367:胝
CJK UNIFIED IDEOGRAPH:'AD50:44368:致
CJK UNIFIED IDEOGRAPH:'AD51:44369:舢
CJK UNIFIED IDEOGRAPH:'AD52:44370:苧
CJK UNIFIED IDEOGRAPH:'AD53:44371:范
CJK UNIFIED IDEOGRAPH:'AD54:44372:茅
CJK UNIFIED IDEOGRAPH:'AD55:44373:苣
CJK UNIFIED IDEOGRAPH:'AD56:44374:苛
CJK UNIFIED IDEOGRAPH:'AD57:44375:苦
CJK UNIFIED IDEOGRAPH:'AD58:44376:茄
CJK UNIFIED IDEOGRAPH:'AD59:44377:若
CJK UNIFIED IDEOGRAPH:'AD5A:44378:茂
CJK UNIFIED IDEOGRAPH:'AD5B:44379:茉
CJK UNIFIED IDEOGRAPH:'AD5C:44380:苒
CJK UNIFIED IDEOGRAPH:'AD5D:44381:苗
CJK UNIFIED IDEOGRAPH:'AD5E:44382:英
CJK UNIFIED IDEOGRAPH:'AD5F:44383:茁
CJK UNIFIED IDEOGRAPH:'AD60:44384:苜
CJK UNIFIED IDEOGRAPH:'AD61:44385:苔
CJK UNIFIED IDEOGRAPH:'AD62:44386:苑
CJK UNIFIED IDEOGRAPH:'AD63:44387:苞
CJK UNIFIED IDEOGRAPH:'AD64:44388:苓
CJK UNIFIED IDEOGRAPH:'AD65:44389:苟
CJK UNIFIED IDEOGRAPH:'AD66:44390:苯
CJK UNIFIED IDEOGRAPH:'AD67:44391:茆
CJK UNIFIED IDEOGRAPH:'AD68:44392:虐
CJK UNIFIED IDEOGRAPH:'AD69:44393:虹
CJK UNIFIED IDEOGRAPH:'AD6A:44394:虻
CJK UNIFIED IDEOGRAPH:'AD6B:44395:虺
CJK UNIFIED IDEOGRAPH:'AD6C:44396:衍
CJK UNIFIED IDEOGRAPH:'AD6D:44397:衫
CJK UNIFIED IDEOGRAPH:'AD6E:44398:要
CJK UNIFIED IDEOGRAPH:'AD6F:44399:觔
CJK UNIFIED IDEOGRAPH:'AD70:44400:計
CJK UNIFIED IDEOGRAPH:'AD71:44401:訂
CJK UNIFIED IDEOGRAPH:'AD72:44402:訃
CJK UNIFIED IDEOGRAPH:'AD73:44403:貞
CJK UNIFIED IDEOGRAPH:'AD74:44404:負
CJK UNIFIED IDEOGRAPH:'AD75:44405:赴
CJK UNIFIED IDEOGRAPH:'AD76:44406:赳
CJK UNIFIED IDEOGRAPH:'AD77:44407:趴
CJK UNIFIED IDEOGRAPH:'AD78:44408:軍
CJK UNIFIED IDEOGRAPH:'AD79:44409:軌
CJK UNIFIED IDEOGRAPH:'AD7A:44410:述
CJK UNIFIED IDEOGRAPH:'AD7B:44411:迦
CJK UNIFIED IDEOGRAPH:'AD7C:44412:迢
CJK UNIFIED IDEOGRAPH:'AD7D:44413:迪
CJK UNIFIED IDEOGRAPH:'AD7E:44414:迥
CJK UNIFIED IDEOGRAPH:'ADA1:44449:迭
CJK UNIFIED IDEOGRAPH:'ADA2:44450:迫
CJK UNIFIED IDEOGRAPH:'ADA3:44451:迤
CJK UNIFIED IDEOGRAPH:'ADA4:44452:迨
CJK UNIFIED IDEOGRAPH:'ADA5:44453:郊
CJK UNIFIED IDEOGRAPH:'ADA6:44454:郎
CJK UNIFIED IDEOGRAPH:'ADA7:44455:郁
CJK UNIFIED IDEOGRAPH:'ADA8:44456:郃
CJK UNIFIED IDEOGRAPH:'ADA9:44457:酋
CJK UNIFIED IDEOGRAPH:'ADAA:44458:酊
CJK UNIFIED IDEOGRAPH:'ADAB:44459:重
CJK UNIFIED IDEOGRAPH:'ADAC:44460:閂
CJK UNIFIED IDEOGRAPH:'ADAD:44461:限
CJK UNIFIED IDEOGRAPH:'ADAE:44462:陋
CJK UNIFIED IDEOGRAPH:'ADAF:44463:陌
CJK UNIFIED IDEOGRAPH:'ADB0:44464:降
CJK UNIFIED IDEOGRAPH:'ADB1:44465:面
CJK UNIFIED IDEOGRAPH:'ADB2:44466:革
CJK UNIFIED IDEOGRAPH:'ADB3:44467:韋
CJK UNIFIED IDEOGRAPH:'ADB4:44468:韭
CJK UNIFIED IDEOGRAPH:'ADB5:44469:音
CJK UNIFIED IDEOGRAPH:'ADB6:44470:頁
CJK UNIFIED IDEOGRAPH:'ADB7:44471:風
CJK UNIFIED IDEOGRAPH:'ADB8:44472:飛
CJK UNIFIED IDEOGRAPH:'ADB9:44473:食
CJK UNIFIED IDEOGRAPH:'ADBA:44474:首
CJK UNIFIED IDEOGRAPH:'ADBB:44475:香
CJK UNIFIED IDEOGRAPH:'ADBC:44476:乘
CJK UNIFIED IDEOGRAPH:'ADBD:44477:亳
CJK UNIFIED IDEOGRAPH:'ADBE:44478:倌
CJK UNIFIED IDEOGRAPH:'ADBF:44479:倍
CJK UNIFIED IDEOGRAPH:'ADC0:44480:倣
CJK UNIFIED IDEOGRAPH:'ADC1:44481:俯
CJK UNIFIED IDEOGRAPH:'ADC2:44482:倦
CJK UNIFIED IDEOGRAPH:'ADC3:44483:倥
CJK UNIFIED IDEOGRAPH:'ADC4:44484:俸
CJK UNIFIED IDEOGRAPH:'ADC5:44485:倩
CJK UNIFIED IDEOGRAPH:'ADC6:44486:倖
CJK UNIFIED IDEOGRAPH:'ADC7:44487:倆
CJK UNIFIED IDEOGRAPH:'ADC8:44488:值
CJK UNIFIED IDEOGRAPH:'ADC9:44489:借
CJK UNIFIED IDEOGRAPH:'ADCA:44490:倚
CJK UNIFIED IDEOGRAPH:'ADCB:44491:倒
CJK UNIFIED IDEOGRAPH:'ADCC:44492:們
CJK UNIFIED IDEOGRAPH:'ADCD:44493:俺
CJK UNIFIED IDEOGRAPH:'ADCE:44494:倀
CJK UNIFIED IDEOGRAPH:'ADCF:44495:倔
CJK UNIFIED IDEOGRAPH:'ADD0:44496:倨
CJK UNIFIED IDEOGRAPH:'ADD1:44497:俱
CJK UNIFIED IDEOGRAPH:'ADD2:44498:倡
CJK UNIFIED IDEOGRAPH:'ADD3:44499:個
CJK UNIFIED IDEOGRAPH:'ADD4:44500:候
CJK UNIFIED IDEOGRAPH:'ADD5:44501:倘
CJK UNIFIED IDEOGRAPH:'ADD6:44502:俳
CJK UNIFIED IDEOGRAPH:'ADD7:44503:修
CJK UNIFIED IDEOGRAPH:'ADD8:44504:倭
CJK UNIFIED IDEOGRAPH:'ADD9:44505:倪
CJK UNIFIED IDEOGRAPH:'ADDA:44506:俾
CJK UNIFIED IDEOGRAPH:'ADDB:44507:倫
CJK UNIFIED IDEOGRAPH:'ADDC:44508:倉
CJK UNIFIED IDEOGRAPH:'ADDD:44509:兼
CJK UNIFIED IDEOGRAPH:'ADDE:44510:冤
CJK UNIFIED IDEOGRAPH:'ADDF:44511:冥
CJK UNIFIED IDEOGRAPH:'ADE0:44512:冢
CJK UNIFIED IDEOGRAPH:'ADE1:44513:凍
CJK UNIFIED IDEOGRAPH:'ADE2:44514:凌
CJK UNIFIED IDEOGRAPH:'ADE3:44515:准
CJK UNIFIED IDEOGRAPH:'ADE4:44516:凋
CJK UNIFIED IDEOGRAPH:'ADE5:44517:剖
CJK UNIFIED IDEOGRAPH:'ADE6:44518:剜
CJK UNIFIED IDEOGRAPH:'ADE7:44519:剔
CJK UNIFIED IDEOGRAPH:'ADE8:44520:剛
CJK UNIFIED IDEOGRAPH:'ADE9:44521:剝
CJK UNIFIED IDEOGRAPH:'ADEA:44522:匪
CJK UNIFIED IDEOGRAPH:'ADEB:44523:卿
CJK UNIFIED IDEOGRAPH:'ADEC:44524:原
CJK UNIFIED IDEOGRAPH:'ADED:44525:厝
CJK UNIFIED IDEOGRAPH:'ADEE:44526:叟
CJK UNIFIED IDEOGRAPH:'ADEF:44527:哨
CJK UNIFIED IDEOGRAPH:'ADF0:44528:唐
CJK UNIFIED IDEOGRAPH:'ADF1:44529:唁
CJK UNIFIED IDEOGRAPH:'ADF2:44530:唷
CJK UNIFIED IDEOGRAPH:'ADF3:44531:哼
CJK UNIFIED IDEOGRAPH:'ADF4:44532:哥
CJK UNIFIED IDEOGRAPH:'ADF5:44533:哲
CJK UNIFIED IDEOGRAPH:'ADF6:44534:唆
CJK UNIFIED IDEOGRAPH:'ADF7:44535:哺
CJK UNIFIED IDEOGRAPH:'ADF8:44536:唔
CJK UNIFIED IDEOGRAPH:'ADF9:44537:哩
CJK UNIFIED IDEOGRAPH:'ADFA:44538:哭
CJK UNIFIED IDEOGRAPH:'ADFB:44539:員
CJK UNIFIED IDEOGRAPH:'ADFC:44540:唉
CJK UNIFIED IDEOGRAPH:'ADFD:44541:哮
CJK UNIFIED IDEOGRAPH:'ADFE:44542:哪
CJK UNIFIED IDEOGRAPH:'AE40:44608:哦
CJK UNIFIED IDEOGRAPH:'AE41:44609:唧
CJK UNIFIED IDEOGRAPH:'AE42:44610:唇
CJK UNIFIED IDEOGRAPH:'AE43:44611:哽
CJK UNIFIED IDEOGRAPH:'AE44:44612:唏
CJK UNIFIED IDEOGRAPH:'AE45:44613:圃
CJK UNIFIED IDEOGRAPH:'AE46:44614:圄
CJK UNIFIED IDEOGRAPH:'AE47:44615:埂
CJK UNIFIED IDEOGRAPH:'AE48:44616:埔
CJK UNIFIED IDEOGRAPH:'AE49:44617:埋
CJK UNIFIED IDEOGRAPH:'AE4A:44618:埃
CJK UNIFIED IDEOGRAPH:'AE4B:44619:堉
CJK UNIFIED IDEOGRAPH:'AE4C:44620:夏
CJK UNIFIED IDEOGRAPH:'AE4D:44621:套
CJK UNIFIED IDEOGRAPH:'AE4E:44622:奘
CJK UNIFIED IDEOGRAPH:'AE4F:44623:奚
CJK UNIFIED IDEOGRAPH:'AE50:44624:娑
CJK UNIFIED IDEOGRAPH:'AE51:44625:娘
CJK UNIFIED IDEOGRAPH:'AE52:44626:娜
CJK UNIFIED IDEOGRAPH:'AE53:44627:娟
CJK UNIFIED IDEOGRAPH:'AE54:44628:娛
CJK UNIFIED IDEOGRAPH:'AE55:44629:娓
CJK UNIFIED IDEOGRAPH:'AE56:44630:姬
CJK UNIFIED IDEOGRAPH:'AE57:44631:娠
CJK UNIFIED IDEOGRAPH:'AE58:44632:娣
CJK UNIFIED IDEOGRAPH:'AE59:44633:娩
CJK UNIFIED IDEOGRAPH:'AE5A:44634:娥
CJK UNIFIED IDEOGRAPH:'AE5B:44635:娌
CJK UNIFIED IDEOGRAPH:'AE5C:44636:娉
CJK UNIFIED IDEOGRAPH:'AE5D:44637:孫
CJK UNIFIED IDEOGRAPH:'AE5E:44638:屘
CJK UNIFIED IDEOGRAPH:'AE5F:44639:宰
CJK UNIFIED IDEOGRAPH:'AE60:44640:害
CJK UNIFIED IDEOGRAPH:'AE61:44641:家
CJK UNIFIED IDEOGRAPH:'AE62:44642:宴
CJK UNIFIED IDEOGRAPH:'AE63:44643:宮
CJK UNIFIED IDEOGRAPH:'AE64:44644:宵
CJK UNIFIED IDEOGRAPH:'AE65:44645:容
CJK UNIFIED IDEOGRAPH:'AE66:44646:宸
CJK UNIFIED IDEOGRAPH:'AE67:44647:射
CJK UNIFIED IDEOGRAPH:'AE68:44648:屑
CJK UNIFIED IDEOGRAPH:'AE69:44649:展
CJK UNIFIED IDEOGRAPH:'AE6A:44650:屐
CJK UNIFIED IDEOGRAPH:'AE6B:44651:峭
CJK UNIFIED IDEOGRAPH:'AE6C:44652:峽
CJK UNIFIED IDEOGRAPH:'AE6D:44653:峻
CJK UNIFIED IDEOGRAPH:'AE6E:44654:峪
CJK UNIFIED IDEOGRAPH:'AE6F:44655:峨
CJK UNIFIED IDEOGRAPH:'AE70:44656:峰
CJK UNIFIED IDEOGRAPH:'AE71:44657:島
CJK UNIFIED IDEOGRAPH:'AE72:44658:崁
CJK UNIFIED IDEOGRAPH:'AE73:44659:峴
CJK UNIFIED IDEOGRAPH:'AE74:44660:差
CJK UNIFIED IDEOGRAPH:'AE75:44661:席
CJK UNIFIED IDEOGRAPH:'AE76:44662:師
CJK UNIFIED IDEOGRAPH:'AE77:44663:庫
CJK UNIFIED IDEOGRAPH:'AE78:44664:庭
CJK UNIFIED IDEOGRAPH:'AE79:44665:座
CJK UNIFIED IDEOGRAPH:'AE7A:44666:弱
CJK UNIFIED IDEOGRAPH:'AE7B:44667:徒
CJK UNIFIED IDEOGRAPH:'AE7C:44668:徑
CJK UNIFIED IDEOGRAPH:'AE7D:44669:徐
CJK UNIFIED IDEOGRAPH:'AE7E:44670:恙
CJK UNIFIED IDEOGRAPH:'AEA1:44705:恣
CJK UNIFIED IDEOGRAPH:'AEA2:44706:恥
CJK UNIFIED IDEOGRAPH:'AEA3:44707:恐
CJK UNIFIED IDEOGRAPH:'AEA4:44708:恕
CJK UNIFIED IDEOGRAPH:'AEA5:44709:恭
CJK UNIFIED IDEOGRAPH:'AEA6:44710:恩
CJK UNIFIED IDEOGRAPH:'AEA7:44711:息
CJK UNIFIED IDEOGRAPH:'AEA8:44712:悄
CJK UNIFIED IDEOGRAPH:'AEA9:44713:悟
CJK UNIFIED IDEOGRAPH:'AEAA:44714:悚
CJK UNIFIED IDEOGRAPH:'AEAB:44715:悍
CJK UNIFIED IDEOGRAPH:'AEAC:44716:悔
CJK UNIFIED IDEOGRAPH:'AEAD:44717:悌
CJK UNIFIED IDEOGRAPH:'AEAE:44718:悅
CJK UNIFIED IDEOGRAPH:'AEAF:44719:悖
CJK UNIFIED IDEOGRAPH:'AEB0:44720:扇
CJK UNIFIED IDEOGRAPH:'AEB1:44721:拳
CJK UNIFIED IDEOGRAPH:'AEB2:44722:挈
CJK UNIFIED IDEOGRAPH:'AEB3:44723:拿
CJK UNIFIED IDEOGRAPH:'AEB4:44724:捎
CJK UNIFIED IDEOGRAPH:'AEB5:44725:挾
CJK UNIFIED IDEOGRAPH:'AEB6:44726:振
CJK UNIFIED IDEOGRAPH:'AEB7:44727:捕
CJK UNIFIED IDEOGRAPH:'AEB8:44728:捂
CJK UNIFIED IDEOGRAPH:'AEB9:44729:捆
CJK UNIFIED IDEOGRAPH:'AEBA:44730:捏
CJK UNIFIED IDEOGRAPH:'AEBB:44731:捉
CJK UNIFIED IDEOGRAPH:'AEBC:44732:挺
CJK UNIFIED IDEOGRAPH:'AEBD:44733:捐
CJK UNIFIED IDEOGRAPH:'AEBE:44734:挽
CJK UNIFIED IDEOGRAPH:'AEBF:44735:挪
CJK UNIFIED IDEOGRAPH:'AEC0:44736:挫
CJK UNIFIED IDEOGRAPH:'AEC1:44737:挨
CJK UNIFIED IDEOGRAPH:'AEC2:44738:捍
CJK UNIFIED IDEOGRAPH:'AEC3:44739:捌
CJK UNIFIED IDEOGRAPH:'AEC4:44740:效
CJK UNIFIED IDEOGRAPH:'AEC5:44741:敉
CJK UNIFIED IDEOGRAPH:'AEC6:44742:料
CJK UNIFIED IDEOGRAPH:'AEC7:44743:旁
CJK UNIFIED IDEOGRAPH:'AEC8:44744:旅
CJK UNIFIED IDEOGRAPH:'AEC9:44745:時
CJK UNIFIED IDEOGRAPH:'AECA:44746:晉
CJK UNIFIED IDEOGRAPH:'AECB:44747:晏
CJK UNIFIED IDEOGRAPH:'AECC:44748:晃
CJK UNIFIED IDEOGRAPH:'AECD:44749:晒
CJK UNIFIED IDEOGRAPH:'AECE:44750:晌
CJK UNIFIED IDEOGRAPH:'AECF:44751:晅
CJK UNIFIED IDEOGRAPH:'AED0:44752:晁
CJK UNIFIED IDEOGRAPH:'AED1:44753:書
CJK UNIFIED IDEOGRAPH:'AED2:44754:朔
CJK UNIFIED IDEOGRAPH:'AED3:44755:朕
CJK UNIFIED IDEOGRAPH:'AED4:44756:朗
CJK UNIFIED IDEOGRAPH:'AED5:44757:校
CJK UNIFIED IDEOGRAPH:'AED6:44758:核
CJK UNIFIED IDEOGRAPH:'AED7:44759:案
CJK UNIFIED IDEOGRAPH:'AED8:44760:框
CJK UNIFIED IDEOGRAPH:'AED9:44761:桓
CJK UNIFIED IDEOGRAPH:'AEDA:44762:根
CJK UNIFIED IDEOGRAPH:'AEDB:44763:桂
CJK UNIFIED IDEOGRAPH:'AEDC:44764:桔
CJK UNIFIED IDEOGRAPH:'AEDD:44765:栩
CJK UNIFIED IDEOGRAPH:'AEDE:44766:梳
CJK UNIFIED IDEOGRAPH:'AEDF:44767:栗
CJK UNIFIED IDEOGRAPH:'AEE0:44768:桌
CJK UNIFIED IDEOGRAPH:'AEE1:44769:桑
CJK UNIFIED IDEOGRAPH:'AEE2:44770:栽
CJK UNIFIED IDEOGRAPH:'AEE3:44771:柴
CJK UNIFIED IDEOGRAPH:'AEE4:44772:桐
CJK UNIFIED IDEOGRAPH:'AEE5:44773:桀
CJK UNIFIED IDEOGRAPH:'AEE6:44774:格
CJK UNIFIED IDEOGRAPH:'AEE7:44775:桃
CJK UNIFIED IDEOGRAPH:'AEE8:44776:株
CJK UNIFIED IDEOGRAPH:'AEE9:44777:桅
CJK UNIFIED IDEOGRAPH:'AEEA:44778:栓
CJK UNIFIED IDEOGRAPH:'AEEB:44779:栘
CJK UNIFIED IDEOGRAPH:'AEEC:44780:桁
CJK UNIFIED IDEOGRAPH:'AEED:44781:殊
CJK UNIFIED IDEOGRAPH:'AEEE:44782:殉
CJK UNIFIED IDEOGRAPH:'AEEF:44783:殷
CJK UNIFIED IDEOGRAPH:'AEF0:44784:氣
CJK UNIFIED IDEOGRAPH:'AEF1:44785:氧
CJK UNIFIED IDEOGRAPH:'AEF2:44786:氨
CJK UNIFIED IDEOGRAPH:'AEF3:44787:氦
CJK UNIFIED IDEOGRAPH:'AEF4:44788:氤
CJK UNIFIED IDEOGRAPH:'AEF5:44789:泰
CJK UNIFIED IDEOGRAPH:'AEF6:44790:浪
CJK UNIFIED IDEOGRAPH:'AEF7:44791:涕
CJK UNIFIED IDEOGRAPH:'AEF8:44792:消
CJK UNIFIED IDEOGRAPH:'AEF9:44793:涇
CJK UNIFIED IDEOGRAPH:'AEFA:44794:浦
CJK UNIFIED IDEOGRAPH:'AEFB:44795:浸
CJK UNIFIED IDEOGRAPH:'AEFC:44796:海
CJK UNIFIED IDEOGRAPH:'AEFD:44797:浙
CJK UNIFIED IDEOGRAPH:'AEFE:44798:涓
CJK UNIFIED IDEOGRAPH:'AF40:44864:浬
CJK UNIFIED IDEOGRAPH:'AF41:44865:涉
CJK UNIFIED IDEOGRAPH:'AF42:44866:浮
CJK UNIFIED IDEOGRAPH:'AF43:44867:浚
CJK UNIFIED IDEOGRAPH:'AF44:44868:浴
CJK UNIFIED IDEOGRAPH:'AF45:44869:浩
CJK UNIFIED IDEOGRAPH:'AF46:44870:涌
CJK UNIFIED IDEOGRAPH:'AF47:44871:涊
CJK UNIFIED IDEOGRAPH:'AF48:44872:浹
CJK UNIFIED IDEOGRAPH:'AF49:44873:涅
CJK UNIFIED IDEOGRAPH:'AF4A:44874:浥
CJK UNIFIED IDEOGRAPH:'AF4B:44875:涔
CJK UNIFIED IDEOGRAPH:'AF4C:44876:烊
CJK UNIFIED IDEOGRAPH:'AF4D:44877:烘
CJK UNIFIED IDEOGRAPH:'AF4E:44878:烤
CJK UNIFIED IDEOGRAPH:'AF4F:44879:烙
CJK UNIFIED IDEOGRAPH:'AF50:44880:烈
CJK UNIFIED IDEOGRAPH:'AF51:44881:烏
CJK UNIFIED IDEOGRAPH:'AF52:44882:爹
CJK UNIFIED IDEOGRAPH:'AF53:44883:特
CJK UNIFIED IDEOGRAPH:'AF54:44884:狼
CJK UNIFIED IDEOGRAPH:'AF55:44885:狹
CJK UNIFIED IDEOGRAPH:'AF56:44886:狽
CJK UNIFIED IDEOGRAPH:'AF57:44887:狸
CJK UNIFIED IDEOGRAPH:'AF58:44888:狷
CJK UNIFIED IDEOGRAPH:'AF59:44889:玆
CJK UNIFIED IDEOGRAPH:'AF5A:44890:班
CJK UNIFIED IDEOGRAPH:'AF5B:44891:琉
CJK UNIFIED IDEOGRAPH:'AF5C:44892:珮
CJK UNIFIED IDEOGRAPH:'AF5D:44893:珠
CJK UNIFIED IDEOGRAPH:'AF5E:44894:珪
CJK UNIFIED IDEOGRAPH:'AF5F:44895:珞
CJK UNIFIED IDEOGRAPH:'AF60:44896:畔
CJK UNIFIED IDEOGRAPH:'AF61:44897:畝
CJK UNIFIED IDEOGRAPH:'AF62:44898:畜
CJK UNIFIED IDEOGRAPH:'AF63:44899:畚
CJK UNIFIED IDEOGRAPH:'AF64:44900:留
CJK UNIFIED IDEOGRAPH:'AF65:44901:疾
CJK UNIFIED IDEOGRAPH:'AF66:44902:病
CJK UNIFIED IDEOGRAPH:'AF67:44903:症
CJK UNIFIED IDEOGRAPH:'AF68:44904:疲
CJK UNIFIED IDEOGRAPH:'AF69:44905:疳
CJK UNIFIED IDEOGRAPH:'AF6A:44906:疽
CJK UNIFIED IDEOGRAPH:'AF6B:44907:疼
CJK UNIFIED IDEOGRAPH:'AF6C:44908:疹
CJK UNIFIED IDEOGRAPH:'AF6D:44909:痂
CJK UNIFIED IDEOGRAPH:'AF6E:44910:疸
CJK UNIFIED IDEOGRAPH:'AF6F:44911:皋
CJK UNIFIED IDEOGRAPH:'AF70:44912:皰
CJK UNIFIED IDEOGRAPH:'AF71:44913:益
CJK UNIFIED IDEOGRAPH:'AF72:44914:盍
CJK UNIFIED IDEOGRAPH:'AF73:44915:盎
CJK UNIFIED IDEOGRAPH:'AF74:44916:眩
CJK UNIFIED IDEOGRAPH:'AF75:44917:真
CJK UNIFIED IDEOGRAPH:'AF76:44918:眠
CJK UNIFIED IDEOGRAPH:'AF77:44919:眨
CJK UNIFIED IDEOGRAPH:'AF78:44920:矩
CJK UNIFIED IDEOGRAPH:'AF79:44921:砰
CJK UNIFIED IDEOGRAPH:'AF7A:44922:砧
CJK UNIFIED IDEOGRAPH:'AF7B:44923:砸
CJK UNIFIED IDEOGRAPH:'AF7C:44924:砝
CJK UNIFIED IDEOGRAPH:'AF7D:44925:破
CJK UNIFIED IDEOGRAPH:'AF7E:44926:砷
CJK UNIFIED IDEOGRAPH:'AFA1:44961:砥
CJK UNIFIED IDEOGRAPH:'AFA2:44962:砭
CJK UNIFIED IDEOGRAPH:'AFA3:44963:砠
CJK UNIFIED IDEOGRAPH:'AFA4:44964:砟
CJK UNIFIED IDEOGRAPH:'AFA5:44965:砲
CJK UNIFIED IDEOGRAPH:'AFA6:44966:祕
CJK UNIFIED IDEOGRAPH:'AFA7:44967:祐
CJK UNIFIED IDEOGRAPH:'AFA8:44968:祠
CJK UNIFIED IDEOGRAPH:'AFA9:44969:祟
CJK UNIFIED IDEOGRAPH:'AFAA:44970:祖
CJK UNIFIED IDEOGRAPH:'AFAB:44971:神
CJK UNIFIED IDEOGRAPH:'AFAC:44972:祝
CJK UNIFIED IDEOGRAPH:'AFAD:44973:祗
CJK UNIFIED IDEOGRAPH:'AFAE:44974:祚
CJK UNIFIED IDEOGRAPH:'AFAF:44975:秤
CJK UNIFIED IDEOGRAPH:'AFB0:44976:秣
CJK UNIFIED IDEOGRAPH:'AFB1:44977:秧
CJK UNIFIED IDEOGRAPH:'AFB2:44978:租
CJK UNIFIED IDEOGRAPH:'AFB3:44979:秦
CJK UNIFIED IDEOGRAPH:'AFB4:44980:秩
CJK UNIFIED IDEOGRAPH:'AFB5:44981:秘
CJK UNIFIED IDEOGRAPH:'AFB6:44982:窄
CJK UNIFIED IDEOGRAPH:'AFB7:44983:窈
CJK UNIFIED IDEOGRAPH:'AFB8:44984:站
CJK UNIFIED IDEOGRAPH:'AFB9:44985:笆
CJK UNIFIED IDEOGRAPH:'AFBA:44986:笑
CJK UNIFIED IDEOGRAPH:'AFBB:44987:粉
CJK UNIFIED IDEOGRAPH:'AFBC:44988:紡
CJK UNIFIED IDEOGRAPH:'AFBD:44989:紗
CJK UNIFIED IDEOGRAPH:'AFBE:44990:紋
CJK UNIFIED IDEOGRAPH:'AFBF:44991:紊
CJK UNIFIED IDEOGRAPH:'AFC0:44992:素
CJK UNIFIED IDEOGRAPH:'AFC1:44993:索
CJK UNIFIED IDEOGRAPH:'AFC2:44994:純
CJK UNIFIED IDEOGRAPH:'AFC3:44995:紐
CJK UNIFIED IDEOGRAPH:'AFC4:44996:紕
CJK UNIFIED IDEOGRAPH:'AFC5:44997:級
CJK UNIFIED IDEOGRAPH:'AFC6:44998:紜
CJK UNIFIED IDEOGRAPH:'AFC7:44999:納
CJK UNIFIED IDEOGRAPH:'AFC8:45000:紙
CJK UNIFIED IDEOGRAPH:'AFC9:45001:紛
CJK UNIFIED IDEOGRAPH:'AFCA:45002:缺
CJK UNIFIED IDEOGRAPH:'AFCB:45003:罟
CJK UNIFIED IDEOGRAPH:'AFCC:45004:羔
CJK UNIFIED IDEOGRAPH:'AFCD:45005:翅
CJK UNIFIED IDEOGRAPH:'AFCE:45006:翁
CJK UNIFIED IDEOGRAPH:'AFCF:45007:耆
CJK UNIFIED IDEOGRAPH:'AFD0:45008:耘
CJK UNIFIED IDEOGRAPH:'AFD1:45009:耕
CJK UNIFIED IDEOGRAPH:'AFD2:45010:耙
CJK UNIFIED IDEOGRAPH:'AFD3:45011:耗
CJK UNIFIED IDEOGRAPH:'AFD4:45012:耽
CJK UNIFIED IDEOGRAPH:'AFD5:45013:耿
CJK UNIFIED IDEOGRAPH:'AFD6:45014:胱
CJK UNIFIED IDEOGRAPH:'AFD7:45015:脂
CJK UNIFIED IDEOGRAPH:'AFD8:45016:胰
CJK UNIFIED IDEOGRAPH:'AFD9:45017:脅
CJK UNIFIED IDEOGRAPH:'AFDA:45018:胭
CJK UNIFIED IDEOGRAPH:'AFDB:45019:胴
CJK UNIFIED IDEOGRAPH:'AFDC:45020:脆
CJK UNIFIED IDEOGRAPH:'AFDD:45021:胸
CJK UNIFIED IDEOGRAPH:'AFDE:45022:胳
CJK UNIFIED IDEOGRAPH:'AFDF:45023:脈
CJK UNIFIED IDEOGRAPH:'AFE0:45024:能
CJK UNIFIED IDEOGRAPH:'AFE1:45025:脊
CJK UNIFIED IDEOGRAPH:'AFE2:45026:胼
CJK UNIFIED IDEOGRAPH:'AFE3:45027:胯
CJK UNIFIED IDEOGRAPH:'AFE4:45028:臭
CJK UNIFIED IDEOGRAPH:'AFE5:45029:臬
CJK UNIFIED IDEOGRAPH:'AFE6:45030:舀
CJK UNIFIED IDEOGRAPH:'AFE7:45031:舐
CJK UNIFIED IDEOGRAPH:'AFE8:45032:航
CJK UNIFIED IDEOGRAPH:'AFE9:45033:舫
CJK UNIFIED IDEOGRAPH:'AFEA:45034:舨
CJK UNIFIED IDEOGRAPH:'AFEB:45035:般
CJK UNIFIED IDEOGRAPH:'AFEC:45036:芻
CJK UNIFIED IDEOGRAPH:'AFED:45037:茫
CJK UNIFIED IDEOGRAPH:'AFEE:45038:荒
CJK UNIFIED IDEOGRAPH:'AFEF:45039:荔
CJK UNIFIED IDEOGRAPH:'AFF0:45040:荊
CJK UNIFIED IDEOGRAPH:'AFF1:45041:茸
CJK UNIFIED IDEOGRAPH:'AFF2:45042:荐
CJK UNIFIED IDEOGRAPH:'AFF3:45043:草
CJK UNIFIED IDEOGRAPH:'AFF4:45044:茵
CJK UNIFIED IDEOGRAPH:'AFF5:45045:茴
CJK UNIFIED IDEOGRAPH:'AFF6:45046:荏
CJK UNIFIED IDEOGRAPH:'AFF7:45047:茲
CJK UNIFIED IDEOGRAPH:'AFF8:45048:茹
CJK UNIFIED IDEOGRAPH:'AFF9:45049:茶
CJK UNIFIED IDEOGRAPH:'AFFA:45050:茗
CJK UNIFIED IDEOGRAPH:'AFFB:45051:荀
CJK UNIFIED IDEOGRAPH:'AFFC:45052:茱
CJK UNIFIED IDEOGRAPH:'AFFD:45053:茨
CJK UNIFIED IDEOGRAPH:'AFFE:45054:荃
CJK UNIFIED IDEOGRAPH:'B040:45120:虔
CJK UNIFIED IDEOGRAPH:'B041:45121:蚊
CJK UNIFIED IDEOGRAPH:'B042:45122:蚪
CJK UNIFIED IDEOGRAPH:'B043:45123:蚓
CJK UNIFIED IDEOGRAPH:'B044:45124:蚤
CJK UNIFIED IDEOGRAPH:'B045:45125:蚩
CJK UNIFIED IDEOGRAPH:'B046:45126:蚌
CJK UNIFIED IDEOGRAPH:'B047:45127:蚣
CJK UNIFIED IDEOGRAPH:'B048:45128:蚜
CJK UNIFIED IDEOGRAPH:'B049:45129:衰
CJK UNIFIED IDEOGRAPH:'B04A:45130:衷
CJK UNIFIED IDEOGRAPH:'B04B:45131:袁
CJK UNIFIED IDEOGRAPH:'B04C:45132:袂
CJK UNIFIED IDEOGRAPH:'B04D:45133:衽
CJK UNIFIED IDEOGRAPH:'B04E:45134:衹
CJK UNIFIED IDEOGRAPH:'B04F:45135:記
CJK UNIFIED IDEOGRAPH:'B050:45136:訐
CJK UNIFIED IDEOGRAPH:'B051:45137:討
CJK UNIFIED IDEOGRAPH:'B052:45138:訌
CJK UNIFIED IDEOGRAPH:'B053:45139:訕
CJK UNIFIED IDEOGRAPH:'B054:45140:訊
CJK UNIFIED IDEOGRAPH:'B055:45141:託
CJK UNIFIED IDEOGRAPH:'B056:45142:訓
CJK UNIFIED IDEOGRAPH:'B057:45143:訖
CJK UNIFIED IDEOGRAPH:'B058:45144:訏
CJK UNIFIED IDEOGRAPH:'B059:45145:訑
CJK UNIFIED IDEOGRAPH:'B05A:45146:豈
CJK UNIFIED IDEOGRAPH:'B05B:45147:豺
CJK UNIFIED IDEOGRAPH:'B05C:45148:豹
CJK UNIFIED IDEOGRAPH:'B05D:45149:財
CJK UNIFIED IDEOGRAPH:'B05E:45150:貢
CJK UNIFIED IDEOGRAPH:'B05F:45151:起
CJK UNIFIED IDEOGRAPH:'B060:45152:躬
CJK UNIFIED IDEOGRAPH:'B061:45153:軒
CJK UNIFIED IDEOGRAPH:'B062:45154:軔
CJK UNIFIED IDEOGRAPH:'B063:45155:軏
CJK UNIFIED IDEOGRAPH:'B064:45156:辱
CJK UNIFIED IDEOGRAPH:'B065:45157:送
CJK UNIFIED IDEOGRAPH:'B066:45158:逆
CJK UNIFIED IDEOGRAPH:'B067:45159:迷
CJK UNIFIED IDEOGRAPH:'B068:45160:退
CJK UNIFIED IDEOGRAPH:'B069:45161:迺
CJK UNIFIED IDEOGRAPH:'B06A:45162:迴
CJK UNIFIED IDEOGRAPH:'B06B:45163:逃
CJK UNIFIED IDEOGRAPH:'B06C:45164:追
CJK UNIFIED IDEOGRAPH:'B06D:45165:逅
CJK UNIFIED IDEOGRAPH:'B06E:45166:迸
CJK UNIFIED IDEOGRAPH:'B06F:45167:邕
CJK UNIFIED IDEOGRAPH:'B070:45168:郡
CJK UNIFIED IDEOGRAPH:'B071:45169:郝
CJK UNIFIED IDEOGRAPH:'B072:45170:郢
CJK UNIFIED IDEOGRAPH:'B073:45171:酒
CJK UNIFIED IDEOGRAPH:'B074:45172:配
CJK UNIFIED IDEOGRAPH:'B075:45173:酌
CJK UNIFIED IDEOGRAPH:'B076:45174:釘
CJK UNIFIED IDEOGRAPH:'B077:45175:針
CJK UNIFIED IDEOGRAPH:'B078:45176:釗
CJK UNIFIED IDEOGRAPH:'B079:45177:釜
CJK UNIFIED IDEOGRAPH:'B07A:45178:釙
CJK UNIFIED IDEOGRAPH:'B07B:45179:閃
CJK UNIFIED IDEOGRAPH:'B07C:45180:院
CJK UNIFIED IDEOGRAPH:'B07D:45181:陣
CJK UNIFIED IDEOGRAPH:'B07E:45182:陡
CJK UNIFIED IDEOGRAPH:'B0A1:45217:陛
CJK UNIFIED IDEOGRAPH:'B0A2:45218:陝
CJK UNIFIED IDEOGRAPH:'B0A3:45219:除
CJK UNIFIED IDEOGRAPH:'B0A4:45220:陘
CJK UNIFIED IDEOGRAPH:'B0A5:45221:陞
CJK UNIFIED IDEOGRAPH:'B0A6:45222:隻
CJK UNIFIED IDEOGRAPH:'B0A7:45223:飢
CJK UNIFIED IDEOGRAPH:'B0A8:45224:馬
CJK UNIFIED IDEOGRAPH:'B0A9:45225:骨
CJK UNIFIED IDEOGRAPH:'B0AA:45226:高
CJK UNIFIED IDEOGRAPH:'B0AB:45227:鬥
CJK UNIFIED IDEOGRAPH:'B0AC:45228:鬲
CJK UNIFIED IDEOGRAPH:'B0AD:45229:鬼
CJK UNIFIED IDEOGRAPH:'B0AE:45230:乾
CJK UNIFIED IDEOGRAPH:'B0AF:45231:偺
CJK UNIFIED IDEOGRAPH:'B0B0:45232:偽
CJK UNIFIED IDEOGRAPH:'B0B1:45233:停
CJK UNIFIED IDEOGRAPH:'B0B2:45234:假
CJK UNIFIED IDEOGRAPH:'B0B3:45235:偃
CJK UNIFIED IDEOGRAPH:'B0B4:45236:偌
CJK UNIFIED IDEOGRAPH:'B0B5:45237:做
CJK UNIFIED IDEOGRAPH:'B0B6:45238:偉
CJK UNIFIED IDEOGRAPH:'B0B7:45239:健
CJK UNIFIED IDEOGRAPH:'B0B8:45240:偶
CJK UNIFIED IDEOGRAPH:'B0B9:45241:偎
CJK UNIFIED IDEOGRAPH:'B0BA:45242:偕
CJK UNIFIED IDEOGRAPH:'B0BB:45243:偵
CJK UNIFIED IDEOGRAPH:'B0BC:45244:側
CJK UNIFIED IDEOGRAPH:'B0BD:45245:偷
CJK UNIFIED IDEOGRAPH:'B0BE:45246:偏
CJK UNIFIED IDEOGRAPH:'B0BF:45247:倏
CJK UNIFIED IDEOGRAPH:'B0C0:45248:偯
CJK UNIFIED IDEOGRAPH:'B0C1:45249:偭
CJK UNIFIED IDEOGRAPH:'B0C2:45250:兜
CJK UNIFIED IDEOGRAPH:'B0C3:45251:冕
CJK UNIFIED IDEOGRAPH:'B0C4:45252:凰
CJK UNIFIED IDEOGRAPH:'B0C5:45253:剪
CJK UNIFIED IDEOGRAPH:'B0C6:45254:副
CJK UNIFIED IDEOGRAPH:'B0C7:45255:勒
CJK UNIFIED IDEOGRAPH:'B0C8:45256:務
CJK UNIFIED IDEOGRAPH:'B0C9:45257:勘
CJK UNIFIED IDEOGRAPH:'B0CA:45258:動
CJK UNIFIED IDEOGRAPH:'B0CB:45259:匐
CJK UNIFIED IDEOGRAPH:'B0CC:45260:匏
CJK UNIFIED IDEOGRAPH:'B0CD:45261:匙
CJK UNIFIED IDEOGRAPH:'B0CE:45262:匿
CJK UNIFIED IDEOGRAPH:'B0CF:45263:區
CJK UNIFIED IDEOGRAPH:'B0D0:45264:匾
CJK UNIFIED IDEOGRAPH:'B0D1:45265:參
CJK UNIFIED IDEOGRAPH:'B0D2:45266:曼
CJK UNIFIED IDEOGRAPH:'B0D3:45267:商
CJK UNIFIED IDEOGRAPH:'B0D4:45268:啪
CJK UNIFIED IDEOGRAPH:'B0D5:45269:啦
CJK UNIFIED IDEOGRAPH:'B0D6:45270:啄
CJK UNIFIED IDEOGRAPH:'B0D7:45271:啞
CJK UNIFIED IDEOGRAPH:'B0D8:45272:啡
CJK UNIFIED IDEOGRAPH:'B0D9:45273:啃
CJK UNIFIED IDEOGRAPH:'B0DA:45274:啊
CJK UNIFIED IDEOGRAPH:'B0DB:45275:唱
CJK UNIFIED IDEOGRAPH:'B0DC:45276:啖
CJK UNIFIED IDEOGRAPH:'B0DD:45277:問
CJK UNIFIED IDEOGRAPH:'B0DE:45278:啕
CJK UNIFIED IDEOGRAPH:'B0DF:45279:唯
CJK UNIFIED IDEOGRAPH:'B0E0:45280:啤
CJK UNIFIED IDEOGRAPH:'B0E1:45281:唸
CJK UNIFIED IDEOGRAPH:'B0E2:45282:售
CJK UNIFIED IDEOGRAPH:'B0E3:45283:啜
CJK UNIFIED IDEOGRAPH:'B0E4:45284:唬
CJK UNIFIED IDEOGRAPH:'B0E5:45285:啣
CJK UNIFIED IDEOGRAPH:'B0E6:45286:唳
CJK UNIFIED IDEOGRAPH:'B0E7:45287:啁
CJK UNIFIED IDEOGRAPH:'B0E8:45288:啗
CJK UNIFIED IDEOGRAPH:'B0E9:45289:圈
CJK UNIFIED IDEOGRAPH:'B0EA:45290:國
CJK UNIFIED IDEOGRAPH:'B0EB:45291:圉
CJK UNIFIED IDEOGRAPH:'B0EC:45292:域
CJK UNIFIED IDEOGRAPH:'B0ED:45293:堅
CJK UNIFIED IDEOGRAPH:'B0EE:45294:堊
CJK UNIFIED IDEOGRAPH:'B0EF:45295:堆
CJK UNIFIED IDEOGRAPH:'B0F0:45296:埠
CJK UNIFIED IDEOGRAPH:'B0F1:45297:埤
CJK UNIFIED IDEOGRAPH:'B0F2:45298:基
CJK UNIFIED IDEOGRAPH:'B0F3:45299:堂
CJK UNIFIED IDEOGRAPH:'B0F4:45300:堵
CJK UNIFIED IDEOGRAPH:'B0F5:45301:執
CJK UNIFIED IDEOGRAPH:'B0F6:45302:培
CJK UNIFIED IDEOGRAPH:'B0F7:45303:夠
CJK UNIFIED IDEOGRAPH:'B0F8:45304:奢
CJK UNIFIED IDEOGRAPH:'B0F9:45305:娶
CJK UNIFIED IDEOGRAPH:'B0FA:45306:婁
CJK UNIFIED IDEOGRAPH:'B0FB:45307:婉
CJK UNIFIED IDEOGRAPH:'B0FC:45308:婦
CJK UNIFIED IDEOGRAPH:'B0FD:45309:婪
CJK UNIFIED IDEOGRAPH:'B0FE:45310:婀
CJK UNIFIED IDEOGRAPH:'B140:45376:娼
CJK UNIFIED IDEOGRAPH:'B141:45377:婢
CJK UNIFIED IDEOGRAPH:'B142:45378:婚
CJK UNIFIED IDEOGRAPH:'B143:45379:婆
CJK UNIFIED IDEOGRAPH:'B144:45380:婊
CJK UNIFIED IDEOGRAPH:'B145:45381:孰
CJK UNIFIED IDEOGRAPH:'B146:45382:寇
CJK UNIFIED IDEOGRAPH:'B147:45383:寅
CJK UNIFIED IDEOGRAPH:'B148:45384:寄
CJK UNIFIED IDEOGRAPH:'B149:45385:寂
CJK UNIFIED IDEOGRAPH:'B14A:45386:宿
CJK UNIFIED IDEOGRAPH:'B14B:45387:密
CJK UNIFIED IDEOGRAPH:'B14C:45388:尉
CJK UNIFIED IDEOGRAPH:'B14D:45389:專
CJK UNIFIED IDEOGRAPH:'B14E:45390:將
CJK UNIFIED IDEOGRAPH:'B14F:45391:屠
CJK UNIFIED IDEOGRAPH:'B150:45392:屜
CJK UNIFIED IDEOGRAPH:'B151:45393:屝
CJK UNIFIED IDEOGRAPH:'B152:45394:崇
CJK UNIFIED IDEOGRAPH:'B153:45395:崆
CJK UNIFIED IDEOGRAPH:'B154:45396:崎
CJK UNIFIED IDEOGRAPH:'B155:45397:崛
CJK UNIFIED IDEOGRAPH:'B156:45398:崖
CJK UNIFIED IDEOGRAPH:'B157:45399:崢
CJK UNIFIED IDEOGRAPH:'B158:45400:崑
CJK UNIFIED IDEOGRAPH:'B159:45401:崩
CJK UNIFIED IDEOGRAPH:'B15A:45402:崔
CJK UNIFIED IDEOGRAPH:'B15B:45403:崙
CJK UNIFIED IDEOGRAPH:'B15C:45404:崤
CJK UNIFIED IDEOGRAPH:'B15D:45405:崧
CJK UNIFIED IDEOGRAPH:'B15E:45406:崗
CJK UNIFIED IDEOGRAPH:'B15F:45407:巢
CJK UNIFIED IDEOGRAPH:'B160:45408:常
CJK UNIFIED IDEOGRAPH:'B161:45409:帶
CJK UNIFIED IDEOGRAPH:'B162:45410:帳
CJK UNIFIED IDEOGRAPH:'B163:45411:帷
CJK UNIFIED IDEOGRAPH:'B164:45412:康
CJK UNIFIED IDEOGRAPH:'B165:45413:庸
CJK UNIFIED IDEOGRAPH:'B166:45414:庶
CJK UNIFIED IDEOGRAPH:'B167:45415:庵
CJK UNIFIED IDEOGRAPH:'B168:45416:庾
CJK UNIFIED IDEOGRAPH:'B169:45417:張
CJK UNIFIED IDEOGRAPH:'B16A:45418:強
CJK UNIFIED IDEOGRAPH:'B16B:45419:彗
CJK UNIFIED IDEOGRAPH:'B16C:45420:彬
CJK UNIFIED IDEOGRAPH:'B16D:45421:彩
CJK UNIFIED IDEOGRAPH:'B16E:45422:彫
CJK UNIFIED IDEOGRAPH:'B16F:45423:得
CJK UNIFIED IDEOGRAPH:'B170:45424:徙
CJK UNIFIED IDEOGRAPH:'B171:45425:從
CJK UNIFIED IDEOGRAPH:'B172:45426:徘
CJK UNIFIED IDEOGRAPH:'B173:45427:御
CJK UNIFIED IDEOGRAPH:'B174:45428:徠
CJK UNIFIED IDEOGRAPH:'B175:45429:徜
CJK UNIFIED IDEOGRAPH:'B176:45430:恿
CJK UNIFIED IDEOGRAPH:'B177:45431:患
CJK UNIFIED IDEOGRAPH:'B178:45432:悉
CJK UNIFIED IDEOGRAPH:'B179:45433:悠
CJK UNIFIED IDEOGRAPH:'B17A:45434:您
CJK UNIFIED IDEOGRAPH:'B17B:45435:惋
CJK UNIFIED IDEOGRAPH:'B17C:45436:悴
CJK UNIFIED IDEOGRAPH:'B17D:45437:惦
CJK UNIFIED IDEOGRAPH:'B17E:45438:悽
CJK UNIFIED IDEOGRAPH:'B1A1:45473:情
CJK UNIFIED IDEOGRAPH:'B1A2:45474:悻
CJK UNIFIED IDEOGRAPH:'B1A3:45475:悵
CJK UNIFIED IDEOGRAPH:'B1A4:45476:惜
CJK UNIFIED IDEOGRAPH:'B1A5:45477:悼
CJK UNIFIED IDEOGRAPH:'B1A6:45478:惘
CJK UNIFIED IDEOGRAPH:'B1A7:45479:惕
CJK UNIFIED IDEOGRAPH:'B1A8:45480:惆
CJK UNIFIED IDEOGRAPH:'B1A9:45481:惟
CJK UNIFIED IDEOGRAPH:'B1AA:45482:悸
CJK UNIFIED IDEOGRAPH:'B1AB:45483:惚
CJK UNIFIED IDEOGRAPH:'B1AC:45484:惇
CJK UNIFIED IDEOGRAPH:'B1AD:45485:戚
CJK UNIFIED IDEOGRAPH:'B1AE:45486:戛
CJK UNIFIED IDEOGRAPH:'B1AF:45487:扈
CJK UNIFIED IDEOGRAPH:'B1B0:45488:掠
CJK UNIFIED IDEOGRAPH:'B1B1:45489:控
CJK UNIFIED IDEOGRAPH:'B1B2:45490:捲
CJK UNIFIED IDEOGRAPH:'B1B3:45491:掖
CJK UNIFIED IDEOGRAPH:'B1B4:45492:探
CJK UNIFIED IDEOGRAPH:'B1B5:45493:接
CJK UNIFIED IDEOGRAPH:'B1B6:45494:捷
CJK UNIFIED IDEOGRAPH:'B1B7:45495:捧
CJK UNIFIED IDEOGRAPH:'B1B8:45496:掘
CJK UNIFIED IDEOGRAPH:'B1B9:45497:措
CJK UNIFIED IDEOGRAPH:'B1BA:45498:捱
CJK UNIFIED IDEOGRAPH:'B1BB:45499:掩
CJK UNIFIED IDEOGRAPH:'B1BC:45500:掉
CJK UNIFIED IDEOGRAPH:'B1BD:45501:掃
CJK UNIFIED IDEOGRAPH:'B1BE:45502:掛
CJK UNIFIED IDEOGRAPH:'B1BF:45503:捫
CJK UNIFIED IDEOGRAPH:'B1C0:45504:推
CJK UNIFIED IDEOGRAPH:'B1C1:45505:掄
CJK UNIFIED IDEOGRAPH:'B1C2:45506:授
CJK UNIFIED IDEOGRAPH:'B1C3:45507:掙
CJK UNIFIED IDEOGRAPH:'B1C4:45508:採
CJK UNIFIED IDEOGRAPH:'B1C5:45509:掬
CJK UNIFIED IDEOGRAPH:'B1C6:45510:排
CJK UNIFIED IDEOGRAPH:'B1C7:45511:掏
CJK UNIFIED IDEOGRAPH:'B1C8:45512:掀
CJK UNIFIED IDEOGRAPH:'B1C9:45513:捻
CJK UNIFIED IDEOGRAPH:'B1CA:45514:捩
CJK UNIFIED IDEOGRAPH:'B1CB:45515:捨
CJK UNIFIED IDEOGRAPH:'B1CC:45516:捺
CJK UNIFIED IDEOGRAPH:'B1CD:45517:敝
CJK UNIFIED IDEOGRAPH:'B1CE:45518:敖
CJK UNIFIED IDEOGRAPH:'B1CF:45519:救
CJK UNIFIED IDEOGRAPH:'B1D0:45520:教
CJK UNIFIED IDEOGRAPH:'B1D1:45521:敗
CJK UNIFIED IDEOGRAPH:'B1D2:45522:啟
CJK UNIFIED IDEOGRAPH:'B1D3:45523:敏
CJK UNIFIED IDEOGRAPH:'B1D4:45524:敘
CJK UNIFIED IDEOGRAPH:'B1D5:45525:敕
CJK UNIFIED IDEOGRAPH:'B1D6:45526:敔
CJK UNIFIED IDEOGRAPH:'B1D7:45527:斜
CJK UNIFIED IDEOGRAPH:'B1D8:45528:斛
CJK UNIFIED IDEOGRAPH:'B1D9:45529:斬
CJK UNIFIED IDEOGRAPH:'B1DA:45530:族
CJK UNIFIED IDEOGRAPH:'B1DB:45531:旋
CJK UNIFIED IDEOGRAPH:'B1DC:45532:旌
CJK UNIFIED IDEOGRAPH:'B1DD:45533:旎
CJK UNIFIED IDEOGRAPH:'B1DE:45534:晝
CJK UNIFIED IDEOGRAPH:'B1DF:45535:晚
CJK UNIFIED IDEOGRAPH:'B1E0:45536:晤
CJK UNIFIED IDEOGRAPH:'B1E1:45537:晨
CJK UNIFIED IDEOGRAPH:'B1E2:45538:晦
CJK UNIFIED IDEOGRAPH:'B1E3:45539:晞
CJK UNIFIED IDEOGRAPH:'B1E4:45540:曹
CJK UNIFIED IDEOGRAPH:'B1E5:45541:勗
CJK UNIFIED IDEOGRAPH:'B1E6:45542:望
CJK UNIFIED IDEOGRAPH:'B1E7:45543:梁
CJK UNIFIED IDEOGRAPH:'B1E8:45544:梯
CJK UNIFIED IDEOGRAPH:'B1E9:45545:梢
CJK UNIFIED IDEOGRAPH:'B1EA:45546:梓
CJK UNIFIED IDEOGRAPH:'B1EB:45547:梵
CJK UNIFIED IDEOGRAPH:'B1EC:45548:桿
CJK UNIFIED IDEOGRAPH:'B1ED:45549:桶
CJK UNIFIED IDEOGRAPH:'B1EE:45550:梱
CJK UNIFIED IDEOGRAPH:'B1EF:45551:梧
CJK UNIFIED IDEOGRAPH:'B1F0:45552:梗
CJK UNIFIED IDEOGRAPH:'B1F1:45553:械
CJK UNIFIED IDEOGRAPH:'B1F2:45554:梃
CJK UNIFIED IDEOGRAPH:'B1F3:45555:棄
CJK UNIFIED IDEOGRAPH:'B1F4:45556:梭
CJK UNIFIED IDEOGRAPH:'B1F5:45557:梆
CJK UNIFIED IDEOGRAPH:'B1F6:45558:梅
CJK UNIFIED IDEOGRAPH:'B1F7:45559:梔
CJK UNIFIED IDEOGRAPH:'B1F8:45560:條
CJK UNIFIED IDEOGRAPH:'B1F9:45561:梨
CJK UNIFIED IDEOGRAPH:'B1FA:45562:梟
CJK UNIFIED IDEOGRAPH:'B1FB:45563:梡
CJK UNIFIED IDEOGRAPH:'B1FC:45564:梂
CJK UNIFIED IDEOGRAPH:'B1FD:45565:欲
CJK UNIFIED IDEOGRAPH:'B1FE:45566:殺
CJK UNIFIED IDEOGRAPH:'B240:45632:毫
CJK UNIFIED IDEOGRAPH:'B241:45633:毬
CJK UNIFIED IDEOGRAPH:'B242:45634:氫
CJK UNIFIED IDEOGRAPH:'B243:45635:涎
CJK UNIFIED IDEOGRAPH:'B244:45636:涼
CJK UNIFIED IDEOGRAPH:'B245:45637:淳
CJK UNIFIED IDEOGRAPH:'B246:45638:淙
CJK UNIFIED IDEOGRAPH:'B247:45639:液
CJK UNIFIED IDEOGRAPH:'B248:45640:淡
CJK UNIFIED IDEOGRAPH:'B249:45641:淌
CJK UNIFIED IDEOGRAPH:'B24A:45642:淤
CJK UNIFIED IDEOGRAPH:'B24B:45643:添
CJK UNIFIED IDEOGRAPH:'B24C:45644:淺
CJK UNIFIED IDEOGRAPH:'B24D:45645:清
CJK UNIFIED IDEOGRAPH:'B24E:45646:淇
CJK UNIFIED IDEOGRAPH:'B24F:45647:淋
CJK UNIFIED IDEOGRAPH:'B250:45648:涯
CJK UNIFIED IDEOGRAPH:'B251:45649:淑
CJK UNIFIED IDEOGRAPH:'B252:45650:涮
CJK UNIFIED IDEOGRAPH:'B253:45651:淞
CJK UNIFIED IDEOGRAPH:'B254:45652:淹
CJK UNIFIED IDEOGRAPH:'B255:45653:涸
CJK UNIFIED IDEOGRAPH:'B256:45654:混
CJK UNIFIED IDEOGRAPH:'B257:45655:淵
CJK UNIFIED IDEOGRAPH:'B258:45656:淅
CJK UNIFIED IDEOGRAPH:'B259:45657:淒
CJK UNIFIED IDEOGRAPH:'B25A:45658:渚
CJK UNIFIED IDEOGRAPH:'B25B:45659:涵
CJK UNIFIED IDEOGRAPH:'B25C:45660:淚
CJK UNIFIED IDEOGRAPH:'B25D:45661:淫
CJK UNIFIED IDEOGRAPH:'B25E:45662:淘
CJK UNIFIED IDEOGRAPH:'B25F:45663:淪
CJK UNIFIED IDEOGRAPH:'B260:45664:深
CJK UNIFIED IDEOGRAPH:'B261:45665:淮
CJK UNIFIED IDEOGRAPH:'B262:45666:淨
CJK UNIFIED IDEOGRAPH:'B263:45667:淆
CJK UNIFIED IDEOGRAPH:'B264:45668:淄
CJK UNIFIED IDEOGRAPH:'B265:45669:涪
CJK UNIFIED IDEOGRAPH:'B266:45670:淬
CJK UNIFIED IDEOGRAPH:'B267:45671:涿
CJK UNIFIED IDEOGRAPH:'B268:45672:淦
CJK UNIFIED IDEOGRAPH:'B269:45673:烹
CJK UNIFIED IDEOGRAPH:'B26A:45674:焉
CJK UNIFIED IDEOGRAPH:'B26B:45675:焊
CJK UNIFIED IDEOGRAPH:'B26C:45676:烽
CJK UNIFIED IDEOGRAPH:'B26D:45677:烯
CJK UNIFIED IDEOGRAPH:'B26E:45678:爽
CJK UNIFIED IDEOGRAPH:'B26F:45679:牽
CJK UNIFIED IDEOGRAPH:'B270:45680:犁
CJK UNIFIED IDEOGRAPH:'B271:45681:猜
CJK UNIFIED IDEOGRAPH:'B272:45682:猛
CJK UNIFIED IDEOGRAPH:'B273:45683:猖
CJK UNIFIED IDEOGRAPH:'B274:45684:猓
CJK UNIFIED IDEOGRAPH:'B275:45685:猙
CJK UNIFIED IDEOGRAPH:'B276:45686:率
CJK UNIFIED IDEOGRAPH:'B277:45687:琅
CJK UNIFIED IDEOGRAPH:'B278:45688:琊
CJK UNIFIED IDEOGRAPH:'B279:45689:球
CJK UNIFIED IDEOGRAPH:'B27A:45690:理
CJK UNIFIED IDEOGRAPH:'B27B:45691:現
CJK UNIFIED IDEOGRAPH:'B27C:45692:琍
CJK UNIFIED IDEOGRAPH:'B27D:45693:瓠
CJK UNIFIED IDEOGRAPH:'B27E:45694:瓶
CJK UNIFIED IDEOGRAPH:'B2A1:45729:瓷
CJK UNIFIED IDEOGRAPH:'B2A2:45730:甜
CJK UNIFIED IDEOGRAPH:'B2A3:45731:產
CJK UNIFIED IDEOGRAPH:'B2A4:45732:略
CJK UNIFIED IDEOGRAPH:'B2A5:45733:畦
CJK UNIFIED IDEOGRAPH:'B2A6:45734:畢
CJK UNIFIED IDEOGRAPH:'B2A7:45735:異
CJK UNIFIED IDEOGRAPH:'B2A8:45736:疏
CJK UNIFIED IDEOGRAPH:'B2A9:45737:痔
CJK UNIFIED IDEOGRAPH:'B2AA:45738:痕
CJK UNIFIED IDEOGRAPH:'B2AB:45739:疵
CJK UNIFIED IDEOGRAPH:'B2AC:45740:痊
CJK UNIFIED IDEOGRAPH:'B2AD:45741:痍
CJK UNIFIED IDEOGRAPH:'B2AE:45742:皎
CJK UNIFIED IDEOGRAPH:'B2AF:45743:盔
CJK UNIFIED IDEOGRAPH:'B2B0:45744:盒
CJK UNIFIED IDEOGRAPH:'B2B1:45745:盛
CJK UNIFIED IDEOGRAPH:'B2B2:45746:眷
CJK UNIFIED IDEOGRAPH:'B2B3:45747:眾
CJK UNIFIED IDEOGRAPH:'B2B4:45748:眼
CJK UNIFIED IDEOGRAPH:'B2B5:45749:眶
CJK UNIFIED IDEOGRAPH:'B2B6:45750:眸
CJK UNIFIED IDEOGRAPH:'B2B7:45751:眺
CJK UNIFIED IDEOGRAPH:'B2B8:45752:硫
CJK UNIFIED IDEOGRAPH:'B2B9:45753:硃
CJK UNIFIED IDEOGRAPH:'B2BA:45754:硎
CJK UNIFIED IDEOGRAPH:'B2BB:45755:祥
CJK UNIFIED IDEOGRAPH:'B2BC:45756:票
CJK UNIFIED IDEOGRAPH:'B2BD:45757:祭
CJK UNIFIED IDEOGRAPH:'B2BE:45758:移
CJK UNIFIED IDEOGRAPH:'B2BF:45759:窒
CJK UNIFIED IDEOGRAPH:'B2C0:45760:窕
CJK UNIFIED IDEOGRAPH:'B2C1:45761:笠
CJK UNIFIED IDEOGRAPH:'B2C2:45762:笨
CJK UNIFIED IDEOGRAPH:'B2C3:45763:笛
CJK UNIFIED IDEOGRAPH:'B2C4:45764:第
CJK UNIFIED IDEOGRAPH:'B2C5:45765:符
CJK UNIFIED IDEOGRAPH:'B2C6:45766:笙
CJK UNIFIED IDEOGRAPH:'B2C7:45767:笞
CJK UNIFIED IDEOGRAPH:'B2C8:45768:笮
CJK UNIFIED IDEOGRAPH:'B2C9:45769:粒
CJK UNIFIED IDEOGRAPH:'B2CA:45770:粗
CJK UNIFIED IDEOGRAPH:'B2CB:45771:粕
CJK UNIFIED IDEOGRAPH:'B2CC:45772:絆
CJK UNIFIED IDEOGRAPH:'B2CD:45773:絃
CJK UNIFIED IDEOGRAPH:'B2CE:45774:統
CJK UNIFIED IDEOGRAPH:'B2CF:45775:紮
CJK UNIFIED IDEOGRAPH:'B2D0:45776:紹
CJK UNIFIED IDEOGRAPH:'B2D1:45777:紼
CJK UNIFIED IDEOGRAPH:'B2D2:45778:絀
CJK UNIFIED IDEOGRAPH:'B2D3:45779:細
CJK UNIFIED IDEOGRAPH:'B2D4:45780:紳
CJK UNIFIED IDEOGRAPH:'B2D5:45781:組
CJK UNIFIED IDEOGRAPH:'B2D6:45782:累
CJK UNIFIED IDEOGRAPH:'B2D7:45783:終
CJK UNIFIED IDEOGRAPH:'B2D8:45784:紲
CJK UNIFIED IDEOGRAPH:'B2D9:45785:紱
CJK UNIFIED IDEOGRAPH:'B2DA:45786:缽
CJK UNIFIED IDEOGRAPH:'B2DB:45787:羞
CJK UNIFIED IDEOGRAPH:'B2DC:45788:羚
CJK UNIFIED IDEOGRAPH:'B2DD:45789:翌
CJK UNIFIED IDEOGRAPH:'B2DE:45790:翎
CJK UNIFIED IDEOGRAPH:'B2DF:45791:習
CJK UNIFIED IDEOGRAPH:'B2E0:45792:耜
CJK UNIFIED IDEOGRAPH:'B2E1:45793:聊
CJK UNIFIED IDEOGRAPH:'B2E2:45794:聆
CJK UNIFIED IDEOGRAPH:'B2E3:45795:脯
CJK UNIFIED IDEOGRAPH:'B2E4:45796:脖
CJK UNIFIED IDEOGRAPH:'B2E5:45797:脣
CJK UNIFIED IDEOGRAPH:'B2E6:45798:脫
CJK UNIFIED IDEOGRAPH:'B2E7:45799:脩
CJK UNIFIED IDEOGRAPH:'B2E8:45800:脰
CJK UNIFIED IDEOGRAPH:'B2E9:45801:脤
CJK UNIFIED IDEOGRAPH:'B2EA:45802:舂
CJK UNIFIED IDEOGRAPH:'B2EB:45803:舵
CJK UNIFIED IDEOGRAPH:'B2EC:45804:舷
CJK UNIFIED IDEOGRAPH:'B2ED:45805:舶
CJK UNIFIED IDEOGRAPH:'B2EE:45806:船
CJK UNIFIED IDEOGRAPH:'B2EF:45807:莎
CJK UNIFIED IDEOGRAPH:'B2F0:45808:莞
CJK UNIFIED IDEOGRAPH:'B2F1:45809:莘
CJK UNIFIED IDEOGRAPH:'B2F2:45810:荸
CJK UNIFIED IDEOGRAPH:'B2F3:45811:莢
CJK UNIFIED IDEOGRAPH:'B2F4:45812:莖
CJK UNIFIED IDEOGRAPH:'B2F5:45813:莽
CJK UNIFIED IDEOGRAPH:'B2F6:45814:莫
CJK UNIFIED IDEOGRAPH:'B2F7:45815:莒
CJK UNIFIED IDEOGRAPH:'B2F8:45816:莊
CJK UNIFIED IDEOGRAPH:'B2F9:45817:莓
CJK UNIFIED IDEOGRAPH:'B2FA:45818:莉
CJK UNIFIED IDEOGRAPH:'B2FB:45819:莠
CJK UNIFIED IDEOGRAPH:'B2FC:45820:荷
CJK UNIFIED IDEOGRAPH:'B2FD:45821:荻
CJK UNIFIED IDEOGRAPH:'B2FE:45822:荼
CJK UNIFIED IDEOGRAPH:'B340:45888:莆
CJK UNIFIED IDEOGRAPH:'B341:45889:莧
CJK UNIFIED IDEOGRAPH:'B342:45890:處
CJK UNIFIED IDEOGRAPH:'B343:45891:彪
CJK UNIFIED IDEOGRAPH:'B344:45892:蛇
CJK UNIFIED IDEOGRAPH:'B345:45893:蛀
CJK UNIFIED IDEOGRAPH:'B346:45894:蚶
CJK UNIFIED IDEOGRAPH:'B347:45895:蛄
CJK UNIFIED IDEOGRAPH:'B348:45896:蚵
CJK UNIFIED IDEOGRAPH:'B349:45897:蛆
CJK UNIFIED IDEOGRAPH:'B34A:45898:蛋
CJK UNIFIED IDEOGRAPH:'B34B:45899:蚱
CJK UNIFIED IDEOGRAPH:'B34C:45900:蚯
CJK UNIFIED IDEOGRAPH:'B34D:45901:蛉
CJK UNIFIED IDEOGRAPH:'B34E:45902:術
CJK UNIFIED IDEOGRAPH:'B34F:45903:袞
CJK UNIFIED IDEOGRAPH:'B350:45904:袈
CJK UNIFIED IDEOGRAPH:'B351:45905:被
CJK UNIFIED IDEOGRAPH:'B352:45906:袒
CJK UNIFIED IDEOGRAPH:'B353:45907:袖
CJK UNIFIED IDEOGRAPH:'B354:45908:袍
CJK UNIFIED IDEOGRAPH:'B355:45909:袋
CJK UNIFIED IDEOGRAPH:'B356:45910:覓
CJK UNIFIED IDEOGRAPH:'B357:45911:規
CJK UNIFIED IDEOGRAPH:'B358:45912:訪
CJK UNIFIED IDEOGRAPH:'B359:45913:訝
CJK UNIFIED IDEOGRAPH:'B35A:45914:訣
CJK UNIFIED IDEOGRAPH:'B35B:45915:訥
CJK UNIFIED IDEOGRAPH:'B35C:45916:許
CJK UNIFIED IDEOGRAPH:'B35D:45917:設
CJK UNIFIED IDEOGRAPH:'B35E:45918:訟
CJK UNIFIED IDEOGRAPH:'B35F:45919:訛
CJK UNIFIED IDEOGRAPH:'B360:45920:訢
CJK UNIFIED IDEOGRAPH:'B361:45921:豉
CJK UNIFIED IDEOGRAPH:'B362:45922:豚
CJK UNIFIED IDEOGRAPH:'B363:45923:販
CJK UNIFIED IDEOGRAPH:'B364:45924:責
CJK UNIFIED IDEOGRAPH:'B365:45925:貫
CJK UNIFIED IDEOGRAPH:'B366:45926:貨
CJK UNIFIED IDEOGRAPH:'B367:45927:貪
CJK UNIFIED IDEOGRAPH:'B368:45928:貧
CJK UNIFIED IDEOGRAPH:'B369:45929:赧
CJK UNIFIED IDEOGRAPH:'B36A:45930:赦
CJK UNIFIED IDEOGRAPH:'B36B:45931:趾
CJK UNIFIED IDEOGRAPH:'B36C:45932:趺
CJK UNIFIED IDEOGRAPH:'B36D:45933:軛
CJK UNIFIED IDEOGRAPH:'B36E:45934:軟
CJK UNIFIED IDEOGRAPH:'B36F:45935:這
CJK UNIFIED IDEOGRAPH:'B370:45936:逍
CJK UNIFIED IDEOGRAPH:'B371:45937:通
CJK UNIFIED IDEOGRAPH:'B372:45938:逗
CJK UNIFIED IDEOGRAPH:'B373:45939:連
CJK UNIFIED IDEOGRAPH:'B374:45940:速
CJK UNIFIED IDEOGRAPH:'B375:45941:逝
CJK UNIFIED IDEOGRAPH:'B376:45942:逐
CJK UNIFIED IDEOGRAPH:'B377:45943:逕
CJK UNIFIED IDEOGRAPH:'B378:45944:逞
CJK UNIFIED IDEOGRAPH:'B379:45945:造
CJK UNIFIED IDEOGRAPH:'B37A:45946:透
CJK UNIFIED IDEOGRAPH:'B37B:45947:逢
CJK UNIFIED IDEOGRAPH:'B37C:45948:逖
CJK UNIFIED IDEOGRAPH:'B37D:45949:逛
CJK UNIFIED IDEOGRAPH:'B37E:45950:途
CJK UNIFIED IDEOGRAPH:'B3A1:45985:部
CJK UNIFIED IDEOGRAPH:'B3A2:45986:郭
CJK UNIFIED IDEOGRAPH:'B3A3:45987:都
CJK UNIFIED IDEOGRAPH:'B3A4:45988:酗
CJK UNIFIED IDEOGRAPH:'B3A5:45989:野
CJK UNIFIED IDEOGRAPH:'B3A6:45990:釵
CJK UNIFIED IDEOGRAPH:'B3A7:45991:釦
CJK UNIFIED IDEOGRAPH:'B3A8:45992:釣
CJK UNIFIED IDEOGRAPH:'B3A9:45993:釧
CJK UNIFIED IDEOGRAPH:'B3AA:45994:釭
CJK UNIFIED IDEOGRAPH:'B3AB:45995:釩
CJK UNIFIED IDEOGRAPH:'B3AC:45996:閉
CJK UNIFIED IDEOGRAPH:'B3AD:45997:陪
CJK UNIFIED IDEOGRAPH:'B3AE:45998:陵
CJK UNIFIED IDEOGRAPH:'B3AF:45999:陳
CJK UNIFIED IDEOGRAPH:'B3B0:46000:陸
CJK UNIFIED IDEOGRAPH:'B3B1:46001:陰
CJK UNIFIED IDEOGRAPH:'B3B2:46002:陴
CJK UNIFIED IDEOGRAPH:'B3B3:46003:陶
CJK UNIFIED IDEOGRAPH:'B3B4:46004:陷
CJK UNIFIED IDEOGRAPH:'B3B5:46005:陬
CJK UNIFIED IDEOGRAPH:'B3B6:46006:雀
CJK UNIFIED IDEOGRAPH:'B3B7:46007:雪
CJK UNIFIED IDEOGRAPH:'B3B8:46008:雩
CJK UNIFIED IDEOGRAPH:'B3B9:46009:章
CJK UNIFIED IDEOGRAPH:'B3BA:46010:竟
CJK UNIFIED IDEOGRAPH:'B3BB:46011:頂
CJK UNIFIED IDEOGRAPH:'B3BC:46012:頃
CJK UNIFIED IDEOGRAPH:'B3BD:46013:魚
CJK UNIFIED IDEOGRAPH:'B3BE:46014:鳥
CJK UNIFIED IDEOGRAPH:'B3BF:46015:鹵
CJK UNIFIED IDEOGRAPH:'B3C0:46016:鹿
CJK UNIFIED IDEOGRAPH:'B3C1:46017:麥
CJK UNIFIED IDEOGRAPH:'B3C2:46018:麻
CJK UNIFIED IDEOGRAPH:'B3C3:46019:傢
CJK UNIFIED IDEOGRAPH:'B3C4:46020:傍
CJK UNIFIED IDEOGRAPH:'B3C5:46021:傅
CJK UNIFIED IDEOGRAPH:'B3C6:46022:備
CJK UNIFIED IDEOGRAPH:'B3C7:46023:傑
CJK UNIFIED IDEOGRAPH:'B3C8:46024:傀
CJK UNIFIED IDEOGRAPH:'B3C9:46025:傖
CJK UNIFIED IDEOGRAPH:'B3CA:46026:傘
CJK UNIFIED IDEOGRAPH:'B3CB:46027:傚
CJK UNIFIED IDEOGRAPH:'B3CC:46028:最
CJK UNIFIED IDEOGRAPH:'B3CD:46029:凱
CJK UNIFIED IDEOGRAPH:'B3CE:46030:割
CJK UNIFIED IDEOGRAPH:'B3CF:46031:剴
CJK UNIFIED IDEOGRAPH:'B3D0:46032:創
CJK UNIFIED IDEOGRAPH:'B3D1:46033:剩
CJK UNIFIED IDEOGRAPH:'B3D2:46034:勞
CJK UNIFIED IDEOGRAPH:'B3D3:46035:勝
CJK UNIFIED IDEOGRAPH:'B3D4:46036:勛
CJK UNIFIED IDEOGRAPH:'B3D5:46037:博
CJK UNIFIED IDEOGRAPH:'B3D6:46038:厥
CJK UNIFIED IDEOGRAPH:'B3D7:46039:啻
CJK UNIFIED IDEOGRAPH:'B3D8:46040:喀
CJK UNIFIED IDEOGRAPH:'B3D9:46041:喧
CJK UNIFIED IDEOGRAPH:'B3DA:46042:啼
CJK UNIFIED IDEOGRAPH:'B3DB:46043:喊
CJK UNIFIED IDEOGRAPH:'B3DC:46044:喝
CJK UNIFIED IDEOGRAPH:'B3DD:46045:喘
CJK UNIFIED IDEOGRAPH:'B3DE:46046:喂
CJK UNIFIED IDEOGRAPH:'B3DF:46047:喜
CJK UNIFIED IDEOGRAPH:'B3E0:46048:喪
CJK UNIFIED IDEOGRAPH:'B3E1:46049:喔
CJK UNIFIED IDEOGRAPH:'B3E2:46050:喇
CJK UNIFIED IDEOGRAPH:'B3E3:46051:喋
CJK UNIFIED IDEOGRAPH:'B3E4:46052:喃
CJK UNIFIED IDEOGRAPH:'B3E5:46053:喳
CJK UNIFIED IDEOGRAPH:'B3E6:46054:單
CJK UNIFIED IDEOGRAPH:'B3E7:46055:喟
CJK UNIFIED IDEOGRAPH:'B3E8:46056:唾
CJK UNIFIED IDEOGRAPH:'B3E9:46057:喲
CJK UNIFIED IDEOGRAPH:'B3EA:46058:喚
CJK UNIFIED IDEOGRAPH:'B3EB:46059:喻
CJK UNIFIED IDEOGRAPH:'B3EC:46060:喬
CJK UNIFIED IDEOGRAPH:'B3ED:46061:喱
CJK UNIFIED IDEOGRAPH:'B3EE:46062:啾
CJK UNIFIED IDEOGRAPH:'B3EF:46063:喉
CJK UNIFIED IDEOGRAPH:'B3F0:46064:喫
CJK UNIFIED IDEOGRAPH:'B3F1:46065:喙
CJK UNIFIED IDEOGRAPH:'B3F2:46066:圍
CJK UNIFIED IDEOGRAPH:'B3F3:46067:堯
CJK UNIFIED IDEOGRAPH:'B3F4:46068:堪
CJK UNIFIED IDEOGRAPH:'B3F5:46069:場
CJK UNIFIED IDEOGRAPH:'B3F6:46070:堤
CJK UNIFIED IDEOGRAPH:'B3F7:46071:堰
CJK UNIFIED IDEOGRAPH:'B3F8:46072:報
CJK UNIFIED IDEOGRAPH:'B3F9:46073:堡
CJK UNIFIED IDEOGRAPH:'B3FA:46074:堝
CJK UNIFIED IDEOGRAPH:'B3FB:46075:堠
CJK UNIFIED IDEOGRAPH:'B3FC:46076:壹
CJK UNIFIED IDEOGRAPH:'B3FD:46077:壺
CJK UNIFIED IDEOGRAPH:'B3FE:46078:奠
CJK UNIFIED IDEOGRAPH:'B440:46144:婷
CJK UNIFIED IDEOGRAPH:'B441:46145:媚
CJK UNIFIED IDEOGRAPH:'B442:46146:婿
CJK UNIFIED IDEOGRAPH:'B443:46147:媒
CJK UNIFIED IDEOGRAPH:'B444:46148:媛
CJK UNIFIED IDEOGRAPH:'B445:46149:媧
CJK UNIFIED IDEOGRAPH:'B446:46150:孳
CJK UNIFIED IDEOGRAPH:'B447:46151:孱
CJK UNIFIED IDEOGRAPH:'B448:46152:寒
CJK UNIFIED IDEOGRAPH:'B449:46153:富
CJK UNIFIED IDEOGRAPH:'B44A:46154:寓
CJK UNIFIED IDEOGRAPH:'B44B:46155:寐
CJK UNIFIED IDEOGRAPH:'B44C:46156:尊
CJK UNIFIED IDEOGRAPH:'B44D:46157:尋
CJK UNIFIED IDEOGRAPH:'B44E:46158:就
CJK UNIFIED IDEOGRAPH:'B44F:46159:嵌
CJK UNIFIED IDEOGRAPH:'B450:46160:嵐
CJK UNIFIED IDEOGRAPH:'B451:46161:崴
CJK UNIFIED IDEOGRAPH:'B452:46162:嵇
CJK UNIFIED IDEOGRAPH:'B453:46163:巽
CJK UNIFIED IDEOGRAPH:'B454:46164:幅
CJK UNIFIED IDEOGRAPH:'B455:46165:帽
CJK UNIFIED IDEOGRAPH:'B456:46166:幀
CJK UNIFIED IDEOGRAPH:'B457:46167:幃
CJK UNIFIED IDEOGRAPH:'B458:46168:幾
CJK UNIFIED IDEOGRAPH:'B459:46169:廊
CJK UNIFIED IDEOGRAPH:'B45A:46170:廁
CJK UNIFIED IDEOGRAPH:'B45B:46171:廂
CJK UNIFIED IDEOGRAPH:'B45C:46172:廄
CJK UNIFIED IDEOGRAPH:'B45D:46173:弼
CJK UNIFIED IDEOGRAPH:'B45E:46174:彭
CJK UNIFIED IDEOGRAPH:'B45F:46175:復
CJK UNIFIED IDEOGRAPH:'B460:46176:循
CJK UNIFIED IDEOGRAPH:'B461:46177:徨
CJK UNIFIED IDEOGRAPH:'B462:46178:惑
CJK UNIFIED IDEOGRAPH:'B463:46179:惡
CJK UNIFIED IDEOGRAPH:'B464:46180:悲
CJK UNIFIED IDEOGRAPH:'B465:46181:悶
CJK UNIFIED IDEOGRAPH:'B466:46182:惠
CJK UNIFIED IDEOGRAPH:'B467:46183:愜
CJK UNIFIED IDEOGRAPH:'B468:46184:愣
CJK UNIFIED IDEOGRAPH:'B469:46185:惺
CJK UNIFIED IDEOGRAPH:'B46A:46186:愕
CJK UNIFIED IDEOGRAPH:'B46B:46187:惰
CJK UNIFIED IDEOGRAPH:'B46C:46188:惻
CJK UNIFIED IDEOGRAPH:'B46D:46189:惴
CJK UNIFIED IDEOGRAPH:'B46E:46190:慨
CJK UNIFIED IDEOGRAPH:'B46F:46191:惱
CJK UNIFIED IDEOGRAPH:'B470:46192:愎
CJK UNIFIED IDEOGRAPH:'B471:46193:惶
CJK UNIFIED IDEOGRAPH:'B472:46194:愉
CJK UNIFIED IDEOGRAPH:'B473:46195:愀
CJK UNIFIED IDEOGRAPH:'B474:46196:愒
CJK UNIFIED IDEOGRAPH:'B475:46197:戟
CJK UNIFIED IDEOGRAPH:'B476:46198:扉
CJK UNIFIED IDEOGRAPH:'B477:46199:掣
CJK UNIFIED IDEOGRAPH:'B478:46200:掌
CJK UNIFIED IDEOGRAPH:'B479:46201:描
CJK UNIFIED IDEOGRAPH:'B47A:46202:揀
CJK UNIFIED IDEOGRAPH:'B47B:46203:揩
CJK UNIFIED IDEOGRAPH:'B47C:46204:揉
CJK UNIFIED IDEOGRAPH:'B47D:46205:揆
CJK UNIFIED IDEOGRAPH:'B47E:46206:揍
CJK UNIFIED IDEOGRAPH:'B4A1:46241:插
CJK UNIFIED IDEOGRAPH:'B4A2:46242:揣
CJK UNIFIED IDEOGRAPH:'B4A3:46243:提
CJK UNIFIED IDEOGRAPH:'B4A4:46244:握
CJK UNIFIED IDEOGRAPH:'B4A5:46245:揖
CJK UNIFIED IDEOGRAPH:'B4A6:46246:揭
CJK UNIFIED IDEOGRAPH:'B4A7:46247:揮
CJK UNIFIED IDEOGRAPH:'B4A8:46248:捶
CJK UNIFIED IDEOGRAPH:'B4A9:46249:援
CJK UNIFIED IDEOGRAPH:'B4AA:46250:揪
CJK UNIFIED IDEOGRAPH:'B4AB:46251:換
CJK UNIFIED IDEOGRAPH:'B4AC:46252:摒
CJK UNIFIED IDEOGRAPH:'B4AD:46253:揚
CJK UNIFIED IDEOGRAPH:'B4AE:46254:揹
CJK UNIFIED IDEOGRAPH:'B4AF:46255:敞
CJK UNIFIED IDEOGRAPH:'B4B0:46256:敦
CJK UNIFIED IDEOGRAPH:'B4B1:46257:敢
CJK UNIFIED IDEOGRAPH:'B4B2:46258:散
CJK UNIFIED IDEOGRAPH:'B4B3:46259:斑
CJK UNIFIED IDEOGRAPH:'B4B4:46260:斐
CJK UNIFIED IDEOGRAPH:'B4B5:46261:斯
CJK UNIFIED IDEOGRAPH:'B4B6:46262:普
CJK UNIFIED IDEOGRAPH:'B4B7:46263:晰
CJK UNIFIED IDEOGRAPH:'B4B8:46264:晴
CJK UNIFIED IDEOGRAPH:'B4B9:46265:晶
CJK UNIFIED IDEOGRAPH:'B4BA:46266:景
CJK UNIFIED IDEOGRAPH:'B4BB:46267:暑
CJK UNIFIED IDEOGRAPH:'B4BC:46268:智
CJK UNIFIED IDEOGRAPH:'B4BD:46269:晾
CJK UNIFIED IDEOGRAPH:'B4BE:46270:晷
CJK UNIFIED IDEOGRAPH:'B4BF:46271:曾
CJK UNIFIED IDEOGRAPH:'B4C0:46272:替
CJK UNIFIED IDEOGRAPH:'B4C1:46273:期
CJK UNIFIED IDEOGRAPH:'B4C2:46274:朝
CJK UNIFIED IDEOGRAPH:'B4C3:46275:棺
CJK UNIFIED IDEOGRAPH:'B4C4:46276:棕
CJK UNIFIED IDEOGRAPH:'B4C5:46277:棠
CJK UNIFIED IDEOGRAPH:'B4C6:46278:棘
CJK UNIFIED IDEOGRAPH:'B4C7:46279:棗
CJK UNIFIED IDEOGRAPH:'B4C8:46280:椅
CJK UNIFIED IDEOGRAPH:'B4C9:46281:棟
CJK UNIFIED IDEOGRAPH:'B4CA:46282:棵
CJK UNIFIED IDEOGRAPH:'B4CB:46283:森
CJK UNIFIED IDEOGRAPH:'B4CC:46284:棧
CJK UNIFIED IDEOGRAPH:'B4CD:46285:棹
CJK UNIFIED IDEOGRAPH:'B4CE:46286:棒
CJK UNIFIED IDEOGRAPH:'B4CF:46287:棲
CJK UNIFIED IDEOGRAPH:'B4D0:46288:棣
CJK UNIFIED IDEOGRAPH:'B4D1:46289:棋
CJK UNIFIED IDEOGRAPH:'B4D2:46290:棍
CJK UNIFIED IDEOGRAPH:'B4D3:46291:植
CJK UNIFIED IDEOGRAPH:'B4D4:46292:椒
CJK UNIFIED IDEOGRAPH:'B4D5:46293:椎
CJK UNIFIED IDEOGRAPH:'B4D6:46294:棉
CJK UNIFIED IDEOGRAPH:'B4D7:46295:棚
CJK UNIFIED IDEOGRAPH:'B4D8:46296:楮
CJK UNIFIED IDEOGRAPH:'B4D9:46297:棻
CJK UNIFIED IDEOGRAPH:'B4DA:46298:款
CJK UNIFIED IDEOGRAPH:'B4DB:46299:欺
CJK UNIFIED IDEOGRAPH:'B4DC:46300:欽
CJK UNIFIED IDEOGRAPH:'B4DD:46301:殘
CJK UNIFIED IDEOGRAPH:'B4DE:46302:殖
CJK UNIFIED IDEOGRAPH:'B4DF:46303:殼
CJK UNIFIED IDEOGRAPH:'B4E0:46304:毯
CJK UNIFIED IDEOGRAPH:'B4E1:46305:氮
CJK UNIFIED IDEOGRAPH:'B4E2:46306:氯
CJK UNIFIED IDEOGRAPH:'B4E3:46307:氬
CJK UNIFIED IDEOGRAPH:'B4E4:46308:港
CJK UNIFIED IDEOGRAPH:'B4E5:46309:游
CJK UNIFIED IDEOGRAPH:'B4E6:46310:湔
CJK UNIFIED IDEOGRAPH:'B4E7:46311:渡
CJK UNIFIED IDEOGRAPH:'B4E8:46312:渲
CJK UNIFIED IDEOGRAPH:'B4E9:46313:湧
CJK UNIFIED IDEOGRAPH:'B4EA:46314:湊
CJK UNIFIED IDEOGRAPH:'B4EB:46315:渠
CJK UNIFIED IDEOGRAPH:'B4EC:46316:渥
CJK UNIFIED IDEOGRAPH:'B4ED:46317:渣
CJK UNIFIED IDEOGRAPH:'B4EE:46318:減
CJK UNIFIED IDEOGRAPH:'B4EF:46319:湛
CJK UNIFIED IDEOGRAPH:'B4F0:46320:湘
CJK UNIFIED IDEOGRAPH:'B4F1:46321:渤
CJK UNIFIED IDEOGRAPH:'B4F2:46322:湖
CJK UNIFIED IDEOGRAPH:'B4F3:46323:湮
CJK UNIFIED IDEOGRAPH:'B4F4:46324:渭
CJK UNIFIED IDEOGRAPH:'B4F5:46325:渦
CJK UNIFIED IDEOGRAPH:'B4F6:46326:湯
CJK UNIFIED IDEOGRAPH:'B4F7:46327:渴
CJK UNIFIED IDEOGRAPH:'B4F8:46328:湍
CJK UNIFIED IDEOGRAPH:'B4F9:46329:渺
CJK UNIFIED IDEOGRAPH:'B4FA:46330:測
CJK UNIFIED IDEOGRAPH:'B4FB:46331:湃
CJK UNIFIED IDEOGRAPH:'B4FC:46332:渝
CJK UNIFIED IDEOGRAPH:'B4FD:46333:渾
CJK UNIFIED IDEOGRAPH:'B4FE:46334:滋
CJK UNIFIED IDEOGRAPH:'B540:46400:溉
CJK UNIFIED IDEOGRAPH:'B541:46401:渙
CJK UNIFIED IDEOGRAPH:'B542:46402:湎
CJK UNIFIED IDEOGRAPH:'B543:46403:湣
CJK UNIFIED IDEOGRAPH:'B544:46404:湄
CJK UNIFIED IDEOGRAPH:'B545:46405:湲
CJK UNIFIED IDEOGRAPH:'B546:46406:湩
CJK UNIFIED IDEOGRAPH:'B547:46407:湟
CJK UNIFIED IDEOGRAPH:'B548:46408:焙
CJK UNIFIED IDEOGRAPH:'B549:46409:焚
CJK UNIFIED IDEOGRAPH:'B54A:46410:焦
CJK UNIFIED IDEOGRAPH:'B54B:46411:焰
CJK UNIFIED IDEOGRAPH:'B54C:46412:無
CJK UNIFIED IDEOGRAPH:'B54D:46413:然
CJK UNIFIED IDEOGRAPH:'B54E:46414:煮
CJK UNIFIED IDEOGRAPH:'B54F:46415:焜
CJK UNIFIED IDEOGRAPH:'B550:46416:牌
CJK UNIFIED IDEOGRAPH:'B551:46417:犄
CJK UNIFIED IDEOGRAPH:'B552:46418:犀
CJK UNIFIED IDEOGRAPH:'B553:46419:猶
CJK UNIFIED IDEOGRAPH:'B554:46420:猥
CJK UNIFIED IDEOGRAPH:'B555:46421:猴
CJK UNIFIED IDEOGRAPH:'B556:46422:猩
CJK UNIFIED IDEOGRAPH:'B557:46423:琺
CJK UNIFIED IDEOGRAPH:'B558:46424:琪
CJK UNIFIED IDEOGRAPH:'B559:46425:琳
CJK UNIFIED IDEOGRAPH:'B55A:46426:琢
CJK UNIFIED IDEOGRAPH:'B55B:46427:琥
CJK UNIFIED IDEOGRAPH:'B55C:46428:琵
CJK UNIFIED IDEOGRAPH:'B55D:46429:琶
CJK UNIFIED IDEOGRAPH:'B55E:46430:琴
CJK UNIFIED IDEOGRAPH:'B55F:46431:琯
CJK UNIFIED IDEOGRAPH:'B560:46432:琛
CJK UNIFIED IDEOGRAPH:'B561:46433:琦
CJK UNIFIED IDEOGRAPH:'B562:46434:琨
CJK UNIFIED IDEOGRAPH:'B563:46435:甥
CJK UNIFIED IDEOGRAPH:'B564:46436:甦
CJK UNIFIED IDEOGRAPH:'B565:46437:畫
CJK UNIFIED IDEOGRAPH:'B566:46438:番
CJK UNIFIED IDEOGRAPH:'B567:46439:痢
CJK UNIFIED IDEOGRAPH:'B568:46440:痛
CJK UNIFIED IDEOGRAPH:'B569:46441:痣
CJK UNIFIED IDEOGRAPH:'B56A:46442:痙
CJK UNIFIED IDEOGRAPH:'B56B:46443:痘
CJK UNIFIED IDEOGRAPH:'B56C:46444:痞
CJK UNIFIED IDEOGRAPH:'B56D:46445:痠
CJK UNIFIED IDEOGRAPH:'B56E:46446:登
CJK UNIFIED IDEOGRAPH:'B56F:46447:發
CJK UNIFIED IDEOGRAPH:'B570:46448:皖
CJK UNIFIED IDEOGRAPH:'B571:46449:皓
CJK UNIFIED IDEOGRAPH:'B572:46450:皴
CJK UNIFIED IDEOGRAPH:'B573:46451:盜
CJK UNIFIED IDEOGRAPH:'B574:46452:睏
CJK UNIFIED IDEOGRAPH:'B575:46453:短
CJK UNIFIED IDEOGRAPH:'B576:46454:硝
CJK UNIFIED IDEOGRAPH:'B577:46455:硬
CJK UNIFIED IDEOGRAPH:'B578:46456:硯
CJK UNIFIED IDEOGRAPH:'B579:46457:稍
CJK UNIFIED IDEOGRAPH:'B57A:46458:稈
CJK UNIFIED IDEOGRAPH:'B57B:46459:程
CJK UNIFIED IDEOGRAPH:'B57C:46460:稅
CJK UNIFIED IDEOGRAPH:'B57D:46461:稀
CJK UNIFIED IDEOGRAPH:'B57E:46462:窘
CJK UNIFIED IDEOGRAPH:'B5A1:46497:窗
CJK UNIFIED IDEOGRAPH:'B5A2:46498:窖
CJK UNIFIED IDEOGRAPH:'B5A3:46499:童
CJK UNIFIED IDEOGRAPH:'B5A4:46500:竣
CJK UNIFIED IDEOGRAPH:'B5A5:46501:等
CJK UNIFIED IDEOGRAPH:'B5A6:46502:策
CJK UNIFIED IDEOGRAPH:'B5A7:46503:筆
CJK UNIFIED IDEOGRAPH:'B5A8:46504:筐
CJK UNIFIED IDEOGRAPH:'B5A9:46505:筒
CJK UNIFIED IDEOGRAPH:'B5AA:46506:答
CJK UNIFIED IDEOGRAPH:'B5AB:46507:筍
CJK UNIFIED IDEOGRAPH:'B5AC:46508:筋
CJK UNIFIED IDEOGRAPH:'B5AD:46509:筏
CJK UNIFIED IDEOGRAPH:'B5AE:46510:筑
CJK UNIFIED IDEOGRAPH:'B5AF:46511:粟
CJK UNIFIED IDEOGRAPH:'B5B0:46512:粥
CJK UNIFIED IDEOGRAPH:'B5B1:46513:絞
CJK UNIFIED IDEOGRAPH:'B5B2:46514:結
CJK UNIFIED IDEOGRAPH:'B5B3:46515:絨
CJK UNIFIED IDEOGRAPH:'B5B4:46516:絕
CJK UNIFIED IDEOGRAPH:'B5B5:46517:紫
CJK UNIFIED IDEOGRAPH:'B5B6:46518:絮
CJK UNIFIED IDEOGRAPH:'B5B7:46519:絲
CJK UNIFIED IDEOGRAPH:'B5B8:46520:絡
CJK UNIFIED IDEOGRAPH:'B5B9:46521:給
CJK UNIFIED IDEOGRAPH:'B5BA:46522:絢
CJK UNIFIED IDEOGRAPH:'B5BB:46523:絰
CJK UNIFIED IDEOGRAPH:'B5BC:46524:絳
CJK UNIFIED IDEOGRAPH:'B5BD:46525:善
CJK UNIFIED IDEOGRAPH:'B5BE:46526:翔
CJK UNIFIED IDEOGRAPH:'B5BF:46527:翕
CJK UNIFIED IDEOGRAPH:'B5C0:46528:耋
CJK UNIFIED IDEOGRAPH:'B5C1:46529:聒
CJK UNIFIED IDEOGRAPH:'B5C2:46530:肅
CJK UNIFIED IDEOGRAPH:'B5C3:46531:腕
CJK UNIFIED IDEOGRAPH:'B5C4:46532:腔
CJK UNIFIED IDEOGRAPH:'B5C5:46533:腋
CJK UNIFIED IDEOGRAPH:'B5C6:46534:腑
CJK UNIFIED IDEOGRAPH:'B5C7:46535:腎
CJK UNIFIED IDEOGRAPH:'B5C8:46536:脹
CJK UNIFIED IDEOGRAPH:'B5C9:46537:腆
CJK UNIFIED IDEOGRAPH:'B5CA:46538:脾
CJK UNIFIED IDEOGRAPH:'B5CB:46539:腌
CJK UNIFIED IDEOGRAPH:'B5CC:46540:腓
CJK UNIFIED IDEOGRAPH:'B5CD:46541:腴
CJK UNIFIED IDEOGRAPH:'B5CE:46542:舒
CJK UNIFIED IDEOGRAPH:'B5CF:46543:舜
CJK UNIFIED IDEOGRAPH:'B5D0:46544:菩
CJK UNIFIED IDEOGRAPH:'B5D1:46545:萃
CJK UNIFIED IDEOGRAPH:'B5D2:46546:菸
CJK UNIFIED IDEOGRAPH:'B5D3:46547:萍
CJK UNIFIED IDEOGRAPH:'B5D4:46548:菠
CJK UNIFIED IDEOGRAPH:'B5D5:46549:菅
CJK UNIFIED IDEOGRAPH:'B5D6:46550:萋
CJK UNIFIED IDEOGRAPH:'B5D7:46551:菁
CJK UNIFIED IDEOGRAPH:'B5D8:46552:華
CJK UNIFIED IDEOGRAPH:'B5D9:46553:菱
CJK UNIFIED IDEOGRAPH:'B5DA:46554:菴
CJK UNIFIED IDEOGRAPH:'B5DB:46555:著
CJK UNIFIED IDEOGRAPH:'B5DC:46556:萊
CJK UNIFIED IDEOGRAPH:'B5DD:46557:菰
CJK UNIFIED IDEOGRAPH:'B5DE:46558:萌
CJK UNIFIED IDEOGRAPH:'B5DF:46559:菌
CJK UNIFIED IDEOGRAPH:'B5E0:46560:菽
CJK UNIFIED IDEOGRAPH:'B5E1:46561:菲
CJK UNIFIED IDEOGRAPH:'B5E2:46562:菊
CJK UNIFIED IDEOGRAPH:'B5E3:46563:萸
CJK UNIFIED IDEOGRAPH:'B5E4:46564:萎
CJK UNIFIED IDEOGRAPH:'B5E5:46565:萄
CJK UNIFIED IDEOGRAPH:'B5E6:46566:菜
CJK UNIFIED IDEOGRAPH:'B5E7:46567:萇
CJK UNIFIED IDEOGRAPH:'B5E8:46568:菔
CJK UNIFIED IDEOGRAPH:'B5E9:46569:菟
CJK UNIFIED IDEOGRAPH:'B5EA:46570:虛
CJK UNIFIED IDEOGRAPH:'B5EB:46571:蛟
CJK UNIFIED IDEOGRAPH:'B5EC:46572:蛙
CJK UNIFIED IDEOGRAPH:'B5ED:46573:蛭
CJK UNIFIED IDEOGRAPH:'B5EE:46574:蛔
CJK UNIFIED IDEOGRAPH:'B5EF:46575:蛛
CJK UNIFIED IDEOGRAPH:'B5F0:46576:蛤
CJK UNIFIED IDEOGRAPH:'B5F1:46577:蛐
CJK UNIFIED IDEOGRAPH:'B5F2:46578:蛞
CJK UNIFIED IDEOGRAPH:'B5F3:46579:街
CJK UNIFIED IDEOGRAPH:'B5F4:46580:裁
CJK UNIFIED IDEOGRAPH:'B5F5:46581:裂
CJK UNIFIED IDEOGRAPH:'B5F6:46582:袱
CJK UNIFIED IDEOGRAPH:'B5F7:46583:覃
CJK UNIFIED IDEOGRAPH:'B5F8:46584:視
CJK UNIFIED IDEOGRAPH:'B5F9:46585:註
CJK UNIFIED IDEOGRAPH:'B5FA:46586:詠
CJK UNIFIED IDEOGRAPH:'B5FB:46587:評
CJK UNIFIED IDEOGRAPH:'B5FC:46588:詞
CJK UNIFIED IDEOGRAPH:'B5FD:46589:証
CJK UNIFIED IDEOGRAPH:'B5FE:46590:詁
CJK UNIFIED IDEOGRAPH:'B640:46656:詔
CJK UNIFIED IDEOGRAPH:'B641:46657:詛
CJK UNIFIED IDEOGRAPH:'B642:46658:詐
CJK UNIFIED IDEOGRAPH:'B643:46659:詆
CJK UNIFIED IDEOGRAPH:'B644:46660:訴
CJK UNIFIED IDEOGRAPH:'B645:46661:診
CJK UNIFIED IDEOGRAPH:'B646:46662:訶
CJK UNIFIED IDEOGRAPH:'B647:46663:詖
CJK UNIFIED IDEOGRAPH:'B648:46664:象
CJK UNIFIED IDEOGRAPH:'B649:46665:貂
CJK UNIFIED IDEOGRAPH:'B64A:46666:貯
CJK UNIFIED IDEOGRAPH:'B64B:46667:貼
CJK UNIFIED IDEOGRAPH:'B64C:46668:貳
CJK UNIFIED IDEOGRAPH:'B64D:46669:貽
CJK UNIFIED IDEOGRAPH:'B64E:46670:賁
CJK UNIFIED IDEOGRAPH:'B64F:46671:費
CJK UNIFIED IDEOGRAPH:'B650:46672:賀
CJK UNIFIED IDEOGRAPH:'B651:46673:貴
CJK UNIFIED IDEOGRAPH:'B652:46674:買
CJK UNIFIED IDEOGRAPH:'B653:46675:貶
CJK UNIFIED IDEOGRAPH:'B654:46676:貿
CJK UNIFIED IDEOGRAPH:'B655:46677:貸
CJK UNIFIED IDEOGRAPH:'B656:46678:越
CJK UNIFIED IDEOGRAPH:'B657:46679:超
CJK UNIFIED IDEOGRAPH:'B658:46680:趁
CJK UNIFIED IDEOGRAPH:'B659:46681:跎
CJK UNIFIED IDEOGRAPH:'B65A:46682:距
CJK UNIFIED IDEOGRAPH:'B65B:46683:跋
CJK UNIFIED IDEOGRAPH:'B65C:46684:跚
CJK UNIFIED IDEOGRAPH:'B65D:46685:跑
CJK UNIFIED IDEOGRAPH:'B65E:46686:跌
CJK UNIFIED IDEOGRAPH:'B65F:46687:跛
CJK UNIFIED IDEOGRAPH:'B660:46688:跆
CJK UNIFIED IDEOGRAPH:'B661:46689:軻
CJK UNIFIED IDEOGRAPH:'B662:46690:軸
CJK UNIFIED IDEOGRAPH:'B663:46691:軼
CJK UNIFIED IDEOGRAPH:'B664:46692:辜
CJK UNIFIED IDEOGRAPH:'B665:46693:逮
CJK UNIFIED IDEOGRAPH:'B666:46694:逵
CJK UNIFIED IDEOGRAPH:'B667:46695:週
CJK UNIFIED IDEOGRAPH:'B668:46696:逸
CJK UNIFIED IDEOGRAPH:'B669:46697:進
CJK UNIFIED IDEOGRAPH:'B66A:46698:逶
CJK UNIFIED IDEOGRAPH:'B66B:46699:鄂
CJK UNIFIED IDEOGRAPH:'B66C:46700:郵
CJK UNIFIED IDEOGRAPH:'B66D:46701:鄉
CJK UNIFIED IDEOGRAPH:'B66E:46702:郾
CJK UNIFIED IDEOGRAPH:'B66F:46703:酣
CJK UNIFIED IDEOGRAPH:'B670:46704:酥
CJK UNIFIED IDEOGRAPH:'B671:46705:量
CJK UNIFIED IDEOGRAPH:'B672:46706:鈔
CJK UNIFIED IDEOGRAPH:'B673:46707:鈕
CJK UNIFIED IDEOGRAPH:'B674:46708:鈣
CJK UNIFIED IDEOGRAPH:'B675:46709:鈉
CJK UNIFIED IDEOGRAPH:'B676:46710:鈞
CJK UNIFIED IDEOGRAPH:'B677:46711:鈍
CJK UNIFIED IDEOGRAPH:'B678:46712:鈐
CJK UNIFIED IDEOGRAPH:'B679:46713:鈇
CJK UNIFIED IDEOGRAPH:'B67A:46714:鈑
CJK UNIFIED IDEOGRAPH:'B67B:46715:閔
CJK UNIFIED IDEOGRAPH:'B67C:46716:閏
CJK UNIFIED IDEOGRAPH:'B67D:46717:開
CJK UNIFIED IDEOGRAPH:'B67E:46718:閑
CJK UNIFIED IDEOGRAPH:'B6A1:46753:間
CJK UNIFIED IDEOGRAPH:'B6A2:46754:閒
CJK UNIFIED IDEOGRAPH:'B6A3:46755:閎
CJK UNIFIED IDEOGRAPH:'B6A4:46756:隊
CJK UNIFIED IDEOGRAPH:'B6A5:46757:階
CJK UNIFIED IDEOGRAPH:'B6A6:46758:隋
CJK UNIFIED IDEOGRAPH:'B6A7:46759:陽
CJK UNIFIED IDEOGRAPH:'B6A8:46760:隅
CJK UNIFIED IDEOGRAPH:'B6A9:46761:隆
CJK UNIFIED IDEOGRAPH:'B6AA:46762:隍
CJK UNIFIED IDEOGRAPH:'B6AB:46763:陲
CJK UNIFIED IDEOGRAPH:'B6AC:46764:隄
CJK UNIFIED IDEOGRAPH:'B6AD:46765:雁
CJK UNIFIED IDEOGRAPH:'B6AE:46766:雅
CJK UNIFIED IDEOGRAPH:'B6AF:46767:雄
CJK UNIFIED IDEOGRAPH:'B6B0:46768:集
CJK UNIFIED IDEOGRAPH:'B6B1:46769:雇
CJK UNIFIED IDEOGRAPH:'B6B2:46770:雯
CJK UNIFIED IDEOGRAPH:'B6B3:46771:雲
CJK UNIFIED IDEOGRAPH:'B6B4:46772:韌
CJK UNIFIED IDEOGRAPH:'B6B5:46773:項
CJK UNIFIED IDEOGRAPH:'B6B6:46774:順
CJK UNIFIED IDEOGRAPH:'B6B7:46775:須
CJK UNIFIED IDEOGRAPH:'B6B8:46776:飧
CJK UNIFIED IDEOGRAPH:'B6B9:46777:飪
CJK UNIFIED IDEOGRAPH:'B6BA:46778:飯
CJK UNIFIED IDEOGRAPH:'B6BB:46779:飩
CJK UNIFIED IDEOGRAPH:'B6BC:46780:飲
CJK UNIFIED IDEOGRAPH:'B6BD:46781:飭
CJK UNIFIED IDEOGRAPH:'B6BE:46782:馮
CJK UNIFIED IDEOGRAPH:'B6BF:46783:馭
CJK UNIFIED IDEOGRAPH:'B6C0:46784:黃
CJK UNIFIED IDEOGRAPH:'B6C1:46785:黍
CJK UNIFIED IDEOGRAPH:'B6C2:46786:黑
CJK UNIFIED IDEOGRAPH:'B6C3:46787:亂
CJK UNIFIED IDEOGRAPH:'B6C4:46788:傭
CJK UNIFIED IDEOGRAPH:'B6C5:46789:債
CJK UNIFIED IDEOGRAPH:'B6C6:46790:傲
CJK UNIFIED IDEOGRAPH:'B6C7:46791:傳
CJK UNIFIED IDEOGRAPH:'B6C8:46792:僅
CJK UNIFIED IDEOGRAPH:'B6C9:46793:傾
CJK UNIFIED IDEOGRAPH:'B6CA:46794:催
CJK UNIFIED IDEOGRAPH:'B6CB:46795:傷
CJK UNIFIED IDEOGRAPH:'B6CC:46796:傻
CJK UNIFIED IDEOGRAPH:'B6CD:46797:傯
CJK UNIFIED IDEOGRAPH:'B6CE:46798:僇
CJK UNIFIED IDEOGRAPH:'B6CF:46799:剿
CJK UNIFIED IDEOGRAPH:'B6D0:46800:剷
CJK UNIFIED IDEOGRAPH:'B6D1:46801:剽
CJK UNIFIED IDEOGRAPH:'B6D2:46802:募
CJK UNIFIED IDEOGRAPH:'B6D3:46803:勦
CJK UNIFIED IDEOGRAPH:'B6D4:46804:勤
CJK UNIFIED IDEOGRAPH:'B6D5:46805:勢
CJK UNIFIED IDEOGRAPH:'B6D6:46806:勣
CJK UNIFIED IDEOGRAPH:'B6D7:46807:匯
CJK UNIFIED IDEOGRAPH:'B6D8:46808:嗟
CJK UNIFIED IDEOGRAPH:'B6D9:46809:嗨
CJK UNIFIED IDEOGRAPH:'B6DA:46810:嗓
CJK UNIFIED IDEOGRAPH:'B6DB:46811:嗦
CJK UNIFIED IDEOGRAPH:'B6DC:46812:嗎
CJK UNIFIED IDEOGRAPH:'B6DD:46813:嗜
CJK UNIFIED IDEOGRAPH:'B6DE:46814:嗇
CJK UNIFIED IDEOGRAPH:'B6DF:46815:嗑
CJK UNIFIED IDEOGRAPH:'B6E0:46816:嗣
CJK UNIFIED IDEOGRAPH:'B6E1:46817:嗤
CJK UNIFIED IDEOGRAPH:'B6E2:46818:嗯
CJK UNIFIED IDEOGRAPH:'B6E3:46819:嗚
CJK UNIFIED IDEOGRAPH:'B6E4:46820:嗡
CJK UNIFIED IDEOGRAPH:'B6E5:46821:嗅
CJK UNIFIED IDEOGRAPH:'B6E6:46822:嗆
CJK UNIFIED IDEOGRAPH:'B6E7:46823:嗥
CJK UNIFIED IDEOGRAPH:'B6E8:46824:嗉
CJK UNIFIED IDEOGRAPH:'B6E9:46825:園
CJK UNIFIED IDEOGRAPH:'B6EA:46826:圓
CJK UNIFIED IDEOGRAPH:'B6EB:46827:塞
CJK UNIFIED IDEOGRAPH:'B6EC:46828:塑
CJK UNIFIED IDEOGRAPH:'B6ED:46829:塘
CJK UNIFIED IDEOGRAPH:'B6EE:46830:塗
CJK UNIFIED IDEOGRAPH:'B6EF:46831:塚
CJK UNIFIED IDEOGRAPH:'B6F0:46832:塔
CJK UNIFIED IDEOGRAPH:'B6F1:46833:填
CJK UNIFIED IDEOGRAPH:'B6F2:46834:塌
CJK UNIFIED IDEOGRAPH:'B6F3:46835:塭
CJK UNIFIED IDEOGRAPH:'B6F4:46836:塊
CJK UNIFIED IDEOGRAPH:'B6F5:46837:塢
CJK UNIFIED IDEOGRAPH:'B6F6:46838:塒
CJK UNIFIED IDEOGRAPH:'B6F7:46839:塋
CJK UNIFIED IDEOGRAPH:'B6F8:46840:奧
CJK UNIFIED IDEOGRAPH:'B6F9:46841:嫁
CJK UNIFIED IDEOGRAPH:'B6FA:46842:嫉
CJK UNIFIED IDEOGRAPH:'B6FB:46843:嫌
CJK UNIFIED IDEOGRAPH:'B6FC:46844:媾
CJK UNIFIED IDEOGRAPH:'B6FD:46845:媽
CJK UNIFIED IDEOGRAPH:'B6FE:46846:媼
CJK UNIFIED IDEOGRAPH:'B740:46912:媳
CJK UNIFIED IDEOGRAPH:'B741:46913:嫂
CJK UNIFIED IDEOGRAPH:'B742:46914:媲
CJK UNIFIED IDEOGRAPH:'B743:46915:嵩
CJK UNIFIED IDEOGRAPH:'B744:46916:嵯
CJK UNIFIED IDEOGRAPH:'B745:46917:幌
CJK UNIFIED IDEOGRAPH:'B746:46918:幹
CJK UNIFIED IDEOGRAPH:'B747:46919:廉
CJK UNIFIED IDEOGRAPH:'B748:46920:廈
CJK UNIFIED IDEOGRAPH:'B749:46921:弒
CJK UNIFIED IDEOGRAPH:'B74A:46922:彙
CJK UNIFIED IDEOGRAPH:'B74B:46923:徬
CJK UNIFIED IDEOGRAPH:'B74C:46924:微
CJK UNIFIED IDEOGRAPH:'B74D:46925:愚
CJK UNIFIED IDEOGRAPH:'B74E:46926:意
CJK UNIFIED IDEOGRAPH:'B74F:46927:慈
CJK UNIFIED IDEOGRAPH:'B750:46928:感
CJK UNIFIED IDEOGRAPH:'B751:46929:想
CJK UNIFIED IDEOGRAPH:'B752:46930:愛
CJK UNIFIED IDEOGRAPH:'B753:46931:惹
CJK UNIFIED IDEOGRAPH:'B754:46932:愁
CJK UNIFIED IDEOGRAPH:'B755:46933:愈
CJK UNIFIED IDEOGRAPH:'B756:46934:慎
CJK UNIFIED IDEOGRAPH:'B757:46935:慌
CJK UNIFIED IDEOGRAPH:'B758:46936:慄
CJK UNIFIED IDEOGRAPH:'B759:46937:慍
CJK UNIFIED IDEOGRAPH:'B75A:46938:愾
CJK UNIFIED IDEOGRAPH:'B75B:46939:愴
CJK UNIFIED IDEOGRAPH:'B75C:46940:愧
CJK UNIFIED IDEOGRAPH:'B75D:46941:愍
CJK UNIFIED IDEOGRAPH:'B75E:46942:愆
CJK UNIFIED IDEOGRAPH:'B75F:46943:愷
CJK UNIFIED IDEOGRAPH:'B760:46944:戡
CJK UNIFIED IDEOGRAPH:'B761:46945:戢
CJK UNIFIED IDEOGRAPH:'B762:46946:搓
CJK UNIFIED IDEOGRAPH:'B763:46947:搾
CJK UNIFIED IDEOGRAPH:'B764:46948:搞
CJK UNIFIED IDEOGRAPH:'B765:46949:搪
CJK UNIFIED IDEOGRAPH:'B766:46950:搭
CJK UNIFIED IDEOGRAPH:'B767:46951:搽
CJK UNIFIED IDEOGRAPH:'B768:46952:搬
CJK UNIFIED IDEOGRAPH:'B769:46953:搏
CJK UNIFIED IDEOGRAPH:'B76A:46954:搜
CJK UNIFIED IDEOGRAPH:'B76B:46955:搔
CJK UNIFIED IDEOGRAPH:'B76C:46956:損
CJK UNIFIED IDEOGRAPH:'B76D:46957:搶
CJK UNIFIED IDEOGRAPH:'B76E:46958:搖
CJK UNIFIED IDEOGRAPH:'B76F:46959:搗
CJK UNIFIED IDEOGRAPH:'B770:46960:搆
CJK UNIFIED IDEOGRAPH:'B771:46961:敬
CJK UNIFIED IDEOGRAPH:'B772:46962:斟
CJK UNIFIED IDEOGRAPH:'B773:46963:新
CJK UNIFIED IDEOGRAPH:'B774:46964:暗
CJK UNIFIED IDEOGRAPH:'B775:46965:暉
CJK UNIFIED IDEOGRAPH:'B776:46966:暇
CJK UNIFIED IDEOGRAPH:'B777:46967:暈
CJK UNIFIED IDEOGRAPH:'B778:46968:暖
CJK UNIFIED IDEOGRAPH:'B779:46969:暄
CJK UNIFIED IDEOGRAPH:'B77A:46970:暘
CJK UNIFIED IDEOGRAPH:'B77B:46971:暍
CJK UNIFIED IDEOGRAPH:'B77C:46972:會
CJK UNIFIED IDEOGRAPH:'B77D:46973:榔
CJK UNIFIED IDEOGRAPH:'B77E:46974:業
CJK UNIFIED IDEOGRAPH:'B7A1:47009:楚
CJK UNIFIED IDEOGRAPH:'B7A2:47010:楷
CJK UNIFIED IDEOGRAPH:'B7A3:47011:楠
CJK UNIFIED IDEOGRAPH:'B7A4:47012:楔
CJK UNIFIED IDEOGRAPH:'B7A5:47013:極
CJK UNIFIED IDEOGRAPH:'B7A6:47014:椰
CJK UNIFIED IDEOGRAPH:'B7A7:47015:概
CJK UNIFIED IDEOGRAPH:'B7A8:47016:楊
CJK UNIFIED IDEOGRAPH:'B7A9:47017:楨
CJK UNIFIED IDEOGRAPH:'B7AA:47018:楫
CJK UNIFIED IDEOGRAPH:'B7AB:47019:楞
CJK UNIFIED IDEOGRAPH:'B7AC:47020:楓
CJK UNIFIED IDEOGRAPH:'B7AD:47021:楹
CJK UNIFIED IDEOGRAPH:'B7AE:47022:榆
CJK UNIFIED IDEOGRAPH:'B7AF:47023:楝
CJK UNIFIED IDEOGRAPH:'B7B0:47024:楣
CJK UNIFIED IDEOGRAPH:'B7B1:47025:楛
CJK UNIFIED IDEOGRAPH:'B7B2:47026:歇
CJK UNIFIED IDEOGRAPH:'B7B3:47027:歲
CJK UNIFIED IDEOGRAPH:'B7B4:47028:毀
CJK UNIFIED IDEOGRAPH:'B7B5:47029:殿
CJK UNIFIED IDEOGRAPH:'B7B6:47030:毓
CJK UNIFIED IDEOGRAPH:'B7B7:47031:毽
CJK UNIFIED IDEOGRAPH:'B7B8:47032:溢
CJK UNIFIED IDEOGRAPH:'B7B9:47033:溯
CJK UNIFIED IDEOGRAPH:'B7BA:47034:滓
CJK UNIFIED IDEOGRAPH:'B7BB:47035:溶
CJK UNIFIED IDEOGRAPH:'B7BC:47036:滂
CJK UNIFIED IDEOGRAPH:'B7BD:47037:源
CJK UNIFIED IDEOGRAPH:'B7BE:47038:溝
CJK UNIFIED IDEOGRAPH:'B7BF:47039:滇
CJK UNIFIED IDEOGRAPH:'B7C0:47040:滅
CJK UNIFIED IDEOGRAPH:'B7C1:47041:溥
CJK UNIFIED IDEOGRAPH:'B7C2:47042:溘
CJK UNIFIED IDEOGRAPH:'B7C3:47043:溼
CJK UNIFIED IDEOGRAPH:'B7C4:47044:溺
CJK UNIFIED IDEOGRAPH:'B7C5:47045:溫
CJK UNIFIED IDEOGRAPH:'B7C6:47046:滑
CJK UNIFIED IDEOGRAPH:'B7C7:47047:準
CJK UNIFIED IDEOGRAPH:'B7C8:47048:溜
CJK UNIFIED IDEOGRAPH:'B7C9:47049:滄
CJK UNIFIED IDEOGRAPH:'B7CA:47050:滔
CJK UNIFIED IDEOGRAPH:'B7CB:47051:溪
CJK UNIFIED IDEOGRAPH:'B7CC:47052:溧
CJK UNIFIED IDEOGRAPH:'B7CD:47053:溴
CJK UNIFIED IDEOGRAPH:'B7CE:47054:煎
CJK UNIFIED IDEOGRAPH:'B7CF:47055:煙
CJK UNIFIED IDEOGRAPH:'B7D0:47056:煩
CJK UNIFIED IDEOGRAPH:'B7D1:47057:煤
CJK UNIFIED IDEOGRAPH:'B7D2:47058:煉
CJK UNIFIED IDEOGRAPH:'B7D3:47059:照
CJK UNIFIED IDEOGRAPH:'B7D4:47060:煜
CJK UNIFIED IDEOGRAPH:'B7D5:47061:煬
CJK UNIFIED IDEOGRAPH:'B7D6:47062:煦
CJK UNIFIED IDEOGRAPH:'B7D7:47063:煌
CJK UNIFIED IDEOGRAPH:'B7D8:47064:煥
CJK UNIFIED IDEOGRAPH:'B7D9:47065:煞
CJK UNIFIED IDEOGRAPH:'B7DA:47066:煆
CJK UNIFIED IDEOGRAPH:'B7DB:47067:煨
CJK UNIFIED IDEOGRAPH:'B7DC:47068:煖
CJK UNIFIED IDEOGRAPH:'B7DD:47069:爺
CJK UNIFIED IDEOGRAPH:'B7DE:47070:牒
CJK UNIFIED IDEOGRAPH:'B7DF:47071:猷
CJK UNIFIED IDEOGRAPH:'B7E0:47072:獅
CJK UNIFIED IDEOGRAPH:'B7E1:47073:猿
CJK UNIFIED IDEOGRAPH:'B7E2:47074:猾
CJK UNIFIED IDEOGRAPH:'B7E3:47075:瑯
CJK UNIFIED IDEOGRAPH:'B7E4:47076:瑚
CJK UNIFIED IDEOGRAPH:'B7E5:47077:瑕
CJK UNIFIED IDEOGRAPH:'B7E6:47078:瑟
CJK UNIFIED IDEOGRAPH:'B7E7:47079:瑞
CJK UNIFIED IDEOGRAPH:'B7E8:47080:瑁
CJK UNIFIED IDEOGRAPH:'B7E9:47081:琿
CJK UNIFIED IDEOGRAPH:'B7EA:47082:瑙
CJK UNIFIED IDEOGRAPH:'B7EB:47083:瑛
CJK UNIFIED IDEOGRAPH:'B7EC:47084:瑜
CJK UNIFIED IDEOGRAPH:'B7ED:47085:當
CJK UNIFIED IDEOGRAPH:'B7EE:47086:畸
CJK UNIFIED IDEOGRAPH:'B7EF:47087:瘀
CJK UNIFIED IDEOGRAPH:'B7F0:47088:痰
CJK UNIFIED IDEOGRAPH:'B7F1:47089:瘁
CJK UNIFIED IDEOGRAPH:'B7F2:47090:痲
CJK UNIFIED IDEOGRAPH:'B7F3:47091:痱
CJK UNIFIED IDEOGRAPH:'B7F4:47092:痺
CJK UNIFIED IDEOGRAPH:'B7F5:47093:痿
CJK UNIFIED IDEOGRAPH:'B7F6:47094:痴
CJK UNIFIED IDEOGRAPH:'B7F7:47095:痳
CJK UNIFIED IDEOGRAPH:'B7F8:47096:盞
CJK UNIFIED IDEOGRAPH:'B7F9:47097:盟
CJK UNIFIED IDEOGRAPH:'B7FA:47098:睛
CJK UNIFIED IDEOGRAPH:'B7FB:47099:睫
CJK UNIFIED IDEOGRAPH:'B7FC:47100:睦
CJK UNIFIED IDEOGRAPH:'B7FD:47101:睞
CJK UNIFIED IDEOGRAPH:'B7FE:47102:督
CJK UNIFIED IDEOGRAPH:'B840:47168:睹
CJK UNIFIED IDEOGRAPH:'B841:47169:睪
CJK UNIFIED IDEOGRAPH:'B842:47170:睬
CJK UNIFIED IDEOGRAPH:'B843:47171:睜
CJK UNIFIED IDEOGRAPH:'B844:47172:睥
CJK UNIFIED IDEOGRAPH:'B845:47173:睨
CJK UNIFIED IDEOGRAPH:'B846:47174:睢
CJK UNIFIED IDEOGRAPH:'B847:47175:矮
CJK UNIFIED IDEOGRAPH:'B848:47176:碎
CJK UNIFIED IDEOGRAPH:'B849:47177:碰
CJK UNIFIED IDEOGRAPH:'B84A:47178:碗
CJK UNIFIED IDEOGRAPH:'B84B:47179:碘
CJK UNIFIED IDEOGRAPH:'B84C:47180:碌
CJK UNIFIED IDEOGRAPH:'B84D:47181:碉
CJK UNIFIED IDEOGRAPH:'B84E:47182:硼
CJK UNIFIED IDEOGRAPH:'B84F:47183:碑
CJK UNIFIED IDEOGRAPH:'B850:47184:碓
CJK UNIFIED IDEOGRAPH:'B851:47185:硿
CJK UNIFIED IDEOGRAPH:'B852:47186:祺
CJK UNIFIED IDEOGRAPH:'B853:47187:祿
CJK UNIFIED IDEOGRAPH:'B854:47188:禁
CJK UNIFIED IDEOGRAPH:'B855:47189:萬
CJK UNIFIED IDEOGRAPH:'B856:47190:禽
CJK UNIFIED IDEOGRAPH:'B857:47191:稜
CJK UNIFIED IDEOGRAPH:'B858:47192:稚
CJK UNIFIED IDEOGRAPH:'B859:47193:稠
CJK UNIFIED IDEOGRAPH:'B85A:47194:稔
CJK UNIFIED IDEOGRAPH:'B85B:47195:稟
CJK UNIFIED IDEOGRAPH:'B85C:47196:稞
CJK UNIFIED IDEOGRAPH:'B85D:47197:窟
CJK UNIFIED IDEOGRAPH:'B85E:47198:窠
CJK UNIFIED IDEOGRAPH:'B85F:47199:筷
CJK UNIFIED IDEOGRAPH:'B860:47200:節
CJK UNIFIED IDEOGRAPH:'B861:47201:筠
CJK UNIFIED IDEOGRAPH:'B862:47202:筮
CJK UNIFIED IDEOGRAPH:'B863:47203:筧
CJK UNIFIED IDEOGRAPH:'B864:47204:粱
CJK UNIFIED IDEOGRAPH:'B865:47205:粳
CJK UNIFIED IDEOGRAPH:'B866:47206:粵
CJK UNIFIED IDEOGRAPH:'B867:47207:經
CJK UNIFIED IDEOGRAPH:'B868:47208:絹
CJK UNIFIED IDEOGRAPH:'B869:47209:綑
CJK UNIFIED IDEOGRAPH:'B86A:47210:綁
CJK UNIFIED IDEOGRAPH:'B86B:47211:綏
CJK UNIFIED IDEOGRAPH:'B86C:47212:絛
CJK UNIFIED IDEOGRAPH:'B86D:47213:置
CJK UNIFIED IDEOGRAPH:'B86E:47214:罩
CJK UNIFIED IDEOGRAPH:'B86F:47215:罪
CJK UNIFIED IDEOGRAPH:'B870:47216:署
CJK UNIFIED IDEOGRAPH:'B871:47217:義
CJK UNIFIED IDEOGRAPH:'B872:47218:羨
CJK UNIFIED IDEOGRAPH:'B873:47219:群
CJK UNIFIED IDEOGRAPH:'B874:47220:聖
CJK UNIFIED IDEOGRAPH:'B875:47221:聘
CJK UNIFIED IDEOGRAPH:'B876:47222:肆
CJK UNIFIED IDEOGRAPH:'B877:47223:肄
CJK UNIFIED IDEOGRAPH:'B878:47224:腱
CJK UNIFIED IDEOGRAPH:'B879:47225:腰
CJK UNIFIED IDEOGRAPH:'B87A:47226:腸
CJK UNIFIED IDEOGRAPH:'B87B:47227:腥
CJK UNIFIED IDEOGRAPH:'B87C:47228:腮
CJK UNIFIED IDEOGRAPH:'B87D:47229:腳
CJK UNIFIED IDEOGRAPH:'B87E:47230:腫
CJK UNIFIED IDEOGRAPH:'B8A1:47265:腹
CJK UNIFIED IDEOGRAPH:'B8A2:47266:腺
CJK UNIFIED IDEOGRAPH:'B8A3:47267:腦
CJK UNIFIED IDEOGRAPH:'B8A4:47268:舅
CJK UNIFIED IDEOGRAPH:'B8A5:47269:艇
CJK UNIFIED IDEOGRAPH:'B8A6:47270:蒂
CJK UNIFIED IDEOGRAPH:'B8A7:47271:葷
CJK UNIFIED IDEOGRAPH:'B8A8:47272:落
CJK UNIFIED IDEOGRAPH:'B8A9:47273:萱
CJK UNIFIED IDEOGRAPH:'B8AA:47274:葵
CJK UNIFIED IDEOGRAPH:'B8AB:47275:葦
CJK UNIFIED IDEOGRAPH:'B8AC:47276:葫
CJK UNIFIED IDEOGRAPH:'B8AD:47277:葉
CJK UNIFIED IDEOGRAPH:'B8AE:47278:葬
CJK UNIFIED IDEOGRAPH:'B8AF:47279:葛
CJK UNIFIED IDEOGRAPH:'B8B0:47280:萼
CJK UNIFIED IDEOGRAPH:'B8B1:47281:萵
CJK UNIFIED IDEOGRAPH:'B8B2:47282:葡
CJK UNIFIED IDEOGRAPH:'B8B3:47283:董
CJK UNIFIED IDEOGRAPH:'B8B4:47284:葩
CJK UNIFIED IDEOGRAPH:'B8B5:47285:葭
CJK UNIFIED IDEOGRAPH:'B8B6:47286:葆
CJK UNIFIED IDEOGRAPH:'B8B7:47287:虞
CJK UNIFIED IDEOGRAPH:'B8B8:47288:虜
CJK UNIFIED IDEOGRAPH:'B8B9:47289:號
CJK UNIFIED IDEOGRAPH:'B8BA:47290:蛹
CJK UNIFIED IDEOGRAPH:'B8BB:47291:蜓
CJK UNIFIED IDEOGRAPH:'B8BC:47292:蜈
CJK UNIFIED IDEOGRAPH:'B8BD:47293:蜇
CJK UNIFIED IDEOGRAPH:'B8BE:47294:蜀
CJK UNIFIED IDEOGRAPH:'B8BF:47295:蛾
CJK UNIFIED IDEOGRAPH:'B8C0:47296:蛻
CJK UNIFIED IDEOGRAPH:'B8C1:47297:蜂
CJK UNIFIED IDEOGRAPH:'B8C2:47298:蜃
CJK UNIFIED IDEOGRAPH:'B8C3:47299:蜆
CJK UNIFIED IDEOGRAPH:'B8C4:47300:蜊
CJK UNIFIED IDEOGRAPH:'B8C5:47301:衙
CJK UNIFIED IDEOGRAPH:'B8C6:47302:裟
CJK UNIFIED IDEOGRAPH:'B8C7:47303:裔
CJK UNIFIED IDEOGRAPH:'B8C8:47304:裙
CJK UNIFIED IDEOGRAPH:'B8C9:47305:補
CJK UNIFIED IDEOGRAPH:'B8CA:47306:裘
CJK UNIFIED IDEOGRAPH:'B8CB:47307:裝
CJK UNIFIED IDEOGRAPH:'B8CC:47308:裡
CJK UNIFIED IDEOGRAPH:'B8CD:47309:裊
CJK UNIFIED IDEOGRAPH:'B8CE:47310:裕
CJK UNIFIED IDEOGRAPH:'B8CF:47311:裒
CJK UNIFIED IDEOGRAPH:'B8D0:47312:覜
CJK UNIFIED IDEOGRAPH:'B8D1:47313:解
CJK UNIFIED IDEOGRAPH:'B8D2:47314:詫
CJK UNIFIED IDEOGRAPH:'B8D3:47315:該
CJK UNIFIED IDEOGRAPH:'B8D4:47316:詳
CJK UNIFIED IDEOGRAPH:'B8D5:47317:試
CJK UNIFIED IDEOGRAPH:'B8D6:47318:詩
CJK UNIFIED IDEOGRAPH:'B8D7:47319:詰
CJK UNIFIED IDEOGRAPH:'B8D8:47320:誇
CJK UNIFIED IDEOGRAPH:'B8D9:47321:詼
CJK UNIFIED IDEOGRAPH:'B8DA:47322:詣
CJK UNIFIED IDEOGRAPH:'B8DB:47323:誠
CJK UNIFIED IDEOGRAPH:'B8DC:47324:話
CJK UNIFIED IDEOGRAPH:'B8DD:47325:誅
CJK UNIFIED IDEOGRAPH:'B8DE:47326:詭
CJK UNIFIED IDEOGRAPH:'B8DF:47327:詢
CJK UNIFIED IDEOGRAPH:'B8E0:47328:詮
CJK UNIFIED IDEOGRAPH:'B8E1:47329:詬
CJK UNIFIED IDEOGRAPH:'B8E2:47330:詹
CJK UNIFIED IDEOGRAPH:'B8E3:47331:詻
CJK UNIFIED IDEOGRAPH:'B8E4:47332:訾
CJK UNIFIED IDEOGRAPH:'B8E5:47333:詨
CJK UNIFIED IDEOGRAPH:'B8E6:47334:豢
CJK UNIFIED IDEOGRAPH:'B8E7:47335:貊
CJK UNIFIED IDEOGRAPH:'B8E8:47336:貉
CJK UNIFIED IDEOGRAPH:'B8E9:47337:賊
CJK UNIFIED IDEOGRAPH:'B8EA:47338:資
CJK UNIFIED IDEOGRAPH:'B8EB:47339:賈
CJK UNIFIED IDEOGRAPH:'B8EC:47340:賄
CJK UNIFIED IDEOGRAPH:'B8ED:47341:貲
CJK UNIFIED IDEOGRAPH:'B8EE:47342:賃
CJK UNIFIED IDEOGRAPH:'B8EF:47343:賂
CJK UNIFIED IDEOGRAPH:'B8F0:47344:賅
CJK UNIFIED IDEOGRAPH:'B8F1:47345:跡
CJK UNIFIED IDEOGRAPH:'B8F2:47346:跟
CJK UNIFIED IDEOGRAPH:'B8F3:47347:跨
CJK UNIFIED IDEOGRAPH:'B8F4:47348:路
CJK UNIFIED IDEOGRAPH:'B8F5:47349:跳
CJK UNIFIED IDEOGRAPH:'B8F6:47350:跺
CJK UNIFIED IDEOGRAPH:'B8F7:47351:跪
CJK UNIFIED IDEOGRAPH:'B8F8:47352:跤
CJK UNIFIED IDEOGRAPH:'B8F9:47353:跦
CJK UNIFIED IDEOGRAPH:'B8FA:47354:躲
CJK UNIFIED IDEOGRAPH:'B8FB:47355:較
CJK UNIFIED IDEOGRAPH:'B8FC:47356:載
CJK UNIFIED IDEOGRAPH:'B8FD:47357:軾
CJK UNIFIED IDEOGRAPH:'B8FE:47358:輊
CJK UNIFIED IDEOGRAPH:'B940:47424:辟
CJK UNIFIED IDEOGRAPH:'B941:47425:農
CJK UNIFIED IDEOGRAPH:'B942:47426:運
CJK UNIFIED IDEOGRAPH:'B943:47427:遊
CJK UNIFIED IDEOGRAPH:'B944:47428:道
CJK UNIFIED IDEOGRAPH:'B945:47429:遂
CJK UNIFIED IDEOGRAPH:'B946:47430:達
CJK UNIFIED IDEOGRAPH:'B947:47431:逼
CJK UNIFIED IDEOGRAPH:'B948:47432:違
CJK UNIFIED IDEOGRAPH:'B949:47433:遐
CJK UNIFIED IDEOGRAPH:'B94A:47434:遇
CJK UNIFIED IDEOGRAPH:'B94B:47435:遏
CJK UNIFIED IDEOGRAPH:'B94C:47436:過
CJK UNIFIED IDEOGRAPH:'B94D:47437:遍
CJK UNIFIED IDEOGRAPH:'B94E:47438:遑
CJK UNIFIED IDEOGRAPH:'B94F:47439:逾
CJK UNIFIED IDEOGRAPH:'B950:47440:遁
CJK UNIFIED IDEOGRAPH:'B951:47441:鄒
CJK UNIFIED IDEOGRAPH:'B952:47442:鄗
CJK UNIFIED IDEOGRAPH:'B953:47443:酬
CJK UNIFIED IDEOGRAPH:'B954:47444:酪
CJK UNIFIED IDEOGRAPH:'B955:47445:酩
CJK UNIFIED IDEOGRAPH:'B956:47446:釉
CJK UNIFIED IDEOGRAPH:'B957:47447:鈷
CJK UNIFIED IDEOGRAPH:'B958:47448:鉗
CJK UNIFIED IDEOGRAPH:'B959:47449:鈸
CJK UNIFIED IDEOGRAPH:'B95A:47450:鈽
CJK UNIFIED IDEOGRAPH:'B95B:47451:鉀
CJK UNIFIED IDEOGRAPH:'B95C:47452:鈾
CJK UNIFIED IDEOGRAPH:'B95D:47453:鉛
CJK UNIFIED IDEOGRAPH:'B95E:47454:鉋
CJK UNIFIED IDEOGRAPH:'B95F:47455:鉤
CJK UNIFIED IDEOGRAPH:'B960:47456:鉑
CJK UNIFIED IDEOGRAPH:'B961:47457:鈴
CJK UNIFIED IDEOGRAPH:'B962:47458:鉉
CJK UNIFIED IDEOGRAPH:'B963:47459:鉍
CJK UNIFIED IDEOGRAPH:'B964:47460:鉅
CJK UNIFIED IDEOGRAPH:'B965:47461:鈹
CJK UNIFIED IDEOGRAPH:'B966:47462:鈿
CJK UNIFIED IDEOGRAPH:'B967:47463:鉚
CJK UNIFIED IDEOGRAPH:'B968:47464:閘
CJK UNIFIED IDEOGRAPH:'B969:47465:隘
CJK UNIFIED IDEOGRAPH:'B96A:47466:隔
CJK UNIFIED IDEOGRAPH:'B96B:47467:隕
CJK UNIFIED IDEOGRAPH:'B96C:47468:雍
CJK UNIFIED IDEOGRAPH:'B96D:47469:雋
CJK UNIFIED IDEOGRAPH:'B96E:47470:雉
CJK UNIFIED IDEOGRAPH:'B96F:47471:雊
CJK UNIFIED IDEOGRAPH:'B970:47472:雷
CJK UNIFIED IDEOGRAPH:'B971:47473:電
CJK UNIFIED IDEOGRAPH:'B972:47474:雹
CJK UNIFIED IDEOGRAPH:'B973:47475:零
CJK UNIFIED IDEOGRAPH:'B974:47476:靖
CJK UNIFIED IDEOGRAPH:'B975:47477:靴
CJK UNIFIED IDEOGRAPH:'B976:47478:靶
CJK UNIFIED IDEOGRAPH:'B977:47479:預
CJK UNIFIED IDEOGRAPH:'B978:47480:頑
CJK UNIFIED IDEOGRAPH:'B979:47481:頓
CJK UNIFIED IDEOGRAPH:'B97A:47482:頊
CJK UNIFIED IDEOGRAPH:'B97B:47483:頒
CJK UNIFIED IDEOGRAPH:'B97C:47484:頌
CJK UNIFIED IDEOGRAPH:'B97D:47485:飼
CJK UNIFIED IDEOGRAPH:'B97E:47486:飴
CJK UNIFIED IDEOGRAPH:'B9A1:47521:飽
CJK UNIFIED IDEOGRAPH:'B9A2:47522:飾
CJK UNIFIED IDEOGRAPH:'B9A3:47523:馳
CJK UNIFIED IDEOGRAPH:'B9A4:47524:馱
CJK UNIFIED IDEOGRAPH:'B9A5:47525:馴
CJK UNIFIED IDEOGRAPH:'B9A6:47526:髡
CJK UNIFIED IDEOGRAPH:'B9A7:47527:鳩
CJK UNIFIED IDEOGRAPH:'B9A8:47528:麂
CJK UNIFIED IDEOGRAPH:'B9A9:47529:鼎
CJK UNIFIED IDEOGRAPH:'B9AA:47530:鼓
CJK UNIFIED IDEOGRAPH:'B9AB:47531:鼠
CJK UNIFIED IDEOGRAPH:'B9AC:47532:僧
CJK UNIFIED IDEOGRAPH:'B9AD:47533:僮
CJK UNIFIED IDEOGRAPH:'B9AE:47534:僥
CJK UNIFIED IDEOGRAPH:'B9AF:47535:僖
CJK UNIFIED IDEOGRAPH:'B9B0:47536:僭
CJK UNIFIED IDEOGRAPH:'B9B1:47537:僚
CJK UNIFIED IDEOGRAPH:'B9B2:47538:僕
CJK UNIFIED IDEOGRAPH:'B9B3:47539:像
CJK UNIFIED IDEOGRAPH:'B9B4:47540:僑
CJK UNIFIED IDEOGRAPH:'B9B5:47541:僱
CJK UNIFIED IDEOGRAPH:'B9B6:47542:僎
CJK UNIFIED IDEOGRAPH:'B9B7:47543:僩
CJK UNIFIED IDEOGRAPH:'B9B8:47544:兢
CJK UNIFIED IDEOGRAPH:'B9B9:47545:凳
CJK UNIFIED IDEOGRAPH:'B9BA:47546:劃
CJK UNIFIED IDEOGRAPH:'B9BB:47547:劂
CJK UNIFIED IDEOGRAPH:'B9BC:47548:匱
CJK UNIFIED IDEOGRAPH:'B9BD:47549:厭
CJK UNIFIED IDEOGRAPH:'B9BE:47550:嗾
CJK UNIFIED IDEOGRAPH:'B9BF:47551:嘀
CJK UNIFIED IDEOGRAPH:'B9C0:47552:嘛
CJK UNIFIED IDEOGRAPH:'B9C1:47553:嘗
CJK UNIFIED IDEOGRAPH:'B9C2:47554:嗽
CJK UNIFIED IDEOGRAPH:'B9C3:47555:嘔
CJK UNIFIED IDEOGRAPH:'B9C4:47556:嘆
CJK UNIFIED IDEOGRAPH:'B9C5:47557:嘉
CJK UNIFIED IDEOGRAPH:'B9C6:47558:嘍
CJK UNIFIED IDEOGRAPH:'B9C7:47559:嘎
CJK UNIFIED IDEOGRAPH:'B9C8:47560:嗷
CJK UNIFIED IDEOGRAPH:'B9C9:47561:嘖
CJK UNIFIED IDEOGRAPH:'B9CA:47562:嘟
CJK UNIFIED IDEOGRAPH:'B9CB:47563:嘈
CJK UNIFIED IDEOGRAPH:'B9CC:47564:嘐
CJK UNIFIED IDEOGRAPH:'B9CD:47565:嗶
CJK UNIFIED IDEOGRAPH:'B9CE:47566:團
CJK UNIFIED IDEOGRAPH:'B9CF:47567:圖
CJK UNIFIED IDEOGRAPH:'B9D0:47568:塵
CJK UNIFIED IDEOGRAPH:'B9D1:47569:塾
CJK UNIFIED IDEOGRAPH:'B9D2:47570:境
CJK UNIFIED IDEOGRAPH:'B9D3:47571:墓
CJK UNIFIED IDEOGRAPH:'B9D4:47572:墊
CJK UNIFIED IDEOGRAPH:'B9D5:47573:塹
CJK UNIFIED IDEOGRAPH:'B9D6:47574:墅
CJK UNIFIED IDEOGRAPH:'B9D7:47575:塽
CJK UNIFIED IDEOGRAPH:'B9D8:47576:壽
CJK UNIFIED IDEOGRAPH:'B9D9:47577:夥
CJK UNIFIED IDEOGRAPH:'B9DA:47578:夢
CJK UNIFIED IDEOGRAPH:'B9DB:47579:夤
CJK UNIFIED IDEOGRAPH:'B9DC:47580:奪
CJK UNIFIED IDEOGRAPH:'B9DD:47581:奩
CJK UNIFIED IDEOGRAPH:'B9DE:47582:嫡
CJK UNIFIED IDEOGRAPH:'B9DF:47583:嫦
CJK UNIFIED IDEOGRAPH:'B9E0:47584:嫩
CJK UNIFIED IDEOGRAPH:'B9E1:47585:嫗
CJK UNIFIED IDEOGRAPH:'B9E2:47586:嫖
CJK UNIFIED IDEOGRAPH:'B9E3:47587:嫘
CJK UNIFIED IDEOGRAPH:'B9E4:47588:嫣
CJK UNIFIED IDEOGRAPH:'B9E5:47589:孵
CJK UNIFIED IDEOGRAPH:'B9E6:47590:寞
CJK UNIFIED IDEOGRAPH:'B9E7:47591:寧
CJK UNIFIED IDEOGRAPH:'B9E8:47592:寡
CJK UNIFIED IDEOGRAPH:'B9E9:47593:寥
CJK UNIFIED IDEOGRAPH:'B9EA:47594:實
CJK UNIFIED IDEOGRAPH:'B9EB:47595:寨
CJK UNIFIED IDEOGRAPH:'B9EC:47596:寢
CJK UNIFIED IDEOGRAPH:'B9ED:47597:寤
CJK UNIFIED IDEOGRAPH:'B9EE:47598:察
CJK UNIFIED IDEOGRAPH:'B9EF:47599:對
CJK UNIFIED IDEOGRAPH:'B9F0:47600:屢
CJK UNIFIED IDEOGRAPH:'B9F1:47601:嶄
CJK UNIFIED IDEOGRAPH:'B9F2:47602:嶇
CJK UNIFIED IDEOGRAPH:'B9F3:47603:幛
CJK UNIFIED IDEOGRAPH:'B9F4:47604:幣
CJK UNIFIED IDEOGRAPH:'B9F5:47605:幕
CJK UNIFIED IDEOGRAPH:'B9F6:47606:幗
CJK UNIFIED IDEOGRAPH:'B9F7:47607:幔
CJK UNIFIED IDEOGRAPH:'B9F8:47608:廓
CJK UNIFIED IDEOGRAPH:'B9F9:47609:廖
CJK UNIFIED IDEOGRAPH:'B9FA:47610:弊
CJK UNIFIED IDEOGRAPH:'B9FB:47611:彆
CJK UNIFIED IDEOGRAPH:'B9FC:47612:彰
CJK UNIFIED IDEOGRAPH:'B9FD:47613:徹
CJK UNIFIED IDEOGRAPH:'B9FE:47614:慇
CJK UNIFIED IDEOGRAPH:'BA40:47680:愿
CJK UNIFIED IDEOGRAPH:'BA41:47681:態
CJK UNIFIED IDEOGRAPH:'BA42:47682:慷
CJK UNIFIED IDEOGRAPH:'BA43:47683:慢
CJK UNIFIED IDEOGRAPH:'BA44:47684:慣
CJK UNIFIED IDEOGRAPH:'BA45:47685:慟
CJK UNIFIED IDEOGRAPH:'BA46:47686:慚
CJK UNIFIED IDEOGRAPH:'BA47:47687:慘
CJK UNIFIED IDEOGRAPH:'BA48:47688:慵
CJK UNIFIED IDEOGRAPH:'BA49:47689:截
CJK UNIFIED IDEOGRAPH:'BA4A:47690:撇
CJK UNIFIED IDEOGRAPH:'BA4B:47691:摘
CJK UNIFIED IDEOGRAPH:'BA4C:47692:摔
CJK UNIFIED IDEOGRAPH:'BA4D:47693:撤
CJK UNIFIED IDEOGRAPH:'BA4E:47694:摸
CJK UNIFIED IDEOGRAPH:'BA4F:47695:摟
CJK UNIFIED IDEOGRAPH:'BA50:47696:摺
CJK UNIFIED IDEOGRAPH:'BA51:47697:摑
CJK UNIFIED IDEOGRAPH:'BA52:47698:摧
CJK UNIFIED IDEOGRAPH:'BA53:47699:搴
CJK UNIFIED IDEOGRAPH:'BA54:47700:摭
CJK UNIFIED IDEOGRAPH:'BA55:47701:摻
CJK UNIFIED IDEOGRAPH:'BA56:47702:敲
CJK UNIFIED IDEOGRAPH:'BA57:47703:斡
CJK UNIFIED IDEOGRAPH:'BA58:47704:旗
CJK UNIFIED IDEOGRAPH:'BA59:47705:旖
CJK UNIFIED IDEOGRAPH:'BA5A:47706:暢
CJK UNIFIED IDEOGRAPH:'BA5B:47707:暨
CJK UNIFIED IDEOGRAPH:'BA5C:47708:暝
CJK UNIFIED IDEOGRAPH:'BA5D:47709:榜
CJK UNIFIED IDEOGRAPH:'BA5E:47710:榨
CJK UNIFIED IDEOGRAPH:'BA5F:47711:榕
CJK UNIFIED IDEOGRAPH:'BA60:47712:槁
CJK UNIFIED IDEOGRAPH:'BA61:47713:榮
CJK UNIFIED IDEOGRAPH:'BA62:47714:槓
CJK UNIFIED IDEOGRAPH:'BA63:47715:構
CJK UNIFIED IDEOGRAPH:'BA64:47716:榛
CJK UNIFIED IDEOGRAPH:'BA65:47717:榷
CJK UNIFIED IDEOGRAPH:'BA66:47718:榻
CJK UNIFIED IDEOGRAPH:'BA67:47719:榫
CJK UNIFIED IDEOGRAPH:'BA68:47720:榴
CJK UNIFIED IDEOGRAPH:'BA69:47721:槐
CJK UNIFIED IDEOGRAPH:'BA6A:47722:槍
CJK UNIFIED IDEOGRAPH:'BA6B:47723:榭
CJK UNIFIED IDEOGRAPH:'BA6C:47724:槌
CJK UNIFIED IDEOGRAPH:'BA6D:47725:榦
CJK UNIFIED IDEOGRAPH:'BA6E:47726:槃
CJK UNIFIED IDEOGRAPH:'BA6F:47727:榣
CJK UNIFIED IDEOGRAPH:'BA70:47728:歉
CJK UNIFIED IDEOGRAPH:'BA71:47729:歌
CJK UNIFIED IDEOGRAPH:'BA72:47730:氳
CJK UNIFIED IDEOGRAPH:'BA73:47731:漳
CJK UNIFIED IDEOGRAPH:'BA74:47732:演
CJK UNIFIED IDEOGRAPH:'BA75:47733:滾
CJK UNIFIED IDEOGRAPH:'BA76:47734:漓
CJK UNIFIED IDEOGRAPH:'BA77:47735:滴
CJK UNIFIED IDEOGRAPH:'BA78:47736:漩
CJK UNIFIED IDEOGRAPH:'BA79:47737:漾
CJK UNIFIED IDEOGRAPH:'BA7A:47738:漠
CJK UNIFIED IDEOGRAPH:'BA7B:47739:漬
CJK UNIFIED IDEOGRAPH:'BA7C:47740:漏
CJK UNIFIED IDEOGRAPH:'BA7D:47741:漂
CJK UNIFIED IDEOGRAPH:'BA7E:47742:漢
CJK UNIFIED IDEOGRAPH:'BAA1:47777:滿
CJK UNIFIED IDEOGRAPH:'BAA2:47778:滯
CJK UNIFIED IDEOGRAPH:'BAA3:47779:漆
CJK UNIFIED IDEOGRAPH:'BAA4:47780:漱
CJK UNIFIED IDEOGRAPH:'BAA5:47781:漸
CJK UNIFIED IDEOGRAPH:'BAA6:47782:漲
CJK UNIFIED IDEOGRAPH:'BAA7:47783:漣
CJK UNIFIED IDEOGRAPH:'BAA8:47784:漕
CJK UNIFIED IDEOGRAPH:'BAA9:47785:漫
CJK UNIFIED IDEOGRAPH:'BAAA:47786:漯
CJK UNIFIED IDEOGRAPH:'BAAB:47787:澈
CJK UNIFIED IDEOGRAPH:'BAAC:47788:漪
CJK UNIFIED IDEOGRAPH:'BAAD:47789:滬
CJK UNIFIED IDEOGRAPH:'BAAE:47790:漁
CJK UNIFIED IDEOGRAPH:'BAAF:47791:滲
CJK UNIFIED IDEOGRAPH:'BAB0:47792:滌
CJK UNIFIED IDEOGRAPH:'BAB1:47793:滷
CJK UNIFIED IDEOGRAPH:'BAB2:47794:熔
CJK UNIFIED IDEOGRAPH:'BAB3:47795:熙
CJK UNIFIED IDEOGRAPH:'BAB4:47796:煽
CJK UNIFIED IDEOGRAPH:'BAB5:47797:熊
CJK UNIFIED IDEOGRAPH:'BAB6:47798:熄
CJK UNIFIED IDEOGRAPH:'BAB7:47799:熒
CJK UNIFIED IDEOGRAPH:'BAB8:47800:爾
CJK UNIFIED IDEOGRAPH:'BAB9:47801:犒
CJK UNIFIED IDEOGRAPH:'BABA:47802:犖
CJK UNIFIED IDEOGRAPH:'BABB:47803:獄
CJK UNIFIED IDEOGRAPH:'BABC:47804:獐
CJK UNIFIED IDEOGRAPH:'BABD:47805:瑤
CJK UNIFIED IDEOGRAPH:'BABE:47806:瑣
CJK UNIFIED IDEOGRAPH:'BABF:47807:瑪
CJK UNIFIED IDEOGRAPH:'BAC0:47808:瑰
CJK UNIFIED IDEOGRAPH:'BAC1:47809:瑭
CJK UNIFIED IDEOGRAPH:'BAC2:47810:甄
CJK UNIFIED IDEOGRAPH:'BAC3:47811:疑
CJK UNIFIED IDEOGRAPH:'BAC4:47812:瘧
CJK UNIFIED IDEOGRAPH:'BAC5:47813:瘍
CJK UNIFIED IDEOGRAPH:'BAC6:47814:瘋
CJK UNIFIED IDEOGRAPH:'BAC7:47815:瘉
CJK UNIFIED IDEOGRAPH:'BAC8:47816:瘓
CJK UNIFIED IDEOGRAPH:'BAC9:47817:盡
CJK UNIFIED IDEOGRAPH:'BACA:47818:監
CJK UNIFIED IDEOGRAPH:'BACB:47819:瞄
CJK UNIFIED IDEOGRAPH:'BACC:47820:睽
CJK UNIFIED IDEOGRAPH:'BACD:47821:睿
CJK UNIFIED IDEOGRAPH:'BACE:47822:睡
CJK UNIFIED IDEOGRAPH:'BACF:47823:磁
CJK UNIFIED IDEOGRAPH:'BAD0:47824:碟
CJK UNIFIED IDEOGRAPH:'BAD1:47825:碧
CJK UNIFIED IDEOGRAPH:'BAD2:47826:碳
CJK UNIFIED IDEOGRAPH:'BAD3:47827:碩
CJK UNIFIED IDEOGRAPH:'BAD4:47828:碣
CJK UNIFIED IDEOGRAPH:'BAD5:47829:禎
CJK UNIFIED IDEOGRAPH:'BAD6:47830:福
CJK UNIFIED IDEOGRAPH:'BAD7:47831:禍
CJK UNIFIED IDEOGRAPH:'BAD8:47832:種
CJK UNIFIED IDEOGRAPH:'BAD9:47833:稱
CJK UNIFIED IDEOGRAPH:'BADA:47834:窪
CJK UNIFIED IDEOGRAPH:'BADB:47835:窩
CJK UNIFIED IDEOGRAPH:'BADC:47836:竭
CJK UNIFIED IDEOGRAPH:'BADD:47837:端
CJK UNIFIED IDEOGRAPH:'BADE:47838:管
CJK UNIFIED IDEOGRAPH:'BADF:47839:箕
CJK UNIFIED IDEOGRAPH:'BAE0:47840:箋
CJK UNIFIED IDEOGRAPH:'BAE1:47841:筵
CJK UNIFIED IDEOGRAPH:'BAE2:47842:算
CJK UNIFIED IDEOGRAPH:'BAE3:47843:箝
CJK UNIFIED IDEOGRAPH:'BAE4:47844:箔
CJK UNIFIED IDEOGRAPH:'BAE5:47845:箏
CJK UNIFIED IDEOGRAPH:'BAE6:47846:箸
CJK UNIFIED IDEOGRAPH:'BAE7:47847:箇
CJK UNIFIED IDEOGRAPH:'BAE8:47848:箄
CJK UNIFIED IDEOGRAPH:'BAE9:47849:粹
CJK UNIFIED IDEOGRAPH:'BAEA:47850:粽
CJK UNIFIED IDEOGRAPH:'BAEB:47851:精
CJK UNIFIED IDEOGRAPH:'BAEC:47852:綻
CJK UNIFIED IDEOGRAPH:'BAED:47853:綰
CJK UNIFIED IDEOGRAPH:'BAEE:47854:綜
CJK UNIFIED IDEOGRAPH:'BAEF:47855:綽
CJK UNIFIED IDEOGRAPH:'BAF0:47856:綾
CJK UNIFIED IDEOGRAPH:'BAF1:47857:綠
CJK UNIFIED IDEOGRAPH:'BAF2:47858:緊
CJK UNIFIED IDEOGRAPH:'BAF3:47859:綴
CJK UNIFIED IDEOGRAPH:'BAF4:47860:網
CJK UNIFIED IDEOGRAPH:'BAF5:47861:綱
CJK UNIFIED IDEOGRAPH:'BAF6:47862:綺
CJK UNIFIED IDEOGRAPH:'BAF7:47863:綢
CJK UNIFIED IDEOGRAPH:'BAF8:47864:綿
CJK UNIFIED IDEOGRAPH:'BAF9:47865:綵
CJK UNIFIED IDEOGRAPH:'BAFA:47866:綸
CJK UNIFIED IDEOGRAPH:'BAFB:47867:維
CJK UNIFIED IDEOGRAPH:'BAFC:47868:緒
CJK UNIFIED IDEOGRAPH:'BAFD:47869:緇
CJK UNIFIED IDEOGRAPH:'BAFE:47870:綬
CJK UNIFIED IDEOGRAPH:'BB40:47936:罰
CJK UNIFIED IDEOGRAPH:'BB41:47937:翠
CJK UNIFIED IDEOGRAPH:'BB42:47938:翡
CJK UNIFIED IDEOGRAPH:'BB43:47939:翟
CJK UNIFIED IDEOGRAPH:'BB44:47940:聞
CJK UNIFIED IDEOGRAPH:'BB45:47941:聚
CJK UNIFIED IDEOGRAPH:'BB46:47942:肇
CJK UNIFIED IDEOGRAPH:'BB47:47943:腐
CJK UNIFIED IDEOGRAPH:'BB48:47944:膀
CJK UNIFIED IDEOGRAPH:'BB49:47945:膏
CJK UNIFIED IDEOGRAPH:'BB4A:47946:膈
CJK UNIFIED IDEOGRAPH:'BB4B:47947:膊
CJK UNIFIED IDEOGRAPH:'BB4C:47948:腿
CJK UNIFIED IDEOGRAPH:'BB4D:47949:膂
CJK UNIFIED IDEOGRAPH:'BB4E:47950:臧
CJK UNIFIED IDEOGRAPH:'BB4F:47951:臺
CJK UNIFIED IDEOGRAPH:'BB50:47952:與
CJK UNIFIED IDEOGRAPH:'BB51:47953:舔
CJK UNIFIED IDEOGRAPH:'BB52:47954:舞
CJK UNIFIED IDEOGRAPH:'BB53:47955:艋
CJK UNIFIED IDEOGRAPH:'BB54:47956:蓉
CJK UNIFIED IDEOGRAPH:'BB55:47957:蒿
CJK UNIFIED IDEOGRAPH:'BB56:47958:蓆
CJK UNIFIED IDEOGRAPH:'BB57:47959:蓄
CJK UNIFIED IDEOGRAPH:'BB58:47960:蒙
CJK UNIFIED IDEOGRAPH:'BB59:47961:蒞
CJK UNIFIED IDEOGRAPH:'BB5A:47962:蒲
CJK UNIFIED IDEOGRAPH:'BB5B:47963:蒜
CJK UNIFIED IDEOGRAPH:'BB5C:47964:蓋
CJK UNIFIED IDEOGRAPH:'BB5D:47965:蒸
CJK UNIFIED IDEOGRAPH:'BB5E:47966:蓀
CJK UNIFIED IDEOGRAPH:'BB5F:47967:蓓
CJK UNIFIED IDEOGRAPH:'BB60:47968:蒐
CJK UNIFIED IDEOGRAPH:'BB61:47969:蒼
CJK UNIFIED IDEOGRAPH:'BB62:47970:蓑
CJK UNIFIED IDEOGRAPH:'BB63:47971:蓊
CJK UNIFIED IDEOGRAPH:'BB64:47972:蜿
CJK UNIFIED IDEOGRAPH:'BB65:47973:蜜
CJK UNIFIED IDEOGRAPH:'BB66:47974:蜻
CJK UNIFIED IDEOGRAPH:'BB67:47975:蜢
CJK UNIFIED IDEOGRAPH:'BB68:47976:蜥
CJK UNIFIED IDEOGRAPH:'BB69:47977:蜴
CJK UNIFIED IDEOGRAPH:'BB6A:47978:蜘
CJK UNIFIED IDEOGRAPH:'BB6B:47979:蝕
CJK UNIFIED IDEOGRAPH:'BB6C:47980:蜷
CJK UNIFIED IDEOGRAPH:'BB6D:47981:蜩
CJK UNIFIED IDEOGRAPH:'BB6E:47982:裳
CJK UNIFIED IDEOGRAPH:'BB6F:47983:褂
CJK UNIFIED IDEOGRAPH:'BB70:47984:裴
CJK UNIFIED IDEOGRAPH:'BB71:47985:裹
CJK UNIFIED IDEOGRAPH:'BB72:47986:裸
CJK UNIFIED IDEOGRAPH:'BB73:47987:製
CJK UNIFIED IDEOGRAPH:'BB74:47988:裨
CJK UNIFIED IDEOGRAPH:'BB75:47989:褚
CJK UNIFIED IDEOGRAPH:'BB76:47990:裯
CJK UNIFIED IDEOGRAPH:'BB77:47991:誦
CJK UNIFIED IDEOGRAPH:'BB78:47992:誌
CJK UNIFIED IDEOGRAPH:'BB79:47993:語
CJK UNIFIED IDEOGRAPH:'BB7A:47994:誣
CJK UNIFIED IDEOGRAPH:'BB7B:47995:認
CJK UNIFIED IDEOGRAPH:'BB7C:47996:誡
CJK UNIFIED IDEOGRAPH:'BB7D:47997:誓
CJK UNIFIED IDEOGRAPH:'BB7E:47998:誤
CJK UNIFIED IDEOGRAPH:'BBA1:48033:說
CJK UNIFIED IDEOGRAPH:'BBA2:48034:誥
CJK UNIFIED IDEOGRAPH:'BBA3:48035:誨
CJK UNIFIED IDEOGRAPH:'BBA4:48036:誘
CJK UNIFIED IDEOGRAPH:'BBA5:48037:誑
CJK UNIFIED IDEOGRAPH:'BBA6:48038:誚
CJK UNIFIED IDEOGRAPH:'BBA7:48039:誧
CJK UNIFIED IDEOGRAPH:'BBA8:48040:豪
CJK UNIFIED IDEOGRAPH:'BBA9:48041:貍
CJK UNIFIED IDEOGRAPH:'BBAA:48042:貌
CJK UNIFIED IDEOGRAPH:'BBAB:48043:賓
CJK UNIFIED IDEOGRAPH:'BBAC:48044:賑
CJK UNIFIED IDEOGRAPH:'BBAD:48045:賒
CJK UNIFIED IDEOGRAPH:'BBAE:48046:赫
CJK UNIFIED IDEOGRAPH:'BBAF:48047:趙
CJK UNIFIED IDEOGRAPH:'BBB0:48048:趕
CJK UNIFIED IDEOGRAPH:'BBB1:48049:跼
CJK UNIFIED IDEOGRAPH:'BBB2:48050:輔
CJK UNIFIED IDEOGRAPH:'BBB3:48051:輒
CJK UNIFIED IDEOGRAPH:'BBB4:48052:輕
CJK UNIFIED IDEOGRAPH:'BBB5:48053:輓
CJK UNIFIED IDEOGRAPH:'BBB6:48054:辣
CJK UNIFIED IDEOGRAPH:'BBB7:48055:遠
CJK UNIFIED IDEOGRAPH:'BBB8:48056:遘
CJK UNIFIED IDEOGRAPH:'BBB9:48057:遜
CJK UNIFIED IDEOGRAPH:'BBBA:48058:遣
CJK UNIFIED IDEOGRAPH:'BBBB:48059:遙
CJK UNIFIED IDEOGRAPH:'BBBC:48060:遞
CJK UNIFIED IDEOGRAPH:'BBBD:48061:遢
CJK UNIFIED IDEOGRAPH:'BBBE:48062:遝
CJK UNIFIED IDEOGRAPH:'BBBF:48063:遛
CJK UNIFIED IDEOGRAPH:'BBC0:48064:鄙
CJK UNIFIED IDEOGRAPH:'BBC1:48065:鄘
CJK UNIFIED IDEOGRAPH:'BBC2:48066:鄞
CJK UNIFIED IDEOGRAPH:'BBC3:48067:酵
CJK UNIFIED IDEOGRAPH:'BBC4:48068:酸
CJK UNIFIED IDEOGRAPH:'BBC5:48069:酷
CJK UNIFIED IDEOGRAPH:'BBC6:48070:酴
CJK UNIFIED IDEOGRAPH:'BBC7:48071:鉸
CJK UNIFIED IDEOGRAPH:'BBC8:48072:銀
CJK UNIFIED IDEOGRAPH:'BBC9:48073:銅
CJK UNIFIED IDEOGRAPH:'BBCA:48074:銘
CJK UNIFIED IDEOGRAPH:'BBCB:48075:銖
CJK UNIFIED IDEOGRAPH:'BBCC:48076:鉻
CJK UNIFIED IDEOGRAPH:'BBCD:48077:銓
CJK UNIFIED IDEOGRAPH:'BBCE:48078:銜
CJK UNIFIED IDEOGRAPH:'BBCF:48079:銨
CJK UNIFIED IDEOGRAPH:'BBD0:48080:鉼
CJK UNIFIED IDEOGRAPH:'BBD1:48081:銑
CJK UNIFIED IDEOGRAPH:'BBD2:48082:閡
CJK UNIFIED IDEOGRAPH:'BBD3:48083:閨
CJK UNIFIED IDEOGRAPH:'BBD4:48084:閩
CJK UNIFIED IDEOGRAPH:'BBD5:48085:閣
CJK UNIFIED IDEOGRAPH:'BBD6:48086:閥
CJK UNIFIED IDEOGRAPH:'BBD7:48087:閤
CJK UNIFIED IDEOGRAPH:'BBD8:48088:隙
CJK UNIFIED IDEOGRAPH:'BBD9:48089:障
CJK UNIFIED IDEOGRAPH:'BBDA:48090:際
CJK UNIFIED IDEOGRAPH:'BBDB:48091:雌
CJK UNIFIED IDEOGRAPH:'BBDC:48092:雒
CJK UNIFIED IDEOGRAPH:'BBDD:48093:需
CJK UNIFIED IDEOGRAPH:'BBDE:48094:靼
CJK UNIFIED IDEOGRAPH:'BBDF:48095:鞅
CJK UNIFIED IDEOGRAPH:'BBE0:48096:韶
CJK UNIFIED IDEOGRAPH:'BBE1:48097:頗
CJK UNIFIED IDEOGRAPH:'BBE2:48098:領
CJK UNIFIED IDEOGRAPH:'BBE3:48099:颯
CJK UNIFIED IDEOGRAPH:'BBE4:48100:颱
CJK UNIFIED IDEOGRAPH:'BBE5:48101:餃
CJK UNIFIED IDEOGRAPH:'BBE6:48102:餅
CJK UNIFIED IDEOGRAPH:'BBE7:48103:餌
CJK UNIFIED IDEOGRAPH:'BBE8:48104:餉
CJK UNIFIED IDEOGRAPH:'BBE9:48105:駁
CJK UNIFIED IDEOGRAPH:'BBEA:48106:骯
CJK UNIFIED IDEOGRAPH:'BBEB:48107:骰
CJK UNIFIED IDEOGRAPH:'BBEC:48108:髦
CJK UNIFIED IDEOGRAPH:'BBED:48109:魁
CJK UNIFIED IDEOGRAPH:'BBEE:48110:魂
CJK UNIFIED IDEOGRAPH:'BBEF:48111:鳴
CJK UNIFIED IDEOGRAPH:'BBF0:48112:鳶
CJK UNIFIED IDEOGRAPH:'BBF1:48113:鳳
CJK UNIFIED IDEOGRAPH:'BBF2:48114:麼
CJK UNIFIED IDEOGRAPH:'BBF3:48115:鼻
CJK UNIFIED IDEOGRAPH:'BBF4:48116:齊
CJK UNIFIED IDEOGRAPH:'BBF5:48117:億
CJK UNIFIED IDEOGRAPH:'BBF6:48118:儀
CJK UNIFIED IDEOGRAPH:'BBF7:48119:僻
CJK UNIFIED IDEOGRAPH:'BBF8:48120:僵
CJK UNIFIED IDEOGRAPH:'BBF9:48121:價
CJK UNIFIED IDEOGRAPH:'BBFA:48122:儂
CJK UNIFIED IDEOGRAPH:'BBFB:48123:儈
CJK UNIFIED IDEOGRAPH:'BBFC:48124:儉
CJK UNIFIED IDEOGRAPH:'BBFD:48125:儅
CJK UNIFIED IDEOGRAPH:'BBFE:48126:凜
CJK UNIFIED IDEOGRAPH:'BC40:48192:劇
CJK UNIFIED IDEOGRAPH:'BC41:48193:劈
CJK UNIFIED IDEOGRAPH:'BC42:48194:劉
CJK UNIFIED IDEOGRAPH:'BC43:48195:劍
CJK UNIFIED IDEOGRAPH:'BC44:48196:劊
CJK UNIFIED IDEOGRAPH:'BC45:48197:勰
CJK UNIFIED IDEOGRAPH:'BC46:48198:厲
CJK UNIFIED IDEOGRAPH:'BC47:48199:嘮
CJK UNIFIED IDEOGRAPH:'BC48:48200:嘻
CJK UNIFIED IDEOGRAPH:'BC49:48201:嘹
CJK UNIFIED IDEOGRAPH:'BC4A:48202:嘲
CJK UNIFIED IDEOGRAPH:'BC4B:48203:嘿
CJK UNIFIED IDEOGRAPH:'BC4C:48204:嘴
CJK UNIFIED IDEOGRAPH:'BC4D:48205:嘩
CJK UNIFIED IDEOGRAPH:'BC4E:48206:噓
CJK UNIFIED IDEOGRAPH:'BC4F:48207:噎
CJK UNIFIED IDEOGRAPH:'BC50:48208:噗
CJK UNIFIED IDEOGRAPH:'BC51:48209:噴
CJK UNIFIED IDEOGRAPH:'BC52:48210:嘶
CJK UNIFIED IDEOGRAPH:'BC53:48211:嘯
CJK UNIFIED IDEOGRAPH:'BC54:48212:嘰
CJK UNIFIED IDEOGRAPH:'BC55:48213:墀
CJK UNIFIED IDEOGRAPH:'BC56:48214:墟
CJK UNIFIED IDEOGRAPH:'BC57:48215:增
CJK UNIFIED IDEOGRAPH:'BC58:48216:墳
CJK UNIFIED IDEOGRAPH:'BC59:48217:墜
CJK UNIFIED IDEOGRAPH:'BC5A:48218:墮
CJK UNIFIED IDEOGRAPH:'BC5B:48219:墩
CJK UNIFIED IDEOGRAPH:'BC5C:48220:墦
CJK UNIFIED IDEOGRAPH:'BC5D:48221:奭
CJK UNIFIED IDEOGRAPH:'BC5E:48222:嬉
CJK UNIFIED IDEOGRAPH:'BC5F:48223:嫻
CJK UNIFIED IDEOGRAPH:'BC60:48224:嬋
CJK UNIFIED IDEOGRAPH:'BC61:48225:嫵
CJK UNIFIED IDEOGRAPH:'BC62:48226:嬌
CJK UNIFIED IDEOGRAPH:'BC63:48227:嬈
CJK UNIFIED IDEOGRAPH:'BC64:48228:寮
CJK UNIFIED IDEOGRAPH:'BC65:48229:寬
CJK UNIFIED IDEOGRAPH:'BC66:48230:審
CJK UNIFIED IDEOGRAPH:'BC67:48231:寫
CJK UNIFIED IDEOGRAPH:'BC68:48232:層
CJK UNIFIED IDEOGRAPH:'BC69:48233:履
CJK UNIFIED IDEOGRAPH:'BC6A:48234:嶝
CJK UNIFIED IDEOGRAPH:'BC6B:48235:嶔
CJK UNIFIED IDEOGRAPH:'BC6C:48236:幢
CJK UNIFIED IDEOGRAPH:'BC6D:48237:幟
CJK UNIFIED IDEOGRAPH:'BC6E:48238:幡
CJK UNIFIED IDEOGRAPH:'BC6F:48239:廢
CJK UNIFIED IDEOGRAPH:'BC70:48240:廚
CJK UNIFIED IDEOGRAPH:'BC71:48241:廟
CJK UNIFIED IDEOGRAPH:'BC72:48242:廝
CJK UNIFIED IDEOGRAPH:'BC73:48243:廣
CJK UNIFIED IDEOGRAPH:'BC74:48244:廠
CJK UNIFIED IDEOGRAPH:'BC75:48245:彈
CJK UNIFIED IDEOGRAPH:'BC76:48246:影
CJK UNIFIED IDEOGRAPH:'BC77:48247:德
CJK UNIFIED IDEOGRAPH:'BC78:48248:徵
CJK UNIFIED IDEOGRAPH:'BC79:48249:慶
CJK UNIFIED IDEOGRAPH:'BC7A:48250:慧
CJK UNIFIED IDEOGRAPH:'BC7B:48251:慮
CJK UNIFIED IDEOGRAPH:'BC7C:48252:慝
CJK UNIFIED IDEOGRAPH:'BC7D:48253:慕
CJK UNIFIED IDEOGRAPH:'BC7E:48254:憂
CJK UNIFIED IDEOGRAPH:'BCA1:48289:慼
CJK UNIFIED IDEOGRAPH:'BCA2:48290:慰
CJK UNIFIED IDEOGRAPH:'BCA3:48291:慫
CJK UNIFIED IDEOGRAPH:'BCA4:48292:慾
CJK UNIFIED IDEOGRAPH:'BCA5:48293:憧
CJK UNIFIED IDEOGRAPH:'BCA6:48294:憐
CJK UNIFIED IDEOGRAPH:'BCA7:48295:憫
CJK UNIFIED IDEOGRAPH:'BCA8:48296:憎
CJK UNIFIED IDEOGRAPH:'BCA9:48297:憬
CJK UNIFIED IDEOGRAPH:'BCAA:48298:憚
CJK UNIFIED IDEOGRAPH:'BCAB:48299:憤
CJK UNIFIED IDEOGRAPH:'BCAC:48300:憔
CJK UNIFIED IDEOGRAPH:'BCAD:48301:憮
CJK UNIFIED IDEOGRAPH:'BCAE:48302:戮
CJK UNIFIED IDEOGRAPH:'BCAF:48303:摩
CJK UNIFIED IDEOGRAPH:'BCB0:48304:摯
CJK UNIFIED IDEOGRAPH:'BCB1:48305:摹
CJK UNIFIED IDEOGRAPH:'BCB2:48306:撞
CJK UNIFIED IDEOGRAPH:'BCB3:48307:撲
CJK UNIFIED IDEOGRAPH:'BCB4:48308:撈
CJK UNIFIED IDEOGRAPH:'BCB5:48309:撐
CJK UNIFIED IDEOGRAPH:'BCB6:48310:撰
CJK UNIFIED IDEOGRAPH:'BCB7:48311:撥
CJK UNIFIED IDEOGRAPH:'BCB8:48312:撓
CJK UNIFIED IDEOGRAPH:'BCB9:48313:撕
CJK UNIFIED IDEOGRAPH:'BCBA:48314:撩
CJK UNIFIED IDEOGRAPH:'BCBB:48315:撒
CJK UNIFIED IDEOGRAPH:'BCBC:48316:撮
CJK UNIFIED IDEOGRAPH:'BCBD:48317:播
CJK UNIFIED IDEOGRAPH:'BCBE:48318:撫
CJK UNIFIED IDEOGRAPH:'BCBF:48319:撚
CJK UNIFIED IDEOGRAPH:'BCC0:48320:撬
CJK UNIFIED IDEOGRAPH:'BCC1:48321:撙
CJK UNIFIED IDEOGRAPH:'BCC2:48322:撢
CJK UNIFIED IDEOGRAPH:'BCC3:48323:撳
CJK UNIFIED IDEOGRAPH:'BCC4:48324:敵
CJK UNIFIED IDEOGRAPH:'BCC5:48325:敷
CJK UNIFIED IDEOGRAPH:'BCC6:48326:數
CJK UNIFIED IDEOGRAPH:'BCC7:48327:暮
CJK UNIFIED IDEOGRAPH:'BCC8:48328:暫
CJK UNIFIED IDEOGRAPH:'BCC9:48329:暴
CJK UNIFIED IDEOGRAPH:'BCCA:48330:暱
CJK UNIFIED IDEOGRAPH:'BCCB:48331:樣
CJK UNIFIED IDEOGRAPH:'BCCC:48332:樟
CJK UNIFIED IDEOGRAPH:'BCCD:48333:槨
CJK UNIFIED IDEOGRAPH:'BCCE:48334:樁
CJK UNIFIED IDEOGRAPH:'BCCF:48335:樞
CJK UNIFIED IDEOGRAPH:'BCD0:48336:標
CJK UNIFIED IDEOGRAPH:'BCD1:48337:槽
CJK UNIFIED IDEOGRAPH:'BCD2:48338:模
CJK UNIFIED IDEOGRAPH:'BCD3:48339:樓
CJK UNIFIED IDEOGRAPH:'BCD4:48340:樊
CJK UNIFIED IDEOGRAPH:'BCD5:48341:槳
CJK UNIFIED IDEOGRAPH:'BCD6:48342:樂
CJK UNIFIED IDEOGRAPH:'BCD7:48343:樅
CJK UNIFIED IDEOGRAPH:'BCD8:48344:槭
CJK UNIFIED IDEOGRAPH:'BCD9:48345:樑
CJK UNIFIED IDEOGRAPH:'BCDA:48346:歐
CJK UNIFIED IDEOGRAPH:'BCDB:48347:歎
CJK UNIFIED IDEOGRAPH:'BCDC:48348:殤
CJK UNIFIED IDEOGRAPH:'BCDD:48349:毅
CJK UNIFIED IDEOGRAPH:'BCDE:48350:毆
CJK UNIFIED IDEOGRAPH:'BCDF:48351:漿
CJK UNIFIED IDEOGRAPH:'BCE0:48352:潼
CJK UNIFIED IDEOGRAPH:'BCE1:48353:澄
CJK UNIFIED IDEOGRAPH:'BCE2:48354:潑
CJK UNIFIED IDEOGRAPH:'BCE3:48355:潦
CJK UNIFIED IDEOGRAPH:'BCE4:48356:潔
CJK UNIFIED IDEOGRAPH:'BCE5:48357:澆
CJK UNIFIED IDEOGRAPH:'BCE6:48358:潭
CJK UNIFIED IDEOGRAPH:'BCE7:48359:潛
CJK UNIFIED IDEOGRAPH:'BCE8:48360:潸
CJK UNIFIED IDEOGRAPH:'BCE9:48361:潮
CJK UNIFIED IDEOGRAPH:'BCEA:48362:澎
CJK UNIFIED IDEOGRAPH:'BCEB:48363:潺
CJK UNIFIED IDEOGRAPH:'BCEC:48364:潰
CJK UNIFIED IDEOGRAPH:'BCED:48365:潤
CJK UNIFIED IDEOGRAPH:'BCEE:48366:澗
CJK UNIFIED IDEOGRAPH:'BCEF:48367:潘
CJK UNIFIED IDEOGRAPH:'BCF0:48368:滕
CJK UNIFIED IDEOGRAPH:'BCF1:48369:潯
CJK UNIFIED IDEOGRAPH:'BCF2:48370:潠
CJK UNIFIED IDEOGRAPH:'BCF3:48371:潟
CJK UNIFIED IDEOGRAPH:'BCF4:48372:熟
CJK UNIFIED IDEOGRAPH:'BCF5:48373:熬
CJK UNIFIED IDEOGRAPH:'BCF6:48374:熱
CJK UNIFIED IDEOGRAPH:'BCF7:48375:熨
CJK UNIFIED IDEOGRAPH:'BCF8:48376:牖
CJK UNIFIED IDEOGRAPH:'BCF9:48377:犛
CJK UNIFIED IDEOGRAPH:'BCFA:48378:獎
CJK UNIFIED IDEOGRAPH:'BCFB:48379:獗
CJK UNIFIED IDEOGRAPH:'BCFC:48380:瑩
CJK UNIFIED IDEOGRAPH:'BCFD:48381:璋
CJK UNIFIED IDEOGRAPH:'BCFE:48382:璃
CJK UNIFIED IDEOGRAPH:'BD40:48448:瑾
CJK UNIFIED IDEOGRAPH:'BD41:48449:璀
CJK UNIFIED IDEOGRAPH:'BD42:48450:畿
CJK UNIFIED IDEOGRAPH:'BD43:48451:瘠
CJK UNIFIED IDEOGRAPH:'BD44:48452:瘩
CJK UNIFIED IDEOGRAPH:'BD45:48453:瘟
CJK UNIFIED IDEOGRAPH:'BD46:48454:瘤
CJK UNIFIED IDEOGRAPH:'BD47:48455:瘦
CJK UNIFIED IDEOGRAPH:'BD48:48456:瘡
CJK UNIFIED IDEOGRAPH:'BD49:48457:瘢
CJK UNIFIED IDEOGRAPH:'BD4A:48458:皚
CJK UNIFIED IDEOGRAPH:'BD4B:48459:皺
CJK UNIFIED IDEOGRAPH:'BD4C:48460:盤
CJK UNIFIED IDEOGRAPH:'BD4D:48461:瞎
CJK UNIFIED IDEOGRAPH:'BD4E:48462:瞇
CJK UNIFIED IDEOGRAPH:'BD4F:48463:瞌
CJK UNIFIED IDEOGRAPH:'BD50:48464:瞑
CJK UNIFIED IDEOGRAPH:'BD51:48465:瞋
CJK UNIFIED IDEOGRAPH:'BD52:48466:磋
CJK UNIFIED IDEOGRAPH:'BD53:48467:磅
CJK UNIFIED IDEOGRAPH:'BD54:48468:確
CJK UNIFIED IDEOGRAPH:'BD55:48469:磊
CJK UNIFIED IDEOGRAPH:'BD56:48470:碾
CJK UNIFIED IDEOGRAPH:'BD57:48471:磕
CJK UNIFIED IDEOGRAPH:'BD58:48472:碼
CJK UNIFIED IDEOGRAPH:'BD59:48473:磐
CJK UNIFIED IDEOGRAPH:'BD5A:48474:稿
CJK UNIFIED IDEOGRAPH:'BD5B:48475:稼
CJK UNIFIED IDEOGRAPH:'BD5C:48476:穀
CJK UNIFIED IDEOGRAPH:'BD5D:48477:稽
CJK UNIFIED IDEOGRAPH:'BD5E:48478:稷
CJK UNIFIED IDEOGRAPH:'BD5F:48479:稻
CJK UNIFIED IDEOGRAPH:'BD60:48480:窯
CJK UNIFIED IDEOGRAPH:'BD61:48481:窮
CJK UNIFIED IDEOGRAPH:'BD62:48482:箭
CJK UNIFIED IDEOGRAPH:'BD63:48483:箱
CJK UNIFIED IDEOGRAPH:'BD64:48484:範
CJK UNIFIED IDEOGRAPH:'BD65:48485:箴
CJK UNIFIED IDEOGRAPH:'BD66:48486:篆
CJK UNIFIED IDEOGRAPH:'BD67:48487:篇
CJK UNIFIED IDEOGRAPH:'BD68:48488:篁
CJK UNIFIED IDEOGRAPH:'BD69:48489:箠
CJK UNIFIED IDEOGRAPH:'BD6A:48490:篌
CJK UNIFIED IDEOGRAPH:'BD6B:48491:糊
CJK UNIFIED IDEOGRAPH:'BD6C:48492:締
CJK UNIFIED IDEOGRAPH:'BD6D:48493:練
CJK UNIFIED IDEOGRAPH:'BD6E:48494:緯
CJK UNIFIED IDEOGRAPH:'BD6F:48495:緻
CJK UNIFIED IDEOGRAPH:'BD70:48496:緘
CJK UNIFIED IDEOGRAPH:'BD71:48497:緬
CJK UNIFIED IDEOGRAPH:'BD72:48498:緝
CJK UNIFIED IDEOGRAPH:'BD73:48499:編
CJK UNIFIED IDEOGRAPH:'BD74:48500:緣
CJK UNIFIED IDEOGRAPH:'BD75:48501:線
CJK UNIFIED IDEOGRAPH:'BD76:48502:緞
CJK UNIFIED IDEOGRAPH:'BD77:48503:緩
CJK UNIFIED IDEOGRAPH:'BD78:48504:綞
CJK UNIFIED IDEOGRAPH:'BD79:48505:緙
CJK UNIFIED IDEOGRAPH:'BD7A:48506:緲
CJK UNIFIED IDEOGRAPH:'BD7B:48507:緹
CJK UNIFIED IDEOGRAPH:'BD7C:48508:罵
CJK UNIFIED IDEOGRAPH:'BD7D:48509:罷
CJK UNIFIED IDEOGRAPH:'BD7E:48510:羯
CJK UNIFIED IDEOGRAPH:'BDA1:48545:翩
CJK UNIFIED IDEOGRAPH:'BDA2:48546:耦
CJK UNIFIED IDEOGRAPH:'BDA3:48547:膛
CJK UNIFIED IDEOGRAPH:'BDA4:48548:膜
CJK UNIFIED IDEOGRAPH:'BDA5:48549:膝
CJK UNIFIED IDEOGRAPH:'BDA6:48550:膠
CJK UNIFIED IDEOGRAPH:'BDA7:48551:膚
CJK UNIFIED IDEOGRAPH:'BDA8:48552:膘
CJK UNIFIED IDEOGRAPH:'BDA9:48553:蔗
CJK UNIFIED IDEOGRAPH:'BDAA:48554:蔽
CJK UNIFIED IDEOGRAPH:'BDAB:48555:蔚
CJK UNIFIED IDEOGRAPH:'BDAC:48556:蓮
CJK UNIFIED IDEOGRAPH:'BDAD:48557:蔬
CJK UNIFIED IDEOGRAPH:'BDAE:48558:蔭
CJK UNIFIED IDEOGRAPH:'BDAF:48559:蔓
CJK UNIFIED IDEOGRAPH:'BDB0:48560:蔑
CJK UNIFIED IDEOGRAPH:'BDB1:48561:蔣
CJK UNIFIED IDEOGRAPH:'BDB2:48562:蔡
CJK UNIFIED IDEOGRAPH:'BDB3:48563:蔔
CJK UNIFIED IDEOGRAPH:'BDB4:48564:蓬
CJK UNIFIED IDEOGRAPH:'BDB5:48565:蔥
CJK UNIFIED IDEOGRAPH:'BDB6:48566:蓿
CJK UNIFIED IDEOGRAPH:'BDB7:48567:蔆
CJK UNIFIED IDEOGRAPH:'BDB8:48568:螂
CJK UNIFIED IDEOGRAPH:'BDB9:48569:蝴
CJK UNIFIED IDEOGRAPH:'BDBA:48570:蝶
CJK UNIFIED IDEOGRAPH:'BDBB:48571:蝠
CJK UNIFIED IDEOGRAPH:'BDBC:48572:蝦
CJK UNIFIED IDEOGRAPH:'BDBD:48573:蝸
CJK UNIFIED IDEOGRAPH:'BDBE:48574:蝨
CJK UNIFIED IDEOGRAPH:'BDBF:48575:蝙
CJK UNIFIED IDEOGRAPH:'BDC0:48576:蝗
CJK UNIFIED IDEOGRAPH:'BDC1:48577:蝌
CJK UNIFIED IDEOGRAPH:'BDC2:48578:蝓
CJK UNIFIED IDEOGRAPH:'BDC3:48579:衛
CJK UNIFIED IDEOGRAPH:'BDC4:48580:衝
CJK UNIFIED IDEOGRAPH:'BDC5:48581:褐
CJK UNIFIED IDEOGRAPH:'BDC6:48582:複
CJK UNIFIED IDEOGRAPH:'BDC7:48583:褒
CJK UNIFIED IDEOGRAPH:'BDC8:48584:褓
CJK UNIFIED IDEOGRAPH:'BDC9:48585:褕
CJK UNIFIED IDEOGRAPH:'BDCA:48586:褊
CJK UNIFIED IDEOGRAPH:'BDCB:48587:誼
CJK UNIFIED IDEOGRAPH:'BDCC:48588:諒
CJK UNIFIED IDEOGRAPH:'BDCD:48589:談
CJK UNIFIED IDEOGRAPH:'BDCE:48590:諄
CJK UNIFIED IDEOGRAPH:'BDCF:48591:誕
CJK UNIFIED IDEOGRAPH:'BDD0:48592:請
CJK UNIFIED IDEOGRAPH:'BDD1:48593:諸
CJK UNIFIED IDEOGRAPH:'BDD2:48594:課
CJK UNIFIED IDEOGRAPH:'BDD3:48595:諉
CJK UNIFIED IDEOGRAPH:'BDD4:48596:諂
CJK UNIFIED IDEOGRAPH:'BDD5:48597:調
CJK UNIFIED IDEOGRAPH:'BDD6:48598:誰
CJK UNIFIED IDEOGRAPH:'BDD7:48599:論
CJK UNIFIED IDEOGRAPH:'BDD8:48600:諍
CJK UNIFIED IDEOGRAPH:'BDD9:48601:誶
CJK UNIFIED IDEOGRAPH:'BDDA:48602:誹
CJK UNIFIED IDEOGRAPH:'BDDB:48603:諛
CJK UNIFIED IDEOGRAPH:'BDDC:48604:豌
CJK UNIFIED IDEOGRAPH:'BDDD:48605:豎
CJK UNIFIED IDEOGRAPH:'BDDE:48606:豬
CJK UNIFIED IDEOGRAPH:'BDDF:48607:賠
CJK UNIFIED IDEOGRAPH:'BDE0:48608:賞
CJK UNIFIED IDEOGRAPH:'BDE1:48609:賦
CJK UNIFIED IDEOGRAPH:'BDE2:48610:賤
CJK UNIFIED IDEOGRAPH:'BDE3:48611:賬
CJK UNIFIED IDEOGRAPH:'BDE4:48612:賭
CJK UNIFIED IDEOGRAPH:'BDE5:48613:賢
CJK UNIFIED IDEOGRAPH:'BDE6:48614:賣
CJK UNIFIED IDEOGRAPH:'BDE7:48615:賜
CJK UNIFIED IDEOGRAPH:'BDE8:48616:質
CJK UNIFIED IDEOGRAPH:'BDE9:48617:賡
CJK UNIFIED IDEOGRAPH:'BDEA:48618:赭
CJK UNIFIED IDEOGRAPH:'BDEB:48619:趟
CJK UNIFIED IDEOGRAPH:'BDEC:48620:趣
CJK UNIFIED IDEOGRAPH:'BDED:48621:踫
CJK UNIFIED IDEOGRAPH:'BDEE:48622:踐
CJK UNIFIED IDEOGRAPH:'BDEF:48623:踝
CJK UNIFIED IDEOGRAPH:'BDF0:48624:踢
CJK UNIFIED IDEOGRAPH:'BDF1:48625:踏
CJK UNIFIED IDEOGRAPH:'BDF2:48626:踩
CJK UNIFIED IDEOGRAPH:'BDF3:48627:踟
CJK UNIFIED IDEOGRAPH:'BDF4:48628:踡
CJK UNIFIED IDEOGRAPH:'BDF5:48629:踞
CJK UNIFIED IDEOGRAPH:'BDF6:48630:躺
CJK UNIFIED IDEOGRAPH:'BDF7:48631:輝
CJK UNIFIED IDEOGRAPH:'BDF8:48632:輛
CJK UNIFIED IDEOGRAPH:'BDF9:48633:輟
CJK UNIFIED IDEOGRAPH:'BDFA:48634:輩
CJK UNIFIED IDEOGRAPH:'BDFB:48635:輦
CJK UNIFIED IDEOGRAPH:'BDFC:48636:輪
CJK UNIFIED IDEOGRAPH:'BDFD:48637:輜
CJK UNIFIED IDEOGRAPH:'BDFE:48638:輞
CJK UNIFIED IDEOGRAPH:'BE40:48704:輥
CJK UNIFIED IDEOGRAPH:'BE41:48705:適
CJK UNIFIED IDEOGRAPH:'BE42:48706:遮
CJK UNIFIED IDEOGRAPH:'BE43:48707:遨
CJK UNIFIED IDEOGRAPH:'BE44:48708:遭
CJK UNIFIED IDEOGRAPH:'BE45:48709:遷
CJK UNIFIED IDEOGRAPH:'BE46:48710:鄰
CJK UNIFIED IDEOGRAPH:'BE47:48711:鄭
CJK UNIFIED IDEOGRAPH:'BE48:48712:鄧
CJK UNIFIED IDEOGRAPH:'BE49:48713:鄱
CJK UNIFIED IDEOGRAPH:'BE4A:48714:醇
CJK UNIFIED IDEOGRAPH:'BE4B:48715:醉
CJK UNIFIED IDEOGRAPH:'BE4C:48716:醋
CJK UNIFIED IDEOGRAPH:'BE4D:48717:醃
CJK UNIFIED IDEOGRAPH:'BE4E:48718:鋅
CJK UNIFIED IDEOGRAPH:'BE4F:48719:銻
CJK UNIFIED IDEOGRAPH:'BE50:48720:銷
CJK UNIFIED IDEOGRAPH:'BE51:48721:鋪
CJK UNIFIED IDEOGRAPH:'BE52:48722:銬
CJK UNIFIED IDEOGRAPH:'BE53:48723:鋤
CJK UNIFIED IDEOGRAPH:'BE54:48724:鋁
CJK UNIFIED IDEOGRAPH:'BE55:48725:銳
CJK UNIFIED IDEOGRAPH:'BE56:48726:銼
CJK UNIFIED IDEOGRAPH:'BE57:48727:鋒
CJK UNIFIED IDEOGRAPH:'BE58:48728:鋇
CJK UNIFIED IDEOGRAPH:'BE59:48729:鋰
CJK UNIFIED IDEOGRAPH:'BE5A:48730:銲
CJK UNIFIED IDEOGRAPH:'BE5B:48731:閭
CJK UNIFIED IDEOGRAPH:'BE5C:48732:閱
CJK UNIFIED IDEOGRAPH:'BE5D:48733:霄
CJK UNIFIED IDEOGRAPH:'BE5E:48734:霆
CJK UNIFIED IDEOGRAPH:'BE5F:48735:震
CJK UNIFIED IDEOGRAPH:'BE60:48736:霉
CJK UNIFIED IDEOGRAPH:'BE61:48737:靠
CJK UNIFIED IDEOGRAPH:'BE62:48738:鞍
CJK UNIFIED IDEOGRAPH:'BE63:48739:鞋
CJK UNIFIED IDEOGRAPH:'BE64:48740:鞏
CJK UNIFIED IDEOGRAPH:'BE65:48741:頡
CJK UNIFIED IDEOGRAPH:'BE66:48742:頫
CJK UNIFIED IDEOGRAPH:'BE67:48743:頜
CJK UNIFIED IDEOGRAPH:'BE68:48744:颳
CJK UNIFIED IDEOGRAPH:'BE69:48745:養
CJK UNIFIED IDEOGRAPH:'BE6A:48746:餓
CJK UNIFIED IDEOGRAPH:'BE6B:48747:餒
CJK UNIFIED IDEOGRAPH:'BE6C:48748:餘
CJK UNIFIED IDEOGRAPH:'BE6D:48749:駝
CJK UNIFIED IDEOGRAPH:'BE6E:48750:駐
CJK UNIFIED IDEOGRAPH:'BE6F:48751:駟
CJK UNIFIED IDEOGRAPH:'BE70:48752:駛
CJK UNIFIED IDEOGRAPH:'BE71:48753:駑
CJK UNIFIED IDEOGRAPH:'BE72:48754:駕
CJK UNIFIED IDEOGRAPH:'BE73:48755:駒
CJK UNIFIED IDEOGRAPH:'BE74:48756:駙
CJK UNIFIED IDEOGRAPH:'BE75:48757:骷
CJK UNIFIED IDEOGRAPH:'BE76:48758:髮
CJK UNIFIED IDEOGRAPH:'BE77:48759:髯
CJK UNIFIED IDEOGRAPH:'BE78:48760:鬧
CJK UNIFIED IDEOGRAPH:'BE79:48761:魅
CJK UNIFIED IDEOGRAPH:'BE7A:48762:魄
CJK UNIFIED IDEOGRAPH:'BE7B:48763:魷
CJK UNIFIED IDEOGRAPH:'BE7C:48764:魯
CJK UNIFIED IDEOGRAPH:'BE7D:48765:鴆
CJK UNIFIED IDEOGRAPH:'BE7E:48766:鴉
CJK UNIFIED IDEOGRAPH:'BEA1:48801:鴃
CJK UNIFIED IDEOGRAPH:'BEA2:48802:麩
CJK UNIFIED IDEOGRAPH:'BEA3:48803:麾
CJK UNIFIED IDEOGRAPH:'BEA4:48804:黎
CJK UNIFIED IDEOGRAPH:'BEA5:48805:墨
CJK UNIFIED IDEOGRAPH:'BEA6:48806:齒
CJK UNIFIED IDEOGRAPH:'BEA7:48807:儒
CJK UNIFIED IDEOGRAPH:'BEA8:48808:儘
CJK UNIFIED IDEOGRAPH:'BEA9:48809:儔
CJK UNIFIED IDEOGRAPH:'BEAA:48810:儐
CJK UNIFIED IDEOGRAPH:'BEAB:48811:儕
CJK UNIFIED IDEOGRAPH:'BEAC:48812:冀
CJK UNIFIED IDEOGRAPH:'BEAD:48813:冪
CJK UNIFIED IDEOGRAPH:'BEAE:48814:凝
CJK UNIFIED IDEOGRAPH:'BEAF:48815:劑
CJK UNIFIED IDEOGRAPH:'BEB0:48816:劓
CJK UNIFIED IDEOGRAPH:'BEB1:48817:勳
CJK UNIFIED IDEOGRAPH:'BEB2:48818:噙
CJK UNIFIED IDEOGRAPH:'BEB3:48819:噫
CJK UNIFIED IDEOGRAPH:'BEB4:48820:噹
CJK UNIFIED IDEOGRAPH:'BEB5:48821:噩
CJK UNIFIED IDEOGRAPH:'BEB6:48822:噤
CJK UNIFIED IDEOGRAPH:'BEB7:48823:噸
CJK UNIFIED IDEOGRAPH:'BEB8:48824:噪
CJK UNIFIED IDEOGRAPH:'BEB9:48825:器
CJK UNIFIED IDEOGRAPH:'BEBA:48826:噥
CJK UNIFIED IDEOGRAPH:'BEBB:48827:噱
CJK UNIFIED IDEOGRAPH:'BEBC:48828:噯
CJK UNIFIED IDEOGRAPH:'BEBD:48829:噬
CJK UNIFIED IDEOGRAPH:'BEBE:48830:噢
CJK UNIFIED IDEOGRAPH:'BEBF:48831:噶
CJK UNIFIED IDEOGRAPH:'BEC0:48832:壁
CJK UNIFIED IDEOGRAPH:'BEC1:48833:墾
CJK UNIFIED IDEOGRAPH:'BEC2:48834:壇
CJK UNIFIED IDEOGRAPH:'BEC3:48835:壅
CJK UNIFIED IDEOGRAPH:'BEC4:48836:奮
CJK UNIFIED IDEOGRAPH:'BEC5:48837:嬝
CJK UNIFIED IDEOGRAPH:'BEC6:48838:嬴
CJK UNIFIED IDEOGRAPH:'BEC7:48839:學
CJK UNIFIED IDEOGRAPH:'BEC8:48840:寰
CJK UNIFIED IDEOGRAPH:'BEC9:48841:導
CJK UNIFIED IDEOGRAPH:'BECA:48842:彊
CJK UNIFIED IDEOGRAPH:'BECB:48843:憲
CJK UNIFIED IDEOGRAPH:'BECC:48844:憑
CJK UNIFIED IDEOGRAPH:'BECD:48845:憩
CJK UNIFIED IDEOGRAPH:'BECE:48846:憊
CJK UNIFIED IDEOGRAPH:'BECF:48847:懍
CJK UNIFIED IDEOGRAPH:'BED0:48848:憶
CJK UNIFIED IDEOGRAPH:'BED1:48849:憾
CJK UNIFIED IDEOGRAPH:'BED2:48850:懊
CJK UNIFIED IDEOGRAPH:'BED3:48851:懈
CJK UNIFIED IDEOGRAPH:'BED4:48852:戰
CJK UNIFIED IDEOGRAPH:'BED5:48853:擅
CJK UNIFIED IDEOGRAPH:'BED6:48854:擁
CJK UNIFIED IDEOGRAPH:'BED7:48855:擋
CJK UNIFIED IDEOGRAPH:'BED8:48856:撻
CJK UNIFIED IDEOGRAPH:'BED9:48857:撼
CJK UNIFIED IDEOGRAPH:'BEDA:48858:據
CJK UNIFIED IDEOGRAPH:'BEDB:48859:擄
CJK UNIFIED IDEOGRAPH:'BEDC:48860:擇
CJK UNIFIED IDEOGRAPH:'BEDD:48861:擂
CJK UNIFIED IDEOGRAPH:'BEDE:48862:操
CJK UNIFIED IDEOGRAPH:'BEDF:48863:撿
CJK UNIFIED IDEOGRAPH:'BEE0:48864:擒
CJK UNIFIED IDEOGRAPH:'BEE1:48865:擔
CJK UNIFIED IDEOGRAPH:'BEE2:48866:撾
CJK UNIFIED IDEOGRAPH:'BEE3:48867:整
CJK UNIFIED IDEOGRAPH:'BEE4:48868:曆
CJK UNIFIED IDEOGRAPH:'BEE5:48869:曉
CJK UNIFIED IDEOGRAPH:'BEE6:48870:暹
CJK UNIFIED IDEOGRAPH:'BEE7:48871:曄
CJK UNIFIED IDEOGRAPH:'BEE8:48872:曇
CJK UNIFIED IDEOGRAPH:'BEE9:48873:暸
CJK UNIFIED IDEOGRAPH:'BEEA:48874:樽
CJK UNIFIED IDEOGRAPH:'BEEB:48875:樸
CJK UNIFIED IDEOGRAPH:'BEEC:48876:樺
CJK UNIFIED IDEOGRAPH:'BEED:48877:橙
CJK UNIFIED IDEOGRAPH:'BEEE:48878:橫
CJK UNIFIED IDEOGRAPH:'BEEF:48879:橘
CJK UNIFIED IDEOGRAPH:'BEF0:48880:樹
CJK UNIFIED IDEOGRAPH:'BEF1:48881:橄
CJK UNIFIED IDEOGRAPH:'BEF2:48882:橢
CJK UNIFIED IDEOGRAPH:'BEF3:48883:橡
CJK UNIFIED IDEOGRAPH:'BEF4:48884:橋
CJK UNIFIED IDEOGRAPH:'BEF5:48885:橇
CJK UNIFIED IDEOGRAPH:'BEF6:48886:樵
CJK UNIFIED IDEOGRAPH:'BEF7:48887:機
CJK UNIFIED IDEOGRAPH:'BEF8:48888:橈
CJK UNIFIED IDEOGRAPH:'BEF9:48889:歙
CJK UNIFIED IDEOGRAPH:'BEFA:48890:歷
CJK UNIFIED IDEOGRAPH:'BEFB:48891:氅
CJK UNIFIED IDEOGRAPH:'BEFC:48892:濂
CJK UNIFIED IDEOGRAPH:'BEFD:48893:澱
CJK UNIFIED IDEOGRAPH:'BEFE:48894:澡
CJK UNIFIED IDEOGRAPH:'BF40:48960:濃
CJK UNIFIED IDEOGRAPH:'BF41:48961:澤
CJK UNIFIED IDEOGRAPH:'BF42:48962:濁
CJK UNIFIED IDEOGRAPH:'BF43:48963:澧
CJK UNIFIED IDEOGRAPH:'BF44:48964:澳
CJK UNIFIED IDEOGRAPH:'BF45:48965:激
CJK UNIFIED IDEOGRAPH:'BF46:48966:澹
CJK UNIFIED IDEOGRAPH:'BF47:48967:澶
CJK UNIFIED IDEOGRAPH:'BF48:48968:澦
CJK UNIFIED IDEOGRAPH:'BF49:48969:澠
CJK UNIFIED IDEOGRAPH:'BF4A:48970:澴
CJK UNIFIED IDEOGRAPH:'BF4B:48971:熾
CJK UNIFIED IDEOGRAPH:'BF4C:48972:燉
CJK UNIFIED IDEOGRAPH:'BF4D:48973:燐
CJK UNIFIED IDEOGRAPH:'BF4E:48974:燒
CJK UNIFIED IDEOGRAPH:'BF4F:48975:燈
CJK UNIFIED IDEOGRAPH:'BF50:48976:燕
CJK UNIFIED IDEOGRAPH:'BF51:48977:熹
CJK UNIFIED IDEOGRAPH:'BF52:48978:燎
CJK UNIFIED IDEOGRAPH:'BF53:48979:燙
CJK UNIFIED IDEOGRAPH:'BF54:48980:燜
CJK UNIFIED IDEOGRAPH:'BF55:48981:燃
CJK UNIFIED IDEOGRAPH:'BF56:48982:燄
CJK UNIFIED IDEOGRAPH:'BF57:48983:獨
CJK UNIFIED IDEOGRAPH:'BF58:48984:璜
CJK UNIFIED IDEOGRAPH:'BF59:48985:璣
CJK UNIFIED IDEOGRAPH:'BF5A:48986:璘
CJK UNIFIED IDEOGRAPH:'BF5B:48987:璟
CJK UNIFIED IDEOGRAPH:'BF5C:48988:璞
CJK UNIFIED IDEOGRAPH:'BF5D:48989:瓢
CJK UNIFIED IDEOGRAPH:'BF5E:48990:甌
CJK UNIFIED IDEOGRAPH:'BF5F:48991:甍
CJK UNIFIED IDEOGRAPH:'BF60:48992:瘴
CJK UNIFIED IDEOGRAPH:'BF61:48993:瘸
CJK UNIFIED IDEOGRAPH:'BF62:48994:瘺
CJK UNIFIED IDEOGRAPH:'BF63:48995:盧
CJK UNIFIED IDEOGRAPH:'BF64:48996:盥
CJK UNIFIED IDEOGRAPH:'BF65:48997:瞠
CJK UNIFIED IDEOGRAPH:'BF66:48998:瞞
CJK UNIFIED IDEOGRAPH:'BF67:48999:瞟
CJK UNIFIED IDEOGRAPH:'BF68:49000:瞥
CJK UNIFIED IDEOGRAPH:'BF69:49001:磨
CJK UNIFIED IDEOGRAPH:'BF6A:49002:磚
CJK UNIFIED IDEOGRAPH:'BF6B:49003:磬
CJK UNIFIED IDEOGRAPH:'BF6C:49004:磧
CJK UNIFIED IDEOGRAPH:'BF6D:49005:禦
CJK UNIFIED IDEOGRAPH:'BF6E:49006:積
CJK UNIFIED IDEOGRAPH:'BF6F:49007:穎
CJK UNIFIED IDEOGRAPH:'BF70:49008:穆
CJK UNIFIED IDEOGRAPH:'BF71:49009:穌
CJK UNIFIED IDEOGRAPH:'BF72:49010:穋
CJK UNIFIED IDEOGRAPH:'BF73:49011:窺
CJK UNIFIED IDEOGRAPH:'BF74:49012:篙
CJK UNIFIED IDEOGRAPH:'BF75:49013:簑
CJK UNIFIED IDEOGRAPH:'BF76:49014:築
CJK UNIFIED IDEOGRAPH:'BF77:49015:篤
CJK UNIFIED IDEOGRAPH:'BF78:49016:篛
CJK UNIFIED IDEOGRAPH:'BF79:49017:篡
CJK UNIFIED IDEOGRAPH:'BF7A:49018:篩
CJK UNIFIED IDEOGRAPH:'BF7B:49019:篦
CJK UNIFIED IDEOGRAPH:'BF7C:49020:糕
CJK UNIFIED IDEOGRAPH:'BF7D:49021:糖
CJK UNIFIED IDEOGRAPH:'BF7E:49022:縊
CJK UNIFIED IDEOGRAPH:'BFA1:49057:縑
CJK UNIFIED IDEOGRAPH:'BFA2:49058:縈
CJK UNIFIED IDEOGRAPH:'BFA3:49059:縛
CJK UNIFIED IDEOGRAPH:'BFA4:49060:縣
CJK UNIFIED IDEOGRAPH:'BFA5:49061:縞
CJK UNIFIED IDEOGRAPH:'BFA6:49062:縝
CJK UNIFIED IDEOGRAPH:'BFA7:49063:縉
CJK UNIFIED IDEOGRAPH:'BFA8:49064:縐
CJK UNIFIED IDEOGRAPH:'BFA9:49065:罹
CJK UNIFIED IDEOGRAPH:'BFAA:49066:羲
CJK UNIFIED IDEOGRAPH:'BFAB:49067:翰
CJK UNIFIED IDEOGRAPH:'BFAC:49068:翱
CJK UNIFIED IDEOGRAPH:'BFAD:49069:翮
CJK UNIFIED IDEOGRAPH:'BFAE:49070:耨
CJK UNIFIED IDEOGRAPH:'BFAF:49071:膳
CJK UNIFIED IDEOGRAPH:'BFB0:49072:膩
CJK UNIFIED IDEOGRAPH:'BFB1:49073:膨
CJK UNIFIED IDEOGRAPH:'BFB2:49074:臻
CJK UNIFIED IDEOGRAPH:'BFB3:49075:興
CJK UNIFIED IDEOGRAPH:'BFB4:49076:艘
CJK UNIFIED IDEOGRAPH:'BFB5:49077:艙
CJK UNIFIED IDEOGRAPH:'BFB6:49078:蕊
CJK UNIFIED IDEOGRAPH:'BFB7:49079:蕙
CJK UNIFIED IDEOGRAPH:'BFB8:49080:蕈
CJK UNIFIED IDEOGRAPH:'BFB9:49081:蕨
CJK UNIFIED IDEOGRAPH:'BFBA:49082:蕩
CJK UNIFIED IDEOGRAPH:'BFBB:49083:蕃
CJK UNIFIED IDEOGRAPH:'BFBC:49084:蕉
CJK UNIFIED IDEOGRAPH:'BFBD:49085:蕭
CJK UNIFIED IDEOGRAPH:'BFBE:49086:蕪
CJK UNIFIED IDEOGRAPH:'BFBF:49087:蕞
CJK UNIFIED IDEOGRAPH:'BFC0:49088:螃
CJK UNIFIED IDEOGRAPH:'BFC1:49089:螟
CJK UNIFIED IDEOGRAPH:'BFC2:49090:螞
CJK UNIFIED IDEOGRAPH:'BFC3:49091:螢
CJK UNIFIED IDEOGRAPH:'BFC4:49092:融
CJK UNIFIED IDEOGRAPH:'BFC5:49093:衡
CJK UNIFIED IDEOGRAPH:'BFC6:49094:褪
CJK UNIFIED IDEOGRAPH:'BFC7:49095:褲
CJK UNIFIED IDEOGRAPH:'BFC8:49096:褥
CJK UNIFIED IDEOGRAPH:'BFC9:49097:褫
CJK UNIFIED IDEOGRAPH:'BFCA:49098:褡
CJK UNIFIED IDEOGRAPH:'BFCB:49099:親
CJK UNIFIED IDEOGRAPH:'BFCC:49100:覦
CJK UNIFIED IDEOGRAPH:'BFCD:49101:諦
CJK UNIFIED IDEOGRAPH:'BFCE:49102:諺
CJK UNIFIED IDEOGRAPH:'BFCF:49103:諫
CJK UNIFIED IDEOGRAPH:'BFD0:49104:諱
CJK UNIFIED IDEOGRAPH:'BFD1:49105:謀
CJK UNIFIED IDEOGRAPH:'BFD2:49106:諜
CJK UNIFIED IDEOGRAPH:'BFD3:49107:諧
CJK UNIFIED IDEOGRAPH:'BFD4:49108:諮
CJK UNIFIED IDEOGRAPH:'BFD5:49109:諾
CJK UNIFIED IDEOGRAPH:'BFD6:49110:謁
CJK UNIFIED IDEOGRAPH:'BFD7:49111:謂
CJK UNIFIED IDEOGRAPH:'BFD8:49112:諷
CJK UNIFIED IDEOGRAPH:'BFD9:49113:諭
CJK UNIFIED IDEOGRAPH:'BFDA:49114:諳
CJK UNIFIED IDEOGRAPH:'BFDB:49115:諶
CJK UNIFIED IDEOGRAPH:'BFDC:49116:諼
CJK UNIFIED IDEOGRAPH:'BFDD:49117:豫
CJK UNIFIED IDEOGRAPH:'BFDE:49118:豭
CJK UNIFIED IDEOGRAPH:'BFDF:49119:貓
CJK UNIFIED IDEOGRAPH:'BFE0:49120:賴
CJK UNIFIED IDEOGRAPH:'BFE1:49121:蹄
CJK UNIFIED IDEOGRAPH:'BFE2:49122:踱
CJK UNIFIED IDEOGRAPH:'BFE3:49123:踴
CJK UNIFIED IDEOGRAPH:'BFE4:49124:蹂
CJK UNIFIED IDEOGRAPH:'BFE5:49125:踹
CJK UNIFIED IDEOGRAPH:'BFE6:49126:踵
CJK UNIFIED IDEOGRAPH:'BFE7:49127:輻
CJK UNIFIED IDEOGRAPH:'BFE8:49128:輯
CJK UNIFIED IDEOGRAPH:'BFE9:49129:輸
CJK UNIFIED IDEOGRAPH:'BFEA:49130:輳
CJK UNIFIED IDEOGRAPH:'BFEB:49131:辨
CJK UNIFIED IDEOGRAPH:'BFEC:49132:辦
CJK UNIFIED IDEOGRAPH:'BFED:49133:遵
CJK UNIFIED IDEOGRAPH:'BFEE:49134:遴
CJK UNIFIED IDEOGRAPH:'BFEF:49135:選
CJK UNIFIED IDEOGRAPH:'BFF0:49136:遲
CJK UNIFIED IDEOGRAPH:'BFF1:49137:遼
CJK UNIFIED IDEOGRAPH:'BFF2:49138:遺
CJK UNIFIED IDEOGRAPH:'BFF3:49139:鄴
CJK UNIFIED IDEOGRAPH:'BFF4:49140:醒
CJK UNIFIED IDEOGRAPH:'BFF5:49141:錠
CJK UNIFIED IDEOGRAPH:'BFF6:49142:錶
CJK UNIFIED IDEOGRAPH:'BFF7:49143:鋸
CJK UNIFIED IDEOGRAPH:'BFF8:49144:錳
CJK UNIFIED IDEOGRAPH:'BFF9:49145:錯
CJK UNIFIED IDEOGRAPH:'BFFA:49146:錢
CJK UNIFIED IDEOGRAPH:'BFFB:49147:鋼
CJK UNIFIED IDEOGRAPH:'BFFC:49148:錫
CJK UNIFIED IDEOGRAPH:'BFFD:49149:錄
CJK UNIFIED IDEOGRAPH:'BFFE:49150:錚
CJK UNIFIED IDEOGRAPH:'C040:49216:錐
CJK UNIFIED IDEOGRAPH:'C041:49217:錦
CJK UNIFIED IDEOGRAPH:'C042:49218:錡
CJK UNIFIED IDEOGRAPH:'C043:49219:錕
CJK UNIFIED IDEOGRAPH:'C044:49220:錮
CJK UNIFIED IDEOGRAPH:'C045:49221:錙
CJK UNIFIED IDEOGRAPH:'C046:49222:閻
CJK UNIFIED IDEOGRAPH:'C047:49223:隧
CJK UNIFIED IDEOGRAPH:'C048:49224:隨
CJK UNIFIED IDEOGRAPH:'C049:49225:險
CJK UNIFIED IDEOGRAPH:'C04A:49226:雕
CJK UNIFIED IDEOGRAPH:'C04B:49227:霎
CJK UNIFIED IDEOGRAPH:'C04C:49228:霑
CJK UNIFIED IDEOGRAPH:'C04D:49229:霖
CJK UNIFIED IDEOGRAPH:'C04E:49230:霍
CJK UNIFIED IDEOGRAPH:'C04F:49231:霓
CJK UNIFIED IDEOGRAPH:'C050:49232:霏
CJK UNIFIED IDEOGRAPH:'C051:49233:靛
CJK UNIFIED IDEOGRAPH:'C052:49234:靜
CJK UNIFIED IDEOGRAPH:'C053:49235:靦
CJK UNIFIED IDEOGRAPH:'C054:49236:鞘
CJK UNIFIED IDEOGRAPH:'C055:49237:頰
CJK UNIFIED IDEOGRAPH:'C056:49238:頸
CJK UNIFIED IDEOGRAPH:'C057:49239:頻
CJK UNIFIED IDEOGRAPH:'C058:49240:頷
CJK UNIFIED IDEOGRAPH:'C059:49241:頭
CJK UNIFIED IDEOGRAPH:'C05A:49242:頹
CJK UNIFIED IDEOGRAPH:'C05B:49243:頤
CJK UNIFIED IDEOGRAPH:'C05C:49244:餐
CJK UNIFIED IDEOGRAPH:'C05D:49245:館
CJK UNIFIED IDEOGRAPH:'C05E:49246:餞
CJK UNIFIED IDEOGRAPH:'C05F:49247:餛
CJK UNIFIED IDEOGRAPH:'C060:49248:餡
CJK UNIFIED IDEOGRAPH:'C061:49249:餚
CJK UNIFIED IDEOGRAPH:'C062:49250:駭
CJK UNIFIED IDEOGRAPH:'C063:49251:駢
CJK UNIFIED IDEOGRAPH:'C064:49252:駱
CJK UNIFIED IDEOGRAPH:'C065:49253:骸
CJK UNIFIED IDEOGRAPH:'C066:49254:骼
CJK UNIFIED IDEOGRAPH:'C067:49255:髻
CJK UNIFIED IDEOGRAPH:'C068:49256:髭
CJK UNIFIED IDEOGRAPH:'C069:49257:鬨
CJK UNIFIED IDEOGRAPH:'C06A:49258:鮑
CJK UNIFIED IDEOGRAPH:'C06B:49259:鴕
CJK UNIFIED IDEOGRAPH:'C06C:49260:鴣
CJK UNIFIED IDEOGRAPH:'C06D:49261:鴦
CJK UNIFIED IDEOGRAPH:'C06E:49262:鴨
CJK UNIFIED IDEOGRAPH:'C06F:49263:鴒
CJK UNIFIED IDEOGRAPH:'C070:49264:鴛
CJK UNIFIED IDEOGRAPH:'C071:49265:默
CJK UNIFIED IDEOGRAPH:'C072:49266:黔
CJK UNIFIED IDEOGRAPH:'C073:49267:龍
CJK UNIFIED IDEOGRAPH:'C074:49268:龜
CJK UNIFIED IDEOGRAPH:'C075:49269:優
CJK UNIFIED IDEOGRAPH:'C076:49270:償
CJK UNIFIED IDEOGRAPH:'C077:49271:儡
CJK UNIFIED IDEOGRAPH:'C078:49272:儲
CJK UNIFIED IDEOGRAPH:'C079:49273:勵
CJK UNIFIED IDEOGRAPH:'C07A:49274:嚎
CJK UNIFIED IDEOGRAPH:'C07B:49275:嚀
CJK UNIFIED IDEOGRAPH:'C07C:49276:嚐
CJK UNIFIED IDEOGRAPH:'C07D:49277:嚅
CJK UNIFIED IDEOGRAPH:'C07E:49278:嚇
CJK UNIFIED IDEOGRAPH:'C0A1:49313:嚏
CJK UNIFIED IDEOGRAPH:'C0A2:49314:壕
CJK UNIFIED IDEOGRAPH:'C0A3:49315:壓
CJK UNIFIED IDEOGRAPH:'C0A4:49316:壑
CJK UNIFIED IDEOGRAPH:'C0A5:49317:壎
CJK UNIFIED IDEOGRAPH:'C0A6:49318:嬰
CJK UNIFIED IDEOGRAPH:'C0A7:49319:嬪
CJK UNIFIED IDEOGRAPH:'C0A8:49320:嬤
CJK UNIFIED IDEOGRAPH:'C0A9:49321:孺
CJK UNIFIED IDEOGRAPH:'C0AA:49322:尷
CJK UNIFIED IDEOGRAPH:'C0AB:49323:屨
CJK UNIFIED IDEOGRAPH:'C0AC:49324:嶼
CJK UNIFIED IDEOGRAPH:'C0AD:49325:嶺
CJK UNIFIED IDEOGRAPH:'C0AE:49326:嶽
CJK UNIFIED IDEOGRAPH:'C0AF:49327:嶸
CJK UNIFIED IDEOGRAPH:'C0B0:49328:幫
CJK UNIFIED IDEOGRAPH:'C0B1:49329:彌
CJK UNIFIED IDEOGRAPH:'C0B2:49330:徽
CJK UNIFIED IDEOGRAPH:'C0B3:49331:應
CJK UNIFIED IDEOGRAPH:'C0B4:49332:懂
CJK UNIFIED IDEOGRAPH:'C0B5:49333:懇
CJK UNIFIED IDEOGRAPH:'C0B6:49334:懦
CJK UNIFIED IDEOGRAPH:'C0B7:49335:懋
CJK UNIFIED IDEOGRAPH:'C0B8:49336:戲
CJK UNIFIED IDEOGRAPH:'C0B9:49337:戴
CJK UNIFIED IDEOGRAPH:'C0BA:49338:擎
CJK UNIFIED IDEOGRAPH:'C0BB:49339:擊
CJK UNIFIED IDEOGRAPH:'C0BC:49340:擘
CJK UNIFIED IDEOGRAPH:'C0BD:49341:擠
CJK UNIFIED IDEOGRAPH:'C0BE:49342:擰
CJK UNIFIED IDEOGRAPH:'C0BF:49343:擦
CJK UNIFIED IDEOGRAPH:'C0C0:49344:擬
CJK UNIFIED IDEOGRAPH:'C0C1:49345:擱
CJK UNIFIED IDEOGRAPH:'C0C2:49346:擢
CJK UNIFIED IDEOGRAPH:'C0C3:49347:擭
CJK UNIFIED IDEOGRAPH:'C0C4:49348:斂
CJK UNIFIED IDEOGRAPH:'C0C5:49349:斃
CJK UNIFIED IDEOGRAPH:'C0C6:49350:曙
CJK UNIFIED IDEOGRAPH:'C0C7:49351:曖
CJK UNIFIED IDEOGRAPH:'C0C8:49352:檀
CJK UNIFIED IDEOGRAPH:'C0C9:49353:檔
CJK UNIFIED IDEOGRAPH:'C0CA:49354:檄
CJK UNIFIED IDEOGRAPH:'C0CB:49355:檢
CJK UNIFIED IDEOGRAPH:'C0CC:49356:檜
CJK UNIFIED IDEOGRAPH:'C0CD:49357:櫛
CJK UNIFIED IDEOGRAPH:'C0CE:49358:檣
CJK UNIFIED IDEOGRAPH:'C0CF:49359:橾
CJK UNIFIED IDEOGRAPH:'C0D0:49360:檗
CJK UNIFIED IDEOGRAPH:'C0D1:49361:檐
CJK UNIFIED IDEOGRAPH:'C0D2:49362:檠
CJK UNIFIED IDEOGRAPH:'C0D3:49363:歜
CJK UNIFIED IDEOGRAPH:'C0D4:49364:殮
CJK UNIFIED IDEOGRAPH:'C0D5:49365:毚
CJK UNIFIED IDEOGRAPH:'C0D6:49366:氈
CJK UNIFIED IDEOGRAPH:'C0D7:49367:濘
CJK UNIFIED IDEOGRAPH:'C0D8:49368:濱
CJK UNIFIED IDEOGRAPH:'C0D9:49369:濟
CJK UNIFIED IDEOGRAPH:'C0DA:49370:濠
CJK UNIFIED IDEOGRAPH:'C0DB:49371:濛
CJK UNIFIED IDEOGRAPH:'C0DC:49372:濤
CJK UNIFIED IDEOGRAPH:'C0DD:49373:濫
CJK UNIFIED IDEOGRAPH:'C0DE:49374:濯
CJK UNIFIED IDEOGRAPH:'C0DF:49375:澀
CJK UNIFIED IDEOGRAPH:'C0E0:49376:濬
CJK UNIFIED IDEOGRAPH:'C0E1:49377:濡
CJK UNIFIED IDEOGRAPH:'C0E2:49378:濩
CJK UNIFIED IDEOGRAPH:'C0E3:49379:濕
CJK UNIFIED IDEOGRAPH:'C0E4:49380:濮
CJK UNIFIED IDEOGRAPH:'C0E5:49381:濰
CJK UNIFIED IDEOGRAPH:'C0E6:49382:燧
CJK UNIFIED IDEOGRAPH:'C0E7:49383:營
CJK UNIFIED IDEOGRAPH:'C0E8:49384:燮
CJK UNIFIED IDEOGRAPH:'C0E9:49385:燦
CJK UNIFIED IDEOGRAPH:'C0EA:49386:燥
CJK UNIFIED IDEOGRAPH:'C0EB:49387:燭
CJK UNIFIED IDEOGRAPH:'C0EC:49388:燬
CJK UNIFIED IDEOGRAPH:'C0ED:49389:燴
CJK UNIFIED IDEOGRAPH:'C0EE:49390:燠
CJK UNIFIED IDEOGRAPH:'C0EF:49391:爵
CJK UNIFIED IDEOGRAPH:'C0F0:49392:牆
CJK UNIFIED IDEOGRAPH:'C0F1:49393:獰
CJK UNIFIED IDEOGRAPH:'C0F2:49394:獲
CJK UNIFIED IDEOGRAPH:'C0F3:49395:璩
CJK UNIFIED IDEOGRAPH:'C0F4:49396:環
CJK UNIFIED IDEOGRAPH:'C0F5:49397:璦
CJK UNIFIED IDEOGRAPH:'C0F6:49398:璨
CJK UNIFIED IDEOGRAPH:'C0F7:49399:癆
CJK UNIFIED IDEOGRAPH:'C0F8:49400:療
CJK UNIFIED IDEOGRAPH:'C0F9:49401:癌
CJK UNIFIED IDEOGRAPH:'C0FA:49402:盪
CJK UNIFIED IDEOGRAPH:'C0FB:49403:瞳
CJK UNIFIED IDEOGRAPH:'C0FC:49404:瞪
CJK UNIFIED IDEOGRAPH:'C0FD:49405:瞰
CJK UNIFIED IDEOGRAPH:'C0FE:49406:瞬
CJK UNIFIED IDEOGRAPH:'C140:49472:瞧
CJK UNIFIED IDEOGRAPH:'C141:49473:瞭
CJK UNIFIED IDEOGRAPH:'C142:49474:矯
CJK UNIFIED IDEOGRAPH:'C143:49475:磷
CJK UNIFIED IDEOGRAPH:'C144:49476:磺
CJK UNIFIED IDEOGRAPH:'C145:49477:磴
CJK UNIFIED IDEOGRAPH:'C146:49478:磯
CJK UNIFIED IDEOGRAPH:'C147:49479:礁
CJK UNIFIED IDEOGRAPH:'C148:49480:禧
CJK UNIFIED IDEOGRAPH:'C149:49481:禪
CJK UNIFIED IDEOGRAPH:'C14A:49482:穗
CJK UNIFIED IDEOGRAPH:'C14B:49483:窿
CJK UNIFIED IDEOGRAPH:'C14C:49484:簇
CJK UNIFIED IDEOGRAPH:'C14D:49485:簍
CJK UNIFIED IDEOGRAPH:'C14E:49486:篾
CJK UNIFIED IDEOGRAPH:'C14F:49487:篷
CJK UNIFIED IDEOGRAPH:'C150:49488:簌
CJK UNIFIED IDEOGRAPH:'C151:49489:篠
CJK UNIFIED IDEOGRAPH:'C152:49490:糠
CJK UNIFIED IDEOGRAPH:'C153:49491:糜
CJK UNIFIED IDEOGRAPH:'C154:49492:糞
CJK UNIFIED IDEOGRAPH:'C155:49493:糢
CJK UNIFIED IDEOGRAPH:'C156:49494:糟
CJK UNIFIED IDEOGRAPH:'C157:49495:糙
CJK UNIFIED IDEOGRAPH:'C158:49496:糝
CJK UNIFIED IDEOGRAPH:'C159:49497:縮
CJK UNIFIED IDEOGRAPH:'C15A:49498:績
CJK UNIFIED IDEOGRAPH:'C15B:49499:繆
CJK UNIFIED IDEOGRAPH:'C15C:49500:縷
CJK UNIFIED IDEOGRAPH:'C15D:49501:縲
CJK UNIFIED IDEOGRAPH:'C15E:49502:繃
CJK UNIFIED IDEOGRAPH:'C15F:49503:縫
CJK UNIFIED IDEOGRAPH:'C160:49504:總
CJK UNIFIED IDEOGRAPH:'C161:49505:縱
CJK UNIFIED IDEOGRAPH:'C162:49506:繅
CJK UNIFIED IDEOGRAPH:'C163:49507:繁
CJK UNIFIED IDEOGRAPH:'C164:49508:縴
CJK UNIFIED IDEOGRAPH:'C165:49509:縹
CJK UNIFIED IDEOGRAPH:'C166:49510:繈
CJK UNIFIED IDEOGRAPH:'C167:49511:縵
CJK UNIFIED IDEOGRAPH:'C168:49512:縿
CJK UNIFIED IDEOGRAPH:'C169:49513:縯
CJK UNIFIED IDEOGRAPH:'C16A:49514:罄
CJK UNIFIED IDEOGRAPH:'C16B:49515:翳
CJK UNIFIED IDEOGRAPH:'C16C:49516:翼
CJK UNIFIED IDEOGRAPH:'C16D:49517:聱
CJK UNIFIED IDEOGRAPH:'C16E:49518:聲
CJK UNIFIED IDEOGRAPH:'C16F:49519:聰
CJK UNIFIED IDEOGRAPH:'C170:49520:聯
CJK UNIFIED IDEOGRAPH:'C171:49521:聳
CJK UNIFIED IDEOGRAPH:'C172:49522:臆
CJK UNIFIED IDEOGRAPH:'C173:49523:臃
CJK UNIFIED IDEOGRAPH:'C174:49524:膺
CJK UNIFIED IDEOGRAPH:'C175:49525:臂
CJK UNIFIED IDEOGRAPH:'C176:49526:臀
CJK UNIFIED IDEOGRAPH:'C177:49527:膿
CJK UNIFIED IDEOGRAPH:'C178:49528:膽
CJK UNIFIED IDEOGRAPH:'C179:49529:臉
CJK UNIFIED IDEOGRAPH:'C17A:49530:膾
CJK UNIFIED IDEOGRAPH:'C17B:49531:臨
CJK UNIFIED IDEOGRAPH:'C17C:49532:舉
CJK UNIFIED IDEOGRAPH:'C17D:49533:艱
CJK UNIFIED IDEOGRAPH:'C17E:49534:薪
CJK UNIFIED IDEOGRAPH:'C1A1:49569:薄
CJK UNIFIED IDEOGRAPH:'C1A2:49570:蕾
CJK UNIFIED IDEOGRAPH:'C1A3:49571:薜
CJK UNIFIED IDEOGRAPH:'C1A4:49572:薑
CJK UNIFIED IDEOGRAPH:'C1A5:49573:薔
CJK UNIFIED IDEOGRAPH:'C1A6:49574:薯
CJK UNIFIED IDEOGRAPH:'C1A7:49575:薛
CJK UNIFIED IDEOGRAPH:'C1A8:49576:薇
CJK UNIFIED IDEOGRAPH:'C1A9:49577:薨
CJK UNIFIED IDEOGRAPH:'C1AA:49578:薊
CJK UNIFIED IDEOGRAPH:'C1AB:49579:虧
CJK UNIFIED IDEOGRAPH:'C1AC:49580:蟀
CJK UNIFIED IDEOGRAPH:'C1AD:49581:蟑
CJK UNIFIED IDEOGRAPH:'C1AE:49582:螳
CJK UNIFIED IDEOGRAPH:'C1AF:49583:蟒
CJK UNIFIED IDEOGRAPH:'C1B0:49584:蟆
CJK UNIFIED IDEOGRAPH:'C1B1:49585:螫
CJK UNIFIED IDEOGRAPH:'C1B2:49586:螻
CJK UNIFIED IDEOGRAPH:'C1B3:49587:螺
CJK UNIFIED IDEOGRAPH:'C1B4:49588:蟈
CJK UNIFIED IDEOGRAPH:'C1B5:49589:蟋
CJK UNIFIED IDEOGRAPH:'C1B6:49590:褻
CJK UNIFIED IDEOGRAPH:'C1B7:49591:褶
CJK UNIFIED IDEOGRAPH:'C1B8:49592:襄
CJK UNIFIED IDEOGRAPH:'C1B9:49593:褸
CJK UNIFIED IDEOGRAPH:'C1BA:49594:褽
CJK UNIFIED IDEOGRAPH:'C1BB:49595:覬
CJK UNIFIED IDEOGRAPH:'C1BC:49596:謎
CJK UNIFIED IDEOGRAPH:'C1BD:49597:謗
CJK UNIFIED IDEOGRAPH:'C1BE:49598:謙
CJK UNIFIED IDEOGRAPH:'C1BF:49599:講
CJK UNIFIED IDEOGRAPH:'C1C0:49600:謊
CJK UNIFIED IDEOGRAPH:'C1C1:49601:謠
CJK UNIFIED IDEOGRAPH:'C1C2:49602:謝
CJK UNIFIED IDEOGRAPH:'C1C3:49603:謄
CJK UNIFIED IDEOGRAPH:'C1C4:49604:謐
CJK UNIFIED IDEOGRAPH:'C1C5:49605:豁
CJK UNIFIED IDEOGRAPH:'C1C6:49606:谿
CJK UNIFIED IDEOGRAPH:'C1C7:49607:豳
CJK UNIFIED IDEOGRAPH:'C1C8:49608:賺
CJK UNIFIED IDEOGRAPH:'C1C9:49609:賽
CJK UNIFIED IDEOGRAPH:'C1CA:49610:購
CJK UNIFIED IDEOGRAPH:'C1CB:49611:賸
CJK UNIFIED IDEOGRAPH:'C1CC:49612:賻
CJK UNIFIED IDEOGRAPH:'C1CD:49613:趨
CJK UNIFIED IDEOGRAPH:'C1CE:49614:蹉
CJK UNIFIED IDEOGRAPH:'C1CF:49615:蹋
CJK UNIFIED IDEOGRAPH:'C1D0:49616:蹈
CJK UNIFIED IDEOGRAPH:'C1D1:49617:蹊
CJK UNIFIED IDEOGRAPH:'C1D2:49618:轄
CJK UNIFIED IDEOGRAPH:'C1D3:49619:輾
CJK UNIFIED IDEOGRAPH:'C1D4:49620:轂
CJK UNIFIED IDEOGRAPH:'C1D5:49621:轅
CJK UNIFIED IDEOGRAPH:'C1D6:49622:輿
CJK UNIFIED IDEOGRAPH:'C1D7:49623:避
CJK UNIFIED IDEOGRAPH:'C1D8:49624:遽
CJK UNIFIED IDEOGRAPH:'C1D9:49625:還
CJK UNIFIED IDEOGRAPH:'C1DA:49626:邁
CJK UNIFIED IDEOGRAPH:'C1DB:49627:邂
CJK UNIFIED IDEOGRAPH:'C1DC:49628:邀
CJK UNIFIED IDEOGRAPH:'C1DD:49629:鄹
CJK UNIFIED IDEOGRAPH:'C1DE:49630:醣
CJK UNIFIED IDEOGRAPH:'C1DF:49631:醞
CJK UNIFIED IDEOGRAPH:'C1E0:49632:醜
CJK UNIFIED IDEOGRAPH:'C1E1:49633:鍍
CJK UNIFIED IDEOGRAPH:'C1E2:49634:鎂
CJK UNIFIED IDEOGRAPH:'C1E3:49635:錨
CJK UNIFIED IDEOGRAPH:'C1E4:49636:鍵
CJK UNIFIED IDEOGRAPH:'C1E5:49637:鍊
CJK UNIFIED IDEOGRAPH:'C1E6:49638:鍥
CJK UNIFIED IDEOGRAPH:'C1E7:49639:鍋
CJK UNIFIED IDEOGRAPH:'C1E8:49640:錘
CJK UNIFIED IDEOGRAPH:'C1E9:49641:鍾
CJK UNIFIED IDEOGRAPH:'C1EA:49642:鍬
CJK UNIFIED IDEOGRAPH:'C1EB:49643:鍛
CJK UNIFIED IDEOGRAPH:'C1EC:49644:鍰
CJK UNIFIED IDEOGRAPH:'C1ED:49645:鍚
CJK UNIFIED IDEOGRAPH:'C1EE:49646:鍔
CJK UNIFIED IDEOGRAPH:'C1EF:49647:闊
CJK UNIFIED IDEOGRAPH:'C1F0:49648:闋
CJK UNIFIED IDEOGRAPH:'C1F1:49649:闌
CJK UNIFIED IDEOGRAPH:'C1F2:49650:闈
CJK UNIFIED IDEOGRAPH:'C1F3:49651:闆
CJK UNIFIED IDEOGRAPH:'C1F4:49652:隱
CJK UNIFIED IDEOGRAPH:'C1F5:49653:隸
CJK UNIFIED IDEOGRAPH:'C1F6:49654:雖
CJK UNIFIED IDEOGRAPH:'C1F7:49655:霜
CJK UNIFIED IDEOGRAPH:'C1F8:49656:霞
CJK UNIFIED IDEOGRAPH:'C1F9:49657:鞠
CJK UNIFIED IDEOGRAPH:'C1FA:49658:韓
CJK UNIFIED IDEOGRAPH:'C1FB:49659:顆
CJK UNIFIED IDEOGRAPH:'C1FC:49660:颶
CJK UNIFIED IDEOGRAPH:'C1FD:49661:餵
CJK UNIFIED IDEOGRAPH:'C1FE:49662:騁
CJK UNIFIED IDEOGRAPH:'C240:49728:駿
CJK UNIFIED IDEOGRAPH:'C241:49729:鮮
CJK UNIFIED IDEOGRAPH:'C242:49730:鮫
CJK UNIFIED IDEOGRAPH:'C243:49731:鮪
CJK UNIFIED IDEOGRAPH:'C244:49732:鮭
CJK UNIFIED IDEOGRAPH:'C245:49733:鴻
CJK UNIFIED IDEOGRAPH:'C246:49734:鴿
CJK UNIFIED IDEOGRAPH:'C247:49735:麋
CJK UNIFIED IDEOGRAPH:'C248:49736:黏
CJK UNIFIED IDEOGRAPH:'C249:49737:點
CJK UNIFIED IDEOGRAPH:'C24A:49738:黜
CJK UNIFIED IDEOGRAPH:'C24B:49739:黝
CJK UNIFIED IDEOGRAPH:'C24C:49740:黛
CJK UNIFIED IDEOGRAPH:'C24D:49741:鼾
CJK UNIFIED IDEOGRAPH:'C24E:49742:齋
CJK UNIFIED IDEOGRAPH:'C24F:49743:叢
CJK UNIFIED IDEOGRAPH:'C250:49744:嚕
CJK UNIFIED IDEOGRAPH:'C251:49745:嚮
CJK UNIFIED IDEOGRAPH:'C252:49746:壙
CJK UNIFIED IDEOGRAPH:'C253:49747:壘
CJK UNIFIED IDEOGRAPH:'C254:49748:嬸
CJK UNIFIED IDEOGRAPH:'C255:49749:彝
CJK UNIFIED IDEOGRAPH:'C256:49750:懣
CJK UNIFIED IDEOGRAPH:'C257:49751:戳
CJK UNIFIED IDEOGRAPH:'C258:49752:擴
CJK UNIFIED IDEOGRAPH:'C259:49753:擲
CJK UNIFIED IDEOGRAPH:'C25A:49754:擾
CJK UNIFIED IDEOGRAPH:'C25B:49755:攆
CJK UNIFIED IDEOGRAPH:'C25C:49756:擺
CJK UNIFIED IDEOGRAPH:'C25D:49757:擻
CJK UNIFIED IDEOGRAPH:'C25E:49758:擷
CJK UNIFIED IDEOGRAPH:'C25F:49759:斷
CJK UNIFIED IDEOGRAPH:'C260:49760:曜
CJK UNIFIED IDEOGRAPH:'C261:49761:朦
CJK UNIFIED IDEOGRAPH:'C262:49762:檳
CJK UNIFIED IDEOGRAPH:'C263:49763:檬
CJK UNIFIED IDEOGRAPH:'C264:49764:櫃
CJK UNIFIED IDEOGRAPH:'C265:49765:檻
CJK UNIFIED IDEOGRAPH:'C266:49766:檸
CJK UNIFIED IDEOGRAPH:'C267:49767:櫂
CJK UNIFIED IDEOGRAPH:'C268:49768:檮
CJK UNIFIED IDEOGRAPH:'C269:49769:檯
CJK UNIFIED IDEOGRAPH:'C26A:49770:歟
CJK UNIFIED IDEOGRAPH:'C26B:49771:歸
CJK UNIFIED IDEOGRAPH:'C26C:49772:殯
CJK UNIFIED IDEOGRAPH:'C26D:49773:瀉
CJK UNIFIED IDEOGRAPH:'C26E:49774:瀋
CJK UNIFIED IDEOGRAPH:'C26F:49775:濾
CJK UNIFIED IDEOGRAPH:'C270:49776:瀆
CJK UNIFIED IDEOGRAPH:'C271:49777:濺
CJK UNIFIED IDEOGRAPH:'C272:49778:瀑
CJK UNIFIED IDEOGRAPH:'C273:49779:瀏
CJK UNIFIED IDEOGRAPH:'C274:49780:燻
CJK UNIFIED IDEOGRAPH:'C275:49781:燼
CJK UNIFIED IDEOGRAPH:'C276:49782:燾
CJK UNIFIED IDEOGRAPH:'C277:49783:燸
CJK UNIFIED IDEOGRAPH:'C278:49784:獷
CJK UNIFIED IDEOGRAPH:'C279:49785:獵
CJK UNIFIED IDEOGRAPH:'C27A:49786:璧
CJK UNIFIED IDEOGRAPH:'C27B:49787:璿
CJK UNIFIED IDEOGRAPH:'C27C:49788:甕
CJK UNIFIED IDEOGRAPH:'C27D:49789:癖
CJK UNIFIED IDEOGRAPH:'C27E:49790:癘
CJK UNIFIED IDEOGRAPH:'C2A1:49825:癒
CJK UNIFIED IDEOGRAPH:'C2A2:49826:瞽
CJK UNIFIED IDEOGRAPH:'C2A3:49827:瞿
CJK UNIFIED IDEOGRAPH:'C2A4:49828:瞻
CJK UNIFIED IDEOGRAPH:'C2A5:49829:瞼
CJK UNIFIED IDEOGRAPH:'C2A6:49830:礎
CJK UNIFIED IDEOGRAPH:'C2A7:49831:禮
CJK UNIFIED IDEOGRAPH:'C2A8:49832:穡
CJK UNIFIED IDEOGRAPH:'C2A9:49833:穢
CJK UNIFIED IDEOGRAPH:'C2AA:49834:穠
CJK UNIFIED IDEOGRAPH:'C2AB:49835:竄
CJK UNIFIED IDEOGRAPH:'C2AC:49836:竅
CJK UNIFIED IDEOGRAPH:'C2AD:49837:簫
CJK UNIFIED IDEOGRAPH:'C2AE:49838:簧
CJK UNIFIED IDEOGRAPH:'C2AF:49839:簪
CJK UNIFIED IDEOGRAPH:'C2B0:49840:簞
CJK UNIFIED IDEOGRAPH:'C2B1:49841:簣
CJK UNIFIED IDEOGRAPH:'C2B2:49842:簡
CJK UNIFIED IDEOGRAPH:'C2B3:49843:糧
CJK UNIFIED IDEOGRAPH:'C2B4:49844:織
CJK UNIFIED IDEOGRAPH:'C2B5:49845:繕
CJK UNIFIED IDEOGRAPH:'C2B6:49846:繞
CJK UNIFIED IDEOGRAPH:'C2B7:49847:繚
CJK UNIFIED IDEOGRAPH:'C2B8:49848:繡
CJK UNIFIED IDEOGRAPH:'C2B9:49849:繒
CJK UNIFIED IDEOGRAPH:'C2BA:49850:繙
CJK UNIFIED IDEOGRAPH:'C2BB:49851:罈
CJK UNIFIED IDEOGRAPH:'C2BC:49852:翹
CJK UNIFIED IDEOGRAPH:'C2BD:49853:翻
CJK UNIFIED IDEOGRAPH:'C2BE:49854:職
CJK UNIFIED IDEOGRAPH:'C2BF:49855:聶
CJK UNIFIED IDEOGRAPH:'C2C0:49856:臍
CJK UNIFIED IDEOGRAPH:'C2C1:49857:臏
CJK UNIFIED IDEOGRAPH:'C2C2:49858:舊
CJK UNIFIED IDEOGRAPH:'C2C3:49859:藏
CJK UNIFIED IDEOGRAPH:'C2C4:49860:薩
CJK UNIFIED IDEOGRAPH:'C2C5:49861:藍
CJK UNIFIED IDEOGRAPH:'C2C6:49862:藐
CJK UNIFIED IDEOGRAPH:'C2C7:49863:藉
CJK UNIFIED IDEOGRAPH:'C2C8:49864:薰
CJK UNIFIED IDEOGRAPH:'C2C9:49865:薺
CJK UNIFIED IDEOGRAPH:'C2CA:49866:薹
CJK UNIFIED IDEOGRAPH:'C2CB:49867:薦
CJK UNIFIED IDEOGRAPH:'C2CC:49868:蟯
CJK UNIFIED IDEOGRAPH:'C2CD:49869:蟬
CJK UNIFIED IDEOGRAPH:'C2CE:49870:蟲
CJK UNIFIED IDEOGRAPH:'C2CF:49871:蟠
CJK UNIFIED IDEOGRAPH:'C2D0:49872:覆
CJK UNIFIED IDEOGRAPH:'C2D1:49873:覲
CJK UNIFIED IDEOGRAPH:'C2D2:49874:觴
CJK UNIFIED IDEOGRAPH:'C2D3:49875:謨
CJK UNIFIED IDEOGRAPH:'C2D4:49876:謹
CJK UNIFIED IDEOGRAPH:'C2D5:49877:謬
CJK UNIFIED IDEOGRAPH:'C2D6:49878:謫
CJK UNIFIED IDEOGRAPH:'C2D7:49879:豐
CJK UNIFIED IDEOGRAPH:'C2D8:49880:贅
CJK UNIFIED IDEOGRAPH:'C2D9:49881:蹙
CJK UNIFIED IDEOGRAPH:'C2DA:49882:蹣
CJK UNIFIED IDEOGRAPH:'C2DB:49883:蹦
CJK UNIFIED IDEOGRAPH:'C2DC:49884:蹤
CJK UNIFIED IDEOGRAPH:'C2DD:49885:蹟
CJK UNIFIED IDEOGRAPH:'C2DE:49886:蹕
CJK UNIFIED IDEOGRAPH:'C2DF:49887:軀
CJK UNIFIED IDEOGRAPH:'C2E0:49888:轉
CJK UNIFIED IDEOGRAPH:'C2E1:49889:轍
CJK UNIFIED IDEOGRAPH:'C2E2:49890:邇
CJK UNIFIED IDEOGRAPH:'C2E3:49891:邃
CJK UNIFIED IDEOGRAPH:'C2E4:49892:邈
CJK UNIFIED IDEOGRAPH:'C2E5:49893:醫
CJK UNIFIED IDEOGRAPH:'C2E6:49894:醬
CJK UNIFIED IDEOGRAPH:'C2E7:49895:釐
CJK UNIFIED IDEOGRAPH:'C2E8:49896:鎔
CJK UNIFIED IDEOGRAPH:'C2E9:49897:鎊
CJK UNIFIED IDEOGRAPH:'C2EA:49898:鎖
CJK UNIFIED IDEOGRAPH:'C2EB:49899:鎢
CJK UNIFIED IDEOGRAPH:'C2EC:49900:鎳
CJK UNIFIED IDEOGRAPH:'C2ED:49901:鎮
CJK UNIFIED IDEOGRAPH:'C2EE:49902:鎬
CJK UNIFIED IDEOGRAPH:'C2EF:49903:鎰
CJK UNIFIED IDEOGRAPH:'C2F0:49904:鎘
CJK UNIFIED IDEOGRAPH:'C2F1:49905:鎚
CJK UNIFIED IDEOGRAPH:'C2F2:49906:鎗
CJK UNIFIED IDEOGRAPH:'C2F3:49907:闔
CJK UNIFIED IDEOGRAPH:'C2F4:49908:闖
CJK UNIFIED IDEOGRAPH:'C2F5:49909:闐
CJK UNIFIED IDEOGRAPH:'C2F6:49910:闕
CJK UNIFIED IDEOGRAPH:'C2F7:49911:離
CJK UNIFIED IDEOGRAPH:'C2F8:49912:雜
CJK UNIFIED IDEOGRAPH:'C2F9:49913:雙
CJK UNIFIED IDEOGRAPH:'C2FA:49914:雛
CJK UNIFIED IDEOGRAPH:'C2FB:49915:雞
CJK UNIFIED IDEOGRAPH:'C2FC:49916:霤
CJK UNIFIED IDEOGRAPH:'C2FD:49917:鞣
CJK UNIFIED IDEOGRAPH:'C2FE:49918:鞦
CJK UNIFIED IDEOGRAPH:'C340:49984:鞭
CJK UNIFIED IDEOGRAPH:'C341:49985:韹
CJK UNIFIED IDEOGRAPH:'C342:49986:額
CJK UNIFIED IDEOGRAPH:'C343:49987:顏
CJK UNIFIED IDEOGRAPH:'C344:49988:題
CJK UNIFIED IDEOGRAPH:'C345:49989:顎
CJK UNIFIED IDEOGRAPH:'C346:49990:顓
CJK UNIFIED IDEOGRAPH:'C347:49991:颺
CJK UNIFIED IDEOGRAPH:'C348:49992:餾
CJK UNIFIED IDEOGRAPH:'C349:49993:餿
CJK UNIFIED IDEOGRAPH:'C34A:49994:餽
CJK UNIFIED IDEOGRAPH:'C34B:49995:餮
CJK UNIFIED IDEOGRAPH:'C34C:49996:馥
CJK UNIFIED IDEOGRAPH:'C34D:49997:騎
CJK UNIFIED IDEOGRAPH:'C34E:49998:髁
CJK UNIFIED IDEOGRAPH:'C34F:49999:鬃
CJK UNIFIED IDEOGRAPH:'C350:50000:鬆
CJK UNIFIED IDEOGRAPH:'C351:50001:魏
CJK UNIFIED IDEOGRAPH:'C352:50002:魎
CJK UNIFIED IDEOGRAPH:'C353:50003:魍
CJK UNIFIED IDEOGRAPH:'C354:50004:鯊
CJK UNIFIED IDEOGRAPH:'C355:50005:鯉
CJK UNIFIED IDEOGRAPH:'C356:50006:鯽
CJK UNIFIED IDEOGRAPH:'C357:50007:鯈
CJK UNIFIED IDEOGRAPH:'C358:50008:鯀
CJK UNIFIED IDEOGRAPH:'C359:50009:鵑
CJK UNIFIED IDEOGRAPH:'C35A:50010:鵝
CJK UNIFIED IDEOGRAPH:'C35B:50011:鵠
CJK UNIFIED IDEOGRAPH:'C35C:50012:黠
CJK UNIFIED IDEOGRAPH:'C35D:50013:鼕
CJK UNIFIED IDEOGRAPH:'C35E:50014:鼬
CJK UNIFIED IDEOGRAPH:'C35F:50015:儳
CJK UNIFIED IDEOGRAPH:'C360:50016:嚥
CJK UNIFIED IDEOGRAPH:'C361:50017:壞
CJK UNIFIED IDEOGRAPH:'C362:50018:壟
CJK UNIFIED IDEOGRAPH:'C363:50019:壢
CJK UNIFIED IDEOGRAPH:'C364:50020:寵
CJK UNIFIED IDEOGRAPH:'C365:50021:龐
CJK UNIFIED IDEOGRAPH:'C366:50022:廬
CJK UNIFIED IDEOGRAPH:'C367:50023:懲
CJK UNIFIED IDEOGRAPH:'C368:50024:懷
CJK UNIFIED IDEOGRAPH:'C369:50025:懶
CJK UNIFIED IDEOGRAPH:'C36A:50026:懵
CJK UNIFIED IDEOGRAPH:'C36B:50027:攀
CJK UNIFIED IDEOGRAPH:'C36C:50028:攏
CJK UNIFIED IDEOGRAPH:'C36D:50029:曠
CJK UNIFIED IDEOGRAPH:'C36E:50030:曝
CJK UNIFIED IDEOGRAPH:'C36F:50031:櫥
CJK UNIFIED IDEOGRAPH:'C370:50032:櫝
CJK UNIFIED IDEOGRAPH:'C371:50033:櫚
CJK UNIFIED IDEOGRAPH:'C372:50034:櫓
CJK UNIFIED IDEOGRAPH:'C373:50035:瀛
CJK UNIFIED IDEOGRAPH:'C374:50036:瀟
CJK UNIFIED IDEOGRAPH:'C375:50037:瀨
CJK UNIFIED IDEOGRAPH:'C376:50038:瀚
CJK UNIFIED IDEOGRAPH:'C377:50039:瀝
CJK UNIFIED IDEOGRAPH:'C378:50040:瀕
CJK UNIFIED IDEOGRAPH:'C379:50041:瀘
CJK UNIFIED IDEOGRAPH:'C37A:50042:爆
CJK UNIFIED IDEOGRAPH:'C37B:50043:爍
CJK UNIFIED IDEOGRAPH:'C37C:50044:牘
CJK UNIFIED IDEOGRAPH:'C37D:50045:犢
CJK UNIFIED IDEOGRAPH:'C37E:50046:獸
CJK UNIFIED IDEOGRAPH:'C3A1:50081:獺
CJK UNIFIED IDEOGRAPH:'C3A2:50082:璽
CJK UNIFIED IDEOGRAPH:'C3A3:50083:瓊
CJK UNIFIED IDEOGRAPH:'C3A4:50084:瓣
CJK UNIFIED IDEOGRAPH:'C3A5:50085:疇
CJK UNIFIED IDEOGRAPH:'C3A6:50086:疆
CJK UNIFIED IDEOGRAPH:'C3A7:50087:癟
CJK UNIFIED IDEOGRAPH:'C3A8:50088:癡
CJK UNIFIED IDEOGRAPH:'C3A9:50089:矇
CJK UNIFIED IDEOGRAPH:'C3AA:50090:礙
CJK UNIFIED IDEOGRAPH:'C3AB:50091:禱
CJK UNIFIED IDEOGRAPH:'C3AC:50092:穫
CJK UNIFIED IDEOGRAPH:'C3AD:50093:穩
CJK UNIFIED IDEOGRAPH:'C3AE:50094:簾
CJK UNIFIED IDEOGRAPH:'C3AF:50095:簿
CJK UNIFIED IDEOGRAPH:'C3B0:50096:簸
CJK UNIFIED IDEOGRAPH:'C3B1:50097:簽
CJK UNIFIED IDEOGRAPH:'C3B2:50098:簷
CJK UNIFIED IDEOGRAPH:'C3B3:50099:籀
CJK UNIFIED IDEOGRAPH:'C3B4:50100:繫
CJK UNIFIED IDEOGRAPH:'C3B5:50101:繭
CJK UNIFIED IDEOGRAPH:'C3B6:50102:繹
CJK UNIFIED IDEOGRAPH:'C3B7:50103:繩
CJK UNIFIED IDEOGRAPH:'C3B8:50104:繪
CJK UNIFIED IDEOGRAPH:'C3B9:50105:羅
CJK UNIFIED IDEOGRAPH:'C3BA:50106:繳
CJK UNIFIED IDEOGRAPH:'C3BB:50107:羶
CJK UNIFIED IDEOGRAPH:'C3BC:50108:羹
CJK UNIFIED IDEOGRAPH:'C3BD:50109:羸
CJK UNIFIED IDEOGRAPH:'C3BE:50110:臘
CJK UNIFIED IDEOGRAPH:'C3BF:50111:藩
CJK UNIFIED IDEOGRAPH:'C3C0:50112:藝
CJK UNIFIED IDEOGRAPH:'C3C1:50113:藪
CJK UNIFIED IDEOGRAPH:'C3C2:50114:藕
CJK UNIFIED IDEOGRAPH:'C3C3:50115:藤
CJK UNIFIED IDEOGRAPH:'C3C4:50116:藥
CJK UNIFIED IDEOGRAPH:'C3C5:50117:藷
CJK UNIFIED IDEOGRAPH:'C3C6:50118:蟻
CJK UNIFIED IDEOGRAPH:'C3C7:50119:蠅
CJK UNIFIED IDEOGRAPH:'C3C8:50120:蠍
CJK UNIFIED IDEOGRAPH:'C3C9:50121:蟹
CJK UNIFIED IDEOGRAPH:'C3CA:50122:蟾
CJK UNIFIED IDEOGRAPH:'C3CB:50123:襠
CJK UNIFIED IDEOGRAPH:'C3CC:50124:襟
CJK UNIFIED IDEOGRAPH:'C3CD:50125:襖
CJK UNIFIED IDEOGRAPH:'C3CE:50126:襞
CJK UNIFIED IDEOGRAPH:'C3CF:50127:譁
CJK UNIFIED IDEOGRAPH:'C3D0:50128:譜
CJK UNIFIED IDEOGRAPH:'C3D1:50129:識
CJK UNIFIED IDEOGRAPH:'C3D2:50130:證
CJK UNIFIED IDEOGRAPH:'C3D3:50131:譚
CJK UNIFIED IDEOGRAPH:'C3D4:50132:譎
CJK UNIFIED IDEOGRAPH:'C3D5:50133:譏
CJK UNIFIED IDEOGRAPH:'C3D6:50134:譆
CJK UNIFIED IDEOGRAPH:'C3D7:50135:譙
CJK UNIFIED IDEOGRAPH:'C3D8:50136:贈
CJK UNIFIED IDEOGRAPH:'C3D9:50137:贊
CJK UNIFIED IDEOGRAPH:'C3DA:50138:蹼
CJK UNIFIED IDEOGRAPH:'C3DB:50139:蹲
CJK UNIFIED IDEOGRAPH:'C3DC:50140:躇
CJK UNIFIED IDEOGRAPH:'C3DD:50141:蹶
CJK UNIFIED IDEOGRAPH:'C3DE:50142:蹬
CJK UNIFIED IDEOGRAPH:'C3DF:50143:蹺
CJK UNIFIED IDEOGRAPH:'C3E0:50144:蹴
CJK UNIFIED IDEOGRAPH:'C3E1:50145:轔
CJK UNIFIED IDEOGRAPH:'C3E2:50146:轎
CJK UNIFIED IDEOGRAPH:'C3E3:50147:辭
CJK UNIFIED IDEOGRAPH:'C3E4:50148:邊
CJK UNIFIED IDEOGRAPH:'C3E5:50149:邋
CJK UNIFIED IDEOGRAPH:'C3E6:50150:醱
CJK UNIFIED IDEOGRAPH:'C3E7:50151:醮
CJK UNIFIED IDEOGRAPH:'C3E8:50152:鏡
CJK UNIFIED IDEOGRAPH:'C3E9:50153:鏑
CJK UNIFIED IDEOGRAPH:'C3EA:50154:鏟
CJK UNIFIED IDEOGRAPH:'C3EB:50155:鏃
CJK UNIFIED IDEOGRAPH:'C3EC:50156:鏈
CJK UNIFIED IDEOGRAPH:'C3ED:50157:鏜
CJK UNIFIED IDEOGRAPH:'C3EE:50158:鏝
CJK UNIFIED IDEOGRAPH:'C3EF:50159:鏖
CJK UNIFIED IDEOGRAPH:'C3F0:50160:鏢
CJK UNIFIED IDEOGRAPH:'C3F1:50161:鏍
CJK UNIFIED IDEOGRAPH:'C3F2:50162:鏘
CJK UNIFIED IDEOGRAPH:'C3F3:50163:鏤
CJK UNIFIED IDEOGRAPH:'C3F4:50164:鏗
CJK UNIFIED IDEOGRAPH:'C3F5:50165:鏨
CJK UNIFIED IDEOGRAPH:'C3F6:50166:關
CJK UNIFIED IDEOGRAPH:'C3F7:50167:隴
CJK UNIFIED IDEOGRAPH:'C3F8:50168:難
CJK UNIFIED IDEOGRAPH:'C3F9:50169:霪
CJK UNIFIED IDEOGRAPH:'C3FA:50170:霧
CJK UNIFIED IDEOGRAPH:'C3FB:50171:靡
CJK UNIFIED IDEOGRAPH:'C3FC:50172:韜
CJK UNIFIED IDEOGRAPH:'C3FD:50173:韻
CJK UNIFIED IDEOGRAPH:'C3FE:50174:類
CJK UNIFIED IDEOGRAPH:'C440:50240:願
CJK UNIFIED IDEOGRAPH:'C441:50241:顛
CJK UNIFIED IDEOGRAPH:'C442:50242:颼
CJK UNIFIED IDEOGRAPH:'C443:50243:饅
CJK UNIFIED IDEOGRAPH:'C444:50244:饉
CJK UNIFIED IDEOGRAPH:'C445:50245:騖
CJK UNIFIED IDEOGRAPH:'C446:50246:騙
CJK UNIFIED IDEOGRAPH:'C447:50247:鬍
CJK UNIFIED IDEOGRAPH:'C448:50248:鯨
CJK UNIFIED IDEOGRAPH:'C449:50249:鯧
CJK UNIFIED IDEOGRAPH:'C44A:50250:鯖
CJK UNIFIED IDEOGRAPH:'C44B:50251:鯛
CJK UNIFIED IDEOGRAPH:'C44C:50252:鶉
CJK UNIFIED IDEOGRAPH:'C44D:50253:鵡
CJK UNIFIED IDEOGRAPH:'C44E:50254:鵲
CJK UNIFIED IDEOGRAPH:'C44F:50255:鵪
CJK UNIFIED IDEOGRAPH:'C450:50256:鵬
CJK UNIFIED IDEOGRAPH:'C451:50257:麒
CJK UNIFIED IDEOGRAPH:'C452:50258:麗
CJK UNIFIED IDEOGRAPH:'C453:50259:麓
CJK UNIFIED IDEOGRAPH:'C454:50260:麴
CJK UNIFIED IDEOGRAPH:'C455:50261:勸
CJK UNIFIED IDEOGRAPH:'C456:50262:嚨
CJK UNIFIED IDEOGRAPH:'C457:50263:嚷
CJK UNIFIED IDEOGRAPH:'C458:50264:嚶
CJK UNIFIED IDEOGRAPH:'C459:50265:嚴
CJK UNIFIED IDEOGRAPH:'C45A:50266:嚼
CJK UNIFIED IDEOGRAPH:'C45B:50267:壤
CJK UNIFIED IDEOGRAPH:'C45C:50268:孀
CJK UNIFIED IDEOGRAPH:'C45D:50269:孃
CJK UNIFIED IDEOGRAPH:'C45E:50270:孽
CJK UNIFIED IDEOGRAPH:'C45F:50271:寶
CJK UNIFIED IDEOGRAPH:'C460:50272:巉
CJK UNIFIED IDEOGRAPH:'C461:50273:懸
CJK UNIFIED IDEOGRAPH:'C462:50274:懺
CJK UNIFIED IDEOGRAPH:'C463:50275:攘
CJK UNIFIED IDEOGRAPH:'C464:50276:攔
CJK UNIFIED IDEOGRAPH:'C465:50277:攙
CJK UNIFIED IDEOGRAPH:'C466:50278:曦
CJK UNIFIED IDEOGRAPH:'C467:50279:朧
CJK UNIFIED IDEOGRAPH:'C468:50280:櫬
CJK UNIFIED IDEOGRAPH:'C469:50281:瀾
CJK UNIFIED IDEOGRAPH:'C46A:50282:瀰
CJK UNIFIED IDEOGRAPH:'C46B:50283:瀲
CJK UNIFIED IDEOGRAPH:'C46C:50284:爐
CJK UNIFIED IDEOGRAPH:'C46D:50285:獻
CJK UNIFIED IDEOGRAPH:'C46E:50286:瓏
CJK UNIFIED IDEOGRAPH:'C46F:50287:癢
CJK UNIFIED IDEOGRAPH:'C470:50288:癥
CJK UNIFIED IDEOGRAPH:'C471:50289:礦
CJK UNIFIED IDEOGRAPH:'C472:50290:礪
CJK UNIFIED IDEOGRAPH:'C473:50291:礬
CJK UNIFIED IDEOGRAPH:'C474:50292:礫
CJK UNIFIED IDEOGRAPH:'C475:50293:竇
CJK UNIFIED IDEOGRAPH:'C476:50294:競
CJK UNIFIED IDEOGRAPH:'C477:50295:籌
CJK UNIFIED IDEOGRAPH:'C478:50296:籃
CJK UNIFIED IDEOGRAPH:'C479:50297:籍
CJK UNIFIED IDEOGRAPH:'C47A:50298:糯
CJK UNIFIED IDEOGRAPH:'C47B:50299:糰
CJK UNIFIED IDEOGRAPH:'C47C:50300:辮
CJK UNIFIED IDEOGRAPH:'C47D:50301:繽
CJK UNIFIED IDEOGRAPH:'C47E:50302:繼
CJK UNIFIED IDEOGRAPH:'C4A1:50337:纂
CJK UNIFIED IDEOGRAPH:'C4A2:50338:罌
CJK UNIFIED IDEOGRAPH:'C4A3:50339:耀
CJK UNIFIED IDEOGRAPH:'C4A4:50340:臚
CJK UNIFIED IDEOGRAPH:'C4A5:50341:艦
CJK UNIFIED IDEOGRAPH:'C4A6:50342:藻
CJK UNIFIED IDEOGRAPH:'C4A7:50343:藹
CJK UNIFIED IDEOGRAPH:'C4A8:50344:蘑
CJK UNIFIED IDEOGRAPH:'C4A9:50345:藺
CJK UNIFIED IDEOGRAPH:'C4AA:50346:蘆
CJK UNIFIED IDEOGRAPH:'C4AB:50347:蘋
CJK UNIFIED IDEOGRAPH:'C4AC:50348:蘇
CJK UNIFIED IDEOGRAPH:'C4AD:50349:蘊
CJK UNIFIED IDEOGRAPH:'C4AE:50350:蠔
CJK UNIFIED IDEOGRAPH:'C4AF:50351:蠕
CJK UNIFIED IDEOGRAPH:'C4B0:50352:襤
CJK UNIFIED IDEOGRAPH:'C4B1:50353:覺
CJK UNIFIED IDEOGRAPH:'C4B2:50354:觸
CJK UNIFIED IDEOGRAPH:'C4B3:50355:議
CJK UNIFIED IDEOGRAPH:'C4B4:50356:譬
CJK UNIFIED IDEOGRAPH:'C4B5:50357:警
CJK UNIFIED IDEOGRAPH:'C4B6:50358:譯
CJK UNIFIED IDEOGRAPH:'C4B7:50359:譟
CJK UNIFIED IDEOGRAPH:'C4B8:50360:譫
CJK UNIFIED IDEOGRAPH:'C4B9:50361:贏
CJK UNIFIED IDEOGRAPH:'C4BA:50362:贍
CJK UNIFIED IDEOGRAPH:'C4BB:50363:躉
CJK UNIFIED IDEOGRAPH:'C4BC:50364:躁
CJK UNIFIED IDEOGRAPH:'C4BD:50365:躅
CJK UNIFIED IDEOGRAPH:'C4BE:50366:躂
CJK UNIFIED IDEOGRAPH:'C4BF:50367:醴
CJK UNIFIED IDEOGRAPH:'C4C0:50368:釋
CJK UNIFIED IDEOGRAPH:'C4C1:50369:鐘
CJK UNIFIED IDEOGRAPH:'C4C2:50370:鐃
CJK UNIFIED IDEOGRAPH:'C4C3:50371:鏽
CJK UNIFIED IDEOGRAPH:'C4C4:50372:闡
CJK UNIFIED IDEOGRAPH:'C4C5:50373:霰
CJK UNIFIED IDEOGRAPH:'C4C6:50374:飄
CJK UNIFIED IDEOGRAPH:'C4C7:50375:饒
CJK UNIFIED IDEOGRAPH:'C4C8:50376:饑
CJK UNIFIED IDEOGRAPH:'C4C9:50377:馨
CJK UNIFIED IDEOGRAPH:'C4CA:50378:騫
CJK UNIFIED IDEOGRAPH:'C4CB:50379:騰
CJK UNIFIED IDEOGRAPH:'C4CC:50380:騷
CJK UNIFIED IDEOGRAPH:'C4CD:50381:騵
CJK UNIFIED IDEOGRAPH:'C4CE:50382:鰓
CJK UNIFIED IDEOGRAPH:'C4CF:50383:鰍
CJK UNIFIED IDEOGRAPH:'C4D0:50384:鹹
CJK UNIFIED IDEOGRAPH:'C4D1:50385:麵
CJK UNIFIED IDEOGRAPH:'C4D2:50386:黨
CJK UNIFIED IDEOGRAPH:'C4D3:50387:鼯
CJK UNIFIED IDEOGRAPH:'C4D4:50388:齟
CJK UNIFIED IDEOGRAPH:'C4D5:50389:齣
CJK UNIFIED IDEOGRAPH:'C4D6:50390:齡
CJK UNIFIED IDEOGRAPH:'C4D7:50391:儷
CJK UNIFIED IDEOGRAPH:'C4D8:50392:儸
CJK UNIFIED IDEOGRAPH:'C4D9:50393:囁
CJK UNIFIED IDEOGRAPH:'C4DA:50394:囀
CJK UNIFIED IDEOGRAPH:'C4DB:50395:囂
CJK UNIFIED IDEOGRAPH:'C4DC:50396:夔
CJK UNIFIED IDEOGRAPH:'C4DD:50397:屬
CJK UNIFIED IDEOGRAPH:'C4DE:50398:巍
CJK UNIFIED IDEOGRAPH:'C4DF:50399:懼
CJK UNIFIED IDEOGRAPH:'C4E0:50400:懾
CJK UNIFIED IDEOGRAPH:'C4E1:50401:攝
CJK UNIFIED IDEOGRAPH:'C4E2:50402:攜
CJK UNIFIED IDEOGRAPH:'C4E3:50403:斕
CJK UNIFIED IDEOGRAPH:'C4E4:50404:曩
CJK UNIFIED IDEOGRAPH:'C4E5:50405:櫻
CJK UNIFIED IDEOGRAPH:'C4E6:50406:欄
CJK UNIFIED IDEOGRAPH:'C4E7:50407:櫺
CJK UNIFIED IDEOGRAPH:'C4E8:50408:殲
CJK UNIFIED IDEOGRAPH:'C4E9:50409:灌
CJK UNIFIED IDEOGRAPH:'C4EA:50410:爛
CJK UNIFIED IDEOGRAPH:'C4EB:50411:犧
CJK UNIFIED IDEOGRAPH:'C4EC:50412:瓖
CJK UNIFIED IDEOGRAPH:'C4ED:50413:瓔
CJK UNIFIED IDEOGRAPH:'C4EE:50414:癩
CJK UNIFIED IDEOGRAPH:'C4EF:50415:矓
CJK UNIFIED IDEOGRAPH:'C4F0:50416:籐
CJK UNIFIED IDEOGRAPH:'C4F1:50417:纏
CJK UNIFIED IDEOGRAPH:'C4F2:50418:續
CJK UNIFIED IDEOGRAPH:'C4F3:50419:羼
CJK UNIFIED IDEOGRAPH:'C4F4:50420:蘗
CJK UNIFIED IDEOGRAPH:'C4F5:50421:蘭
CJK UNIFIED IDEOGRAPH:'C4F6:50422:蘚
CJK UNIFIED IDEOGRAPH:'C4F7:50423:蠣
CJK UNIFIED IDEOGRAPH:'C4F8:50424:蠢
CJK UNIFIED IDEOGRAPH:'C4F9:50425:蠡
CJK UNIFIED IDEOGRAPH:'C4FA:50426:蠟
CJK UNIFIED IDEOGRAPH:'C4FB:50427:襪
CJK UNIFIED IDEOGRAPH:'C4FC:50428:襬
CJK UNIFIED IDEOGRAPH:'C4FD:50429:覽
CJK UNIFIED IDEOGRAPH:'C4FE:50430:譴
CJK UNIFIED IDEOGRAPH:'C540:50496:護
CJK UNIFIED IDEOGRAPH:'C541:50497:譽
CJK UNIFIED IDEOGRAPH:'C542:50498:贓
CJK UNIFIED IDEOGRAPH:'C543:50499:躊
CJK UNIFIED IDEOGRAPH:'C544:50500:躍
CJK UNIFIED IDEOGRAPH:'C545:50501:躋
CJK UNIFIED IDEOGRAPH:'C546:50502:轟
CJK UNIFIED IDEOGRAPH:'C547:50503:辯
CJK UNIFIED IDEOGRAPH:'C548:50504:醺
CJK UNIFIED IDEOGRAPH:'C549:50505:鐮
CJK UNIFIED IDEOGRAPH:'C54A:50506:鐳
CJK UNIFIED IDEOGRAPH:'C54B:50507:鐵
CJK UNIFIED IDEOGRAPH:'C54C:50508:鐺
CJK UNIFIED IDEOGRAPH:'C54D:50509:鐸
CJK UNIFIED IDEOGRAPH:'C54E:50510:鐲
CJK UNIFIED IDEOGRAPH:'C54F:50511:鐫
CJK UNIFIED IDEOGRAPH:'C550:50512:闢
CJK UNIFIED IDEOGRAPH:'C551:50513:霸
CJK UNIFIED IDEOGRAPH:'C552:50514:霹
CJK UNIFIED IDEOGRAPH:'C553:50515:露
CJK UNIFIED IDEOGRAPH:'C554:50516:響
CJK UNIFIED IDEOGRAPH:'C555:50517:顧
CJK UNIFIED IDEOGRAPH:'C556:50518:顥
CJK UNIFIED IDEOGRAPH:'C557:50519:饗
CJK UNIFIED IDEOGRAPH:'C558:50520:驅
CJK UNIFIED IDEOGRAPH:'C559:50521:驃
CJK UNIFIED IDEOGRAPH:'C55A:50522:驀
CJK UNIFIED IDEOGRAPH:'C55B:50523:騾
CJK UNIFIED IDEOGRAPH:'C55C:50524:髏
CJK UNIFIED IDEOGRAPH:'C55D:50525:魔
CJK UNIFIED IDEOGRAPH:'C55E:50526:魑
CJK UNIFIED IDEOGRAPH:'C55F:50527:鰭
CJK UNIFIED IDEOGRAPH:'C560:50528:鰥
CJK UNIFIED IDEOGRAPH:'C561:50529:鶯
CJK UNIFIED IDEOGRAPH:'C562:50530:鶴
CJK UNIFIED IDEOGRAPH:'C563:50531:鷂
CJK UNIFIED IDEOGRAPH:'C564:50532:鶸
CJK UNIFIED IDEOGRAPH:'C565:50533:麝
CJK UNIFIED IDEOGRAPH:'C566:50534:黯
CJK UNIFIED IDEOGRAPH:'C567:50535:鼙
CJK UNIFIED IDEOGRAPH:'C568:50536:齜
CJK UNIFIED IDEOGRAPH:'C569:50537:齦
CJK UNIFIED IDEOGRAPH:'C56A:50538:齧
CJK UNIFIED IDEOGRAPH:'C56B:50539:儼
CJK UNIFIED IDEOGRAPH:'C56C:50540:儻
CJK UNIFIED IDEOGRAPH:'C56D:50541:囈
CJK UNIFIED IDEOGRAPH:'C56E:50542:囊
CJK UNIFIED IDEOGRAPH:'C56F:50543:囉
CJK UNIFIED IDEOGRAPH:'C570:50544:孿
CJK UNIFIED IDEOGRAPH:'C571:50545:巔
CJK UNIFIED IDEOGRAPH:'C572:50546:巒
CJK UNIFIED IDEOGRAPH:'C573:50547:彎
CJK UNIFIED IDEOGRAPH:'C574:50548:懿
CJK UNIFIED IDEOGRAPH:'C575:50549:攤
CJK UNIFIED IDEOGRAPH:'C576:50550:權
CJK UNIFIED IDEOGRAPH:'C577:50551:歡
CJK UNIFIED IDEOGRAPH:'C578:50552:灑
CJK UNIFIED IDEOGRAPH:'C579:50553:灘
CJK UNIFIED IDEOGRAPH:'C57A:50554:玀
CJK UNIFIED IDEOGRAPH:'C57B:50555:瓤
CJK UNIFIED IDEOGRAPH:'C57C:50556:疊
CJK UNIFIED IDEOGRAPH:'C57D:50557:癮
CJK UNIFIED IDEOGRAPH:'C57E:50558:癬
CJK UNIFIED IDEOGRAPH:'C5A1:50593:禳
CJK UNIFIED IDEOGRAPH:'C5A2:50594:籠
CJK UNIFIED IDEOGRAPH:'C5A3:50595:籟
CJK UNIFIED IDEOGRAPH:'C5A4:50596:聾
CJK UNIFIED IDEOGRAPH:'C5A5:50597:聽
CJK UNIFIED IDEOGRAPH:'C5A6:50598:臟
CJK UNIFIED IDEOGRAPH:'C5A7:50599:襲
CJK UNIFIED IDEOGRAPH:'C5A8:50600:襯
CJK UNIFIED IDEOGRAPH:'C5A9:50601:觼
CJK UNIFIED IDEOGRAPH:'C5AA:50602:讀
CJK UNIFIED IDEOGRAPH:'C5AB:50603:贖
CJK UNIFIED IDEOGRAPH:'C5AC:50604:贗
CJK UNIFIED IDEOGRAPH:'C5AD:50605:躑
CJK UNIFIED IDEOGRAPH:'C5AE:50606:躓
CJK UNIFIED IDEOGRAPH:'C5AF:50607:轡
CJK UNIFIED IDEOGRAPH:'C5B0:50608:酈
CJK UNIFIED IDEOGRAPH:'C5B1:50609:鑄
CJK UNIFIED IDEOGRAPH:'C5B2:50610:鑑
CJK UNIFIED IDEOGRAPH:'C5B3:50611:鑒
CJK UNIFIED IDEOGRAPH:'C5B4:50612:霽
CJK UNIFIED IDEOGRAPH:'C5B5:50613:霾
CJK UNIFIED IDEOGRAPH:'C5B6:50614:韃
CJK UNIFIED IDEOGRAPH:'C5B7:50615:韁
CJK UNIFIED IDEOGRAPH:'C5B8:50616:顫
CJK UNIFIED IDEOGRAPH:'C5B9:50617:饕
CJK UNIFIED IDEOGRAPH:'C5BA:50618:驕
CJK UNIFIED IDEOGRAPH:'C5BB:50619:驍
CJK UNIFIED IDEOGRAPH:'C5BC:50620:髒
CJK UNIFIED IDEOGRAPH:'C5BD:50621:鬚
CJK UNIFIED IDEOGRAPH:'C5BE:50622:鱉
CJK UNIFIED IDEOGRAPH:'C5BF:50623:鰱
CJK UNIFIED IDEOGRAPH:'C5C0:50624:鰾
CJK UNIFIED IDEOGRAPH:'C5C1:50625:鰻
CJK UNIFIED IDEOGRAPH:'C5C2:50626:鷓
CJK UNIFIED IDEOGRAPH:'C5C3:50627:鷗
CJK UNIFIED IDEOGRAPH:'C5C4:50628:鼴
CJK UNIFIED IDEOGRAPH:'C5C5:50629:齬
CJK UNIFIED IDEOGRAPH:'C5C6:50630:齪
CJK UNIFIED IDEOGRAPH:'C5C7:50631:龔
CJK UNIFIED IDEOGRAPH:'C5C8:50632:囌
CJK UNIFIED IDEOGRAPH:'C5C9:50633:巖
CJK UNIFIED IDEOGRAPH:'C5CA:50634:戀
CJK UNIFIED IDEOGRAPH:'C5CB:50635:攣
CJK UNIFIED IDEOGRAPH:'C5CC:50636:攫
CJK UNIFIED IDEOGRAPH:'C5CD:50637:攪
CJK UNIFIED IDEOGRAPH:'C5CE:50638:曬
CJK UNIFIED IDEOGRAPH:'C5CF:50639:欐
CJK UNIFIED IDEOGRAPH:'C5D0:50640:瓚
CJK UNIFIED IDEOGRAPH:'C5D1:50641:竊
CJK UNIFIED IDEOGRAPH:'C5D2:50642:籤
CJK UNIFIED IDEOGRAPH:'C5D3:50643:籣
CJK UNIFIED IDEOGRAPH:'C5D4:50644:籥
CJK UNIFIED IDEOGRAPH:'C5D5:50645:纓
CJK UNIFIED IDEOGRAPH:'C5D6:50646:纖
CJK UNIFIED IDEOGRAPH:'C5D7:50647:纔
CJK UNIFIED IDEOGRAPH:'C5D8:50648:臢
CJK UNIFIED IDEOGRAPH:'C5D9:50649:蘸
CJK UNIFIED IDEOGRAPH:'C5DA:50650:蘿
CJK UNIFIED IDEOGRAPH:'C5DB:50651:蠱
CJK UNIFIED IDEOGRAPH:'C5DC:50652:變
CJK UNIFIED IDEOGRAPH:'C5DD:50653:邐
CJK UNIFIED IDEOGRAPH:'C5DE:50654:邏
CJK UNIFIED IDEOGRAPH:'C5DF:50655:鑣
CJK UNIFIED IDEOGRAPH:'C5E0:50656:鑠
CJK UNIFIED IDEOGRAPH:'C5E1:50657:鑤
CJK UNIFIED IDEOGRAPH:'C5E2:50658:靨
CJK UNIFIED IDEOGRAPH:'C5E3:50659:顯
CJK UNIFIED IDEOGRAPH:'C5E4:50660:饜
CJK UNIFIED IDEOGRAPH:'C5E5:50661:驚
CJK UNIFIED IDEOGRAPH:'C5E6:50662:驛
CJK UNIFIED IDEOGRAPH:'C5E7:50663:驗
CJK UNIFIED IDEOGRAPH:'C5E8:50664:髓
CJK UNIFIED IDEOGRAPH:'C5E9:50665:體
CJK UNIFIED IDEOGRAPH:'C5EA:50666:髑
CJK UNIFIED IDEOGRAPH:'C5EB:50667:鱔
CJK UNIFIED IDEOGRAPH:'C5EC:50668:鱗
CJK UNIFIED IDEOGRAPH:'C5ED:50669:鱖
CJK UNIFIED IDEOGRAPH:'C5EE:50670:鷥
CJK UNIFIED IDEOGRAPH:'C5EF:50671:麟
CJK UNIFIED IDEOGRAPH:'C5F0:50672:黴
CJK UNIFIED IDEOGRAPH:'C5F1:50673:囑
CJK UNIFIED IDEOGRAPH:'C5F2:50674:壩
CJK UNIFIED IDEOGRAPH:'C5F3:50675:攬
CJK UNIFIED IDEOGRAPH:'C5F4:50676:灞
CJK UNIFIED IDEOGRAPH:'C5F5:50677:癱
CJK UNIFIED IDEOGRAPH:'C5F6:50678:癲
CJK UNIFIED IDEOGRAPH:'C5F7:50679:矗
CJK UNIFIED IDEOGRAPH:'C5F8:50680:罐
CJK UNIFIED IDEOGRAPH:'C5F9:50681:羈
CJK UNIFIED IDEOGRAPH:'C5FA:50682:蠶
CJK UNIFIED IDEOGRAPH:'C5FB:50683:蠹
CJK UNIFIED IDEOGRAPH:'C5FC:50684:衢
CJK UNIFIED IDEOGRAPH:'C5FD:50685:讓
CJK UNIFIED IDEOGRAPH:'C5FE:50686:讒
CJK UNIFIED IDEOGRAPH:'C640:50752:讖
CJK UNIFIED IDEOGRAPH:'C641:50753:艷
CJK UNIFIED IDEOGRAPH:'C642:50754:贛
CJK UNIFIED IDEOGRAPH:'C643:50755:釀
CJK UNIFIED IDEOGRAPH:'C644:50756:鑪
CJK UNIFIED IDEOGRAPH:'C645:50757:靂
CJK UNIFIED IDEOGRAPH:'C646:50758:靈
CJK UNIFIED IDEOGRAPH:'C647:50759:靄
CJK UNIFIED IDEOGRAPH:'C648:50760:韆
CJK UNIFIED IDEOGRAPH:'C649:50761:顰
CJK UNIFIED IDEOGRAPH:'C64A:50762:驟
CJK UNIFIED IDEOGRAPH:'C64B:50763:鬢
CJK UNIFIED IDEOGRAPH:'C64C:50764:魘
CJK UNIFIED IDEOGRAPH:'C64D:50765:鱟
CJK UNIFIED IDEOGRAPH:'C64E:50766:鷹
CJK UNIFIED IDEOGRAPH:'C64F:50767:鷺
CJK UNIFIED IDEOGRAPH:'C650:50768:鹼
CJK UNIFIED IDEOGRAPH:'C651:50769:鹽
CJK UNIFIED IDEOGRAPH:'C652:50770:鼇
CJK UNIFIED IDEOGRAPH:'C653:50771:齷
CJK UNIFIED IDEOGRAPH:'C654:50772:齲
CJK UNIFIED IDEOGRAPH:'C655:50773:廳
CJK UNIFIED IDEOGRAPH:'C656:50774:欖
CJK UNIFIED IDEOGRAPH:'C657:50775:灣
CJK UNIFIED IDEOGRAPH:'C658:50776:籬
CJK UNIFIED IDEOGRAPH:'C659:50777:籮
CJK UNIFIED IDEOGRAPH:'C65A:50778:蠻
CJK UNIFIED IDEOGRAPH:'C65B:50779:觀
CJK UNIFIED IDEOGRAPH:'C65C:50780:躡
CJK UNIFIED IDEOGRAPH:'C65D:50781:釁
CJK UNIFIED IDEOGRAPH:'C65E:50782:鑲
CJK UNIFIED IDEOGRAPH:'C65F:50783:鑰
CJK UNIFIED IDEOGRAPH:'C660:50784:顱
CJK UNIFIED IDEOGRAPH:'C661:50785:饞
CJK UNIFIED IDEOGRAPH:'C662:50786:髖
CJK UNIFIED IDEOGRAPH:'C663:50787:鬣
CJK UNIFIED IDEOGRAPH:'C664:50788:黌
CJK UNIFIED IDEOGRAPH:'C665:50789:灤
CJK UNIFIED IDEOGRAPH:'C666:50790:矚
CJK UNIFIED IDEOGRAPH:'C667:50791:讚
CJK UNIFIED IDEOGRAPH:'C668:50792:鑷
CJK UNIFIED IDEOGRAPH:'C669:50793:韉
CJK UNIFIED IDEOGRAPH:'C66A:50794:驢
CJK UNIFIED IDEOGRAPH:'C66B:50795:驥
CJK UNIFIED IDEOGRAPH:'C66C:50796:纜
CJK UNIFIED IDEOGRAPH:'C66D:50797:讜
CJK UNIFIED IDEOGRAPH:'C66E:50798:躪
CJK UNIFIED IDEOGRAPH:'C66F:50799:釅
CJK UNIFIED IDEOGRAPH:'C670:50800:鑽
CJK UNIFIED IDEOGRAPH:'C671:50801:鑾
CJK UNIFIED IDEOGRAPH:'C672:50802:鑼
CJK UNIFIED IDEOGRAPH:'C673:50803:鱷
CJK UNIFIED IDEOGRAPH:'C674:50804:鱸
CJK UNIFIED IDEOGRAPH:'C675:50805:黷
CJK UNIFIED IDEOGRAPH:'C676:50806:豔
CJK UNIFIED IDEOGRAPH:'C677:50807:鑿
CJK UNIFIED IDEOGRAPH:'C678:50808:鸚
CJK UNIFIED IDEOGRAPH:'C679:50809:爨
CJK UNIFIED IDEOGRAPH:'C67A:50810:驪
CJK UNIFIED IDEOGRAPH:'C67B:50811:鬱
CJK UNIFIED IDEOGRAPH:'C67C:50812:鸛
CJK UNIFIED IDEOGRAPH:'C67D:50813:鸞
CJK UNIFIED IDEOGRAPH:'C67E:50814:籲
CJK UNIFIED IDEOGRAPH:'C940:51520:乂
CJK UNIFIED IDEOGRAPH:'C941:51521:乜
CJK UNIFIED IDEOGRAPH:'C942:51522:凵
CJK UNIFIED IDEOGRAPH:'C943:51523:匚
CJK UNIFIED IDEOGRAPH:'C944:51524:厂
CJK UNIFIED IDEOGRAPH:'C945:51525:万
CJK UNIFIED IDEOGRAPH:'C946:51526:丌
CJK UNIFIED IDEOGRAPH:'C947:51527:乇
CJK UNIFIED IDEOGRAPH:'C948:51528:亍
CJK UNIFIED IDEOGRAPH:'C949:51529:囗
CJK COMPATIBILITY IDEOGRAPH:'C94A:51530:兀
CJK UNIFIED IDEOGRAPH:'C94B:51531:屮
CJK UNIFIED IDEOGRAPH:'C94C:51532:彳
CJK UNIFIED IDEOGRAPH:'C94D:51533:丏
CJK UNIFIED IDEOGRAPH:'C94E:51534:冇
CJK UNIFIED IDEOGRAPH:'C94F:51535:与
CJK UNIFIED IDEOGRAPH:'C950:51536:丮
CJK UNIFIED IDEOGRAPH:'C951:51537:亓
CJK UNIFIED IDEOGRAPH:'C952:51538:仂
CJK UNIFIED IDEOGRAPH:'C953:51539:仉
CJK UNIFIED IDEOGRAPH:'C954:51540:仈
CJK UNIFIED IDEOGRAPH:'C955:51541:冘
CJK UNIFIED IDEOGRAPH:'C956:51542:勼
CJK UNIFIED IDEOGRAPH:'C957:51543:卬
CJK UNIFIED IDEOGRAPH:'C958:51544:厹
CJK UNIFIED IDEOGRAPH:'C959:51545:圠
CJK UNIFIED IDEOGRAPH:'C95A:51546:夃
CJK UNIFIED IDEOGRAPH:'C95B:51547:夬
CJK UNIFIED IDEOGRAPH:'C95C:51548:尐
CJK UNIFIED IDEOGRAPH:'C95D:51549:巿
CJK UNIFIED IDEOGRAPH:'C95E:51550:旡
CJK UNIFIED IDEOGRAPH:'C95F:51551:殳
CJK UNIFIED IDEOGRAPH:'C960:51552:毌
CJK UNIFIED IDEOGRAPH:'C961:51553:气
CJK UNIFIED IDEOGRAPH:'C962:51554:爿
CJK UNIFIED IDEOGRAPH:'C963:51555:丱
CJK UNIFIED IDEOGRAPH:'C964:51556:丼
CJK UNIFIED IDEOGRAPH:'C965:51557:仨
CJK UNIFIED IDEOGRAPH:'C966:51558:仜
CJK UNIFIED IDEOGRAPH:'C967:51559:仩
CJK UNIFIED IDEOGRAPH:'C968:51560:仡
CJK UNIFIED IDEOGRAPH:'C969:51561:仝
CJK UNIFIED IDEOGRAPH:'C96A:51562:仚
CJK UNIFIED IDEOGRAPH:'C96B:51563:刌
CJK UNIFIED IDEOGRAPH:'C96C:51564:匜
CJK UNIFIED IDEOGRAPH:'C96D:51565:卌
CJK UNIFIED IDEOGRAPH:'C96E:51566:圢
CJK UNIFIED IDEOGRAPH:'C96F:51567:圣
CJK UNIFIED IDEOGRAPH:'C970:51568:夗
CJK UNIFIED IDEOGRAPH:'C971:51569:夯
CJK UNIFIED IDEOGRAPH:'C972:51570:宁
CJK UNIFIED IDEOGRAPH:'C973:51571:宄
CJK UNIFIED IDEOGRAPH:'C974:51572:尒
CJK UNIFIED IDEOGRAPH:'C975:51573:尻
CJK UNIFIED IDEOGRAPH:'C976:51574:屴
CJK UNIFIED IDEOGRAPH:'C977:51575:屳
CJK UNIFIED IDEOGRAPH:'C978:51576:帄
CJK UNIFIED IDEOGRAPH:'C979:51577:庀
CJK UNIFIED IDEOGRAPH:'C97A:51578:庂
CJK UNIFIED IDEOGRAPH:'C97B:51579:忉
CJK UNIFIED IDEOGRAPH:'C97C:51580:戉
CJK UNIFIED IDEOGRAPH:'C97D:51581:扐
CJK UNIFIED IDEOGRAPH:'C97E:51582:氕
CJK UNIFIED IDEOGRAPH:'C9A1:51617:氶
CJK UNIFIED IDEOGRAPH:'C9A2:51618:汃
CJK UNIFIED IDEOGRAPH:'C9A3:51619:氿
CJK UNIFIED IDEOGRAPH:'C9A4:51620:氻
CJK UNIFIED IDEOGRAPH:'C9A5:51621:犮
CJK UNIFIED IDEOGRAPH:'C9A6:51622:犰
CJK UNIFIED IDEOGRAPH:'C9A7:51623:玊
CJK UNIFIED IDEOGRAPH:'C9A8:51624:禸
CJK UNIFIED IDEOGRAPH:'C9A9:51625:肊
CJK UNIFIED IDEOGRAPH:'C9AA:51626:阞
CJK UNIFIED IDEOGRAPH:'C9AB:51627:伎
CJK UNIFIED IDEOGRAPH:'C9AC:51628:优
CJK UNIFIED IDEOGRAPH:'C9AD:51629:伬
CJK UNIFIED IDEOGRAPH:'C9AE:51630:仵
CJK UNIFIED IDEOGRAPH:'C9AF:51631:伔
CJK UNIFIED IDEOGRAPH:'C9B0:51632:仱
CJK UNIFIED IDEOGRAPH:'C9B1:51633:伀
CJK UNIFIED IDEOGRAPH:'C9B2:51634:价
CJK UNIFIED IDEOGRAPH:'C9B3:51635:伈
CJK UNIFIED IDEOGRAPH:'C9B4:51636:伝
CJK UNIFIED IDEOGRAPH:'C9B5:51637:伂
CJK UNIFIED IDEOGRAPH:'C9B6:51638:伅
CJK UNIFIED IDEOGRAPH:'C9B7:51639:伢
CJK UNIFIED IDEOGRAPH:'C9B8:51640:伓
CJK UNIFIED IDEOGRAPH:'C9B9:51641:伄
CJK UNIFIED IDEOGRAPH:'C9BA:51642:仴
CJK UNIFIED IDEOGRAPH:'C9BB:51643:伒
CJK UNIFIED IDEOGRAPH:'C9BC:51644:冱
CJK UNIFIED IDEOGRAPH:'C9BD:51645:刓
CJK UNIFIED IDEOGRAPH:'C9BE:51646:刉
CJK UNIFIED IDEOGRAPH:'C9BF:51647:刐
CJK UNIFIED IDEOGRAPH:'C9C0:51648:劦
CJK UNIFIED IDEOGRAPH:'C9C1:51649:匢
CJK UNIFIED IDEOGRAPH:'C9C2:51650:匟
CJK UNIFIED IDEOGRAPH:'C9C3:51651:卍
CJK UNIFIED IDEOGRAPH:'C9C4:51652:厊
CJK UNIFIED IDEOGRAPH:'C9C5:51653:吇
CJK UNIFIED IDEOGRAPH:'C9C6:51654:囡
CJK UNIFIED IDEOGRAPH:'C9C7:51655:囟
CJK UNIFIED IDEOGRAPH:'C9C8:51656:圮
CJK UNIFIED IDEOGRAPH:'C9C9:51657:圪
CJK UNIFIED IDEOGRAPH:'C9CA:51658:圴
CJK UNIFIED IDEOGRAPH:'C9CB:51659:夼
CJK UNIFIED IDEOGRAPH:'C9CC:51660:妀
CJK UNIFIED IDEOGRAPH:'C9CD:51661:奼
CJK UNIFIED IDEOGRAPH:'C9CE:51662:妅
CJK UNIFIED IDEOGRAPH:'C9CF:51663:奻
CJK UNIFIED IDEOGRAPH:'C9D0:51664:奾
CJK UNIFIED IDEOGRAPH:'C9D1:51665:奷
CJK UNIFIED IDEOGRAPH:'C9D2:51666:奿
CJK UNIFIED IDEOGRAPH:'C9D3:51667:孖
CJK UNIFIED IDEOGRAPH:'C9D4:51668:尕
CJK UNIFIED IDEOGRAPH:'C9D5:51669:尥
CJK UNIFIED IDEOGRAPH:'C9D6:51670:屼
CJK UNIFIED IDEOGRAPH:'C9D7:51671:屺
CJK UNIFIED IDEOGRAPH:'C9D8:51672:屻
CJK UNIFIED IDEOGRAPH:'C9D9:51673:屾
CJK UNIFIED IDEOGRAPH:'C9DA:51674:巟
CJK UNIFIED IDEOGRAPH:'C9DB:51675:幵
CJK UNIFIED IDEOGRAPH:'C9DC:51676:庄
CJK UNIFIED IDEOGRAPH:'C9DD:51677:异
CJK UNIFIED IDEOGRAPH:'C9DE:51678:弚
CJK UNIFIED IDEOGRAPH:'C9DF:51679:彴
CJK UNIFIED IDEOGRAPH:'C9E0:51680:忕
CJK UNIFIED IDEOGRAPH:'C9E1:51681:忔
CJK UNIFIED IDEOGRAPH:'C9E2:51682:忏
CJK UNIFIED IDEOGRAPH:'C9E3:51683:扜
CJK UNIFIED IDEOGRAPH:'C9E4:51684:扞
CJK UNIFIED IDEOGRAPH:'C9E5:51685:扤
CJK UNIFIED IDEOGRAPH:'C9E6:51686:扡
CJK UNIFIED IDEOGRAPH:'C9E7:51687:扦
CJK UNIFIED IDEOGRAPH:'C9E8:51688:扢
CJK UNIFIED IDEOGRAPH:'C9E9:51689:扙
CJK UNIFIED IDEOGRAPH:'C9EA:51690:扠
CJK UNIFIED IDEOGRAPH:'C9EB:51691:扚
CJK UNIFIED IDEOGRAPH:'C9EC:51692:扥
CJK UNIFIED IDEOGRAPH:'C9ED:51693:旯
CJK UNIFIED IDEOGRAPH:'C9EE:51694:旮
CJK UNIFIED IDEOGRAPH:'C9EF:51695:朾
CJK UNIFIED IDEOGRAPH:'C9F0:51696:朹
CJK UNIFIED IDEOGRAPH:'C9F1:51697:朸
CJK UNIFIED IDEOGRAPH:'C9F2:51698:朻
CJK UNIFIED IDEOGRAPH:'C9F3:51699:机
CJK UNIFIED IDEOGRAPH:'C9F4:51700:朿
CJK UNIFIED IDEOGRAPH:'C9F5:51701:朼
CJK UNIFIED IDEOGRAPH:'C9F6:51702:朳
CJK UNIFIED IDEOGRAPH:'C9F7:51703:氘
CJK UNIFIED IDEOGRAPH:'C9F8:51704:汆
CJK UNIFIED IDEOGRAPH:'C9F9:51705:汒
CJK UNIFIED IDEOGRAPH:'C9FA:51706:汜
CJK UNIFIED IDEOGRAPH:'C9FB:51707:汏
CJK UNIFIED IDEOGRAPH:'C9FC:51708:汊
CJK UNIFIED IDEOGRAPH:'C9FD:51709:汔
CJK UNIFIED IDEOGRAPH:'C9FE:51710:汋
CJK UNIFIED IDEOGRAPH:'CA40:51776:汌
CJK UNIFIED IDEOGRAPH:'CA41:51777:灱
CJK UNIFIED IDEOGRAPH:'CA42:51778:牞
CJK UNIFIED IDEOGRAPH:'CA43:51779:犴
CJK UNIFIED IDEOGRAPH:'CA44:51780:犵
CJK UNIFIED IDEOGRAPH:'CA45:51781:玎
CJK UNIFIED IDEOGRAPH:'CA46:51782:甪
CJK UNIFIED IDEOGRAPH:'CA47:51783:癿
CJK UNIFIED IDEOGRAPH:'CA48:51784:穵
CJK UNIFIED IDEOGRAPH:'CA49:51785:网
CJK UNIFIED IDEOGRAPH:'CA4A:51786:艸
CJK UNIFIED IDEOGRAPH:'CA4B:51787:艼
CJK UNIFIED IDEOGRAPH:'CA4C:51788:芀
CJK UNIFIED IDEOGRAPH:'CA4D:51789:艽
CJK UNIFIED IDEOGRAPH:'CA4E:51790:艿
CJK UNIFIED IDEOGRAPH:'CA4F:51791:虍
CJK UNIFIED IDEOGRAPH:'CA50:51792:襾
CJK UNIFIED IDEOGRAPH:'CA51:51793:邙
CJK UNIFIED IDEOGRAPH:'CA52:51794:邗
CJK UNIFIED IDEOGRAPH:'CA53:51795:邘
CJK UNIFIED IDEOGRAPH:'CA54:51796:邛
CJK UNIFIED IDEOGRAPH:'CA55:51797:邔
CJK UNIFIED IDEOGRAPH:'CA56:51798:阢
CJK UNIFIED IDEOGRAPH:'CA57:51799:阤
CJK UNIFIED IDEOGRAPH:'CA58:51800:阠
CJK UNIFIED IDEOGRAPH:'CA59:51801:阣
CJK UNIFIED IDEOGRAPH:'CA5A:51802:佖
CJK UNIFIED IDEOGRAPH:'CA5B:51803:伻
CJK UNIFIED IDEOGRAPH:'CA5C:51804:佢
CJK UNIFIED IDEOGRAPH:'CA5D:51805:佉
CJK UNIFIED IDEOGRAPH:'CA5E:51806:体
CJK UNIFIED IDEOGRAPH:'CA5F:51807:佤
CJK UNIFIED IDEOGRAPH:'CA60:51808:伾
CJK UNIFIED IDEOGRAPH:'CA61:51809:佧
CJK UNIFIED IDEOGRAPH:'CA62:51810:佒
CJK UNIFIED IDEOGRAPH:'CA63:51811:佟
CJK UNIFIED IDEOGRAPH:'CA64:51812:佁
CJK UNIFIED IDEOGRAPH:'CA65:51813:佘
CJK UNIFIED IDEOGRAPH:'CA66:51814:伭
CJK UNIFIED IDEOGRAPH:'CA67:51815:伳
CJK UNIFIED IDEOGRAPH:'CA68:51816:伿
CJK UNIFIED IDEOGRAPH:'CA69:51817:佡
CJK UNIFIED IDEOGRAPH:'CA6A:51818:冏
CJK UNIFIED IDEOGRAPH:'CA6B:51819:冹
CJK UNIFIED IDEOGRAPH:'CA6C:51820:刜
CJK UNIFIED IDEOGRAPH:'CA6D:51821:刞
CJK UNIFIED IDEOGRAPH:'CA6E:51822:刡
CJK UNIFIED IDEOGRAPH:'CA6F:51823:劭
CJK UNIFIED IDEOGRAPH:'CA70:51824:劮
CJK UNIFIED IDEOGRAPH:'CA71:51825:匉
CJK UNIFIED IDEOGRAPH:'CA72:51826:卣
CJK UNIFIED IDEOGRAPH:'CA73:51827:卲
CJK UNIFIED IDEOGRAPH:'CA74:51828:厎
CJK UNIFIED IDEOGRAPH:'CA75:51829:厏
CJK UNIFIED IDEOGRAPH:'CA76:51830:吰
CJK UNIFIED IDEOGRAPH:'CA77:51831:吷
CJK UNIFIED IDEOGRAPH:'CA78:51832:吪
CJK UNIFIED IDEOGRAPH:'CA79:51833:呔
CJK UNIFIED IDEOGRAPH:'CA7A:51834:呅
CJK UNIFIED IDEOGRAPH:'CA7B:51835:吙
CJK UNIFIED IDEOGRAPH:'CA7C:51836:吜
CJK UNIFIED IDEOGRAPH:'CA7D:51837:吥
CJK UNIFIED IDEOGRAPH:'CA7E:51838:吘
CJK UNIFIED IDEOGRAPH:'CAA1:51873:吽
CJK UNIFIED IDEOGRAPH:'CAA2:51874:呏
CJK UNIFIED IDEOGRAPH:'CAA3:51875:呁
CJK UNIFIED IDEOGRAPH:'CAA4:51876:吨
CJK UNIFIED IDEOGRAPH:'CAA5:51877:吤
CJK UNIFIED IDEOGRAPH:'CAA6:51878:呇
CJK UNIFIED IDEOGRAPH:'CAA7:51879:囮
CJK UNIFIED IDEOGRAPH:'CAA8:51880:囧
CJK UNIFIED IDEOGRAPH:'CAA9:51881:囥
CJK UNIFIED IDEOGRAPH:'CAAA:51882:坁
CJK UNIFIED IDEOGRAPH:'CAAB:51883:坅
CJK UNIFIED IDEOGRAPH:'CAAC:51884:坌
CJK UNIFIED IDEOGRAPH:'CAAD:51885:坉
CJK UNIFIED IDEOGRAPH:'CAAE:51886:坋
CJK UNIFIED IDEOGRAPH:'CAAF:51887:坒
CJK UNIFIED IDEOGRAPH:'CAB0:51888:夆
CJK UNIFIED IDEOGRAPH:'CAB1:51889:奀
CJK UNIFIED IDEOGRAPH:'CAB2:51890:妦
CJK UNIFIED IDEOGRAPH:'CAB3:51891:妘
CJK UNIFIED IDEOGRAPH:'CAB4:51892:妠
CJK UNIFIED IDEOGRAPH:'CAB5:51893:妗
CJK UNIFIED IDEOGRAPH:'CAB6:51894:妎
CJK UNIFIED IDEOGRAPH:'CAB7:51895:妢
CJK UNIFIED IDEOGRAPH:'CAB8:51896:妐
CJK UNIFIED IDEOGRAPH:'CAB9:51897:妏
CJK UNIFIED IDEOGRAPH:'CABA:51898:妧
CJK UNIFIED IDEOGRAPH:'CABB:51899:妡
CJK UNIFIED IDEOGRAPH:'CABC:51900:宎
CJK UNIFIED IDEOGRAPH:'CABD:51901:宒
CJK UNIFIED IDEOGRAPH:'CABE:51902:尨
CJK UNIFIED IDEOGRAPH:'CABF:51903:尪
CJK UNIFIED IDEOGRAPH:'CAC0:51904:岍
CJK UNIFIED IDEOGRAPH:'CAC1:51905:岏
CJK UNIFIED IDEOGRAPH:'CAC2:51906:岈
CJK UNIFIED IDEOGRAPH:'CAC3:51907:岋
CJK UNIFIED IDEOGRAPH:'CAC4:51908:岉
CJK UNIFIED IDEOGRAPH:'CAC5:51909:岒
CJK UNIFIED IDEOGRAPH:'CAC6:51910:岊
CJK UNIFIED IDEOGRAPH:'CAC7:51911:岆
CJK UNIFIED IDEOGRAPH:'CAC8:51912:岓
CJK UNIFIED IDEOGRAPH:'CAC9:51913:岕
CJK UNIFIED IDEOGRAPH:'CACA:51914:巠
CJK UNIFIED IDEOGRAPH:'CACB:51915:帊
CJK UNIFIED IDEOGRAPH:'CACC:51916:帎
CJK UNIFIED IDEOGRAPH:'CACD:51917:庋
CJK UNIFIED IDEOGRAPH:'CACE:51918:庉
CJK UNIFIED IDEOGRAPH:'CACF:51919:庌
CJK UNIFIED IDEOGRAPH:'CAD0:51920:庈
CJK UNIFIED IDEOGRAPH:'CAD1:51921:庍
CJK UNIFIED IDEOGRAPH:'CAD2:51922:弅
CJK UNIFIED IDEOGRAPH:'CAD3:51923:弝
CJK UNIFIED IDEOGRAPH:'CAD4:51924:彸
CJK UNIFIED IDEOGRAPH:'CAD5:51925:彶
CJK UNIFIED IDEOGRAPH:'CAD6:51926:忒
CJK UNIFIED IDEOGRAPH:'CAD7:51927:忑
CJK UNIFIED IDEOGRAPH:'CAD8:51928:忐
CJK UNIFIED IDEOGRAPH:'CAD9:51929:忭
CJK UNIFIED IDEOGRAPH:'CADA:51930:忨
CJK UNIFIED IDEOGRAPH:'CADB:51931:忮
CJK UNIFIED IDEOGRAPH:'CADC:51932:忳
CJK UNIFIED IDEOGRAPH:'CADD:51933:忡
CJK UNIFIED IDEOGRAPH:'CADE:51934:忤
CJK UNIFIED IDEOGRAPH:'CADF:51935:忣
CJK UNIFIED IDEOGRAPH:'CAE0:51936:忺
CJK UNIFIED IDEOGRAPH:'CAE1:51937:忯
CJK UNIFIED IDEOGRAPH:'CAE2:51938:忷
CJK UNIFIED IDEOGRAPH:'CAE3:51939:忻
CJK UNIFIED IDEOGRAPH:'CAE4:51940:怀
CJK UNIFIED IDEOGRAPH:'CAE5:51941:忴
CJK UNIFIED IDEOGRAPH:'CAE6:51942:戺
CJK UNIFIED IDEOGRAPH:'CAE7:51943:抃
CJK UNIFIED IDEOGRAPH:'CAE8:51944:抌
CJK UNIFIED IDEOGRAPH:'CAE9:51945:抎
CJK UNIFIED IDEOGRAPH:'CAEA:51946:抏
CJK UNIFIED IDEOGRAPH:'CAEB:51947:抔
CJK UNIFIED IDEOGRAPH:'CAEC:51948:抇
CJK UNIFIED IDEOGRAPH:'CAED:51949:扱
CJK UNIFIED IDEOGRAPH:'CAEE:51950:扻
CJK UNIFIED IDEOGRAPH:'CAEF:51951:扺
CJK UNIFIED IDEOGRAPH:'CAF0:51952:扰
CJK UNIFIED IDEOGRAPH:'CAF1:51953:抁
CJK UNIFIED IDEOGRAPH:'CAF2:51954:抈
CJK UNIFIED IDEOGRAPH:'CAF3:51955:扷
CJK UNIFIED IDEOGRAPH:'CAF4:51956:扽
CJK UNIFIED IDEOGRAPH:'CAF5:51957:扲
CJK UNIFIED IDEOGRAPH:'CAF6:51958:扴
CJK UNIFIED IDEOGRAPH:'CAF7:51959:攷
CJK UNIFIED IDEOGRAPH:'CAF8:51960:旰
CJK UNIFIED IDEOGRAPH:'CAF9:51961:旴
CJK UNIFIED IDEOGRAPH:'CAFA:51962:旳
CJK UNIFIED IDEOGRAPH:'CAFB:51963:旲
CJK UNIFIED IDEOGRAPH:'CAFC:51964:旵
CJK UNIFIED IDEOGRAPH:'CAFD:51965:杅
CJK UNIFIED IDEOGRAPH:'CAFE:51966:杇
CJK UNIFIED IDEOGRAPH:'CB40:52032:杙
CJK UNIFIED IDEOGRAPH:'CB41:52033:杕
CJK UNIFIED IDEOGRAPH:'CB42:52034:杌
CJK UNIFIED IDEOGRAPH:'CB43:52035:杈
CJK UNIFIED IDEOGRAPH:'CB44:52036:杝
CJK UNIFIED IDEOGRAPH:'CB45:52037:杍
CJK UNIFIED IDEOGRAPH:'CB46:52038:杚
CJK UNIFIED IDEOGRAPH:'CB47:52039:杋
CJK UNIFIED IDEOGRAPH:'CB48:52040:毐
CJK UNIFIED IDEOGRAPH:'CB49:52041:氙
CJK UNIFIED IDEOGRAPH:'CB4A:52042:氚
CJK UNIFIED IDEOGRAPH:'CB4B:52043:汸
CJK UNIFIED IDEOGRAPH:'CB4C:52044:汧
CJK UNIFIED IDEOGRAPH:'CB4D:52045:汫
CJK UNIFIED IDEOGRAPH:'CB4E:52046:沄
CJK UNIFIED IDEOGRAPH:'CB4F:52047:沋
CJK UNIFIED IDEOGRAPH:'CB50:52048:沏
CJK UNIFIED IDEOGRAPH:'CB51:52049:汱
CJK UNIFIED IDEOGRAPH:'CB52:52050:汯
CJK UNIFIED IDEOGRAPH:'CB53:52051:汩
CJK UNIFIED IDEOGRAPH:'CB54:52052:沚
CJK UNIFIED IDEOGRAPH:'CB55:52053:汭
CJK UNIFIED IDEOGRAPH:'CB56:52054:沇
CJK UNIFIED IDEOGRAPH:'CB57:52055:沕
CJK UNIFIED IDEOGRAPH:'CB58:52056:沜
CJK UNIFIED IDEOGRAPH:'CB59:52057:汦
CJK UNIFIED IDEOGRAPH:'CB5A:52058:汳
CJK UNIFIED IDEOGRAPH:'CB5B:52059:汥
CJK UNIFIED IDEOGRAPH:'CB5C:52060:汻
CJK UNIFIED IDEOGRAPH:'CB5D:52061:沎
CJK UNIFIED IDEOGRAPH:'CB5E:52062:灴
CJK UNIFIED IDEOGRAPH:'CB5F:52063:灺
CJK UNIFIED IDEOGRAPH:'CB60:52064:牣
CJK UNIFIED IDEOGRAPH:'CB61:52065:犿
CJK UNIFIED IDEOGRAPH:'CB62:52066:犽
CJK UNIFIED IDEOGRAPH:'CB63:52067:狃
CJK UNIFIED IDEOGRAPH:'CB64:52068:狆
CJK UNIFIED IDEOGRAPH:'CB65:52069:狁
CJK UNIFIED IDEOGRAPH:'CB66:52070:犺
CJK UNIFIED IDEOGRAPH:'CB67:52071:狅
CJK UNIFIED IDEOGRAPH:'CB68:52072:玕
CJK UNIFIED IDEOGRAPH:'CB69:52073:玗
CJK UNIFIED IDEOGRAPH:'CB6A:52074:玓
CJK UNIFIED IDEOGRAPH:'CB6B:52075:玔
CJK UNIFIED IDEOGRAPH:'CB6C:52076:玒
CJK UNIFIED IDEOGRAPH:'CB6D:52077:町
CJK UNIFIED IDEOGRAPH:'CB6E:52078:甹
CJK UNIFIED IDEOGRAPH:'CB6F:52079:疔
CJK UNIFIED IDEOGRAPH:'CB70:52080:疕
CJK UNIFIED IDEOGRAPH:'CB71:52081:皁
CJK UNIFIED IDEOGRAPH:'CB72:52082:礽
CJK UNIFIED IDEOGRAPH:'CB73:52083:耴
CJK UNIFIED IDEOGRAPH:'CB74:52084:肕
CJK UNIFIED IDEOGRAPH:'CB75:52085:肙
CJK UNIFIED IDEOGRAPH:'CB76:52086:肐
CJK UNIFIED IDEOGRAPH:'CB77:52087:肒
CJK UNIFIED IDEOGRAPH:'CB78:52088:肜
CJK UNIFIED IDEOGRAPH:'CB79:52089:芐
CJK UNIFIED IDEOGRAPH:'CB7A:52090:芏
CJK UNIFIED IDEOGRAPH:'CB7B:52091:芅
CJK UNIFIED IDEOGRAPH:'CB7C:52092:芎
CJK UNIFIED IDEOGRAPH:'CB7D:52093:芑
CJK UNIFIED IDEOGRAPH:'CB7E:52094:芓
CJK UNIFIED IDEOGRAPH:'CBA1:52129:芊
CJK UNIFIED IDEOGRAPH:'CBA2:52130:芃
CJK UNIFIED IDEOGRAPH:'CBA3:52131:芄
CJK UNIFIED IDEOGRAPH:'CBA4:52132:豸
CJK UNIFIED IDEOGRAPH:'CBA5:52133:迉
CJK UNIFIED IDEOGRAPH:'CBA6:52134:辿
CJK UNIFIED IDEOGRAPH:'CBA7:52135:邟
CJK UNIFIED IDEOGRAPH:'CBA8:52136:邡
CJK UNIFIED IDEOGRAPH:'CBA9:52137:邥
CJK UNIFIED IDEOGRAPH:'CBAA:52138:邞
CJK UNIFIED IDEOGRAPH:'CBAB:52139:邧
CJK UNIFIED IDEOGRAPH:'CBAC:52140:邠
CJK UNIFIED IDEOGRAPH:'CBAD:52141:阰
CJK UNIFIED IDEOGRAPH:'CBAE:52142:阨
CJK UNIFIED IDEOGRAPH:'CBAF:52143:阯
CJK UNIFIED IDEOGRAPH:'CBB0:52144:阭
CJK UNIFIED IDEOGRAPH:'CBB1:52145:丳
CJK UNIFIED IDEOGRAPH:'CBB2:52146:侘
CJK UNIFIED IDEOGRAPH:'CBB3:52147:佼
CJK UNIFIED IDEOGRAPH:'CBB4:52148:侅
CJK UNIFIED IDEOGRAPH:'CBB5:52149:佽
CJK UNIFIED IDEOGRAPH:'CBB6:52150:侀
CJK UNIFIED IDEOGRAPH:'CBB7:52151:侇
CJK UNIFIED IDEOGRAPH:'CBB8:52152:佶
CJK UNIFIED IDEOGRAPH:'CBB9:52153:佴
CJK UNIFIED IDEOGRAPH:'CBBA:52154:侉
CJK UNIFIED IDEOGRAPH:'CBBB:52155:侄
CJK UNIFIED IDEOGRAPH:'CBBC:52156:佷
CJK UNIFIED IDEOGRAPH:'CBBD:52157:佌
CJK UNIFIED IDEOGRAPH:'CBBE:52158:侗
CJK UNIFIED IDEOGRAPH:'CBBF:52159:佪
CJK UNIFIED IDEOGRAPH:'CBC0:52160:侚
CJK UNIFIED IDEOGRAPH:'CBC1:52161:佹
CJK UNIFIED IDEOGRAPH:'CBC2:52162:侁
CJK UNIFIED IDEOGRAPH:'CBC3:52163:佸
CJK UNIFIED IDEOGRAPH:'CBC4:52164:侐
CJK UNIFIED IDEOGRAPH:'CBC5:52165:侜
CJK UNIFIED IDEOGRAPH:'CBC6:52166:侔
CJK UNIFIED IDEOGRAPH:'CBC7:52167:侞
CJK UNIFIED IDEOGRAPH:'CBC8:52168:侒
CJK UNIFIED IDEOGRAPH:'CBC9:52169:侂
CJK UNIFIED IDEOGRAPH:'CBCA:52170:侕
CJK UNIFIED IDEOGRAPH:'CBCB:52171:佫
CJK UNIFIED IDEOGRAPH:'CBCC:52172:佮
CJK UNIFIED IDEOGRAPH:'CBCD:52173:冞
CJK UNIFIED IDEOGRAPH:'CBCE:52174:冼
CJK UNIFIED IDEOGRAPH:'CBCF:52175:冾
CJK UNIFIED IDEOGRAPH:'CBD0:52176:刵
CJK UNIFIED IDEOGRAPH:'CBD1:52177:刲
CJK UNIFIED IDEOGRAPH:'CBD2:52178:刳
CJK UNIFIED IDEOGRAPH:'CBD3:52179:剆
CJK UNIFIED IDEOGRAPH:'CBD4:52180:刱
CJK UNIFIED IDEOGRAPH:'CBD5:52181:劼
CJK UNIFIED IDEOGRAPH:'CBD6:52182:匊
CJK UNIFIED IDEOGRAPH:'CBD7:52183:匋
CJK UNIFIED IDEOGRAPH:'CBD8:52184:匼
CJK UNIFIED IDEOGRAPH:'CBD9:52185:厒
CJK UNIFIED IDEOGRAPH:'CBDA:52186:厔
CJK UNIFIED IDEOGRAPH:'CBDB:52187:咇
CJK UNIFIED IDEOGRAPH:'CBDC:52188:呿
CJK UNIFIED IDEOGRAPH:'CBDD:52189:咁
CJK UNIFIED IDEOGRAPH:'CBDE:52190:咑
CJK UNIFIED IDEOGRAPH:'CBDF:52191:咂
CJK UNIFIED IDEOGRAPH:'CBE0:52192:咈
CJK UNIFIED IDEOGRAPH:'CBE1:52193:呫
CJK UNIFIED IDEOGRAPH:'CBE2:52194:呺
CJK UNIFIED IDEOGRAPH:'CBE3:52195:呾
CJK UNIFIED IDEOGRAPH:'CBE4:52196:呥
CJK UNIFIED IDEOGRAPH:'CBE5:52197:呬
CJK UNIFIED IDEOGRAPH:'CBE6:52198:呴
CJK UNIFIED IDEOGRAPH:'CBE7:52199:呦
CJK UNIFIED IDEOGRAPH:'CBE8:52200:咍
CJK UNIFIED IDEOGRAPH:'CBE9:52201:呯
CJK UNIFIED IDEOGRAPH:'CBEA:52202:呡
CJK UNIFIED IDEOGRAPH:'CBEB:52203:呠
CJK UNIFIED IDEOGRAPH:'CBEC:52204:咘
CJK UNIFIED IDEOGRAPH:'CBED:52205:呣
CJK UNIFIED IDEOGRAPH:'CBEE:52206:呧
CJK UNIFIED IDEOGRAPH:'CBEF:52207:呤
CJK UNIFIED IDEOGRAPH:'CBF0:52208:囷
CJK UNIFIED IDEOGRAPH:'CBF1:52209:囹
CJK UNIFIED IDEOGRAPH:'CBF2:52210:坯
CJK UNIFIED IDEOGRAPH:'CBF3:52211:坲
CJK UNIFIED IDEOGRAPH:'CBF4:52212:坭
CJK UNIFIED IDEOGRAPH:'CBF5:52213:坫
CJK UNIFIED IDEOGRAPH:'CBF6:52214:坱
CJK UNIFIED IDEOGRAPH:'CBF7:52215:坰
CJK UNIFIED IDEOGRAPH:'CBF8:52216:坶
CJK UNIFIED IDEOGRAPH:'CBF9:52217:垀
CJK UNIFIED IDEOGRAPH:'CBFA:52218:坵
CJK UNIFIED IDEOGRAPH:'CBFB:52219:坻
CJK UNIFIED IDEOGRAPH:'CBFC:52220:坳
CJK UNIFIED IDEOGRAPH:'CBFD:52221:坴
CJK UNIFIED IDEOGRAPH:'CBFE:52222:坢
CJK UNIFIED IDEOGRAPH:'CC40:52288:坨
CJK UNIFIED IDEOGRAPH:'CC41:52289:坽
CJK UNIFIED IDEOGRAPH:'CC42:52290:夌
CJK UNIFIED IDEOGRAPH:'CC43:52291:奅
CJK UNIFIED IDEOGRAPH:'CC44:52292:妵
CJK UNIFIED IDEOGRAPH:'CC45:52293:妺
CJK UNIFIED IDEOGRAPH:'CC46:52294:姏
CJK UNIFIED IDEOGRAPH:'CC47:52295:姎
CJK UNIFIED IDEOGRAPH:'CC48:52296:妲
CJK UNIFIED IDEOGRAPH:'CC49:52297:姌
CJK UNIFIED IDEOGRAPH:'CC4A:52298:姁
CJK UNIFIED IDEOGRAPH:'CC4B:52299:妶
CJK UNIFIED IDEOGRAPH:'CC4C:52300:妼
CJK UNIFIED IDEOGRAPH:'CC4D:52301:姃
CJK UNIFIED IDEOGRAPH:'CC4E:52302:姖
CJK UNIFIED IDEOGRAPH:'CC4F:52303:妱
CJK UNIFIED IDEOGRAPH:'CC50:52304:妽
CJK UNIFIED IDEOGRAPH:'CC51:52305:姀
CJK UNIFIED IDEOGRAPH:'CC52:52306:姈
CJK UNIFIED IDEOGRAPH:'CC53:52307:妴
CJK UNIFIED IDEOGRAPH:'CC54:52308:姇
CJK UNIFIED IDEOGRAPH:'CC55:52309:孢
CJK UNIFIED IDEOGRAPH:'CC56:52310:孥
CJK UNIFIED IDEOGRAPH:'CC57:52311:宓
CJK UNIFIED IDEOGRAPH:'CC58:52312:宕
CJK UNIFIED IDEOGRAPH:'CC59:52313:屄
CJK UNIFIED IDEOGRAPH:'CC5A:52314:屇
CJK UNIFIED IDEOGRAPH:'CC5B:52315:岮
CJK UNIFIED IDEOGRAPH:'CC5C:52316:岤
CJK UNIFIED IDEOGRAPH:'CC5D:52317:岠
CJK UNIFIED IDEOGRAPH:'CC5E:52318:岵
CJK UNIFIED IDEOGRAPH:'CC5F:52319:岯
CJK UNIFIED IDEOGRAPH:'CC60:52320:岨
CJK UNIFIED IDEOGRAPH:'CC61:52321:岬
CJK UNIFIED IDEOGRAPH:'CC62:52322:岟
CJK UNIFIED IDEOGRAPH:'CC63:52323:岣
CJK UNIFIED IDEOGRAPH:'CC64:52324:岭
CJK UNIFIED IDEOGRAPH:'CC65:52325:岢
CJK UNIFIED IDEOGRAPH:'CC66:52326:岪
CJK UNIFIED IDEOGRAPH:'CC67:52327:岧
CJK UNIFIED IDEOGRAPH:'CC68:52328:岝
CJK UNIFIED IDEOGRAPH:'CC69:52329:岥
CJK UNIFIED IDEOGRAPH:'CC6A:52330:岶
CJK UNIFIED IDEOGRAPH:'CC6B:52331:岰
CJK UNIFIED IDEOGRAPH:'CC6C:52332:岦
CJK UNIFIED IDEOGRAPH:'CC6D:52333:帗
CJK UNIFIED IDEOGRAPH:'CC6E:52334:帔
CJK UNIFIED IDEOGRAPH:'CC6F:52335:帙
CJK UNIFIED IDEOGRAPH:'CC70:52336:弨
CJK UNIFIED IDEOGRAPH:'CC71:52337:弢
CJK UNIFIED IDEOGRAPH:'CC72:52338:弣
CJK UNIFIED IDEOGRAPH:'CC73:52339:弤
CJK UNIFIED IDEOGRAPH:'CC74:52340:彔
CJK UNIFIED IDEOGRAPH:'CC75:52341:徂
CJK UNIFIED IDEOGRAPH:'CC76:52342:彾
CJK UNIFIED IDEOGRAPH:'CC77:52343:彽
CJK UNIFIED IDEOGRAPH:'CC78:52344:忞
CJK UNIFIED IDEOGRAPH:'CC79:52345:忥
CJK UNIFIED IDEOGRAPH:'CC7A:52346:怭
CJK UNIFIED IDEOGRAPH:'CC7B:52347:怦
CJK UNIFIED IDEOGRAPH:'CC7C:52348:怙
CJK UNIFIED IDEOGRAPH:'CC7D:52349:怲
CJK UNIFIED IDEOGRAPH:'CC7E:52350:怋
CJK UNIFIED IDEOGRAPH:'CCA1:52385:怴
CJK UNIFIED IDEOGRAPH:'CCA2:52386:怊
CJK UNIFIED IDEOGRAPH:'CCA3:52387:怗
CJK UNIFIED IDEOGRAPH:'CCA4:52388:怳
CJK UNIFIED IDEOGRAPH:'CCA5:52389:怚
CJK UNIFIED IDEOGRAPH:'CCA6:52390:怞
CJK UNIFIED IDEOGRAPH:'CCA7:52391:怬
CJK UNIFIED IDEOGRAPH:'CCA8:52392:怢
CJK UNIFIED IDEOGRAPH:'CCA9:52393:怍
CJK UNIFIED IDEOGRAPH:'CCAA:52394:怐
CJK UNIFIED IDEOGRAPH:'CCAB:52395:怮
CJK UNIFIED IDEOGRAPH:'CCAC:52396:怓
CJK UNIFIED IDEOGRAPH:'CCAD:52397:怑
CJK UNIFIED IDEOGRAPH:'CCAE:52398:怌
CJK UNIFIED IDEOGRAPH:'CCAF:52399:怉
CJK UNIFIED IDEOGRAPH:'CCB0:52400:怜
CJK UNIFIED IDEOGRAPH:'CCB1:52401:戔
CJK UNIFIED IDEOGRAPH:'CCB2:52402:戽
CJK UNIFIED IDEOGRAPH:'CCB3:52403:抭
CJK UNIFIED IDEOGRAPH:'CCB4:52404:抴
CJK UNIFIED IDEOGRAPH:'CCB5:52405:拑
CJK UNIFIED IDEOGRAPH:'CCB6:52406:抾
CJK UNIFIED IDEOGRAPH:'CCB7:52407:抪
CJK UNIFIED IDEOGRAPH:'CCB8:52408:抶
CJK UNIFIED IDEOGRAPH:'CCB9:52409:拊
CJK UNIFIED IDEOGRAPH:'CCBA:52410:抮
CJK UNIFIED IDEOGRAPH:'CCBB:52411:抳
CJK UNIFIED IDEOGRAPH:'CCBC:52412:抯
CJK UNIFIED IDEOGRAPH:'CCBD:52413:抻
CJK UNIFIED IDEOGRAPH:'CCBE:52414:抩
CJK UNIFIED IDEOGRAPH:'CCBF:52415:抰
CJK UNIFIED IDEOGRAPH:'CCC0:52416:抸
CJK UNIFIED IDEOGRAPH:'CCC1:52417:攽
CJK UNIFIED IDEOGRAPH:'CCC2:52418:斨
CJK UNIFIED IDEOGRAPH:'CCC3:52419:斻
CJK UNIFIED IDEOGRAPH:'CCC4:52420:昉
CJK UNIFIED IDEOGRAPH:'CCC5:52421:旼
CJK UNIFIED IDEOGRAPH:'CCC6:52422:昄
CJK UNIFIED IDEOGRAPH:'CCC7:52423:昒
CJK UNIFIED IDEOGRAPH:'CCC8:52424:昈
CJK UNIFIED IDEOGRAPH:'CCC9:52425:旻
CJK UNIFIED IDEOGRAPH:'CCCA:52426:昃
CJK UNIFIED IDEOGRAPH:'CCCB:52427:昋
CJK UNIFIED IDEOGRAPH:'CCCC:52428:昍
CJK UNIFIED IDEOGRAPH:'CCCD:52429:昅
CJK UNIFIED IDEOGRAPH:'CCCE:52430:旽
CJK UNIFIED IDEOGRAPH:'CCCF:52431:昑
CJK UNIFIED IDEOGRAPH:'CCD0:52432:昐
CJK UNIFIED IDEOGRAPH:'CCD1:52433:曶
CJK UNIFIED IDEOGRAPH:'CCD2:52434:朊
CJK UNIFIED IDEOGRAPH:'CCD3:52435:枅
CJK UNIFIED IDEOGRAPH:'CCD4:52436:杬
CJK UNIFIED IDEOGRAPH:'CCD5:52437:枎
CJK UNIFIED IDEOGRAPH:'CCD6:52438:枒
CJK UNIFIED IDEOGRAPH:'CCD7:52439:杶
CJK UNIFIED IDEOGRAPH:'CCD8:52440:杻
CJK UNIFIED IDEOGRAPH:'CCD9:52441:枘
CJK UNIFIED IDEOGRAPH:'CCDA:52442:枆
CJK UNIFIED IDEOGRAPH:'CCDB:52443:构
CJK UNIFIED IDEOGRAPH:'CCDC:52444:杴
CJK UNIFIED IDEOGRAPH:'CCDD:52445:枍
CJK UNIFIED IDEOGRAPH:'CCDE:52446:枌
CJK UNIFIED IDEOGRAPH:'CCDF:52447:杺
CJK UNIFIED IDEOGRAPH:'CCE0:52448:枟
CJK UNIFIED IDEOGRAPH:'CCE1:52449:枑
CJK UNIFIED IDEOGRAPH:'CCE2:52450:枙
CJK UNIFIED IDEOGRAPH:'CCE3:52451:枃
CJK UNIFIED IDEOGRAPH:'CCE4:52452:杽
CJK UNIFIED IDEOGRAPH:'CCE5:52453:极
CJK UNIFIED IDEOGRAPH:'CCE6:52454:杸
CJK UNIFIED IDEOGRAPH:'CCE7:52455:杹
CJK UNIFIED IDEOGRAPH:'CCE8:52456:枔
CJK UNIFIED IDEOGRAPH:'CCE9:52457:欥
CJK UNIFIED IDEOGRAPH:'CCEA:52458:殀
CJK UNIFIED IDEOGRAPH:'CCEB:52459:歾
CJK UNIFIED IDEOGRAPH:'CCEC:52460:毞
CJK UNIFIED IDEOGRAPH:'CCED:52461:氝
CJK UNIFIED IDEOGRAPH:'CCEE:52462:沓
CJK UNIFIED IDEOGRAPH:'CCEF:52463:泬
CJK UNIFIED IDEOGRAPH:'CCF0:52464:泫
CJK UNIFIED IDEOGRAPH:'CCF1:52465:泮
CJK UNIFIED IDEOGRAPH:'CCF2:52466:泙
CJK UNIFIED IDEOGRAPH:'CCF3:52467:沶
CJK UNIFIED IDEOGRAPH:'CCF4:52468:泔
CJK UNIFIED IDEOGRAPH:'CCF5:52469:沭
CJK UNIFIED IDEOGRAPH:'CCF6:52470:泧
CJK UNIFIED IDEOGRAPH:'CCF7:52471:沷
CJK UNIFIED IDEOGRAPH:'CCF8:52472:泐
CJK UNIFIED IDEOGRAPH:'CCF9:52473:泂
CJK UNIFIED IDEOGRAPH:'CCFA:52474:沺
CJK UNIFIED IDEOGRAPH:'CCFB:52475:泃
CJK UNIFIED IDEOGRAPH:'CCFC:52476:泆
CJK UNIFIED IDEOGRAPH:'CCFD:52477:泭
CJK UNIFIED IDEOGRAPH:'CCFE:52478:泲
CJK UNIFIED IDEOGRAPH:'CD40:52544:泒
CJK UNIFIED IDEOGRAPH:'CD41:52545:泝
CJK UNIFIED IDEOGRAPH:'CD42:52546:沴
CJK UNIFIED IDEOGRAPH:'CD43:52547:沊
CJK UNIFIED IDEOGRAPH:'CD44:52548:沝
CJK UNIFIED IDEOGRAPH:'CD45:52549:沀
CJK UNIFIED IDEOGRAPH:'CD46:52550:泞
CJK UNIFIED IDEOGRAPH:'CD47:52551:泀
CJK UNIFIED IDEOGRAPH:'CD48:52552:洰
CJK UNIFIED IDEOGRAPH:'CD49:52553:泍
CJK UNIFIED IDEOGRAPH:'CD4A:52554:泇
CJK UNIFIED IDEOGRAPH:'CD4B:52555:沰
CJK UNIFIED IDEOGRAPH:'CD4C:52556:泹
CJK UNIFIED IDEOGRAPH:'CD4D:52557:泏
CJK UNIFIED IDEOGRAPH:'CD4E:52558:泩
CJK UNIFIED IDEOGRAPH:'CD4F:52559:泑
CJK UNIFIED IDEOGRAPH:'CD50:52560:炔
CJK UNIFIED IDEOGRAPH:'CD51:52561:炘
CJK UNIFIED IDEOGRAPH:'CD52:52562:炅
CJK UNIFIED IDEOGRAPH:'CD53:52563:炓
CJK UNIFIED IDEOGRAPH:'CD54:52564:炆
CJK UNIFIED IDEOGRAPH:'CD55:52565:炄
CJK UNIFIED IDEOGRAPH:'CD56:52566:炑
CJK UNIFIED IDEOGRAPH:'CD57:52567:炖
CJK UNIFIED IDEOGRAPH:'CD58:52568:炂
CJK UNIFIED IDEOGRAPH:'CD59:52569:炚
CJK UNIFIED IDEOGRAPH:'CD5A:52570:炃
CJK UNIFIED IDEOGRAPH:'CD5B:52571:牪
CJK UNIFIED IDEOGRAPH:'CD5C:52572:狖
CJK UNIFIED IDEOGRAPH:'CD5D:52573:狋
CJK UNIFIED IDEOGRAPH:'CD5E:52574:狘
CJK UNIFIED IDEOGRAPH:'CD5F:52575:狉
CJK UNIFIED IDEOGRAPH:'CD60:52576:狜
CJK UNIFIED IDEOGRAPH:'CD61:52577:狒
CJK UNIFIED IDEOGRAPH:'CD62:52578:狔
CJK UNIFIED IDEOGRAPH:'CD63:52579:狚
CJK UNIFIED IDEOGRAPH:'CD64:52580:狌
CJK UNIFIED IDEOGRAPH:'CD65:52581:狑
CJK UNIFIED IDEOGRAPH:'CD66:52582:玤
CJK UNIFIED IDEOGRAPH:'CD67:52583:玡
CJK UNIFIED IDEOGRAPH:'CD68:52584:玭
CJK UNIFIED IDEOGRAPH:'CD69:52585:玦
CJK UNIFIED IDEOGRAPH:'CD6A:52586:玢
CJK UNIFIED IDEOGRAPH:'CD6B:52587:玠
CJK UNIFIED IDEOGRAPH:'CD6C:52588:玬
CJK UNIFIED IDEOGRAPH:'CD6D:52589:玝
CJK UNIFIED IDEOGRAPH:'CD6E:52590:瓝
CJK UNIFIED IDEOGRAPH:'CD6F:52591:瓨
CJK UNIFIED IDEOGRAPH:'CD70:52592:甿
CJK UNIFIED IDEOGRAPH:'CD71:52593:畀
CJK UNIFIED IDEOGRAPH:'CD72:52594:甾
CJK UNIFIED IDEOGRAPH:'CD73:52595:疌
CJK UNIFIED IDEOGRAPH:'CD74:52596:疘
CJK UNIFIED IDEOGRAPH:'CD75:52597:皯
CJK UNIFIED IDEOGRAPH:'CD76:52598:盳
CJK UNIFIED IDEOGRAPH:'CD77:52599:盱
CJK UNIFIED IDEOGRAPH:'CD78:52600:盰
CJK UNIFIED IDEOGRAPH:'CD79:52601:盵
CJK UNIFIED IDEOGRAPH:'CD7A:52602:矸
CJK UNIFIED IDEOGRAPH:'CD7B:52603:矼
CJK UNIFIED IDEOGRAPH:'CD7C:52604:矹
CJK UNIFIED IDEOGRAPH:'CD7D:52605:矻
CJK UNIFIED IDEOGRAPH:'CD7E:52606:矺
CJK UNIFIED IDEOGRAPH:'CDA1:52641:矷
CJK UNIFIED IDEOGRAPH:'CDA2:52642:祂
CJK UNIFIED IDEOGRAPH:'CDA3:52643:礿
CJK UNIFIED IDEOGRAPH:'CDA4:52644:秅
CJK UNIFIED IDEOGRAPH:'CDA5:52645:穸
CJK UNIFIED IDEOGRAPH:'CDA6:52646:穻
CJK UNIFIED IDEOGRAPH:'CDA7:52647:竻
CJK UNIFIED IDEOGRAPH:'CDA8:52648:籵
CJK UNIFIED IDEOGRAPH:'CDA9:52649:糽
CJK UNIFIED IDEOGRAPH:'CDAA:52650:耵
CJK UNIFIED IDEOGRAPH:'CDAB:52651:肏
CJK UNIFIED IDEOGRAPH:'CDAC:52652:肮
CJK UNIFIED IDEOGRAPH:'CDAD:52653:肣
CJK UNIFIED IDEOGRAPH:'CDAE:52654:肸
CJK UNIFIED IDEOGRAPH:'CDAF:52655:肵
CJK UNIFIED IDEOGRAPH:'CDB0:52656:肭
CJK UNIFIED IDEOGRAPH:'CDB1:52657:舠
CJK UNIFIED IDEOGRAPH:'CDB2:52658:芠
CJK UNIFIED IDEOGRAPH:'CDB3:52659:苀
CJK UNIFIED IDEOGRAPH:'CDB4:52660:芫
CJK UNIFIED IDEOGRAPH:'CDB5:52661:芚
CJK UNIFIED IDEOGRAPH:'CDB6:52662:芘
CJK UNIFIED IDEOGRAPH:'CDB7:52663:芛
CJK UNIFIED IDEOGRAPH:'CDB8:52664:芵
CJK UNIFIED IDEOGRAPH:'CDB9:52665:芧
CJK UNIFIED IDEOGRAPH:'CDBA:52666:芮
CJK UNIFIED IDEOGRAPH:'CDBB:52667:芼
CJK UNIFIED IDEOGRAPH:'CDBC:52668:芞
CJK UNIFIED IDEOGRAPH:'CDBD:52669:芺
CJK UNIFIED IDEOGRAPH:'CDBE:52670:芴
CJK UNIFIED IDEOGRAPH:'CDBF:52671:芨
CJK UNIFIED IDEOGRAPH:'CDC0:52672:芡
CJK UNIFIED IDEOGRAPH:'CDC1:52673:芩
CJK UNIFIED IDEOGRAPH:'CDC2:52674:苂
CJK UNIFIED IDEOGRAPH:'CDC3:52675:芤
CJK UNIFIED IDEOGRAPH:'CDC4:52676:苃
CJK UNIFIED IDEOGRAPH:'CDC5:52677:芶
CJK UNIFIED IDEOGRAPH:'CDC6:52678:芢
CJK UNIFIED IDEOGRAPH:'CDC7:52679:虰
CJK UNIFIED IDEOGRAPH:'CDC8:52680:虯
CJK UNIFIED IDEOGRAPH:'CDC9:52681:虭
CJK UNIFIED IDEOGRAPH:'CDCA:52682:虮
CJK UNIFIED IDEOGRAPH:'CDCB:52683:豖
CJK UNIFIED IDEOGRAPH:'CDCC:52684:迒
CJK UNIFIED IDEOGRAPH:'CDCD:52685:迋
CJK UNIFIED IDEOGRAPH:'CDCE:52686:迓
CJK UNIFIED IDEOGRAPH:'CDCF:52687:迍
CJK UNIFIED IDEOGRAPH:'CDD0:52688:迖
CJK UNIFIED IDEOGRAPH:'CDD1:52689:迕
CJK UNIFIED IDEOGRAPH:'CDD2:52690:迗
CJK UNIFIED IDEOGRAPH:'CDD3:52691:邲
CJK UNIFIED IDEOGRAPH:'CDD4:52692:邴
CJK UNIFIED IDEOGRAPH:'CDD5:52693:邯
CJK UNIFIED IDEOGRAPH:'CDD6:52694:邳
CJK UNIFIED IDEOGRAPH:'CDD7:52695:邰
CJK UNIFIED IDEOGRAPH:'CDD8:52696:阹
CJK UNIFIED IDEOGRAPH:'CDD9:52697:阽
CJK UNIFIED IDEOGRAPH:'CDDA:52698:阼
CJK UNIFIED IDEOGRAPH:'CDDB:52699:阺
CJK UNIFIED IDEOGRAPH:'CDDC:52700:陃
CJK UNIFIED IDEOGRAPH:'CDDD:52701:俍
CJK UNIFIED IDEOGRAPH:'CDDE:52702:俅
CJK UNIFIED IDEOGRAPH:'CDDF:52703:俓
CJK UNIFIED IDEOGRAPH:'CDE0:52704:侲
CJK UNIFIED IDEOGRAPH:'CDE1:52705:俉
CJK UNIFIED IDEOGRAPH:'CDE2:52706:俋
CJK UNIFIED IDEOGRAPH:'CDE3:52707:俁
CJK UNIFIED IDEOGRAPH:'CDE4:52708:俔
CJK UNIFIED IDEOGRAPH:'CDE5:52709:俜
CJK UNIFIED IDEOGRAPH:'CDE6:52710:俙
CJK UNIFIED IDEOGRAPH:'CDE7:52711:侻
CJK UNIFIED IDEOGRAPH:'CDE8:52712:侳
CJK UNIFIED IDEOGRAPH:'CDE9:52713:俛
CJK UNIFIED IDEOGRAPH:'CDEA:52714:俇
CJK UNIFIED IDEOGRAPH:'CDEB:52715:俖
CJK UNIFIED IDEOGRAPH:'CDEC:52716:侺
CJK UNIFIED IDEOGRAPH:'CDED:52717:俀
CJK UNIFIED IDEOGRAPH:'CDEE:52718:侹
CJK UNIFIED IDEOGRAPH:'CDEF:52719:俬
CJK UNIFIED IDEOGRAPH:'CDF0:52720:剄
CJK UNIFIED IDEOGRAPH:'CDF1:52721:剉
CJK UNIFIED IDEOGRAPH:'CDF2:52722:勀
CJK UNIFIED IDEOGRAPH:'CDF3:52723:勂
CJK UNIFIED IDEOGRAPH:'CDF4:52724:匽
CJK UNIFIED IDEOGRAPH:'CDF5:52725:卼
CJK UNIFIED IDEOGRAPH:'CDF6:52726:厗
CJK UNIFIED IDEOGRAPH:'CDF7:52727:厖
CJK UNIFIED IDEOGRAPH:'CDF8:52728:厙
CJK UNIFIED IDEOGRAPH:'CDF9:52729:厘
CJK UNIFIED IDEOGRAPH:'CDFA:52730:咺
CJK UNIFIED IDEOGRAPH:'CDFB:52731:咡
CJK UNIFIED IDEOGRAPH:'CDFC:52732:咭
CJK UNIFIED IDEOGRAPH:'CDFD:52733:咥
CJK UNIFIED IDEOGRAPH:'CDFE:52734:哏
CJK UNIFIED IDEOGRAPH:'CE40:52800:哃
CJK UNIFIED IDEOGRAPH:'CE41:52801:茍
CJK UNIFIED IDEOGRAPH:'CE42:52802:咷
CJK UNIFIED IDEOGRAPH:'CE43:52803:咮
CJK UNIFIED IDEOGRAPH:'CE44:52804:哖
CJK UNIFIED IDEOGRAPH:'CE45:52805:咶
CJK UNIFIED IDEOGRAPH:'CE46:52806:哅
CJK UNIFIED IDEOGRAPH:'CE47:52807:哆
CJK UNIFIED IDEOGRAPH:'CE48:52808:咠
CJK UNIFIED IDEOGRAPH:'CE49:52809:呰
CJK UNIFIED IDEOGRAPH:'CE4A:52810:咼
CJK UNIFIED IDEOGRAPH:'CE4B:52811:咢
CJK UNIFIED IDEOGRAPH:'CE4C:52812:咾
CJK UNIFIED IDEOGRAPH:'CE4D:52813:呲
CJK UNIFIED IDEOGRAPH:'CE4E:52814:哞
CJK UNIFIED IDEOGRAPH:'CE4F:52815:咰
CJK UNIFIED IDEOGRAPH:'CE50:52816:垵
CJK UNIFIED IDEOGRAPH:'CE51:52817:垞
CJK UNIFIED IDEOGRAPH:'CE52:52818:垟
CJK UNIFIED IDEOGRAPH:'CE53:52819:垤
CJK UNIFIED IDEOGRAPH:'CE54:52820:垌
CJK UNIFIED IDEOGRAPH:'CE55:52821:垗
CJK UNIFIED IDEOGRAPH:'CE56:52822:垝
CJK UNIFIED IDEOGRAPH:'CE57:52823:垛
CJK UNIFIED IDEOGRAPH:'CE58:52824:垔
CJK UNIFIED IDEOGRAPH:'CE59:52825:垘
CJK UNIFIED IDEOGRAPH:'CE5A:52826:垏
CJK UNIFIED IDEOGRAPH:'CE5B:52827:垙
CJK UNIFIED IDEOGRAPH:'CE5C:52828:垥
CJK UNIFIED IDEOGRAPH:'CE5D:52829:垚
CJK UNIFIED IDEOGRAPH:'CE5E:52830:垕
CJK UNIFIED IDEOGRAPH:'CE5F:52831:壴
CJK UNIFIED IDEOGRAPH:'CE60:52832:复
CJK UNIFIED IDEOGRAPH:'CE61:52833:奓
CJK UNIFIED IDEOGRAPH:'CE62:52834:姡
CJK UNIFIED IDEOGRAPH:'CE63:52835:姞
CJK UNIFIED IDEOGRAPH:'CE64:52836:姮
CJK UNIFIED IDEOGRAPH:'CE65:52837:娀
CJK UNIFIED IDEOGRAPH:'CE66:52838:姱
CJK UNIFIED IDEOGRAPH:'CE67:52839:姝
CJK UNIFIED IDEOGRAPH:'CE68:52840:姺
CJK UNIFIED IDEOGRAPH:'CE69:52841:姽
CJK UNIFIED IDEOGRAPH:'CE6A:52842:姼
CJK UNIFIED IDEOGRAPH:'CE6B:52843:姶
CJK UNIFIED IDEOGRAPH:'CE6C:52844:姤
CJK UNIFIED IDEOGRAPH:'CE6D:52845:姲
CJK UNIFIED IDEOGRAPH:'CE6E:52846:姷
CJK UNIFIED IDEOGRAPH:'CE6F:52847:姛
CJK UNIFIED IDEOGRAPH:'CE70:52848:姩
CJK UNIFIED IDEOGRAPH:'CE71:52849:姳
CJK UNIFIED IDEOGRAPH:'CE72:52850:姵
CJK UNIFIED IDEOGRAPH:'CE73:52851:姠
CJK UNIFIED IDEOGRAPH:'CE74:52852:姾
CJK UNIFIED IDEOGRAPH:'CE75:52853:姴
CJK UNIFIED IDEOGRAPH:'CE76:52854:姭
CJK UNIFIED IDEOGRAPH:'CE77:52855:宨
CJK UNIFIED IDEOGRAPH:'CE78:52856:屌
CJK UNIFIED IDEOGRAPH:'CE79:52857:峐
CJK UNIFIED IDEOGRAPH:'CE7A:52858:峘
CJK UNIFIED IDEOGRAPH:'CE7B:52859:峌
CJK UNIFIED IDEOGRAPH:'CE7C:52860:峗
CJK UNIFIED IDEOGRAPH:'CE7D:52861:峋
CJK UNIFIED IDEOGRAPH:'CE7E:52862:峛
CJK UNIFIED IDEOGRAPH:'CEA1:52897:峞
CJK UNIFIED IDEOGRAPH:'CEA2:52898:峚
CJK UNIFIED IDEOGRAPH:'CEA3:52899:峉
CJK UNIFIED IDEOGRAPH:'CEA4:52900:峇
CJK UNIFIED IDEOGRAPH:'CEA5:52901:峊
CJK UNIFIED IDEOGRAPH:'CEA6:52902:峖
CJK UNIFIED IDEOGRAPH:'CEA7:52903:峓
CJK UNIFIED IDEOGRAPH:'CEA8:52904:峔
CJK UNIFIED IDEOGRAPH:'CEA9:52905:峏
CJK UNIFIED IDEOGRAPH:'CEAA:52906:峈
CJK UNIFIED IDEOGRAPH:'CEAB:52907:峆
CJK UNIFIED IDEOGRAPH:'CEAC:52908:峎
CJK UNIFIED IDEOGRAPH:'CEAD:52909:峟
CJK UNIFIED IDEOGRAPH:'CEAE:52910:峸
CJK UNIFIED IDEOGRAPH:'CEAF:52911:巹
CJK UNIFIED IDEOGRAPH:'CEB0:52912:帡
CJK UNIFIED IDEOGRAPH:'CEB1:52913:帢
CJK UNIFIED IDEOGRAPH:'CEB2:52914:帣
CJK UNIFIED IDEOGRAPH:'CEB3:52915:帠
CJK UNIFIED IDEOGRAPH:'CEB4:52916:帤
CJK UNIFIED IDEOGRAPH:'CEB5:52917:庰
CJK UNIFIED IDEOGRAPH:'CEB6:52918:庤
CJK UNIFIED IDEOGRAPH:'CEB7:52919:庢
CJK UNIFIED IDEOGRAPH:'CEB8:52920:庛
CJK UNIFIED IDEOGRAPH:'CEB9:52921:庣
CJK UNIFIED IDEOGRAPH:'CEBA:52922:庥
CJK UNIFIED IDEOGRAPH:'CEBB:52923:弇
CJK UNIFIED IDEOGRAPH:'CEBC:52924:弮
CJK UNIFIED IDEOGRAPH:'CEBD:52925:彖
CJK UNIFIED IDEOGRAPH:'CEBE:52926:徆
CJK UNIFIED IDEOGRAPH:'CEBF:52927:怷
CJK UNIFIED IDEOGRAPH:'CEC0:52928:怹
CJK UNIFIED IDEOGRAPH:'CEC1:52929:恔
CJK UNIFIED IDEOGRAPH:'CEC2:52930:恲
CJK UNIFIED IDEOGRAPH:'CEC3:52931:恞
CJK UNIFIED IDEOGRAPH:'CEC4:52932:恅
CJK UNIFIED IDEOGRAPH:'CEC5:52933:恓
CJK UNIFIED IDEOGRAPH:'CEC6:52934:恇
CJK UNIFIED IDEOGRAPH:'CEC7:52935:恉
CJK UNIFIED IDEOGRAPH:'CEC8:52936:恛
CJK UNIFIED IDEOGRAPH:'CEC9:52937:恌
CJK UNIFIED IDEOGRAPH:'CECA:52938:恀
CJK UNIFIED IDEOGRAPH:'CECB:52939:恂
CJK UNIFIED IDEOGRAPH:'CECC:52940:恟
CJK UNIFIED IDEOGRAPH:'CECD:52941:怤
CJK UNIFIED IDEOGRAPH:'CECE:52942:恄
CJK UNIFIED IDEOGRAPH:'CECF:52943:恘
CJK UNIFIED IDEOGRAPH:'CED0:52944:恦
CJK UNIFIED IDEOGRAPH:'CED1:52945:恮
CJK UNIFIED IDEOGRAPH:'CED2:52946:扂
CJK UNIFIED IDEOGRAPH:'CED3:52947:扃
CJK UNIFIED IDEOGRAPH:'CED4:52948:拏
CJK UNIFIED IDEOGRAPH:'CED5:52949:挍
CJK UNIFIED IDEOGRAPH:'CED6:52950:挋
CJK UNIFIED IDEOGRAPH:'CED7:52951:拵
CJK UNIFIED IDEOGRAPH:'CED8:52952:挎
CJK UNIFIED IDEOGRAPH:'CED9:52953:挃
CJK UNIFIED IDEOGRAPH:'CEDA:52954:拫
CJK UNIFIED IDEOGRAPH:'CEDB:52955:拹
CJK UNIFIED IDEOGRAPH:'CEDC:52956:挏
CJK UNIFIED IDEOGRAPH:'CEDD:52957:挌
CJK UNIFIED IDEOGRAPH:'CEDE:52958:拸
CJK UNIFIED IDEOGRAPH:'CEDF:52959:拶
CJK UNIFIED IDEOGRAPH:'CEE0:52960:挀
CJK UNIFIED IDEOGRAPH:'CEE1:52961:挓
CJK UNIFIED IDEOGRAPH:'CEE2:52962:挔
CJK UNIFIED IDEOGRAPH:'CEE3:52963:拺
CJK UNIFIED IDEOGRAPH:'CEE4:52964:挕
CJK UNIFIED IDEOGRAPH:'CEE5:52965:拻
CJK UNIFIED IDEOGRAPH:'CEE6:52966:拰
CJK UNIFIED IDEOGRAPH:'CEE7:52967:敁
CJK UNIFIED IDEOGRAPH:'CEE8:52968:敃
CJK UNIFIED IDEOGRAPH:'CEE9:52969:斪
CJK UNIFIED IDEOGRAPH:'CEEA:52970:斿
CJK UNIFIED IDEOGRAPH:'CEEB:52971:昶
CJK UNIFIED IDEOGRAPH:'CEEC:52972:昡
CJK UNIFIED IDEOGRAPH:'CEED:52973:昲
CJK UNIFIED IDEOGRAPH:'CEEE:52974:昵
CJK UNIFIED IDEOGRAPH:'CEEF:52975:昜
CJK UNIFIED IDEOGRAPH:'CEF0:52976:昦
CJK UNIFIED IDEOGRAPH:'CEF1:52977:昢
CJK UNIFIED IDEOGRAPH:'CEF2:52978:昳
CJK UNIFIED IDEOGRAPH:'CEF3:52979:昫
CJK UNIFIED IDEOGRAPH:'CEF4:52980:昺
CJK UNIFIED IDEOGRAPH:'CEF5:52981:昝
CJK UNIFIED IDEOGRAPH:'CEF6:52982:昴
CJK UNIFIED IDEOGRAPH:'CEF7:52983:昹
CJK UNIFIED IDEOGRAPH:'CEF8:52984:昮
CJK UNIFIED IDEOGRAPH:'CEF9:52985:朏
CJK UNIFIED IDEOGRAPH:'CEFA:52986:朐
CJK UNIFIED IDEOGRAPH:'CEFB:52987:柁
CJK UNIFIED IDEOGRAPH:'CEFC:52988:柲
CJK UNIFIED IDEOGRAPH:'CEFD:52989:柈
CJK UNIFIED IDEOGRAPH:'CEFE:52990:枺
CJK UNIFIED IDEOGRAPH:'CF40:53056:柜
CJK UNIFIED IDEOGRAPH:'CF41:53057:枻
CJK UNIFIED IDEOGRAPH:'CF42:53058:柸
CJK UNIFIED IDEOGRAPH:'CF43:53059:柘
CJK UNIFIED IDEOGRAPH:'CF44:53060:柀
CJK UNIFIED IDEOGRAPH:'CF45:53061:枷
CJK UNIFIED IDEOGRAPH:'CF46:53062:柅
CJK UNIFIED IDEOGRAPH:'CF47:53063:柫
CJK UNIFIED IDEOGRAPH:'CF48:53064:柤
CJK UNIFIED IDEOGRAPH:'CF49:53065:柟
CJK UNIFIED IDEOGRAPH:'CF4A:53066:枵
CJK UNIFIED IDEOGRAPH:'CF4B:53067:柍
CJK UNIFIED IDEOGRAPH:'CF4C:53068:枳
CJK UNIFIED IDEOGRAPH:'CF4D:53069:柷
CJK UNIFIED IDEOGRAPH:'CF4E:53070:柶
CJK UNIFIED IDEOGRAPH:'CF4F:53071:柮
CJK UNIFIED IDEOGRAPH:'CF50:53072:柣
CJK UNIFIED IDEOGRAPH:'CF51:53073:柂
CJK UNIFIED IDEOGRAPH:'CF52:53074:枹
CJK UNIFIED IDEOGRAPH:'CF53:53075:柎
CJK UNIFIED IDEOGRAPH:'CF54:53076:柧
CJK UNIFIED IDEOGRAPH:'CF55:53077:柰
CJK UNIFIED IDEOGRAPH:'CF56:53078:枲
CJK UNIFIED IDEOGRAPH:'CF57:53079:柼
CJK UNIFIED IDEOGRAPH:'CF58:53080:柆
CJK UNIFIED IDEOGRAPH:'CF59:53081:柭
CJK UNIFIED IDEOGRAPH:'CF5A:53082:柌
CJK UNIFIED IDEOGRAPH:'CF5B:53083:枮
CJK UNIFIED IDEOGRAPH:'CF5C:53084:柦
CJK UNIFIED IDEOGRAPH:'CF5D:53085:柛
CJK UNIFIED IDEOGRAPH:'CF5E:53086:柺
CJK UNIFIED IDEOGRAPH:'CF5F:53087:柉
CJK UNIFIED IDEOGRAPH:'CF60:53088:柊
CJK UNIFIED IDEOGRAPH:'CF61:53089:柃
CJK UNIFIED IDEOGRAPH:'CF62:53090:柪
CJK UNIFIED IDEOGRAPH:'CF63:53091:柋
CJK UNIFIED IDEOGRAPH:'CF64:53092:欨
CJK UNIFIED IDEOGRAPH:'CF65:53093:殂
CJK UNIFIED IDEOGRAPH:'CF66:53094:殄
CJK UNIFIED IDEOGRAPH:'CF67:53095:殶
CJK UNIFIED IDEOGRAPH:'CF68:53096:毖
CJK UNIFIED IDEOGRAPH:'CF69:53097:毘
CJK UNIFIED IDEOGRAPH:'CF6A:53098:毠
CJK UNIFIED IDEOGRAPH:'CF6B:53099:氠
CJK UNIFIED IDEOGRAPH:'CF6C:53100:氡
CJK UNIFIED IDEOGRAPH:'CF6D:53101:洨
CJK UNIFIED IDEOGRAPH:'CF6E:53102:洴
CJK UNIFIED IDEOGRAPH:'CF6F:53103:洭
CJK UNIFIED IDEOGRAPH:'CF70:53104:洟
CJK UNIFIED IDEOGRAPH:'CF71:53105:洼
CJK UNIFIED IDEOGRAPH:'CF72:53106:洿
CJK UNIFIED IDEOGRAPH:'CF73:53107:洒
CJK UNIFIED IDEOGRAPH:'CF74:53108:洊
CJK UNIFIED IDEOGRAPH:'CF75:53109:泚
CJK UNIFIED IDEOGRAPH:'CF76:53110:洳
CJK UNIFIED IDEOGRAPH:'CF77:53111:洄
CJK UNIFIED IDEOGRAPH:'CF78:53112:洙
CJK UNIFIED IDEOGRAPH:'CF79:53113:洺
CJK UNIFIED IDEOGRAPH:'CF7A:53114:洚
CJK UNIFIED IDEOGRAPH:'CF7B:53115:洑
CJK UNIFIED IDEOGRAPH:'CF7C:53116:洀
CJK UNIFIED IDEOGRAPH:'CF7D:53117:洝
CJK UNIFIED IDEOGRAPH:'CF7E:53118:浂
CJK UNIFIED IDEOGRAPH:'CFA1:53153:洁
CJK UNIFIED IDEOGRAPH:'CFA2:53154:洘
CJK UNIFIED IDEOGRAPH:'CFA3:53155:洷
CJK UNIFIED IDEOGRAPH:'CFA4:53156:洃
CJK UNIFIED IDEOGRAPH:'CFA5:53157:洏
CJK UNIFIED IDEOGRAPH:'CFA6:53158:浀
CJK UNIFIED IDEOGRAPH:'CFA7:53159:洇
CJK UNIFIED IDEOGRAPH:'CFA8:53160:洠
CJK UNIFIED IDEOGRAPH:'CFA9:53161:洬
CJK UNIFIED IDEOGRAPH:'CFAA:53162:洈
CJK UNIFIED IDEOGRAPH:'CFAB:53163:洢
CJK UNIFIED IDEOGRAPH:'CFAC:53164:洉
CJK UNIFIED IDEOGRAPH:'CFAD:53165:洐
CJK UNIFIED IDEOGRAPH:'CFAE:53166:炷
CJK UNIFIED IDEOGRAPH:'CFAF:53167:炟
CJK UNIFIED IDEOGRAPH:'CFB0:53168:炾
CJK UNIFIED IDEOGRAPH:'CFB1:53169:炱
CJK UNIFIED IDEOGRAPH:'CFB2:53170:炰
CJK UNIFIED IDEOGRAPH:'CFB3:53171:炡
CJK UNIFIED IDEOGRAPH:'CFB4:53172:炴
CJK UNIFIED IDEOGRAPH:'CFB5:53173:炵
CJK UNIFIED IDEOGRAPH:'CFB6:53174:炩
CJK UNIFIED IDEOGRAPH:'CFB7:53175:牁
CJK UNIFIED IDEOGRAPH:'CFB8:53176:牉
CJK UNIFIED IDEOGRAPH:'CFB9:53177:牊
CJK UNIFIED IDEOGRAPH:'CFBA:53178:牬
CJK UNIFIED IDEOGRAPH:'CFBB:53179:牰
CJK UNIFIED IDEOGRAPH:'CFBC:53180:牳
CJK UNIFIED IDEOGRAPH:'CFBD:53181:牮
CJK UNIFIED IDEOGRAPH:'CFBE:53182:狊
CJK UNIFIED IDEOGRAPH:'CFBF:53183:狤
CJK UNIFIED IDEOGRAPH:'CFC0:53184:狨
CJK UNIFIED IDEOGRAPH:'CFC1:53185:狫
CJK UNIFIED IDEOGRAPH:'CFC2:53186:狟
CJK UNIFIED IDEOGRAPH:'CFC3:53187:狪
CJK UNIFIED IDEOGRAPH:'CFC4:53188:狦
CJK UNIFIED IDEOGRAPH:'CFC5:53189:狣
CJK UNIFIED IDEOGRAPH:'CFC6:53190:玅
CJK UNIFIED IDEOGRAPH:'CFC7:53191:珌
CJK UNIFIED IDEOGRAPH:'CFC8:53192:珂
CJK UNIFIED IDEOGRAPH:'CFC9:53193:珈
CJK UNIFIED IDEOGRAPH:'CFCA:53194:珅
CJK UNIFIED IDEOGRAPH:'CFCB:53195:玹
CJK UNIFIED IDEOGRAPH:'CFCC:53196:玶
CJK UNIFIED IDEOGRAPH:'CFCD:53197:玵
CJK UNIFIED IDEOGRAPH:'CFCE:53198:玴
CJK UNIFIED IDEOGRAPH:'CFCF:53199:珫
CJK UNIFIED IDEOGRAPH:'CFD0:53200:玿
CJK UNIFIED IDEOGRAPH:'CFD1:53201:珇
CJK UNIFIED IDEOGRAPH:'CFD2:53202:玾
CJK UNIFIED IDEOGRAPH:'CFD3:53203:珃
CJK UNIFIED IDEOGRAPH:'CFD4:53204:珆
CJK UNIFIED IDEOGRAPH:'CFD5:53205:玸
CJK UNIFIED IDEOGRAPH:'CFD6:53206:珋
CJK UNIFIED IDEOGRAPH:'CFD7:53207:瓬
CJK UNIFIED IDEOGRAPH:'CFD8:53208:瓮
CJK UNIFIED IDEOGRAPH:'CFD9:53209:甮
CJK UNIFIED IDEOGRAPH:'CFDA:53210:畇
CJK UNIFIED IDEOGRAPH:'CFDB:53211:畈
CJK UNIFIED IDEOGRAPH:'CFDC:53212:疧
CJK UNIFIED IDEOGRAPH:'CFDD:53213:疪
CJK UNIFIED IDEOGRAPH:'CFDE:53214:癹
CJK UNIFIED IDEOGRAPH:'CFDF:53215:盄
CJK UNIFIED IDEOGRAPH:'CFE0:53216:眈
CJK UNIFIED IDEOGRAPH:'CFE1:53217:眃
CJK UNIFIED IDEOGRAPH:'CFE2:53218:眄
CJK UNIFIED IDEOGRAPH:'CFE3:53219:眅
CJK UNIFIED IDEOGRAPH:'CFE4:53220:眊
CJK UNIFIED IDEOGRAPH:'CFE5:53221:盷
CJK UNIFIED IDEOGRAPH:'CFE6:53222:盻
CJK UNIFIED IDEOGRAPH:'CFE7:53223:盺
CJK UNIFIED IDEOGRAPH:'CFE8:53224:矧
CJK UNIFIED IDEOGRAPH:'CFE9:53225:矨
CJK UNIFIED IDEOGRAPH:'CFEA:53226:砆
CJK UNIFIED IDEOGRAPH:'CFEB:53227:砑
CJK UNIFIED IDEOGRAPH:'CFEC:53228:砒
CJK UNIFIED IDEOGRAPH:'CFED:53229:砅
CJK UNIFIED IDEOGRAPH:'CFEE:53230:砐
CJK UNIFIED IDEOGRAPH:'CFEF:53231:砏
CJK UNIFIED IDEOGRAPH:'CFF0:53232:砎
CJK UNIFIED IDEOGRAPH:'CFF1:53233:砉
CJK UNIFIED IDEOGRAPH:'CFF2:53234:砃
CJK UNIFIED IDEOGRAPH:'CFF3:53235:砓
CJK UNIFIED IDEOGRAPH:'CFF4:53236:祊
CJK UNIFIED IDEOGRAPH:'CFF5:53237:祌
CJK UNIFIED IDEOGRAPH:'CFF6:53238:祋
CJK UNIFIED IDEOGRAPH:'CFF7:53239:祅
CJK UNIFIED IDEOGRAPH:'CFF8:53240:祄
CJK UNIFIED IDEOGRAPH:'CFF9:53241:秕
CJK UNIFIED IDEOGRAPH:'CFFA:53242:种
CJK UNIFIED IDEOGRAPH:'CFFB:53243:秏
CJK UNIFIED IDEOGRAPH:'CFFC:53244:秖
CJK UNIFIED IDEOGRAPH:'CFFD:53245:秎
CJK UNIFIED IDEOGRAPH:'CFFE:53246:窀
CJK UNIFIED IDEOGRAPH:'D040:53312:穾
CJK UNIFIED IDEOGRAPH:'D041:53313:竑
CJK UNIFIED IDEOGRAPH:'D042:53314:笀
CJK UNIFIED IDEOGRAPH:'D043:53315:笁
CJK UNIFIED IDEOGRAPH:'D044:53316:籺
CJK UNIFIED IDEOGRAPH:'D045:53317:籸
CJK UNIFIED IDEOGRAPH:'D046:53318:籹
CJK UNIFIED IDEOGRAPH:'D047:53319:籿
CJK UNIFIED IDEOGRAPH:'D048:53320:粀
CJK UNIFIED IDEOGRAPH:'D049:53321:粁
CJK UNIFIED IDEOGRAPH:'D04A:53322:紃
CJK UNIFIED IDEOGRAPH:'D04B:53323:紈
CJK UNIFIED IDEOGRAPH:'D04C:53324:紁
CJK UNIFIED IDEOGRAPH:'D04D:53325:罘
CJK UNIFIED IDEOGRAPH:'D04E:53326:羑
CJK UNIFIED IDEOGRAPH:'D04F:53327:羍
CJK UNIFIED IDEOGRAPH:'D050:53328:羾
CJK UNIFIED IDEOGRAPH:'D051:53329:耇
CJK UNIFIED IDEOGRAPH:'D052:53330:耎
CJK UNIFIED IDEOGRAPH:'D053:53331:耏
CJK UNIFIED IDEOGRAPH:'D054:53332:耔
CJK UNIFIED IDEOGRAPH:'D055:53333:耷
CJK UNIFIED IDEOGRAPH:'D056:53334:胘
CJK UNIFIED IDEOGRAPH:'D057:53335:胇
CJK UNIFIED IDEOGRAPH:'D058:53336:胠
CJK UNIFIED IDEOGRAPH:'D059:53337:胑
CJK UNIFIED IDEOGRAPH:'D05A:53338:胈
CJK UNIFIED IDEOGRAPH:'D05B:53339:胂
CJK UNIFIED IDEOGRAPH:'D05C:53340:胐
CJK UNIFIED IDEOGRAPH:'D05D:53341:胅
CJK UNIFIED IDEOGRAPH:'D05E:53342:胣
CJK UNIFIED IDEOGRAPH:'D05F:53343:胙
CJK UNIFIED IDEOGRAPH:'D060:53344:胜
CJK UNIFIED IDEOGRAPH:'D061:53345:胊
CJK UNIFIED IDEOGRAPH:'D062:53346:胕
CJK UNIFIED IDEOGRAPH:'D063:53347:胉
CJK UNIFIED IDEOGRAPH:'D064:53348:胏
CJK UNIFIED IDEOGRAPH:'D065:53349:胗
CJK UNIFIED IDEOGRAPH:'D066:53350:胦
CJK UNIFIED IDEOGRAPH:'D067:53351:胍
CJK UNIFIED IDEOGRAPH:'D068:53352:臿
CJK UNIFIED IDEOGRAPH:'D069:53353:舡
CJK UNIFIED IDEOGRAPH:'D06A:53354:芔
CJK UNIFIED IDEOGRAPH:'D06B:53355:苙
CJK UNIFIED IDEOGRAPH:'D06C:53356:苾
CJK UNIFIED IDEOGRAPH:'D06D:53357:苹
CJK UNIFIED IDEOGRAPH:'D06E:53358:茇
CJK UNIFIED IDEOGRAPH:'D06F:53359:苨
CJK UNIFIED IDEOGRAPH:'D070:53360:茀
CJK UNIFIED IDEOGRAPH:'D071:53361:苕
CJK UNIFIED IDEOGRAPH:'D072:53362:茺
CJK UNIFIED IDEOGRAPH:'D073:53363:苫
CJK UNIFIED IDEOGRAPH:'D074:53364:苖
CJK UNIFIED IDEOGRAPH:'D075:53365:苴
CJK UNIFIED IDEOGRAPH:'D076:53366:苬
CJK UNIFIED IDEOGRAPH:'D077:53367:苡
CJK UNIFIED IDEOGRAPH:'D078:53368:苲
CJK UNIFIED IDEOGRAPH:'D079:53369:苵
CJK UNIFIED IDEOGRAPH:'D07A:53370:茌
CJK UNIFIED IDEOGRAPH:'D07B:53371:苻
CJK UNIFIED IDEOGRAPH:'D07C:53372:苶
CJK UNIFIED IDEOGRAPH:'D07D:53373:苰
CJK UNIFIED IDEOGRAPH:'D07E:53374:苪
CJK UNIFIED IDEOGRAPH:'D0A1:53409:苤
CJK UNIFIED IDEOGRAPH:'D0A2:53410:苠
CJK UNIFIED IDEOGRAPH:'D0A3:53411:苺
CJK UNIFIED IDEOGRAPH:'D0A4:53412:苳
CJK UNIFIED IDEOGRAPH:'D0A5:53413:苭
CJK UNIFIED IDEOGRAPH:'D0A6:53414:虷
CJK UNIFIED IDEOGRAPH:'D0A7:53415:虴
CJK UNIFIED IDEOGRAPH:'D0A8:53416:虼
CJK UNIFIED IDEOGRAPH:'D0A9:53417:虳
CJK UNIFIED IDEOGRAPH:'D0AA:53418:衁
CJK UNIFIED IDEOGRAPH:'D0AB:53419:衎
CJK UNIFIED IDEOGRAPH:'D0AC:53420:衧
CJK UNIFIED IDEOGRAPH:'D0AD:53421:衪
CJK UNIFIED IDEOGRAPH:'D0AE:53422:衩
CJK UNIFIED IDEOGRAPH:'D0AF:53423:觓
CJK UNIFIED IDEOGRAPH:'D0B0:53424:訄
CJK UNIFIED IDEOGRAPH:'D0B1:53425:訇
CJK UNIFIED IDEOGRAPH:'D0B2:53426:赲
CJK UNIFIED IDEOGRAPH:'D0B3:53427:迣
CJK UNIFIED IDEOGRAPH:'D0B4:53428:迡
CJK UNIFIED IDEOGRAPH:'D0B5:53429:迮
CJK UNIFIED IDEOGRAPH:'D0B6:53430:迠
CJK UNIFIED IDEOGRAPH:'D0B7:53431:郱
CJK UNIFIED IDEOGRAPH:'D0B8:53432:邽
CJK UNIFIED IDEOGRAPH:'D0B9:53433:邿
CJK UNIFIED IDEOGRAPH:'D0BA:53434:郕
CJK UNIFIED IDEOGRAPH:'D0BB:53435:郅
CJK UNIFIED IDEOGRAPH:'D0BC:53436:邾
CJK UNIFIED IDEOGRAPH:'D0BD:53437:郇
CJK UNIFIED IDEOGRAPH:'D0BE:53438:郋
CJK UNIFIED IDEOGRAPH:'D0BF:53439:郈
CJK UNIFIED IDEOGRAPH:'D0C0:53440:釔
CJK UNIFIED IDEOGRAPH:'D0C1:53441:釓
CJK UNIFIED IDEOGRAPH:'D0C2:53442:陔
CJK UNIFIED IDEOGRAPH:'D0C3:53443:陏
CJK UNIFIED IDEOGRAPH:'D0C4:53444:陑
CJK UNIFIED IDEOGRAPH:'D0C5:53445:陓
CJK UNIFIED IDEOGRAPH:'D0C6:53446:陊
CJK UNIFIED IDEOGRAPH:'D0C7:53447:陎
CJK UNIFIED IDEOGRAPH:'D0C8:53448:倞
CJK UNIFIED IDEOGRAPH:'D0C9:53449:倅
CJK UNIFIED IDEOGRAPH:'D0CA:53450:倇
CJK UNIFIED IDEOGRAPH:'D0CB:53451:倓
CJK UNIFIED IDEOGRAPH:'D0CC:53452:倢
CJK UNIFIED IDEOGRAPH:'D0CD:53453:倰
CJK UNIFIED IDEOGRAPH:'D0CE:53454:倛
CJK UNIFIED IDEOGRAPH:'D0CF:53455:俵
CJK UNIFIED IDEOGRAPH:'D0D0:53456:俴
CJK UNIFIED IDEOGRAPH:'D0D1:53457:倳
CJK UNIFIED IDEOGRAPH:'D0D2:53458:倷
CJK UNIFIED IDEOGRAPH:'D0D3:53459:倬
CJK UNIFIED IDEOGRAPH:'D0D4:53460:俶
CJK UNIFIED IDEOGRAPH:'D0D5:53461:俷
CJK UNIFIED IDEOGRAPH:'D0D6:53462:倗
CJK UNIFIED IDEOGRAPH:'D0D7:53463:倜
CJK UNIFIED IDEOGRAPH:'D0D8:53464:倠
CJK UNIFIED IDEOGRAPH:'D0D9:53465:倧
CJK UNIFIED IDEOGRAPH:'D0DA:53466:倵
CJK UNIFIED IDEOGRAPH:'D0DB:53467:倯
CJK UNIFIED IDEOGRAPH:'D0DC:53468:倱
CJK UNIFIED IDEOGRAPH:'D0DD:53469:倎
CJK UNIFIED IDEOGRAPH:'D0DE:53470:党
CJK UNIFIED IDEOGRAPH:'D0DF:53471:冔
CJK UNIFIED IDEOGRAPH:'D0E0:53472:冓
CJK UNIFIED IDEOGRAPH:'D0E1:53473:凊
CJK UNIFIED IDEOGRAPH:'D0E2:53474:凄
CJK UNIFIED IDEOGRAPH:'D0E3:53475:凅
CJK UNIFIED IDEOGRAPH:'D0E4:53476:凈
CJK UNIFIED IDEOGRAPH:'D0E5:53477:凎
CJK UNIFIED IDEOGRAPH:'D0E6:53478:剡
CJK UNIFIED IDEOGRAPH:'D0E7:53479:剚
CJK UNIFIED IDEOGRAPH:'D0E8:53480:剒
CJK UNIFIED IDEOGRAPH:'D0E9:53481:剞
CJK UNIFIED IDEOGRAPH:'D0EA:53482:剟
CJK UNIFIED IDEOGRAPH:'D0EB:53483:剕
CJK UNIFIED IDEOGRAPH:'D0EC:53484:剢
CJK UNIFIED IDEOGRAPH:'D0ED:53485:勍
CJK UNIFIED IDEOGRAPH:'D0EE:53486:匎
CJK UNIFIED IDEOGRAPH:'D0EF:53487:厞
CJK UNIFIED IDEOGRAPH:'D0F0:53488:唦
CJK UNIFIED IDEOGRAPH:'D0F1:53489:哢
CJK UNIFIED IDEOGRAPH:'D0F2:53490:唗
CJK UNIFIED IDEOGRAPH:'D0F3:53491:唒
CJK UNIFIED IDEOGRAPH:'D0F4:53492:哧
CJK UNIFIED IDEOGRAPH:'D0F5:53493:哳
CJK UNIFIED IDEOGRAPH:'D0F6:53494:哤
CJK UNIFIED IDEOGRAPH:'D0F7:53495:唚
CJK UNIFIED IDEOGRAPH:'D0F8:53496:哿
CJK UNIFIED IDEOGRAPH:'D0F9:53497:唄
CJK UNIFIED IDEOGRAPH:'D0FA:53498:唈
CJK UNIFIED IDEOGRAPH:'D0FB:53499:哫
CJK UNIFIED IDEOGRAPH:'D0FC:53500:唑
CJK UNIFIED IDEOGRAPH:'D0FD:53501:唅
CJK UNIFIED IDEOGRAPH:'D0FE:53502:哱
CJK UNIFIED IDEOGRAPH:'D140:53568:唊
CJK UNIFIED IDEOGRAPH:'D141:53569:哻
CJK UNIFIED IDEOGRAPH:'D142:53570:哷
CJK UNIFIED IDEOGRAPH:'D143:53571:哸
CJK UNIFIED IDEOGRAPH:'D144:53572:哠
CJK UNIFIED IDEOGRAPH:'D145:53573:唎
CJK UNIFIED IDEOGRAPH:'D146:53574:唃
CJK UNIFIED IDEOGRAPH:'D147:53575:唋
CJK UNIFIED IDEOGRAPH:'D148:53576:圁
CJK UNIFIED IDEOGRAPH:'D149:53577:圂
CJK UNIFIED IDEOGRAPH:'D14A:53578:埌
CJK UNIFIED IDEOGRAPH:'D14B:53579:堲
CJK UNIFIED IDEOGRAPH:'D14C:53580:埕
CJK UNIFIED IDEOGRAPH:'D14D:53581:埒
CJK UNIFIED IDEOGRAPH:'D14E:53582:垺
CJK UNIFIED IDEOGRAPH:'D14F:53583:埆
CJK UNIFIED IDEOGRAPH:'D150:53584:垽
CJK UNIFIED IDEOGRAPH:'D151:53585:垼
CJK UNIFIED IDEOGRAPH:'D152:53586:垸
CJK UNIFIED IDEOGRAPH:'D153:53587:垶
CJK UNIFIED IDEOGRAPH:'D154:53588:垿
CJK UNIFIED IDEOGRAPH:'D155:53589:埇
CJK UNIFIED IDEOGRAPH:'D156:53590:埐
CJK UNIFIED IDEOGRAPH:'D157:53591:垹
CJK UNIFIED IDEOGRAPH:'D158:53592:埁
CJK UNIFIED IDEOGRAPH:'D159:53593:夎
CJK UNIFIED IDEOGRAPH:'D15A:53594:奊
CJK UNIFIED IDEOGRAPH:'D15B:53595:娙
CJK UNIFIED IDEOGRAPH:'D15C:53596:娖
CJK UNIFIED IDEOGRAPH:'D15D:53597:娭
CJK UNIFIED IDEOGRAPH:'D15E:53598:娮
CJK UNIFIED IDEOGRAPH:'D15F:53599:娕
CJK UNIFIED IDEOGRAPH:'D160:53600:娏
CJK UNIFIED IDEOGRAPH:'D161:53601:娗
CJK UNIFIED IDEOGRAPH:'D162:53602:娊
CJK UNIFIED IDEOGRAPH:'D163:53603:娞
CJK UNIFIED IDEOGRAPH:'D164:53604:娳
CJK UNIFIED IDEOGRAPH:'D165:53605:孬
CJK UNIFIED IDEOGRAPH:'D166:53606:宧
CJK UNIFIED IDEOGRAPH:'D167:53607:宭
CJK UNIFIED IDEOGRAPH:'D168:53608:宬
CJK UNIFIED IDEOGRAPH:'D169:53609:尃
CJK UNIFIED IDEOGRAPH:'D16A:53610:屖
CJK UNIFIED IDEOGRAPH:'D16B:53611:屔
CJK UNIFIED IDEOGRAPH:'D16C:53612:峬
CJK UNIFIED IDEOGRAPH:'D16D:53613:峿
CJK UNIFIED IDEOGRAPH:'D16E:53614:峮
CJK UNIFIED IDEOGRAPH:'D16F:53615:峱
CJK UNIFIED IDEOGRAPH:'D170:53616:峷
CJK UNIFIED IDEOGRAPH:'D171:53617:崀
CJK UNIFIED IDEOGRAPH:'D172:53618:峹
CJK UNIFIED IDEOGRAPH:'D173:53619:帩
CJK UNIFIED IDEOGRAPH:'D174:53620:帨
CJK UNIFIED IDEOGRAPH:'D175:53621:庨
CJK UNIFIED IDEOGRAPH:'D176:53622:庮
CJK UNIFIED IDEOGRAPH:'D177:53623:庪
CJK UNIFIED IDEOGRAPH:'D178:53624:庬
CJK UNIFIED IDEOGRAPH:'D179:53625:弳
CJK UNIFIED IDEOGRAPH:'D17A:53626:弰
CJK UNIFIED IDEOGRAPH:'D17B:53627:彧
CJK UNIFIED IDEOGRAPH:'D17C:53628:恝
CJK UNIFIED IDEOGRAPH:'D17D:53629:恚
CJK UNIFIED IDEOGRAPH:'D17E:53630:恧
CJK UNIFIED IDEOGRAPH:'D1A1:53665:恁
CJK UNIFIED IDEOGRAPH:'D1A2:53666:悢
CJK UNIFIED IDEOGRAPH:'D1A3:53667:悈
CJK UNIFIED IDEOGRAPH:'D1A4:53668:悀
CJK UNIFIED IDEOGRAPH:'D1A5:53669:悒
CJK UNIFIED IDEOGRAPH:'D1A6:53670:悁
CJK UNIFIED IDEOGRAPH:'D1A7:53671:悝
CJK UNIFIED IDEOGRAPH:'D1A8:53672:悃
CJK UNIFIED IDEOGRAPH:'D1A9:53673:悕
CJK UNIFIED IDEOGRAPH:'D1AA:53674:悛
CJK UNIFIED IDEOGRAPH:'D1AB:53675:悗
CJK UNIFIED IDEOGRAPH:'D1AC:53676:悇
CJK UNIFIED IDEOGRAPH:'D1AD:53677:悜
CJK UNIFIED IDEOGRAPH:'D1AE:53678:悎
CJK UNIFIED IDEOGRAPH:'D1AF:53679:戙
CJK UNIFIED IDEOGRAPH:'D1B0:53680:扆
CJK UNIFIED IDEOGRAPH:'D1B1:53681:拲
CJK UNIFIED IDEOGRAPH:'D1B2:53682:挐
CJK UNIFIED IDEOGRAPH:'D1B3:53683:捖
CJK UNIFIED IDEOGRAPH:'D1B4:53684:挬
CJK UNIFIED IDEOGRAPH:'D1B5:53685:捄
CJK UNIFIED IDEOGRAPH:'D1B6:53686:捅
CJK UNIFIED IDEOGRAPH:'D1B7:53687:挶
CJK UNIFIED IDEOGRAPH:'D1B8:53688:捃
CJK UNIFIED IDEOGRAPH:'D1B9:53689:揤
CJK UNIFIED IDEOGRAPH:'D1BA:53690:挹
CJK UNIFIED IDEOGRAPH:'D1BB:53691:捋
CJK UNIFIED IDEOGRAPH:'D1BC:53692:捊
CJK UNIFIED IDEOGRAPH:'D1BD:53693:挼
CJK UNIFIED IDEOGRAPH:'D1BE:53694:挩
CJK UNIFIED IDEOGRAPH:'D1BF:53695:捁
CJK UNIFIED IDEOGRAPH:'D1C0:53696:挴
CJK UNIFIED IDEOGRAPH:'D1C1:53697:捘
CJK UNIFIED IDEOGRAPH:'D1C2:53698:捔
CJK UNIFIED IDEOGRAPH:'D1C3:53699:捙
CJK UNIFIED IDEOGRAPH:'D1C4:53700:挭
CJK UNIFIED IDEOGRAPH:'D1C5:53701:捇
CJK UNIFIED IDEOGRAPH:'D1C6:53702:挳
CJK UNIFIED IDEOGRAPH:'D1C7:53703:捚
CJK UNIFIED IDEOGRAPH:'D1C8:53704:捑
CJK UNIFIED IDEOGRAPH:'D1C9:53705:挸
CJK UNIFIED IDEOGRAPH:'D1CA:53706:捗
CJK UNIFIED IDEOGRAPH:'D1CB:53707:捀
CJK UNIFIED IDEOGRAPH:'D1CC:53708:捈
CJK UNIFIED IDEOGRAPH:'D1CD:53709:敊
CJK UNIFIED IDEOGRAPH:'D1CE:53710:敆
CJK UNIFIED IDEOGRAPH:'D1CF:53711:旆
CJK UNIFIED IDEOGRAPH:'D1D0:53712:旃
CJK UNIFIED IDEOGRAPH:'D1D1:53713:旄
CJK UNIFIED IDEOGRAPH:'D1D2:53714:旂
CJK UNIFIED IDEOGRAPH:'D1D3:53715:晊
CJK UNIFIED IDEOGRAPH:'D1D4:53716:晟
CJK UNIFIED IDEOGRAPH:'D1D5:53717:晇
CJK UNIFIED IDEOGRAPH:'D1D6:53718:晑
CJK UNIFIED IDEOGRAPH:'D1D7:53719:朒
CJK UNIFIED IDEOGRAPH:'D1D8:53720:朓
CJK UNIFIED IDEOGRAPH:'D1D9:53721:栟
CJK UNIFIED IDEOGRAPH:'D1DA:53722:栚
CJK UNIFIED IDEOGRAPH:'D1DB:53723:桉
CJK UNIFIED IDEOGRAPH:'D1DC:53724:栲
CJK UNIFIED IDEOGRAPH:'D1DD:53725:栳
CJK UNIFIED IDEOGRAPH:'D1DE:53726:栻
CJK UNIFIED IDEOGRAPH:'D1DF:53727:桋
CJK UNIFIED IDEOGRAPH:'D1E0:53728:桏
CJK UNIFIED IDEOGRAPH:'D1E1:53729:栖
CJK UNIFIED IDEOGRAPH:'D1E2:53730:栱
CJK UNIFIED IDEOGRAPH:'D1E3:53731:栜
CJK UNIFIED IDEOGRAPH:'D1E4:53732:栵
CJK UNIFIED IDEOGRAPH:'D1E5:53733:栫
CJK UNIFIED IDEOGRAPH:'D1E6:53734:栭
CJK UNIFIED IDEOGRAPH:'D1E7:53735:栯
CJK UNIFIED IDEOGRAPH:'D1E8:53736:桎
CJK UNIFIED IDEOGRAPH:'D1E9:53737:桄
CJK UNIFIED IDEOGRAPH:'D1EA:53738:栴
CJK UNIFIED IDEOGRAPH:'D1EB:53739:栝
CJK UNIFIED IDEOGRAPH:'D1EC:53740:栒
CJK UNIFIED IDEOGRAPH:'D1ED:53741:栔
CJK UNIFIED IDEOGRAPH:'D1EE:53742:栦
CJK UNIFIED IDEOGRAPH:'D1EF:53743:栨
CJK UNIFIED IDEOGRAPH:'D1F0:53744:栮
CJK UNIFIED IDEOGRAPH:'D1F1:53745:桍
CJK UNIFIED IDEOGRAPH:'D1F2:53746:栺
CJK UNIFIED IDEOGRAPH:'D1F3:53747:栥
CJK UNIFIED IDEOGRAPH:'D1F4:53748:栠
CJK UNIFIED IDEOGRAPH:'D1F5:53749:欬
CJK UNIFIED IDEOGRAPH:'D1F6:53750:欯
CJK UNIFIED IDEOGRAPH:'D1F7:53751:欭
CJK UNIFIED IDEOGRAPH:'D1F8:53752:欱
CJK UNIFIED IDEOGRAPH:'D1F9:53753:欴
CJK UNIFIED IDEOGRAPH:'D1FA:53754:歭
CJK UNIFIED IDEOGRAPH:'D1FB:53755:肂
CJK UNIFIED IDEOGRAPH:'D1FC:53756:殈
CJK UNIFIED IDEOGRAPH:'D1FD:53757:毦
CJK UNIFIED IDEOGRAPH:'D1FE:53758:毤
CJK UNIFIED IDEOGRAPH:'D240:53824:毨
CJK UNIFIED IDEOGRAPH:'D241:53825:毣
CJK UNIFIED IDEOGRAPH:'D242:53826:毢
CJK UNIFIED IDEOGRAPH:'D243:53827:毧
CJK UNIFIED IDEOGRAPH:'D244:53828:氥
CJK UNIFIED IDEOGRAPH:'D245:53829:浺
CJK UNIFIED IDEOGRAPH:'D246:53830:浣
CJK UNIFIED IDEOGRAPH:'D247:53831:浤
CJK UNIFIED IDEOGRAPH:'D248:53832:浶
CJK UNIFIED IDEOGRAPH:'D249:53833:洍
CJK UNIFIED IDEOGRAPH:'D24A:53834:浡
CJK UNIFIED IDEOGRAPH:'D24B:53835:涒
CJK UNIFIED IDEOGRAPH:'D24C:53836:浘
CJK UNIFIED IDEOGRAPH:'D24D:53837:浢
CJK UNIFIED IDEOGRAPH:'D24E:53838:浭
CJK UNIFIED IDEOGRAPH:'D24F:53839:浯
CJK UNIFIED IDEOGRAPH:'D250:53840:涑
CJK UNIFIED IDEOGRAPH:'D251:53841:涍
CJK UNIFIED IDEOGRAPH:'D252:53842:淯
CJK UNIFIED IDEOGRAPH:'D253:53843:浿
CJK UNIFIED IDEOGRAPH:'D254:53844:涆
CJK UNIFIED IDEOGRAPH:'D255:53845:浞
CJK UNIFIED IDEOGRAPH:'D256:53846:浧
CJK UNIFIED IDEOGRAPH:'D257:53847:浠
CJK UNIFIED IDEOGRAPH:'D258:53848:涗
CJK UNIFIED IDEOGRAPH:'D259:53849:浰
CJK UNIFIED IDEOGRAPH:'D25A:53850:浼
CJK UNIFIED IDEOGRAPH:'D25B:53851:浟
CJK UNIFIED IDEOGRAPH:'D25C:53852:涂
CJK UNIFIED IDEOGRAPH:'D25D:53853:涘
CJK UNIFIED IDEOGRAPH:'D25E:53854:洯
CJK UNIFIED IDEOGRAPH:'D25F:53855:浨
CJK UNIFIED IDEOGRAPH:'D260:53856:涋
CJK UNIFIED IDEOGRAPH:'D261:53857:浾
CJK UNIFIED IDEOGRAPH:'D262:53858:涀
CJK UNIFIED IDEOGRAPH:'D263:53859:涄
CJK UNIFIED IDEOGRAPH:'D264:53860:洖
CJK UNIFIED IDEOGRAPH:'D265:53861:涃
CJK UNIFIED IDEOGRAPH:'D266:53862:浻
CJK UNIFIED IDEOGRAPH:'D267:53863:浽
CJK UNIFIED IDEOGRAPH:'D268:53864:浵
CJK UNIFIED IDEOGRAPH:'D269:53865:涐
CJK UNIFIED IDEOGRAPH:'D26A:53866:烜
CJK UNIFIED IDEOGRAPH:'D26B:53867:烓
CJK UNIFIED IDEOGRAPH:'D26C:53868:烑
CJK UNIFIED IDEOGRAPH:'D26D:53869:烝
CJK UNIFIED IDEOGRAPH:'D26E:53870:烋
CJK UNIFIED IDEOGRAPH:'D26F:53871:缹
CJK UNIFIED IDEOGRAPH:'D270:53872:烢
CJK UNIFIED IDEOGRAPH:'D271:53873:烗
CJK UNIFIED IDEOGRAPH:'D272:53874:烒
CJK UNIFIED IDEOGRAPH:'D273:53875:烞
CJK UNIFIED IDEOGRAPH:'D274:53876:烠
CJK UNIFIED IDEOGRAPH:'D275:53877:烔
CJK UNIFIED IDEOGRAPH:'D276:53878:烍
CJK UNIFIED IDEOGRAPH:'D277:53879:烅
CJK UNIFIED IDEOGRAPH:'D278:53880:烆
CJK UNIFIED IDEOGRAPH:'D279:53881:烇
CJK UNIFIED IDEOGRAPH:'D27A:53882:烚
CJK UNIFIED IDEOGRAPH:'D27B:53883:烎
CJK UNIFIED IDEOGRAPH:'D27C:53884:烡
CJK UNIFIED IDEOGRAPH:'D27D:53885:牂
CJK UNIFIED IDEOGRAPH:'D27E:53886:牸
CJK UNIFIED IDEOGRAPH:'D2A1:53921:牷
CJK UNIFIED IDEOGRAPH:'D2A2:53922:牶
CJK UNIFIED IDEOGRAPH:'D2A3:53923:猀
CJK UNIFIED IDEOGRAPH:'D2A4:53924:狺
CJK UNIFIED IDEOGRAPH:'D2A5:53925:狴
CJK UNIFIED IDEOGRAPH:'D2A6:53926:狾
CJK UNIFIED IDEOGRAPH:'D2A7:53927:狶
CJK UNIFIED IDEOGRAPH:'D2A8:53928:狳
CJK UNIFIED IDEOGRAPH:'D2A9:53929:狻
CJK UNIFIED IDEOGRAPH:'D2AA:53930:猁
CJK UNIFIED IDEOGRAPH:'D2AB:53931:珓
CJK UNIFIED IDEOGRAPH:'D2AC:53932:珙
CJK UNIFIED IDEOGRAPH:'D2AD:53933:珥
CJK UNIFIED IDEOGRAPH:'D2AE:53934:珖
CJK UNIFIED IDEOGRAPH:'D2AF:53935:玼
CJK UNIFIED IDEOGRAPH:'D2B0:53936:珧
CJK UNIFIED IDEOGRAPH:'D2B1:53937:珣
CJK UNIFIED IDEOGRAPH:'D2B2:53938:珩
CJK UNIFIED IDEOGRAPH:'D2B3:53939:珜
CJK UNIFIED IDEOGRAPH:'D2B4:53940:珒
CJK UNIFIED IDEOGRAPH:'D2B5:53941:珛
CJK UNIFIED IDEOGRAPH:'D2B6:53942:珔
CJK UNIFIED IDEOGRAPH:'D2B7:53943:珝
CJK UNIFIED IDEOGRAPH:'D2B8:53944:珚
CJK UNIFIED IDEOGRAPH:'D2B9:53945:珗
CJK UNIFIED IDEOGRAPH:'D2BA:53946:珘
CJK UNIFIED IDEOGRAPH:'D2BB:53947:珨
CJK UNIFIED IDEOGRAPH:'D2BC:53948:瓞
CJK UNIFIED IDEOGRAPH:'D2BD:53949:瓟
CJK UNIFIED IDEOGRAPH:'D2BE:53950:瓴
CJK UNIFIED IDEOGRAPH:'D2BF:53951:瓵
CJK UNIFIED IDEOGRAPH:'D2C0:53952:甡
CJK UNIFIED IDEOGRAPH:'D2C1:53953:畛
CJK UNIFIED IDEOGRAPH:'D2C2:53954:畟
CJK UNIFIED IDEOGRAPH:'D2C3:53955:疰
CJK UNIFIED IDEOGRAPH:'D2C4:53956:痁
CJK UNIFIED IDEOGRAPH:'D2C5:53957:疻
CJK UNIFIED IDEOGRAPH:'D2C6:53958:痄
CJK UNIFIED IDEOGRAPH:'D2C7:53959:痀
CJK UNIFIED IDEOGRAPH:'D2C8:53960:疿
CJK UNIFIED IDEOGRAPH:'D2C9:53961:疶
CJK UNIFIED IDEOGRAPH:'D2CA:53962:疺
CJK UNIFIED IDEOGRAPH:'D2CB:53963:皊
CJK UNIFIED IDEOGRAPH:'D2CC:53964:盉
CJK UNIFIED IDEOGRAPH:'D2CD:53965:眝
CJK UNIFIED IDEOGRAPH:'D2CE:53966:眛
CJK UNIFIED IDEOGRAPH:'D2CF:53967:眐
CJK UNIFIED IDEOGRAPH:'D2D0:53968:眓
CJK UNIFIED IDEOGRAPH:'D2D1:53969:眒
CJK UNIFIED IDEOGRAPH:'D2D2:53970:眣
CJK UNIFIED IDEOGRAPH:'D2D3:53971:眑
CJK UNIFIED IDEOGRAPH:'D2D4:53972:眕
CJK UNIFIED IDEOGRAPH:'D2D5:53973:眙
CJK UNIFIED IDEOGRAPH:'D2D6:53974:眚
CJK UNIFIED IDEOGRAPH:'D2D7:53975:眢
CJK UNIFIED IDEOGRAPH:'D2D8:53976:眧
CJK UNIFIED IDEOGRAPH:'D2D9:53977:砣
CJK UNIFIED IDEOGRAPH:'D2DA:53978:砬
CJK UNIFIED IDEOGRAPH:'D2DB:53979:砢
CJK UNIFIED IDEOGRAPH:'D2DC:53980:砵
CJK UNIFIED IDEOGRAPH:'D2DD:53981:砯
CJK UNIFIED IDEOGRAPH:'D2DE:53982:砨
CJK UNIFIED IDEOGRAPH:'D2DF:53983:砮
CJK UNIFIED IDEOGRAPH:'D2E0:53984:砫
CJK UNIFIED IDEOGRAPH:'D2E1:53985:砡
CJK UNIFIED IDEOGRAPH:'D2E2:53986:砩
CJK UNIFIED IDEOGRAPH:'D2E3:53987:砳
CJK UNIFIED IDEOGRAPH:'D2E4:53988:砪
CJK UNIFIED IDEOGRAPH:'D2E5:53989:砱
CJK UNIFIED IDEOGRAPH:'D2E6:53990:祔
CJK UNIFIED IDEOGRAPH:'D2E7:53991:祛
CJK UNIFIED IDEOGRAPH:'D2E8:53992:祏
CJK UNIFIED IDEOGRAPH:'D2E9:53993:祜
CJK UNIFIED IDEOGRAPH:'D2EA:53994:祓
CJK UNIFIED IDEOGRAPH:'D2EB:53995:祒
CJK UNIFIED IDEOGRAPH:'D2EC:53996:祑
CJK UNIFIED IDEOGRAPH:'D2ED:53997:秫
CJK UNIFIED IDEOGRAPH:'D2EE:53998:秬
CJK UNIFIED IDEOGRAPH:'D2EF:53999:秠
CJK UNIFIED IDEOGRAPH:'D2F0:54000:秮
CJK UNIFIED IDEOGRAPH:'D2F1:54001:秭
CJK UNIFIED IDEOGRAPH:'D2F2:54002:秪
CJK UNIFIED IDEOGRAPH:'D2F3:54003:秜
CJK UNIFIED IDEOGRAPH:'D2F4:54004:秞
CJK UNIFIED IDEOGRAPH:'D2F5:54005:秝
CJK UNIFIED IDEOGRAPH:'D2F6:54006:窆
CJK UNIFIED IDEOGRAPH:'D2F7:54007:窉
CJK UNIFIED IDEOGRAPH:'D2F8:54008:窅
CJK UNIFIED IDEOGRAPH:'D2F9:54009:窋
CJK UNIFIED IDEOGRAPH:'D2FA:54010:窌
CJK UNIFIED IDEOGRAPH:'D2FB:54011:窊
CJK UNIFIED IDEOGRAPH:'D2FC:54012:窇
CJK UNIFIED IDEOGRAPH:'D2FD:54013:竘
CJK UNIFIED IDEOGRAPH:'D2FE:54014:笐
CJK UNIFIED IDEOGRAPH:'D340:54080:笄
CJK UNIFIED IDEOGRAPH:'D341:54081:笓
CJK UNIFIED IDEOGRAPH:'D342:54082:笅
CJK UNIFIED IDEOGRAPH:'D343:54083:笏
CJK UNIFIED IDEOGRAPH:'D344:54084:笈
CJK UNIFIED IDEOGRAPH:'D345:54085:笊
CJK UNIFIED IDEOGRAPH:'D346:54086:笎
CJK UNIFIED IDEOGRAPH:'D347:54087:笉
CJK UNIFIED IDEOGRAPH:'D348:54088:笒
CJK UNIFIED IDEOGRAPH:'D349:54089:粄
CJK UNIFIED IDEOGRAPH:'D34A:54090:粑
CJK UNIFIED IDEOGRAPH:'D34B:54091:粊
CJK UNIFIED IDEOGRAPH:'D34C:54092:粌
CJK UNIFIED IDEOGRAPH:'D34D:54093:粈
CJK UNIFIED IDEOGRAPH:'D34E:54094:粍
CJK UNIFIED IDEOGRAPH:'D34F:54095:粅
CJK UNIFIED IDEOGRAPH:'D350:54096:紞
CJK UNIFIED IDEOGRAPH:'D351:54097:紝
CJK UNIFIED IDEOGRAPH:'D352:54098:紑
CJK UNIFIED IDEOGRAPH:'D353:54099:紎
CJK UNIFIED IDEOGRAPH:'D354:54100:紘
CJK UNIFIED IDEOGRAPH:'D355:54101:紖
CJK UNIFIED IDEOGRAPH:'D356:54102:紓
CJK UNIFIED IDEOGRAPH:'D357:54103:紟
CJK UNIFIED IDEOGRAPH:'D358:54104:紒
CJK UNIFIED IDEOGRAPH:'D359:54105:紏
CJK UNIFIED IDEOGRAPH:'D35A:54106:紌
CJK UNIFIED IDEOGRAPH:'D35B:54107:罜
CJK UNIFIED IDEOGRAPH:'D35C:54108:罡
CJK UNIFIED IDEOGRAPH:'D35D:54109:罞
CJK UNIFIED IDEOGRAPH:'D35E:54110:罠
CJK UNIFIED IDEOGRAPH:'D35F:54111:罝
CJK UNIFIED IDEOGRAPH:'D360:54112:罛
CJK UNIFIED IDEOGRAPH:'D361:54113:羖
CJK UNIFIED IDEOGRAPH:'D362:54114:羒
CJK UNIFIED IDEOGRAPH:'D363:54115:翃
CJK UNIFIED IDEOGRAPH:'D364:54116:翂
CJK UNIFIED IDEOGRAPH:'D365:54117:翀
CJK UNIFIED IDEOGRAPH:'D366:54118:耖
CJK UNIFIED IDEOGRAPH:'D367:54119:耾
CJK UNIFIED IDEOGRAPH:'D368:54120:耹
CJK UNIFIED IDEOGRAPH:'D369:54121:胺
CJK UNIFIED IDEOGRAPH:'D36A:54122:胲
CJK UNIFIED IDEOGRAPH:'D36B:54123:胹
CJK UNIFIED IDEOGRAPH:'D36C:54124:胵
CJK UNIFIED IDEOGRAPH:'D36D:54125:脁
CJK UNIFIED IDEOGRAPH:'D36E:54126:胻
CJK UNIFIED IDEOGRAPH:'D36F:54127:脀
CJK UNIFIED IDEOGRAPH:'D370:54128:舁
CJK UNIFIED IDEOGRAPH:'D371:54129:舯
CJK UNIFIED IDEOGRAPH:'D372:54130:舥
CJK UNIFIED IDEOGRAPH:'D373:54131:茳
CJK UNIFIED IDEOGRAPH:'D374:54132:茭
CJK UNIFIED IDEOGRAPH:'D375:54133:荄
CJK UNIFIED IDEOGRAPH:'D376:54134:茙
CJK UNIFIED IDEOGRAPH:'D377:54135:荑
CJK UNIFIED IDEOGRAPH:'D378:54136:茥
CJK UNIFIED IDEOGRAPH:'D379:54137:荖
CJK UNIFIED IDEOGRAPH:'D37A:54138:茿
CJK UNIFIED IDEOGRAPH:'D37B:54139:荁
CJK UNIFIED IDEOGRAPH:'D37C:54140:茦
CJK UNIFIED IDEOGRAPH:'D37D:54141:茜
CJK UNIFIED IDEOGRAPH:'D37E:54142:茢
CJK UNIFIED IDEOGRAPH:'D3A1:54177:荂
CJK UNIFIED IDEOGRAPH:'D3A2:54178:荎
CJK UNIFIED IDEOGRAPH:'D3A3:54179:茛
CJK UNIFIED IDEOGRAPH:'D3A4:54180:茪
CJK UNIFIED IDEOGRAPH:'D3A5:54181:茈
CJK UNIFIED IDEOGRAPH:'D3A6:54182:茼
CJK UNIFIED IDEOGRAPH:'D3A7:54183:荍
CJK UNIFIED IDEOGRAPH:'D3A8:54184:茖
CJK UNIFIED IDEOGRAPH:'D3A9:54185:茤
CJK UNIFIED IDEOGRAPH:'D3AA:54186:茠
CJK UNIFIED IDEOGRAPH:'D3AB:54187:茷
CJK UNIFIED IDEOGRAPH:'D3AC:54188:茯
CJK UNIFIED IDEOGRAPH:'D3AD:54189:茩
CJK UNIFIED IDEOGRAPH:'D3AE:54190:荇
CJK UNIFIED IDEOGRAPH:'D3AF:54191:荅
CJK UNIFIED IDEOGRAPH:'D3B0:54192:荌
CJK UNIFIED IDEOGRAPH:'D3B1:54193:荓
CJK UNIFIED IDEOGRAPH:'D3B2:54194:茞
CJK UNIFIED IDEOGRAPH:'D3B3:54195:茬
CJK UNIFIED IDEOGRAPH:'D3B4:54196:荋
CJK UNIFIED IDEOGRAPH:'D3B5:54197:茧
CJK UNIFIED IDEOGRAPH:'D3B6:54198:荈
CJK UNIFIED IDEOGRAPH:'D3B7:54199:虓
CJK UNIFIED IDEOGRAPH:'D3B8:54200:虒
CJK UNIFIED IDEOGRAPH:'D3B9:54201:蚢
CJK UNIFIED IDEOGRAPH:'D3BA:54202:蚨
CJK UNIFIED IDEOGRAPH:'D3BB:54203:蚖
CJK UNIFIED IDEOGRAPH:'D3BC:54204:蚍
CJK UNIFIED IDEOGRAPH:'D3BD:54205:蚑
CJK UNIFIED IDEOGRAPH:'D3BE:54206:蚞
CJK UNIFIED IDEOGRAPH:'D3BF:54207:蚇
CJK UNIFIED IDEOGRAPH:'D3C0:54208:蚗
CJK UNIFIED IDEOGRAPH:'D3C1:54209:蚆
CJK UNIFIED IDEOGRAPH:'D3C2:54210:蚋
CJK UNIFIED IDEOGRAPH:'D3C3:54211:蚚
CJK UNIFIED IDEOGRAPH:'D3C4:54212:蚅
CJK UNIFIED IDEOGRAPH:'D3C5:54213:蚥
CJK UNIFIED IDEOGRAPH:'D3C6:54214:蚙
CJK UNIFIED IDEOGRAPH:'D3C7:54215:蚡
CJK UNIFIED IDEOGRAPH:'D3C8:54216:蚧
CJK UNIFIED IDEOGRAPH:'D3C9:54217:蚕
CJK UNIFIED IDEOGRAPH:'D3CA:54218:蚘
CJK UNIFIED IDEOGRAPH:'D3CB:54219:蚎
CJK UNIFIED IDEOGRAPH:'D3CC:54220:蚝
CJK UNIFIED IDEOGRAPH:'D3CD:54221:蚐
CJK UNIFIED IDEOGRAPH:'D3CE:54222:蚔
CJK UNIFIED IDEOGRAPH:'D3CF:54223:衃
CJK UNIFIED IDEOGRAPH:'D3D0:54224:衄
CJK UNIFIED IDEOGRAPH:'D3D1:54225:衭
CJK UNIFIED IDEOGRAPH:'D3D2:54226:衵
CJK UNIFIED IDEOGRAPH:'D3D3:54227:衶
CJK UNIFIED IDEOGRAPH:'D3D4:54228:衲
CJK UNIFIED IDEOGRAPH:'D3D5:54229:袀
CJK UNIFIED IDEOGRAPH:'D3D6:54230:衱
CJK UNIFIED IDEOGRAPH:'D3D7:54231:衿
CJK UNIFIED IDEOGRAPH:'D3D8:54232:衯
CJK UNIFIED IDEOGRAPH:'D3D9:54233:袃
CJK UNIFIED IDEOGRAPH:'D3DA:54234:衾
CJK UNIFIED IDEOGRAPH:'D3DB:54235:衴
CJK UNIFIED IDEOGRAPH:'D3DC:54236:衼
CJK UNIFIED IDEOGRAPH:'D3DD:54237:訒
CJK UNIFIED IDEOGRAPH:'D3DE:54238:豇
CJK UNIFIED IDEOGRAPH:'D3DF:54239:豗
CJK UNIFIED IDEOGRAPH:'D3E0:54240:豻
CJK UNIFIED IDEOGRAPH:'D3E1:54241:貤
CJK UNIFIED IDEOGRAPH:'D3E2:54242:貣
CJK UNIFIED IDEOGRAPH:'D3E3:54243:赶
CJK UNIFIED IDEOGRAPH:'D3E4:54244:赸
CJK UNIFIED IDEOGRAPH:'D3E5:54245:趵
CJK UNIFIED IDEOGRAPH:'D3E6:54246:趷
CJK UNIFIED IDEOGRAPH:'D3E7:54247:趶
CJK UNIFIED IDEOGRAPH:'D3E8:54248:軑
CJK UNIFIED IDEOGRAPH:'D3E9:54249:軓
CJK UNIFIED IDEOGRAPH:'D3EA:54250:迾
CJK UNIFIED IDEOGRAPH:'D3EB:54251:迵
CJK UNIFIED IDEOGRAPH:'D3EC:54252:适
CJK UNIFIED IDEOGRAPH:'D3ED:54253:迿
CJK UNIFIED IDEOGRAPH:'D3EE:54254:迻
CJK UNIFIED IDEOGRAPH:'D3EF:54255:逄
CJK UNIFIED IDEOGRAPH:'D3F0:54256:迼
CJK UNIFIED IDEOGRAPH:'D3F1:54257:迶
CJK UNIFIED IDEOGRAPH:'D3F2:54258:郖
CJK UNIFIED IDEOGRAPH:'D3F3:54259:郠
CJK UNIFIED IDEOGRAPH:'D3F4:54260:郙
CJK UNIFIED IDEOGRAPH:'D3F5:54261:郚
CJK UNIFIED IDEOGRAPH:'D3F6:54262:郣
CJK UNIFIED IDEOGRAPH:'D3F7:54263:郟
CJK UNIFIED IDEOGRAPH:'D3F8:54264:郥
CJK UNIFIED IDEOGRAPH:'D3F9:54265:郘
CJK UNIFIED IDEOGRAPH:'D3FA:54266:郛
CJK UNIFIED IDEOGRAPH:'D3FB:54267:郗
CJK UNIFIED IDEOGRAPH:'D3FC:54268:郜
CJK UNIFIED IDEOGRAPH:'D3FD:54269:郤
CJK UNIFIED IDEOGRAPH:'D3FE:54270:酐
CJK UNIFIED IDEOGRAPH:'D440:54336:酎
CJK UNIFIED IDEOGRAPH:'D441:54337:酏
CJK UNIFIED IDEOGRAPH:'D442:54338:釕
CJK UNIFIED IDEOGRAPH:'D443:54339:釢
CJK UNIFIED IDEOGRAPH:'D444:54340:釚
CJK UNIFIED IDEOGRAPH:'D445:54341:陜
CJK UNIFIED IDEOGRAPH:'D446:54342:陟
CJK UNIFIED IDEOGRAPH:'D447:54343:隼
CJK UNIFIED IDEOGRAPH:'D448:54344:飣
CJK UNIFIED IDEOGRAPH:'D449:54345:髟
CJK UNIFIED IDEOGRAPH:'D44A:54346:鬯
CJK UNIFIED IDEOGRAPH:'D44B:54347:乿
CJK UNIFIED IDEOGRAPH:'D44C:54348:偰
CJK UNIFIED IDEOGRAPH:'D44D:54349:偪
CJK UNIFIED IDEOGRAPH:'D44E:54350:偡
CJK UNIFIED IDEOGRAPH:'D44F:54351:偞
CJK UNIFIED IDEOGRAPH:'D450:54352:偠
CJK UNIFIED IDEOGRAPH:'D451:54353:偓
CJK UNIFIED IDEOGRAPH:'D452:54354:偋
CJK UNIFIED IDEOGRAPH:'D453:54355:偝
CJK UNIFIED IDEOGRAPH:'D454:54356:偲
CJK UNIFIED IDEOGRAPH:'D455:54357:偈
CJK UNIFIED IDEOGRAPH:'D456:54358:偍
CJK UNIFIED IDEOGRAPH:'D457:54359:偁
CJK UNIFIED IDEOGRAPH:'D458:54360:偛
CJK UNIFIED IDEOGRAPH:'D459:54361:偊
CJK UNIFIED IDEOGRAPH:'D45A:54362:偢
CJK UNIFIED IDEOGRAPH:'D45B:54363:倕
CJK UNIFIED IDEOGRAPH:'D45C:54364:偅
CJK UNIFIED IDEOGRAPH:'D45D:54365:偟
CJK UNIFIED IDEOGRAPH:'D45E:54366:偩
CJK UNIFIED IDEOGRAPH:'D45F:54367:偫
CJK UNIFIED IDEOGRAPH:'D460:54368:偣
CJK UNIFIED IDEOGRAPH:'D461:54369:偤
CJK UNIFIED IDEOGRAPH:'D462:54370:偆
CJK UNIFIED IDEOGRAPH:'D463:54371:偀
CJK UNIFIED IDEOGRAPH:'D464:54372:偮
CJK UNIFIED IDEOGRAPH:'D465:54373:偳
CJK UNIFIED IDEOGRAPH:'D466:54374:偗
CJK UNIFIED IDEOGRAPH:'D467:54375:偑
CJK UNIFIED IDEOGRAPH:'D468:54376:凐
CJK UNIFIED IDEOGRAPH:'D469:54377:剫
CJK UNIFIED IDEOGRAPH:'D46A:54378:剭
CJK UNIFIED IDEOGRAPH:'D46B:54379:剬
CJK UNIFIED IDEOGRAPH:'D46C:54380:剮
CJK UNIFIED IDEOGRAPH:'D46D:54381:勖
CJK UNIFIED IDEOGRAPH:'D46E:54382:勓
CJK UNIFIED IDEOGRAPH:'D46F:54383:匭
CJK UNIFIED IDEOGRAPH:'D470:54384:厜
CJK UNIFIED IDEOGRAPH:'D471:54385:啵
CJK UNIFIED IDEOGRAPH:'D472:54386:啶
CJK UNIFIED IDEOGRAPH:'D473:54387:唼
CJK UNIFIED IDEOGRAPH:'D474:54388:啍
CJK UNIFIED IDEOGRAPH:'D475:54389:啐
CJK UNIFIED IDEOGRAPH:'D476:54390:唴
CJK UNIFIED IDEOGRAPH:'D477:54391:唪
CJK UNIFIED IDEOGRAPH:'D478:54392:啑
CJK UNIFIED IDEOGRAPH:'D479:54393:啢
CJK UNIFIED IDEOGRAPH:'D47A:54394:唶
CJK UNIFIED IDEOGRAPH:'D47B:54395:唵
CJK UNIFIED IDEOGRAPH:'D47C:54396:唰
CJK UNIFIED IDEOGRAPH:'D47D:54397:啒
CJK UNIFIED IDEOGRAPH:'D47E:54398:啅
CJK UNIFIED IDEOGRAPH:'D4A1:54433:唌
CJK UNIFIED IDEOGRAPH:'D4A2:54434:唲
CJK UNIFIED IDEOGRAPH:'D4A3:54435:啥
CJK UNIFIED IDEOGRAPH:'D4A4:54436:啎
CJK UNIFIED IDEOGRAPH:'D4A5:54437:唹
CJK UNIFIED IDEOGRAPH:'D4A6:54438:啈
CJK UNIFIED IDEOGRAPH:'D4A7:54439:唭
CJK UNIFIED IDEOGRAPH:'D4A8:54440:唻
CJK UNIFIED IDEOGRAPH:'D4A9:54441:啀
CJK UNIFIED IDEOGRAPH:'D4AA:54442:啋
CJK UNIFIED IDEOGRAPH:'D4AB:54443:圊
CJK UNIFIED IDEOGRAPH:'D4AC:54444:圇
CJK UNIFIED IDEOGRAPH:'D4AD:54445:埻
CJK UNIFIED IDEOGRAPH:'D4AE:54446:堔
CJK UNIFIED IDEOGRAPH:'D4AF:54447:埢
CJK UNIFIED IDEOGRAPH:'D4B0:54448:埶
CJK UNIFIED IDEOGRAPH:'D4B1:54449:埜
CJK UNIFIED IDEOGRAPH:'D4B2:54450:埴
CJK UNIFIED IDEOGRAPH:'D4B3:54451:堀
CJK UNIFIED IDEOGRAPH:'D4B4:54452:埭
CJK UNIFIED IDEOGRAPH:'D4B5:54453:埽
CJK UNIFIED IDEOGRAPH:'D4B6:54454:堈
CJK UNIFIED IDEOGRAPH:'D4B7:54455:埸
CJK UNIFIED IDEOGRAPH:'D4B8:54456:堋
CJK UNIFIED IDEOGRAPH:'D4B9:54457:埳
CJK UNIFIED IDEOGRAPH:'D4BA:54458:埏
CJK UNIFIED IDEOGRAPH:'D4BB:54459:堇
CJK UNIFIED IDEOGRAPH:'D4BC:54460:埮
CJK UNIFIED IDEOGRAPH:'D4BD:54461:埣
CJK UNIFIED IDEOGRAPH:'D4BE:54462:埲
CJK UNIFIED IDEOGRAPH:'D4BF:54463:埥
CJK UNIFIED IDEOGRAPH:'D4C0:54464:埬
CJK UNIFIED IDEOGRAPH:'D4C1:54465:埡
CJK UNIFIED IDEOGRAPH:'D4C2:54466:堎
CJK UNIFIED IDEOGRAPH:'D4C3:54467:埼
CJK UNIFIED IDEOGRAPH:'D4C4:54468:堐
CJK UNIFIED IDEOGRAPH:'D4C5:54469:埧
CJK UNIFIED IDEOGRAPH:'D4C6:54470:堁
CJK UNIFIED IDEOGRAPH:'D4C7:54471:堌
CJK UNIFIED IDEOGRAPH:'D4C8:54472:埱
CJK UNIFIED IDEOGRAPH:'D4C9:54473:埩
CJK UNIFIED IDEOGRAPH:'D4CA:54474:埰
CJK UNIFIED IDEOGRAPH:'D4CB:54475:堍
CJK UNIFIED IDEOGRAPH:'D4CC:54476:堄
CJK UNIFIED IDEOGRAPH:'D4CD:54477:奜
CJK UNIFIED IDEOGRAPH:'D4CE:54478:婠
CJK UNIFIED IDEOGRAPH:'D4CF:54479:婘
CJK UNIFIED IDEOGRAPH:'D4D0:54480:婕
CJK UNIFIED IDEOGRAPH:'D4D1:54481:婧
CJK UNIFIED IDEOGRAPH:'D4D2:54482:婞
CJK UNIFIED IDEOGRAPH:'D4D3:54483:娸
CJK UNIFIED IDEOGRAPH:'D4D4:54484:娵
CJK UNIFIED IDEOGRAPH:'D4D5:54485:婭
CJK UNIFIED IDEOGRAPH:'D4D6:54486:婐
CJK UNIFIED IDEOGRAPH:'D4D7:54487:婟
CJK UNIFIED IDEOGRAPH:'D4D8:54488:婥
CJK UNIFIED IDEOGRAPH:'D4D9:54489:婬
CJK UNIFIED IDEOGRAPH:'D4DA:54490:婓
CJK UNIFIED IDEOGRAPH:'D4DB:54491:婤
CJK UNIFIED IDEOGRAPH:'D4DC:54492:婗
CJK UNIFIED IDEOGRAPH:'D4DD:54493:婃
CJK UNIFIED IDEOGRAPH:'D4DE:54494:婝
CJK UNIFIED IDEOGRAPH:'D4DF:54495:婒
CJK UNIFIED IDEOGRAPH:'D4E0:54496:婄
CJK UNIFIED IDEOGRAPH:'D4E1:54497:婛
CJK UNIFIED IDEOGRAPH:'D4E2:54498:婈
CJK UNIFIED IDEOGRAPH:'D4E3:54499:媎
CJK UNIFIED IDEOGRAPH:'D4E4:54500:娾
CJK UNIFIED IDEOGRAPH:'D4E5:54501:婍
CJK UNIFIED IDEOGRAPH:'D4E6:54502:娹
CJK UNIFIED IDEOGRAPH:'D4E7:54503:婌
CJK UNIFIED IDEOGRAPH:'D4E8:54504:婰
CJK UNIFIED IDEOGRAPH:'D4E9:54505:婩
CJK UNIFIED IDEOGRAPH:'D4EA:54506:婇
CJK UNIFIED IDEOGRAPH:'D4EB:54507:婑
CJK UNIFIED IDEOGRAPH:'D4EC:54508:婖
CJK UNIFIED IDEOGRAPH:'D4ED:54509:婂
CJK UNIFIED IDEOGRAPH:'D4EE:54510:婜
CJK UNIFIED IDEOGRAPH:'D4EF:54511:孲
CJK UNIFIED IDEOGRAPH:'D4F0:54512:孮
CJK UNIFIED IDEOGRAPH:'D4F1:54513:寁
CJK UNIFIED IDEOGRAPH:'D4F2:54514:寀
CJK UNIFIED IDEOGRAPH:'D4F3:54515:屙
CJK UNIFIED IDEOGRAPH:'D4F4:54516:崞
CJK UNIFIED IDEOGRAPH:'D4F5:54517:崋
CJK UNIFIED IDEOGRAPH:'D4F6:54518:崝
CJK UNIFIED IDEOGRAPH:'D4F7:54519:崚
CJK UNIFIED IDEOGRAPH:'D4F8:54520:崠
CJK UNIFIED IDEOGRAPH:'D4F9:54521:崌
CJK UNIFIED IDEOGRAPH:'D4FA:54522:崨
CJK UNIFIED IDEOGRAPH:'D4FB:54523:崍
CJK UNIFIED IDEOGRAPH:'D4FC:54524:崦
CJK UNIFIED IDEOGRAPH:'D4FD:54525:崥
CJK UNIFIED IDEOGRAPH:'D4FE:54526:崏
CJK UNIFIED IDEOGRAPH:'D540:54592:崰
CJK UNIFIED IDEOGRAPH:'D541:54593:崒
CJK UNIFIED IDEOGRAPH:'D542:54594:崣
CJK UNIFIED IDEOGRAPH:'D543:54595:崟
CJK UNIFIED IDEOGRAPH:'D544:54596:崮
CJK UNIFIED IDEOGRAPH:'D545:54597:帾
CJK UNIFIED IDEOGRAPH:'D546:54598:帴
CJK UNIFIED IDEOGRAPH:'D547:54599:庱
CJK UNIFIED IDEOGRAPH:'D548:54600:庴
CJK UNIFIED IDEOGRAPH:'D549:54601:庹
CJK UNIFIED IDEOGRAPH:'D54A:54602:庲
CJK UNIFIED IDEOGRAPH:'D54B:54603:庳
CJK UNIFIED IDEOGRAPH:'D54C:54604:弶
CJK UNIFIED IDEOGRAPH:'D54D:54605:弸
CJK UNIFIED IDEOGRAPH:'D54E:54606:徛
CJK UNIFIED IDEOGRAPH:'D54F:54607:徖
CJK UNIFIED IDEOGRAPH:'D550:54608:徟
CJK UNIFIED IDEOGRAPH:'D551:54609:悊
CJK UNIFIED IDEOGRAPH:'D552:54610:悐
CJK UNIFIED IDEOGRAPH:'D553:54611:悆
CJK UNIFIED IDEOGRAPH:'D554:54612:悾
CJK UNIFIED IDEOGRAPH:'D555:54613:悰
CJK UNIFIED IDEOGRAPH:'D556:54614:悺
CJK UNIFIED IDEOGRAPH:'D557:54615:惓
CJK UNIFIED IDEOGRAPH:'D558:54616:惔
CJK UNIFIED IDEOGRAPH:'D559:54617:惏
CJK UNIFIED IDEOGRAPH:'D55A:54618:惤
CJK UNIFIED IDEOGRAPH:'D55B:54619:惙
CJK UNIFIED IDEOGRAPH:'D55C:54620:惝
CJK UNIFIED IDEOGRAPH:'D55D:54621:惈
CJK UNIFIED IDEOGRAPH:'D55E:54622:悱
CJK UNIFIED IDEOGRAPH:'D55F:54623:惛
CJK UNIFIED IDEOGRAPH:'D560:54624:悷
CJK UNIFIED IDEOGRAPH:'D561:54625:惊
CJK UNIFIED IDEOGRAPH:'D562:54626:悿
CJK UNIFIED IDEOGRAPH:'D563:54627:惃
CJK UNIFIED IDEOGRAPH:'D564:54628:惍
CJK UNIFIED IDEOGRAPH:'D565:54629:惀
CJK UNIFIED IDEOGRAPH:'D566:54630:挲
CJK UNIFIED IDEOGRAPH:'D567:54631:捥
CJK UNIFIED IDEOGRAPH:'D568:54632:掊
CJK UNIFIED IDEOGRAPH:'D569:54633:掂
CJK UNIFIED IDEOGRAPH:'D56A:54634:捽
CJK UNIFIED IDEOGRAPH:'D56B:54635:掽
CJK UNIFIED IDEOGRAPH:'D56C:54636:掞
CJK UNIFIED IDEOGRAPH:'D56D:54637:掭
CJK UNIFIED IDEOGRAPH:'D56E:54638:掝
CJK UNIFIED IDEOGRAPH:'D56F:54639:掗
CJK UNIFIED IDEOGRAPH:'D570:54640:掫
CJK UNIFIED IDEOGRAPH:'D571:54641:掎
CJK UNIFIED IDEOGRAPH:'D572:54642:捯
CJK UNIFIED IDEOGRAPH:'D573:54643:掇
CJK UNIFIED IDEOGRAPH:'D574:54644:掐
CJK UNIFIED IDEOGRAPH:'D575:54645:据
CJK UNIFIED IDEOGRAPH:'D576:54646:掯
CJK UNIFIED IDEOGRAPH:'D577:54647:捵
CJK UNIFIED IDEOGRAPH:'D578:54648:掜
CJK UNIFIED IDEOGRAPH:'D579:54649:捭
CJK UNIFIED IDEOGRAPH:'D57A:54650:掮
CJK UNIFIED IDEOGRAPH:'D57B:54651:捼
CJK UNIFIED IDEOGRAPH:'D57C:54652:掤
CJK UNIFIED IDEOGRAPH:'D57D:54653:挻
CJK UNIFIED IDEOGRAPH:'D57E:54654:掟
CJK UNIFIED IDEOGRAPH:'D5A1:54689:捸
CJK UNIFIED IDEOGRAPH:'D5A2:54690:掅
CJK UNIFIED IDEOGRAPH:'D5A3:54691:掁
CJK UNIFIED IDEOGRAPH:'D5A4:54692:掑
CJK UNIFIED IDEOGRAPH:'D5A5:54693:掍
CJK UNIFIED IDEOGRAPH:'D5A6:54694:捰
CJK UNIFIED IDEOGRAPH:'D5A7:54695:敓
CJK UNIFIED IDEOGRAPH:'D5A8:54696:旍
CJK UNIFIED IDEOGRAPH:'D5A9:54697:晥
CJK UNIFIED IDEOGRAPH:'D5AA:54698:晡
CJK UNIFIED IDEOGRAPH:'D5AB:54699:晛
CJK UNIFIED IDEOGRAPH:'D5AC:54700:晙
CJK UNIFIED IDEOGRAPH:'D5AD:54701:晜
CJK UNIFIED IDEOGRAPH:'D5AE:54702:晢
CJK UNIFIED IDEOGRAPH:'D5AF:54703:朘
CJK UNIFIED IDEOGRAPH:'D5B0:54704:桹
CJK UNIFIED IDEOGRAPH:'D5B1:54705:梇
CJK UNIFIED IDEOGRAPH:'D5B2:54706:梐
CJK UNIFIED IDEOGRAPH:'D5B3:54707:梜
CJK UNIFIED IDEOGRAPH:'D5B4:54708:桭
CJK UNIFIED IDEOGRAPH:'D5B5:54709:桮
CJK UNIFIED IDEOGRAPH:'D5B6:54710:梮
CJK UNIFIED IDEOGRAPH:'D5B7:54711:梫
CJK UNIFIED IDEOGRAPH:'D5B8:54712:楖
CJK UNIFIED IDEOGRAPH:'D5B9:54713:桯
CJK UNIFIED IDEOGRAPH:'D5BA:54714:梣
CJK UNIFIED IDEOGRAPH:'D5BB:54715:梬
CJK UNIFIED IDEOGRAPH:'D5BC:54716:梩
CJK UNIFIED IDEOGRAPH:'D5BD:54717:桵
CJK UNIFIED IDEOGRAPH:'D5BE:54718:桴
CJK UNIFIED IDEOGRAPH:'D5BF:54719:梲
CJK UNIFIED IDEOGRAPH:'D5C0:54720:梏
CJK UNIFIED IDEOGRAPH:'D5C1:54721:桷
CJK UNIFIED IDEOGRAPH:'D5C2:54722:梒
CJK UNIFIED IDEOGRAPH:'D5C3:54723:桼
CJK UNIFIED IDEOGRAPH:'D5C4:54724:桫
CJK UNIFIED IDEOGRAPH:'D5C5:54725:桲
CJK UNIFIED IDEOGRAPH:'D5C6:54726:梪
CJK UNIFIED IDEOGRAPH:'D5C7:54727:梀
CJK UNIFIED IDEOGRAPH:'D5C8:54728:桱
CJK UNIFIED IDEOGRAPH:'D5C9:54729:桾
CJK UNIFIED IDEOGRAPH:'D5CA:54730:梛
CJK UNIFIED IDEOGRAPH:'D5CB:54731:梖
CJK UNIFIED IDEOGRAPH:'D5CC:54732:梋
CJK UNIFIED IDEOGRAPH:'D5CD:54733:梠
CJK UNIFIED IDEOGRAPH:'D5CE:54734:梉
CJK UNIFIED IDEOGRAPH:'D5CF:54735:梤
CJK UNIFIED IDEOGRAPH:'D5D0:54736:桸
CJK UNIFIED IDEOGRAPH:'D5D1:54737:桻
CJK UNIFIED IDEOGRAPH:'D5D2:54738:梑
CJK UNIFIED IDEOGRAPH:'D5D3:54739:梌
CJK UNIFIED IDEOGRAPH:'D5D4:54740:梊
CJK UNIFIED IDEOGRAPH:'D5D5:54741:桽
CJK UNIFIED IDEOGRAPH:'D5D6:54742:欶
CJK UNIFIED IDEOGRAPH:'D5D7:54743:欳
CJK UNIFIED IDEOGRAPH:'D5D8:54744:欷
CJK UNIFIED IDEOGRAPH:'D5D9:54745:欸
CJK UNIFIED IDEOGRAPH:'D5DA:54746:殑
CJK UNIFIED IDEOGRAPH:'D5DB:54747:殏
CJK UNIFIED IDEOGRAPH:'D5DC:54748:殍
CJK UNIFIED IDEOGRAPH:'D5DD:54749:殎
CJK UNIFIED IDEOGRAPH:'D5DE:54750:殌
CJK UNIFIED IDEOGRAPH:'D5DF:54751:氪
CJK UNIFIED IDEOGRAPH:'D5E0:54752:淀
CJK UNIFIED IDEOGRAPH:'D5E1:54753:涫
CJK UNIFIED IDEOGRAPH:'D5E2:54754:涴
CJK UNIFIED IDEOGRAPH:'D5E3:54755:涳
CJK UNIFIED IDEOGRAPH:'D5E4:54756:湴
CJK UNIFIED IDEOGRAPH:'D5E5:54757:涬
CJK UNIFIED IDEOGRAPH:'D5E6:54758:淩
CJK UNIFIED IDEOGRAPH:'D5E7:54759:淢
CJK UNIFIED IDEOGRAPH:'D5E8:54760:涷
CJK UNIFIED IDEOGRAPH:'D5E9:54761:淶
CJK UNIFIED IDEOGRAPH:'D5EA:54762:淔
CJK UNIFIED IDEOGRAPH:'D5EB:54763:渀
CJK UNIFIED IDEOGRAPH:'D5EC:54764:淈
CJK UNIFIED IDEOGRAPH:'D5ED:54765:淠
CJK UNIFIED IDEOGRAPH:'D5EE:54766:淟
CJK UNIFIED IDEOGRAPH:'D5EF:54767:淖
CJK UNIFIED IDEOGRAPH:'D5F0:54768:涾
CJK UNIFIED IDEOGRAPH:'D5F1:54769:淥
CJK UNIFIED IDEOGRAPH:'D5F2:54770:淜
CJK UNIFIED IDEOGRAPH:'D5F3:54771:淝
CJK UNIFIED IDEOGRAPH:'D5F4:54772:淛
CJK UNIFIED IDEOGRAPH:'D5F5:54773:淴
CJK UNIFIED IDEOGRAPH:'D5F6:54774:淊
CJK UNIFIED IDEOGRAPH:'D5F7:54775:涽
CJK UNIFIED IDEOGRAPH:'D5F8:54776:淭
CJK UNIFIED IDEOGRAPH:'D5F9:54777:淰
CJK UNIFIED IDEOGRAPH:'D5FA:54778:涺
CJK UNIFIED IDEOGRAPH:'D5FB:54779:淕
CJK UNIFIED IDEOGRAPH:'D5FC:54780:淂
CJK UNIFIED IDEOGRAPH:'D5FD:54781:淏
CJK UNIFIED IDEOGRAPH:'D5FE:54782:淉
CJK UNIFIED IDEOGRAPH:'D640:54848:淐
CJK UNIFIED IDEOGRAPH:'D641:54849:淲
CJK UNIFIED IDEOGRAPH:'D642:54850:淓
CJK UNIFIED IDEOGRAPH:'D643:54851:淽
CJK UNIFIED IDEOGRAPH:'D644:54852:淗
CJK UNIFIED IDEOGRAPH:'D645:54853:淍
CJK UNIFIED IDEOGRAPH:'D646:54854:淣
CJK UNIFIED IDEOGRAPH:'D647:54855:涻
CJK UNIFIED IDEOGRAPH:'D648:54856:烺
CJK UNIFIED IDEOGRAPH:'D649:54857:焍
CJK UNIFIED IDEOGRAPH:'D64A:54858:烷
CJK UNIFIED IDEOGRAPH:'D64B:54859:焗
CJK UNIFIED IDEOGRAPH:'D64C:54860:烴
CJK UNIFIED IDEOGRAPH:'D64D:54861:焌
CJK UNIFIED IDEOGRAPH:'D64E:54862:烰
CJK UNIFIED IDEOGRAPH:'D64F:54863:焄
CJK UNIFIED IDEOGRAPH:'D650:54864:烳
CJK UNIFIED IDEOGRAPH:'D651:54865:焐
CJK UNIFIED IDEOGRAPH:'D652:54866:烼
CJK UNIFIED IDEOGRAPH:'D653:54867:烿
CJK UNIFIED IDEOGRAPH:'D654:54868:焆
CJK UNIFIED IDEOGRAPH:'D655:54869:焓
CJK UNIFIED IDEOGRAPH:'D656:54870:焀
CJK UNIFIED IDEOGRAPH:'D657:54871:烸
CJK UNIFIED IDEOGRAPH:'D658:54872:烶
CJK UNIFIED IDEOGRAPH:'D659:54873:焋
CJK UNIFIED IDEOGRAPH:'D65A:54874:焂
CJK UNIFIED IDEOGRAPH:'D65B:54875:焎
CJK UNIFIED IDEOGRAPH:'D65C:54876:牾
CJK UNIFIED IDEOGRAPH:'D65D:54877:牻
CJK UNIFIED IDEOGRAPH:'D65E:54878:牼
CJK UNIFIED IDEOGRAPH:'D65F:54879:牿
CJK UNIFIED IDEOGRAPH:'D660:54880:猝
CJK UNIFIED IDEOGRAPH:'D661:54881:猗
CJK UNIFIED IDEOGRAPH:'D662:54882:猇
CJK UNIFIED IDEOGRAPH:'D663:54883:猑
CJK UNIFIED IDEOGRAPH:'D664:54884:猘
CJK UNIFIED IDEOGRAPH:'D665:54885:猊
CJK UNIFIED IDEOGRAPH:'D666:54886:猈
CJK UNIFIED IDEOGRAPH:'D667:54887:狿
CJK UNIFIED IDEOGRAPH:'D668:54888:猏
CJK UNIFIED IDEOGRAPH:'D669:54889:猞
CJK UNIFIED IDEOGRAPH:'D66A:54890:玈
CJK UNIFIED IDEOGRAPH:'D66B:54891:珶
CJK UNIFIED IDEOGRAPH:'D66C:54892:珸
CJK UNIFIED IDEOGRAPH:'D66D:54893:珵
CJK UNIFIED IDEOGRAPH:'D66E:54894:琄
CJK UNIFIED IDEOGRAPH:'D66F:54895:琁
CJK UNIFIED IDEOGRAPH:'D670:54896:珽
CJK UNIFIED IDEOGRAPH:'D671:54897:琇
CJK UNIFIED IDEOGRAPH:'D672:54898:琀
CJK UNIFIED IDEOGRAPH:'D673:54899:珺
CJK UNIFIED IDEOGRAPH:'D674:54900:珼
CJK UNIFIED IDEOGRAPH:'D675:54901:珿
CJK UNIFIED IDEOGRAPH:'D676:54902:琌
CJK UNIFIED IDEOGRAPH:'D677:54903:琋
CJK UNIFIED IDEOGRAPH:'D678:54904:珴
CJK UNIFIED IDEOGRAPH:'D679:54905:琈
CJK UNIFIED IDEOGRAPH:'D67A:54906:畤
CJK UNIFIED IDEOGRAPH:'D67B:54907:畣
CJK UNIFIED IDEOGRAPH:'D67C:54908:痎
CJK UNIFIED IDEOGRAPH:'D67D:54909:痒
CJK UNIFIED IDEOGRAPH:'D67E:54910:痏
CJK UNIFIED IDEOGRAPH:'D6A1:54945:痋
CJK UNIFIED IDEOGRAPH:'D6A2:54946:痌
CJK UNIFIED IDEOGRAPH:'D6A3:54947:痑
CJK UNIFIED IDEOGRAPH:'D6A4:54948:痐
CJK UNIFIED IDEOGRAPH:'D6A5:54949:皏
CJK UNIFIED IDEOGRAPH:'D6A6:54950:皉
CJK UNIFIED IDEOGRAPH:'D6A7:54951:盓
CJK UNIFIED IDEOGRAPH:'D6A8:54952:眹
CJK UNIFIED IDEOGRAPH:'D6A9:54953:眯
CJK UNIFIED IDEOGRAPH:'D6AA:54954:眭
CJK UNIFIED IDEOGRAPH:'D6AB:54955:眱
CJK UNIFIED IDEOGRAPH:'D6AC:54956:眲
CJK UNIFIED IDEOGRAPH:'D6AD:54957:眴
CJK UNIFIED IDEOGRAPH:'D6AE:54958:眳
CJK UNIFIED IDEOGRAPH:'D6AF:54959:眽
CJK UNIFIED IDEOGRAPH:'D6B0:54960:眥
CJK UNIFIED IDEOGRAPH:'D6B1:54961:眻
CJK UNIFIED IDEOGRAPH:'D6B2:54962:眵
CJK UNIFIED IDEOGRAPH:'D6B3:54963:硈
CJK UNIFIED IDEOGRAPH:'D6B4:54964:硒
CJK UNIFIED IDEOGRAPH:'D6B5:54965:硉
CJK UNIFIED IDEOGRAPH:'D6B6:54966:硍
CJK UNIFIED IDEOGRAPH:'D6B7:54967:硊
CJK UNIFIED IDEOGRAPH:'D6B8:54968:硌
CJK UNIFIED IDEOGRAPH:'D6B9:54969:砦
CJK UNIFIED IDEOGRAPH:'D6BA:54970:硅
CJK UNIFIED IDEOGRAPH:'D6BB:54971:硐
CJK UNIFIED IDEOGRAPH:'D6BC:54972:祤
CJK UNIFIED IDEOGRAPH:'D6BD:54973:祧
CJK UNIFIED IDEOGRAPH:'D6BE:54974:祩
CJK UNIFIED IDEOGRAPH:'D6BF:54975:祪
CJK UNIFIED IDEOGRAPH:'D6C0:54976:祣
CJK UNIFIED IDEOGRAPH:'D6C1:54977:祫
CJK UNIFIED IDEOGRAPH:'D6C2:54978:祡
CJK UNIFIED IDEOGRAPH:'D6C3:54979:离
CJK UNIFIED IDEOGRAPH:'D6C4:54980:秺
CJK UNIFIED IDEOGRAPH:'D6C5:54981:秸
CJK UNIFIED IDEOGRAPH:'D6C6:54982:秶
CJK UNIFIED IDEOGRAPH:'D6C7:54983:秷
CJK UNIFIED IDEOGRAPH:'D6C8:54984:窏
CJK UNIFIED IDEOGRAPH:'D6C9:54985:窔
CJK UNIFIED IDEOGRAPH:'D6CA:54986:窐
CJK UNIFIED IDEOGRAPH:'D6CB:54987:笵
CJK UNIFIED IDEOGRAPH:'D6CC:54988:筇
CJK UNIFIED IDEOGRAPH:'D6CD:54989:笴
CJK UNIFIED IDEOGRAPH:'D6CE:54990:笥
CJK UNIFIED IDEOGRAPH:'D6CF:54991:笰
CJK UNIFIED IDEOGRAPH:'D6D0:54992:笢
CJK UNIFIED IDEOGRAPH:'D6D1:54993:笤
CJK UNIFIED IDEOGRAPH:'D6D2:54994:笳
CJK UNIFIED IDEOGRAPH:'D6D3:54995:笘
CJK UNIFIED IDEOGRAPH:'D6D4:54996:笪
CJK UNIFIED IDEOGRAPH:'D6D5:54997:笝
CJK UNIFIED IDEOGRAPH:'D6D6:54998:笱
CJK UNIFIED IDEOGRAPH:'D6D7:54999:笫
CJK UNIFIED IDEOGRAPH:'D6D8:55000:笭
CJK UNIFIED IDEOGRAPH:'D6D9:55001:笯
CJK UNIFIED IDEOGRAPH:'D6DA:55002:笲
CJK UNIFIED IDEOGRAPH:'D6DB:55003:笸
CJK UNIFIED IDEOGRAPH:'D6DC:55004:笚
CJK UNIFIED IDEOGRAPH:'D6DD:55005:笣
CJK UNIFIED IDEOGRAPH:'D6DE:55006:粔
CJK UNIFIED IDEOGRAPH:'D6DF:55007:粘
CJK UNIFIED IDEOGRAPH:'D6E0:55008:粖
CJK UNIFIED IDEOGRAPH:'D6E1:55009:粣
CJK UNIFIED IDEOGRAPH:'D6E2:55010:紵
CJK UNIFIED IDEOGRAPH:'D6E3:55011:紽
CJK UNIFIED IDEOGRAPH:'D6E4:55012:紸
CJK UNIFIED IDEOGRAPH:'D6E5:55013:紶
CJK UNIFIED IDEOGRAPH:'D6E6:55014:紺
CJK UNIFIED IDEOGRAPH:'D6E7:55015:絅
CJK UNIFIED IDEOGRAPH:'D6E8:55016:紬
CJK UNIFIED IDEOGRAPH:'D6E9:55017:紩
CJK UNIFIED IDEOGRAPH:'D6EA:55018:絁
CJK UNIFIED IDEOGRAPH:'D6EB:55019:絇
CJK UNIFIED IDEOGRAPH:'D6EC:55020:紾
CJK UNIFIED IDEOGRAPH:'D6ED:55021:紿
CJK UNIFIED IDEOGRAPH:'D6EE:55022:絊
CJK UNIFIED IDEOGRAPH:'D6EF:55023:紻
CJK UNIFIED IDEOGRAPH:'D6F0:55024:紨
CJK UNIFIED IDEOGRAPH:'D6F1:55025:罣
CJK UNIFIED IDEOGRAPH:'D6F2:55026:羕
CJK UNIFIED IDEOGRAPH:'D6F3:55027:羜
CJK UNIFIED IDEOGRAPH:'D6F4:55028:羝
CJK UNIFIED IDEOGRAPH:'D6F5:55029:羛
CJK UNIFIED IDEOGRAPH:'D6F6:55030:翊
CJK UNIFIED IDEOGRAPH:'D6F7:55031:翋
CJK UNIFIED IDEOGRAPH:'D6F8:55032:翍
CJK UNIFIED IDEOGRAPH:'D6F9:55033:翐
CJK UNIFIED IDEOGRAPH:'D6FA:55034:翑
CJK UNIFIED IDEOGRAPH:'D6FB:55035:翇
CJK UNIFIED IDEOGRAPH:'D6FC:55036:翏
CJK UNIFIED IDEOGRAPH:'D6FD:55037:翉
CJK UNIFIED IDEOGRAPH:'D6FE:55038:耟
CJK UNIFIED IDEOGRAPH:'D740:55104:耞
CJK UNIFIED IDEOGRAPH:'D741:55105:耛
CJK UNIFIED IDEOGRAPH:'D742:55106:聇
CJK UNIFIED IDEOGRAPH:'D743:55107:聃
CJK UNIFIED IDEOGRAPH:'D744:55108:聈
CJK UNIFIED IDEOGRAPH:'D745:55109:脘
CJK UNIFIED IDEOGRAPH:'D746:55110:脥
CJK UNIFIED IDEOGRAPH:'D747:55111:脙
CJK UNIFIED IDEOGRAPH:'D748:55112:脛
CJK UNIFIED IDEOGRAPH:'D749:55113:脭
CJK UNIFIED IDEOGRAPH:'D74A:55114:脟
CJK UNIFIED IDEOGRAPH:'D74B:55115:脬
CJK UNIFIED IDEOGRAPH:'D74C:55116:脞
CJK UNIFIED IDEOGRAPH:'D74D:55117:脡
CJK UNIFIED IDEOGRAPH:'D74E:55118:脕
CJK UNIFIED IDEOGRAPH:'D74F:55119:脧
CJK UNIFIED IDEOGRAPH:'D750:55120:脝
CJK UNIFIED IDEOGRAPH:'D751:55121:脢
CJK UNIFIED IDEOGRAPH:'D752:55122:舑
CJK UNIFIED IDEOGRAPH:'D753:55123:舸
CJK UNIFIED IDEOGRAPH:'D754:55124:舳
CJK UNIFIED IDEOGRAPH:'D755:55125:舺
CJK UNIFIED IDEOGRAPH:'D756:55126:舴
CJK UNIFIED IDEOGRAPH:'D757:55127:舲
CJK UNIFIED IDEOGRAPH:'D758:55128:艴
CJK UNIFIED IDEOGRAPH:'D759:55129:莐
CJK UNIFIED IDEOGRAPH:'D75A:55130:莣
CJK UNIFIED IDEOGRAPH:'D75B:55131:莨
CJK UNIFIED IDEOGRAPH:'D75C:55132:莍
CJK UNIFIED IDEOGRAPH:'D75D:55133:荺
CJK UNIFIED IDEOGRAPH:'D75E:55134:荳
CJK UNIFIED IDEOGRAPH:'D75F:55135:莤
CJK UNIFIED IDEOGRAPH:'D760:55136:荴
CJK UNIFIED IDEOGRAPH:'D761:55137:莏
CJK UNIFIED IDEOGRAPH:'D762:55138:莁
CJK UNIFIED IDEOGRAPH:'D763:55139:莕
CJK UNIFIED IDEOGRAPH:'D764:55140:莙
CJK UNIFIED IDEOGRAPH:'D765:55141:荵
CJK UNIFIED IDEOGRAPH:'D766:55142:莔
CJK UNIFIED IDEOGRAPH:'D767:55143:莩
CJK UNIFIED IDEOGRAPH:'D768:55144:荽
CJK UNIFIED IDEOGRAPH:'D769:55145:莃
CJK UNIFIED IDEOGRAPH:'D76A:55146:莌
CJK UNIFIED IDEOGRAPH:'D76B:55147:莝
CJK UNIFIED IDEOGRAPH:'D76C:55148:莛
CJK UNIFIED IDEOGRAPH:'D76D:55149:莪
CJK UNIFIED IDEOGRAPH:'D76E:55150:莋
CJK UNIFIED IDEOGRAPH:'D76F:55151:荾
CJK UNIFIED IDEOGRAPH:'D770:55152:莥
CJK UNIFIED IDEOGRAPH:'D771:55153:莯
CJK UNIFIED IDEOGRAPH:'D772:55154:莈
CJK UNIFIED IDEOGRAPH:'D773:55155:莗
CJK UNIFIED IDEOGRAPH:'D774:55156:莰
CJK UNIFIED IDEOGRAPH:'D775:55157:荿
CJK UNIFIED IDEOGRAPH:'D776:55158:莦
CJK UNIFIED IDEOGRAPH:'D777:55159:莇
CJK UNIFIED IDEOGRAPH:'D778:55160:莮
CJK UNIFIED IDEOGRAPH:'D779:55161:荶
CJK UNIFIED IDEOGRAPH:'D77A:55162:莚
CJK UNIFIED IDEOGRAPH:'D77B:55163:虙
CJK UNIFIED IDEOGRAPH:'D77C:55164:虖
CJK UNIFIED IDEOGRAPH:'D77D:55165:蚿
CJK UNIFIED IDEOGRAPH:'D77E:55166:蚷
CJK UNIFIED IDEOGRAPH:'D7A1:55201:蛂
CJK UNIFIED IDEOGRAPH:'D7A2:55202:蛁
CJK UNIFIED IDEOGRAPH:'D7A3:55203:蛅
CJK UNIFIED IDEOGRAPH:'D7A4:55204:蚺
CJK UNIFIED IDEOGRAPH:'D7A5:55205:蚰
CJK UNIFIED IDEOGRAPH:'D7A6:55206:蛈
CJK UNIFIED IDEOGRAPH:'D7A7:55207:蚹
CJK UNIFIED IDEOGRAPH:'D7A8:55208:蚳
CJK UNIFIED IDEOGRAPH:'D7A9:55209:蚸
CJK UNIFIED IDEOGRAPH:'D7AA:55210:蛌
CJK UNIFIED IDEOGRAPH:'D7AB:55211:蚴
CJK UNIFIED IDEOGRAPH:'D7AC:55212:蚻
CJK UNIFIED IDEOGRAPH:'D7AD:55213:蚼
CJK UNIFIED IDEOGRAPH:'D7AE:55214:蛃
CJK UNIFIED IDEOGRAPH:'D7AF:55215:蚽
CJK UNIFIED IDEOGRAPH:'D7B0:55216:蚾
CJK UNIFIED IDEOGRAPH:'D7B1:55217:衒
CJK UNIFIED IDEOGRAPH:'D7B2:55218:袉
CJK UNIFIED IDEOGRAPH:'D7B3:55219:袕
CJK UNIFIED IDEOGRAPH:'D7B4:55220:袨
CJK UNIFIED IDEOGRAPH:'D7B5:55221:袢
CJK UNIFIED IDEOGRAPH:'D7B6:55222:袪
CJK UNIFIED IDEOGRAPH:'D7B7:55223:袚
CJK UNIFIED IDEOGRAPH:'D7B8:55224:袑
CJK UNIFIED IDEOGRAPH:'D7B9:55225:袡
CJK UNIFIED IDEOGRAPH:'D7BA:55226:袟
CJK UNIFIED IDEOGRAPH:'D7BB:55227:袘
CJK UNIFIED IDEOGRAPH:'D7BC:55228:袧
CJK UNIFIED IDEOGRAPH:'D7BD:55229:袙
CJK UNIFIED IDEOGRAPH:'D7BE:55230:袛
CJK UNIFIED IDEOGRAPH:'D7BF:55231:袗
CJK UNIFIED IDEOGRAPH:'D7C0:55232:袤
CJK UNIFIED IDEOGRAPH:'D7C1:55233:袬
CJK UNIFIED IDEOGRAPH:'D7C2:55234:袌
CJK UNIFIED IDEOGRAPH:'D7C3:55235:袓
CJK UNIFIED IDEOGRAPH:'D7C4:55236:袎
CJK UNIFIED IDEOGRAPH:'D7C5:55237:覂
CJK UNIFIED IDEOGRAPH:'D7C6:55238:觖
CJK UNIFIED IDEOGRAPH:'D7C7:55239:觙
CJK UNIFIED IDEOGRAPH:'D7C8:55240:觕
CJK UNIFIED IDEOGRAPH:'D7C9:55241:訰
CJK UNIFIED IDEOGRAPH:'D7CA:55242:訧
CJK UNIFIED IDEOGRAPH:'D7CB:55243:訬
CJK UNIFIED IDEOGRAPH:'D7CC:55244:訞
CJK UNIFIED IDEOGRAPH:'D7CD:55245:谹
CJK UNIFIED IDEOGRAPH:'D7CE:55246:谻
CJK UNIFIED IDEOGRAPH:'D7CF:55247:豜
CJK UNIFIED IDEOGRAPH:'D7D0:55248:豝
CJK UNIFIED IDEOGRAPH:'D7D1:55249:豽
CJK UNIFIED IDEOGRAPH:'D7D2:55250:貥
CJK UNIFIED IDEOGRAPH:'D7D3:55251:赽
CJK UNIFIED IDEOGRAPH:'D7D4:55252:赻
CJK UNIFIED IDEOGRAPH:'D7D5:55253:赹
CJK UNIFIED IDEOGRAPH:'D7D6:55254:趼
CJK UNIFIED IDEOGRAPH:'D7D7:55255:跂
CJK UNIFIED IDEOGRAPH:'D7D8:55256:趹
CJK UNIFIED IDEOGRAPH:'D7D9:55257:趿
CJK UNIFIED IDEOGRAPH:'D7DA:55258:跁
CJK UNIFIED IDEOGRAPH:'D7DB:55259:軘
CJK UNIFIED IDEOGRAPH:'D7DC:55260:軞
CJK UNIFIED IDEOGRAPH:'D7DD:55261:軝
CJK UNIFIED IDEOGRAPH:'D7DE:55262:軜
CJK UNIFIED IDEOGRAPH:'D7DF:55263:軗
CJK UNIFIED IDEOGRAPH:'D7E0:55264:軠
CJK UNIFIED IDEOGRAPH:'D7E1:55265:軡
CJK UNIFIED IDEOGRAPH:'D7E2:55266:逤
CJK UNIFIED IDEOGRAPH:'D7E3:55267:逋
CJK UNIFIED IDEOGRAPH:'D7E4:55268:逑
CJK UNIFIED IDEOGRAPH:'D7E5:55269:逜
CJK UNIFIED IDEOGRAPH:'D7E6:55270:逌
CJK UNIFIED IDEOGRAPH:'D7E7:55271:逡
CJK UNIFIED IDEOGRAPH:'D7E8:55272:郯
CJK UNIFIED IDEOGRAPH:'D7E9:55273:郪
CJK UNIFIED IDEOGRAPH:'D7EA:55274:郰
CJK UNIFIED IDEOGRAPH:'D7EB:55275:郴
CJK UNIFIED IDEOGRAPH:'D7EC:55276:郲
CJK UNIFIED IDEOGRAPH:'D7ED:55277:郳
CJK UNIFIED IDEOGRAPH:'D7EE:55278:郔
CJK UNIFIED IDEOGRAPH:'D7EF:55279:郫
CJK UNIFIED IDEOGRAPH:'D7F0:55280:郬
CJK UNIFIED IDEOGRAPH:'D7F1:55281:郩
CJK UNIFIED IDEOGRAPH:'D7F2:55282:酖
CJK UNIFIED IDEOGRAPH:'D7F3:55283:酘
CJK UNIFIED IDEOGRAPH:'D7F4:55284:酚
CJK UNIFIED IDEOGRAPH:'D7F5:55285:酓
CJK UNIFIED IDEOGRAPH:'D7F6:55286:酕
CJK UNIFIED IDEOGRAPH:'D7F7:55287:釬
CJK UNIFIED IDEOGRAPH:'D7F8:55288:釴
CJK UNIFIED IDEOGRAPH:'D7F9:55289:釱
CJK UNIFIED IDEOGRAPH:'D7FA:55290:釳
CJK UNIFIED IDEOGRAPH:'D7FB:55291:釸
CJK UNIFIED IDEOGRAPH:'D7FC:55292:釤
CJK UNIFIED IDEOGRAPH:'D7FD:55293:釹
CJK UNIFIED IDEOGRAPH:'D7FE:55294:釪
CJK UNIFIED IDEOGRAPH:'D840:55360:釫
CJK UNIFIED IDEOGRAPH:'D841:55361:釷
CJK UNIFIED IDEOGRAPH:'D842:55362:釨
CJK UNIFIED IDEOGRAPH:'D843:55363:釮
CJK UNIFIED IDEOGRAPH:'D844:55364:镺
CJK UNIFIED IDEOGRAPH:'D845:55365:閆
CJK UNIFIED IDEOGRAPH:'D846:55366:閈
CJK UNIFIED IDEOGRAPH:'D847:55367:陼
CJK UNIFIED IDEOGRAPH:'D848:55368:陭
CJK UNIFIED IDEOGRAPH:'D849:55369:陫
CJK UNIFIED IDEOGRAPH:'D84A:55370:陱
CJK UNIFIED IDEOGRAPH:'D84B:55371:陯
CJK UNIFIED IDEOGRAPH:'D84C:55372:隿
CJK UNIFIED IDEOGRAPH:'D84D:55373:靪
CJK UNIFIED IDEOGRAPH:'D84E:55374:頄
CJK UNIFIED IDEOGRAPH:'D84F:55375:飥
CJK UNIFIED IDEOGRAPH:'D850:55376:馗
CJK UNIFIED IDEOGRAPH:'D851:55377:傛
CJK UNIFIED IDEOGRAPH:'D852:55378:傕
CJK UNIFIED IDEOGRAPH:'D853:55379:傔
CJK UNIFIED IDEOGRAPH:'D854:55380:傞
CJK UNIFIED IDEOGRAPH:'D855:55381:傋
CJK UNIFIED IDEOGRAPH:'D856:55382:傣
CJK UNIFIED IDEOGRAPH:'D857:55383:傃
CJK UNIFIED IDEOGRAPH:'D858:55384:傌
CJK UNIFIED IDEOGRAPH:'D859:55385:傎
CJK UNIFIED IDEOGRAPH:'D85A:55386:傝
CJK UNIFIED IDEOGRAPH:'D85B:55387:偨
CJK UNIFIED IDEOGRAPH:'D85C:55388:傜
CJK UNIFIED IDEOGRAPH:'D85D:55389:傒
CJK UNIFIED IDEOGRAPH:'D85E:55390:傂
CJK UNIFIED IDEOGRAPH:'D85F:55391:傇
CJK UNIFIED IDEOGRAPH:'D860:55392:兟
CJK UNIFIED IDEOGRAPH:'D861:55393:凔
CJK UNIFIED IDEOGRAPH:'D862:55394:匒
CJK UNIFIED IDEOGRAPH:'D863:55395:匑
CJK UNIFIED IDEOGRAPH:'D864:55396:厤
CJK UNIFIED IDEOGRAPH:'D865:55397:厧
CJK UNIFIED IDEOGRAPH:'D866:55398:喑
CJK UNIFIED IDEOGRAPH:'D867:55399:喨
CJK UNIFIED IDEOGRAPH:'D868:55400:喥
CJK UNIFIED IDEOGRAPH:'D869:55401:喭
CJK UNIFIED IDEOGRAPH:'D86A:55402:啷
CJK UNIFIED IDEOGRAPH:'D86B:55403:噅
CJK UNIFIED IDEOGRAPH:'D86C:55404:喢
CJK UNIFIED IDEOGRAPH:'D86D:55405:喓
CJK UNIFIED IDEOGRAPH:'D86E:55406:喈
CJK UNIFIED IDEOGRAPH:'D86F:55407:喏
CJK UNIFIED IDEOGRAPH:'D870:55408:喵
CJK UNIFIED IDEOGRAPH:'D871:55409:喁
CJK UNIFIED IDEOGRAPH:'D872:55410:喣
CJK UNIFIED IDEOGRAPH:'D873:55411:喒
CJK UNIFIED IDEOGRAPH:'D874:55412:喤
CJK UNIFIED IDEOGRAPH:'D875:55413:啽
CJK UNIFIED IDEOGRAPH:'D876:55414:喌
CJK UNIFIED IDEOGRAPH:'D877:55415:喦
CJK UNIFIED IDEOGRAPH:'D878:55416:啿
CJK UNIFIED IDEOGRAPH:'D879:55417:喕
CJK UNIFIED IDEOGRAPH:'D87A:55418:喡
CJK UNIFIED IDEOGRAPH:'D87B:55419:喎
CJK UNIFIED IDEOGRAPH:'D87C:55420:圌
CJK UNIFIED IDEOGRAPH:'D87D:55421:堩
CJK UNIFIED IDEOGRAPH:'D87E:55422:堷
CJK UNIFIED IDEOGRAPH:'D8A1:55457:堙
CJK UNIFIED IDEOGRAPH:'D8A2:55458:堞
CJK UNIFIED IDEOGRAPH:'D8A3:55459:堧
CJK UNIFIED IDEOGRAPH:'D8A4:55460:堣
CJK UNIFIED IDEOGRAPH:'D8A5:55461:堨
CJK UNIFIED IDEOGRAPH:'D8A6:55462:埵
CJK UNIFIED IDEOGRAPH:'D8A7:55463:塈
CJK UNIFIED IDEOGRAPH:'D8A8:55464:堥
CJK UNIFIED IDEOGRAPH:'D8A9:55465:堜
CJK UNIFIED IDEOGRAPH:'D8AA:55466:堛
CJK UNIFIED IDEOGRAPH:'D8AB:55467:堳
CJK UNIFIED IDEOGRAPH:'D8AC:55468:堿
CJK UNIFIED IDEOGRAPH:'D8AD:55469:堶
CJK UNIFIED IDEOGRAPH:'D8AE:55470:堮
CJK UNIFIED IDEOGRAPH:'D8AF:55471:堹
CJK UNIFIED IDEOGRAPH:'D8B0:55472:堸
CJK UNIFIED IDEOGRAPH:'D8B1:55473:堭
CJK UNIFIED IDEOGRAPH:'D8B2:55474:堬
CJK UNIFIED IDEOGRAPH:'D8B3:55475:堻
CJK UNIFIED IDEOGRAPH:'D8B4:55476:奡
CJK UNIFIED IDEOGRAPH:'D8B5:55477:媯
CJK UNIFIED IDEOGRAPH:'D8B6:55478:媔
CJK UNIFIED IDEOGRAPH:'D8B7:55479:媟
CJK UNIFIED IDEOGRAPH:'D8B8:55480:婺
CJK UNIFIED IDEOGRAPH:'D8B9:55481:媢
CJK UNIFIED IDEOGRAPH:'D8BA:55482:媞
CJK UNIFIED IDEOGRAPH:'D8BB:55483:婸
CJK UNIFIED IDEOGRAPH:'D8BC:55484:媦
CJK UNIFIED IDEOGRAPH:'D8BD:55485:婼
CJK UNIFIED IDEOGRAPH:'D8BE:55486:媥
CJK UNIFIED IDEOGRAPH:'D8BF:55487:媬
CJK UNIFIED IDEOGRAPH:'D8C0:55488:媕
CJK UNIFIED IDEOGRAPH:'D8C1:55489:媮
CJK UNIFIED IDEOGRAPH:'D8C2:55490:娷
CJK UNIFIED IDEOGRAPH:'D8C3:55491:媄
CJK UNIFIED IDEOGRAPH:'D8C4:55492:媊
CJK UNIFIED IDEOGRAPH:'D8C5:55493:媗
CJK UNIFIED IDEOGRAPH:'D8C6:55494:媃
CJK UNIFIED IDEOGRAPH:'D8C7:55495:媋
CJK UNIFIED IDEOGRAPH:'D8C8:55496:媩
CJK UNIFIED IDEOGRAPH:'D8C9:55497:婻
CJK UNIFIED IDEOGRAPH:'D8CA:55498:婽
CJK UNIFIED IDEOGRAPH:'D8CB:55499:媌
CJK UNIFIED IDEOGRAPH:'D8CC:55500:媜
CJK UNIFIED IDEOGRAPH:'D8CD:55501:媏
CJK UNIFIED IDEOGRAPH:'D8CE:55502:媓
CJK UNIFIED IDEOGRAPH:'D8CF:55503:媝
CJK UNIFIED IDEOGRAPH:'D8D0:55504:寪
CJK UNIFIED IDEOGRAPH:'D8D1:55505:寍
CJK UNIFIED IDEOGRAPH:'D8D2:55506:寋
CJK UNIFIED IDEOGRAPH:'D8D3:55507:寔
CJK UNIFIED IDEOGRAPH:'D8D4:55508:寑
CJK UNIFIED IDEOGRAPH:'D8D5:55509:寊
CJK UNIFIED IDEOGRAPH:'D8D6:55510:寎
CJK UNIFIED IDEOGRAPH:'D8D7:55511:尌
CJK UNIFIED IDEOGRAPH:'D8D8:55512:尰
CJK UNIFIED IDEOGRAPH:'D8D9:55513:崷
CJK UNIFIED IDEOGRAPH:'D8DA:55514:嵃
CJK UNIFIED IDEOGRAPH:'D8DB:55515:嵫
CJK UNIFIED IDEOGRAPH:'D8DC:55516:嵁
CJK UNIFIED IDEOGRAPH:'D8DD:55517:嵋
CJK UNIFIED IDEOGRAPH:'D8DE:55518:崿
CJK UNIFIED IDEOGRAPH:'D8DF:55519:崵
CJK UNIFIED IDEOGRAPH:'D8E0:55520:嵑
CJK UNIFIED IDEOGRAPH:'D8E1:55521:嵎
CJK UNIFIED IDEOGRAPH:'D8E2:55522:嵕
CJK UNIFIED IDEOGRAPH:'D8E3:55523:崳
CJK UNIFIED IDEOGRAPH:'D8E4:55524:崺
CJK UNIFIED IDEOGRAPH:'D8E5:55525:嵒
CJK UNIFIED IDEOGRAPH:'D8E6:55526:崽
CJK UNIFIED IDEOGRAPH:'D8E7:55527:崱
CJK UNIFIED IDEOGRAPH:'D8E8:55528:嵙
CJK UNIFIED IDEOGRAPH:'D8E9:55529:嵂
CJK UNIFIED IDEOGRAPH:'D8EA:55530:崹
CJK UNIFIED IDEOGRAPH:'D8EB:55531:嵉
CJK UNIFIED IDEOGRAPH:'D8EC:55532:崸
CJK UNIFIED IDEOGRAPH:'D8ED:55533:崼
CJK UNIFIED IDEOGRAPH:'D8EE:55534:崲
CJK UNIFIED IDEOGRAPH:'D8EF:55535:崶
CJK UNIFIED IDEOGRAPH:'D8F0:55536:嵀
CJK UNIFIED IDEOGRAPH:'D8F1:55537:嵅
CJK UNIFIED IDEOGRAPH:'D8F2:55538:幄
CJK UNIFIED IDEOGRAPH:'D8F3:55539:幁
CJK UNIFIED IDEOGRAPH:'D8F4:55540:彘
CJK UNIFIED IDEOGRAPH:'D8F5:55541:徦
CJK UNIFIED IDEOGRAPH:'D8F6:55542:徥
CJK UNIFIED IDEOGRAPH:'D8F7:55543:徫
CJK UNIFIED IDEOGRAPH:'D8F8:55544:惉
CJK UNIFIED IDEOGRAPH:'D8F9:55545:悹
CJK UNIFIED IDEOGRAPH:'D8FA:55546:惌
CJK UNIFIED IDEOGRAPH:'D8FB:55547:惢
CJK UNIFIED IDEOGRAPH:'D8FC:55548:惎
CJK UNIFIED IDEOGRAPH:'D8FD:55549:惄
CJK UNIFIED IDEOGRAPH:'D8FE:55550:愔
CJK UNIFIED IDEOGRAPH:'D940:55616:惲
CJK UNIFIED IDEOGRAPH:'D941:55617:愊
CJK UNIFIED IDEOGRAPH:'D942:55618:愖
CJK UNIFIED IDEOGRAPH:'D943:55619:愅
CJK UNIFIED IDEOGRAPH:'D944:55620:惵
CJK UNIFIED IDEOGRAPH:'D945:55621:愓
CJK UNIFIED IDEOGRAPH:'D946:55622:惸
CJK UNIFIED IDEOGRAPH:'D947:55623:惼
CJK UNIFIED IDEOGRAPH:'D948:55624:惾
CJK UNIFIED IDEOGRAPH:'D949:55625:惁
CJK UNIFIED IDEOGRAPH:'D94A:55626:愃
CJK UNIFIED IDEOGRAPH:'D94B:55627:愘
CJK UNIFIED IDEOGRAPH:'D94C:55628:愝
CJK UNIFIED IDEOGRAPH:'D94D:55629:愐
CJK UNIFIED IDEOGRAPH:'D94E:55630:惿
CJK UNIFIED IDEOGRAPH:'D94F:55631:愄
CJK UNIFIED IDEOGRAPH:'D950:55632:愋
CJK UNIFIED IDEOGRAPH:'D951:55633:扊
CJK UNIFIED IDEOGRAPH:'D952:55634:掔
CJK UNIFIED IDEOGRAPH:'D953:55635:掱
CJK UNIFIED IDEOGRAPH:'D954:55636:掰
CJK UNIFIED IDEOGRAPH:'D955:55637:揎
CJK UNIFIED IDEOGRAPH:'D956:55638:揥
CJK UNIFIED IDEOGRAPH:'D957:55639:揨
CJK UNIFIED IDEOGRAPH:'D958:55640:揯
CJK UNIFIED IDEOGRAPH:'D959:55641:揃
CJK UNIFIED IDEOGRAPH:'D95A:55642:撝
CJK UNIFIED IDEOGRAPH:'D95B:55643:揳
CJK UNIFIED IDEOGRAPH:'D95C:55644:揊
CJK UNIFIED IDEOGRAPH:'D95D:55645:揠
CJK UNIFIED IDEOGRAPH:'D95E:55646:揶
CJK UNIFIED IDEOGRAPH:'D95F:55647:揕
CJK UNIFIED IDEOGRAPH:'D960:55648:揲
CJK UNIFIED IDEOGRAPH:'D961:55649:揵
CJK UNIFIED IDEOGRAPH:'D962:55650:摡
CJK UNIFIED IDEOGRAPH:'D963:55651:揟
CJK UNIFIED IDEOGRAPH:'D964:55652:掾
CJK UNIFIED IDEOGRAPH:'D965:55653:揝
CJK UNIFIED IDEOGRAPH:'D966:55654:揜
CJK UNIFIED IDEOGRAPH:'D967:55655:揄
CJK UNIFIED IDEOGRAPH:'D968:55656:揘
CJK UNIFIED IDEOGRAPH:'D969:55657:揓
CJK UNIFIED IDEOGRAPH:'D96A:55658:揂
CJK UNIFIED IDEOGRAPH:'D96B:55659:揇
CJK UNIFIED IDEOGRAPH:'D96C:55660:揌
CJK UNIFIED IDEOGRAPH:'D96D:55661:揋
CJK UNIFIED IDEOGRAPH:'D96E:55662:揈
CJK UNIFIED IDEOGRAPH:'D96F:55663:揰
CJK UNIFIED IDEOGRAPH:'D970:55664:揗
CJK UNIFIED IDEOGRAPH:'D971:55665:揙
CJK UNIFIED IDEOGRAPH:'D972:55666:攲
CJK UNIFIED IDEOGRAPH:'D973:55667:敧
CJK UNIFIED IDEOGRAPH:'D974:55668:敪
CJK UNIFIED IDEOGRAPH:'D975:55669:敤
CJK UNIFIED IDEOGRAPH:'D976:55670:敜
CJK UNIFIED IDEOGRAPH:'D977:55671:敨
CJK UNIFIED IDEOGRAPH:'D978:55672:敥
CJK UNIFIED IDEOGRAPH:'D979:55673:斌
CJK UNIFIED IDEOGRAPH:'D97A:55674:斝
CJK UNIFIED IDEOGRAPH:'D97B:55675:斞
CJK UNIFIED IDEOGRAPH:'D97C:55676:斮
CJK UNIFIED IDEOGRAPH:'D97D:55677:旐
CJK UNIFIED IDEOGRAPH:'D97E:55678:旒
CJK UNIFIED IDEOGRAPH:'D9A1:55713:晼
CJK UNIFIED IDEOGRAPH:'D9A2:55714:晬
CJK UNIFIED IDEOGRAPH:'D9A3:55715:晻
CJK UNIFIED IDEOGRAPH:'D9A4:55716:暀
CJK UNIFIED IDEOGRAPH:'D9A5:55717:晱
CJK UNIFIED IDEOGRAPH:'D9A6:55718:晹
CJK UNIFIED IDEOGRAPH:'D9A7:55719:晪
CJK UNIFIED IDEOGRAPH:'D9A8:55720:晲
CJK UNIFIED IDEOGRAPH:'D9A9:55721:朁
CJK UNIFIED IDEOGRAPH:'D9AA:55722:椌
CJK UNIFIED IDEOGRAPH:'D9AB:55723:棓
CJK UNIFIED IDEOGRAPH:'D9AC:55724:椄
CJK UNIFIED IDEOGRAPH:'D9AD:55725:棜
CJK UNIFIED IDEOGRAPH:'D9AE:55726:椪
CJK UNIFIED IDEOGRAPH:'D9AF:55727:棬
CJK UNIFIED IDEOGRAPH:'D9B0:55728:棪
CJK UNIFIED IDEOGRAPH:'D9B1:55729:棱
CJK UNIFIED IDEOGRAPH:'D9B2:55730:椏
CJK UNIFIED IDEOGRAPH:'D9B3:55731:棖
CJK UNIFIED IDEOGRAPH:'D9B4:55732:棷
CJK UNIFIED IDEOGRAPH:'D9B5:55733:棫
CJK UNIFIED IDEOGRAPH:'D9B6:55734:棤
CJK UNIFIED IDEOGRAPH:'D9B7:55735:棶
CJK UNIFIED IDEOGRAPH:'D9B8:55736:椓
CJK UNIFIED IDEOGRAPH:'D9B9:55737:椐
CJK UNIFIED IDEOGRAPH:'D9BA:55738:棳
CJK UNIFIED IDEOGRAPH:'D9BB:55739:棡
CJK UNIFIED IDEOGRAPH:'D9BC:55740:椇
CJK UNIFIED IDEOGRAPH:'D9BD:55741:棌
CJK UNIFIED IDEOGRAPH:'D9BE:55742:椈
CJK UNIFIED IDEOGRAPH:'D9BF:55743:楰
CJK UNIFIED IDEOGRAPH:'D9C0:55744:梴
CJK UNIFIED IDEOGRAPH:'D9C1:55745:椑
CJK UNIFIED IDEOGRAPH:'D9C2:55746:棯
CJK UNIFIED IDEOGRAPH:'D9C3:55747:棆
CJK UNIFIED IDEOGRAPH:'D9C4:55748:椔
CJK UNIFIED IDEOGRAPH:'D9C5:55749:棸
CJK UNIFIED IDEOGRAPH:'D9C6:55750:棐
CJK UNIFIED IDEOGRAPH:'D9C7:55751:棽
CJK UNIFIED IDEOGRAPH:'D9C8:55752:棼
CJK UNIFIED IDEOGRAPH:'D9C9:55753:棨
CJK UNIFIED IDEOGRAPH:'D9CA:55754:椋
CJK UNIFIED IDEOGRAPH:'D9CB:55755:椊
CJK UNIFIED IDEOGRAPH:'D9CC:55756:椗
CJK UNIFIED IDEOGRAPH:'D9CD:55757:棎
CJK UNIFIED IDEOGRAPH:'D9CE:55758:棈
CJK UNIFIED IDEOGRAPH:'D9CF:55759:棝
CJK UNIFIED IDEOGRAPH:'D9D0:55760:棞
CJK UNIFIED IDEOGRAPH:'D9D1:55761:棦
CJK UNIFIED IDEOGRAPH:'D9D2:55762:棴
CJK UNIFIED IDEOGRAPH:'D9D3:55763:棑
CJK UNIFIED IDEOGRAPH:'D9D4:55764:椆
CJK UNIFIED IDEOGRAPH:'D9D5:55765:棔
CJK UNIFIED IDEOGRAPH:'D9D6:55766:棩
CJK UNIFIED IDEOGRAPH:'D9D7:55767:椕
CJK UNIFIED IDEOGRAPH:'D9D8:55768:椥
CJK UNIFIED IDEOGRAPH:'D9D9:55769:棇
CJK UNIFIED IDEOGRAPH:'D9DA:55770:欹
CJK UNIFIED IDEOGRAPH:'D9DB:55771:欻
CJK UNIFIED IDEOGRAPH:'D9DC:55772:欿
CJK UNIFIED IDEOGRAPH:'D9DD:55773:欼
CJK UNIFIED IDEOGRAPH:'D9DE:55774:殔
CJK UNIFIED IDEOGRAPH:'D9DF:55775:殗
CJK UNIFIED IDEOGRAPH:'D9E0:55776:殙
CJK UNIFIED IDEOGRAPH:'D9E1:55777:殕
CJK UNIFIED IDEOGRAPH:'D9E2:55778:殽
CJK UNIFIED IDEOGRAPH:'D9E3:55779:毰
CJK UNIFIED IDEOGRAPH:'D9E4:55780:毲
CJK UNIFIED IDEOGRAPH:'D9E5:55781:毳
CJK UNIFIED IDEOGRAPH:'D9E6:55782:氰
CJK UNIFIED IDEOGRAPH:'D9E7:55783:淼
CJK UNIFIED IDEOGRAPH:'D9E8:55784:湆
CJK UNIFIED IDEOGRAPH:'D9E9:55785:湇
CJK UNIFIED IDEOGRAPH:'D9EA:55786:渟
CJK UNIFIED IDEOGRAPH:'D9EB:55787:湉
CJK UNIFIED IDEOGRAPH:'D9EC:55788:溈
CJK UNIFIED IDEOGRAPH:'D9ED:55789:渼
CJK UNIFIED IDEOGRAPH:'D9EE:55790:渽
CJK UNIFIED IDEOGRAPH:'D9EF:55791:湅
CJK UNIFIED IDEOGRAPH:'D9F0:55792:湢
CJK UNIFIED IDEOGRAPH:'D9F1:55793:渫
CJK UNIFIED IDEOGRAPH:'D9F2:55794:渿
CJK UNIFIED IDEOGRAPH:'D9F3:55795:湁
CJK UNIFIED IDEOGRAPH:'D9F4:55796:湝
CJK UNIFIED IDEOGRAPH:'D9F5:55797:湳
CJK UNIFIED IDEOGRAPH:'D9F6:55798:渜
CJK UNIFIED IDEOGRAPH:'D9F7:55799:渳
CJK UNIFIED IDEOGRAPH:'D9F8:55800:湋
CJK UNIFIED IDEOGRAPH:'D9F9:55801:湀
CJK UNIFIED IDEOGRAPH:'D9FA:55802:湑
CJK UNIFIED IDEOGRAPH:'D9FB:55803:渻
CJK UNIFIED IDEOGRAPH:'D9FC:55804:渃
CJK UNIFIED IDEOGRAPH:'D9FD:55805:渮
CJK UNIFIED IDEOGRAPH:'D9FE:55806:湞
CJK UNIFIED IDEOGRAPH:'DA40:55872:湨
CJK UNIFIED IDEOGRAPH:'DA41:55873:湜
CJK UNIFIED IDEOGRAPH:'DA42:55874:湡
CJK UNIFIED IDEOGRAPH:'DA43:55875:渱
CJK UNIFIED IDEOGRAPH:'DA44:55876:渨
CJK UNIFIED IDEOGRAPH:'DA45:55877:湠
CJK UNIFIED IDEOGRAPH:'DA46:55878:湱
CJK UNIFIED IDEOGRAPH:'DA47:55879:湫
CJK UNIFIED IDEOGRAPH:'DA48:55880:渹
CJK UNIFIED IDEOGRAPH:'DA49:55881:渢
CJK UNIFIED IDEOGRAPH:'DA4A:55882:渰
CJK UNIFIED IDEOGRAPH:'DA4B:55883:湓
CJK UNIFIED IDEOGRAPH:'DA4C:55884:湥
CJK UNIFIED IDEOGRAPH:'DA4D:55885:渧
CJK UNIFIED IDEOGRAPH:'DA4E:55886:湸
CJK UNIFIED IDEOGRAPH:'DA4F:55887:湤
CJK UNIFIED IDEOGRAPH:'DA50:55888:湷
CJK UNIFIED IDEOGRAPH:'DA51:55889:湕
CJK UNIFIED IDEOGRAPH:'DA52:55890:湹
CJK UNIFIED IDEOGRAPH:'DA53:55891:湒
CJK UNIFIED IDEOGRAPH:'DA54:55892:湦
CJK UNIFIED IDEOGRAPH:'DA55:55893:渵
CJK UNIFIED IDEOGRAPH:'DA56:55894:渶
CJK UNIFIED IDEOGRAPH:'DA57:55895:湚
CJK UNIFIED IDEOGRAPH:'DA58:55896:焠
CJK UNIFIED IDEOGRAPH:'DA59:55897:焞
CJK UNIFIED IDEOGRAPH:'DA5A:55898:焯
CJK UNIFIED IDEOGRAPH:'DA5B:55899:烻
CJK UNIFIED IDEOGRAPH:'DA5C:55900:焮
CJK UNIFIED IDEOGRAPH:'DA5D:55901:焱
CJK UNIFIED IDEOGRAPH:'DA5E:55902:焣
CJK UNIFIED IDEOGRAPH:'DA5F:55903:焥
CJK UNIFIED IDEOGRAPH:'DA60:55904:焢
CJK UNIFIED IDEOGRAPH:'DA61:55905:焲
CJK UNIFIED IDEOGRAPH:'DA62:55906:焟
CJK UNIFIED IDEOGRAPH:'DA63:55907:焨
CJK UNIFIED IDEOGRAPH:'DA64:55908:焺
CJK UNIFIED IDEOGRAPH:'DA65:55909:焛
CJK UNIFIED IDEOGRAPH:'DA66:55910:牋
CJK UNIFIED IDEOGRAPH:'DA67:55911:牚
CJK UNIFIED IDEOGRAPH:'DA68:55912:犈
CJK UNIFIED IDEOGRAPH:'DA69:55913:犉
CJK UNIFIED IDEOGRAPH:'DA6A:55914:犆
CJK UNIFIED IDEOGRAPH:'DA6B:55915:犅
CJK UNIFIED IDEOGRAPH:'DA6C:55916:犋
CJK UNIFIED IDEOGRAPH:'DA6D:55917:猒
CJK UNIFIED IDEOGRAPH:'DA6E:55918:猋
CJK UNIFIED IDEOGRAPH:'DA6F:55919:猰
CJK UNIFIED IDEOGRAPH:'DA70:55920:猢
CJK UNIFIED IDEOGRAPH:'DA71:55921:猱
CJK UNIFIED IDEOGRAPH:'DA72:55922:猳
CJK UNIFIED IDEOGRAPH:'DA73:55923:猧
CJK UNIFIED IDEOGRAPH:'DA74:55924:猲
CJK UNIFIED IDEOGRAPH:'DA75:55925:猭
CJK UNIFIED IDEOGRAPH:'DA76:55926:猦
CJK UNIFIED IDEOGRAPH:'DA77:55927:猣
CJK UNIFIED IDEOGRAPH:'DA78:55928:猵
CJK UNIFIED IDEOGRAPH:'DA79:55929:猌
CJK UNIFIED IDEOGRAPH:'DA7A:55930:琮
CJK UNIFIED IDEOGRAPH:'DA7B:55931:琬
CJK UNIFIED IDEOGRAPH:'DA7C:55932:琰
CJK UNIFIED IDEOGRAPH:'DA7D:55933:琫
CJK UNIFIED IDEOGRAPH:'DA7E:55934:琖
CJK UNIFIED IDEOGRAPH:'DAA1:55969:琚
CJK UNIFIED IDEOGRAPH:'DAA2:55970:琡
CJK UNIFIED IDEOGRAPH:'DAA3:55971:琭
CJK UNIFIED IDEOGRAPH:'DAA4:55972:琱
CJK UNIFIED IDEOGRAPH:'DAA5:55973:琤
CJK UNIFIED IDEOGRAPH:'DAA6:55974:琣
CJK UNIFIED IDEOGRAPH:'DAA7:55975:琝
CJK UNIFIED IDEOGRAPH:'DAA8:55976:琩
CJK UNIFIED IDEOGRAPH:'DAA9:55977:琠
CJK UNIFIED IDEOGRAPH:'DAAA:55978:琲
CJK UNIFIED IDEOGRAPH:'DAAB:55979:瓻
CJK UNIFIED IDEOGRAPH:'DAAC:55980:甯
CJK UNIFIED IDEOGRAPH:'DAAD:55981:畯
CJK UNIFIED IDEOGRAPH:'DAAE:55982:畬
CJK UNIFIED IDEOGRAPH:'DAAF:55983:痧
CJK UNIFIED IDEOGRAPH:'DAB0:55984:痚
CJK UNIFIED IDEOGRAPH:'DAB1:55985:痡
CJK UNIFIED IDEOGRAPH:'DAB2:55986:痦
CJK UNIFIED IDEOGRAPH:'DAB3:55987:痝
CJK UNIFIED IDEOGRAPH:'DAB4:55988:痟
CJK UNIFIED IDEOGRAPH:'DAB5:55989:痤
CJK UNIFIED IDEOGRAPH:'DAB6:55990:痗
CJK UNIFIED IDEOGRAPH:'DAB7:55991:皕
CJK UNIFIED IDEOGRAPH:'DAB8:55992:皒
CJK UNIFIED IDEOGRAPH:'DAB9:55993:盚
CJK UNIFIED IDEOGRAPH:'DABA:55994:睆
CJK UNIFIED IDEOGRAPH:'DABB:55995:睇
CJK UNIFIED IDEOGRAPH:'DABC:55996:睄
CJK UNIFIED IDEOGRAPH:'DABD:55997:睍
CJK UNIFIED IDEOGRAPH:'DABE:55998:睅
CJK UNIFIED IDEOGRAPH:'DABF:55999:睊
CJK UNIFIED IDEOGRAPH:'DAC0:56000:睎
CJK UNIFIED IDEOGRAPH:'DAC1:56001:睋
CJK UNIFIED IDEOGRAPH:'DAC2:56002:睌
CJK UNIFIED IDEOGRAPH:'DAC3:56003:矞
CJK UNIFIED IDEOGRAPH:'DAC4:56004:矬
CJK UNIFIED IDEOGRAPH:'DAC5:56005:硠
CJK UNIFIED IDEOGRAPH:'DAC6:56006:硤
CJK UNIFIED IDEOGRAPH:'DAC7:56007:硥
CJK UNIFIED IDEOGRAPH:'DAC8:56008:硜
CJK UNIFIED IDEOGRAPH:'DAC9:56009:硭
CJK UNIFIED IDEOGRAPH:'DACA:56010:硱
CJK UNIFIED IDEOGRAPH:'DACB:56011:硪
CJK UNIFIED IDEOGRAPH:'DACC:56012:确
CJK UNIFIED IDEOGRAPH:'DACD:56013:硰
CJK UNIFIED IDEOGRAPH:'DACE:56014:硩
CJK UNIFIED IDEOGRAPH:'DACF:56015:硨
CJK UNIFIED IDEOGRAPH:'DAD0:56016:硞
CJK UNIFIED IDEOGRAPH:'DAD1:56017:硢
CJK UNIFIED IDEOGRAPH:'DAD2:56018:祴
CJK UNIFIED IDEOGRAPH:'DAD3:56019:祳
CJK UNIFIED IDEOGRAPH:'DAD4:56020:祲
CJK UNIFIED IDEOGRAPH:'DAD5:56021:祰
CJK UNIFIED IDEOGRAPH:'DAD6:56022:稂
CJK UNIFIED IDEOGRAPH:'DAD7:56023:稊
CJK UNIFIED IDEOGRAPH:'DAD8:56024:稃
CJK UNIFIED IDEOGRAPH:'DAD9:56025:稌
CJK UNIFIED IDEOGRAPH:'DADA:56026:稄
CJK UNIFIED IDEOGRAPH:'DADB:56027:窙
CJK UNIFIED IDEOGRAPH:'DADC:56028:竦
CJK UNIFIED IDEOGRAPH:'DADD:56029:竤
CJK UNIFIED IDEOGRAPH:'DADE:56030:筊
CJK UNIFIED IDEOGRAPH:'DADF:56031:笻
CJK UNIFIED IDEOGRAPH:'DAE0:56032:筄
CJK UNIFIED IDEOGRAPH:'DAE1:56033:筈
CJK UNIFIED IDEOGRAPH:'DAE2:56034:筌
CJK UNIFIED IDEOGRAPH:'DAE3:56035:筎
CJK UNIFIED IDEOGRAPH:'DAE4:56036:筀
CJK UNIFIED IDEOGRAPH:'DAE5:56037:筘
CJK UNIFIED IDEOGRAPH:'DAE6:56038:筅
CJK UNIFIED IDEOGRAPH:'DAE7:56039:粢
CJK UNIFIED IDEOGRAPH:'DAE8:56040:粞
CJK UNIFIED IDEOGRAPH:'DAE9:56041:粨
CJK UNIFIED IDEOGRAPH:'DAEA:56042:粡
CJK UNIFIED IDEOGRAPH:'DAEB:56043:絘
CJK UNIFIED IDEOGRAPH:'DAEC:56044:絯
CJK UNIFIED IDEOGRAPH:'DAED:56045:絣
CJK UNIFIED IDEOGRAPH:'DAEE:56046:絓
CJK UNIFIED IDEOGRAPH:'DAEF:56047:絖
CJK UNIFIED IDEOGRAPH:'DAF0:56048:絧
CJK UNIFIED IDEOGRAPH:'DAF1:56049:絪
CJK UNIFIED IDEOGRAPH:'DAF2:56050:絏
CJK UNIFIED IDEOGRAPH:'DAF3:56051:絭
CJK UNIFIED IDEOGRAPH:'DAF4:56052:絜
CJK UNIFIED IDEOGRAPH:'DAF5:56053:絫
CJK UNIFIED IDEOGRAPH:'DAF6:56054:絒
CJK UNIFIED IDEOGRAPH:'DAF7:56055:絔
CJK UNIFIED IDEOGRAPH:'DAF8:56056:絩
CJK UNIFIED IDEOGRAPH:'DAF9:56057:絑
CJK UNIFIED IDEOGRAPH:'DAFA:56058:絟
CJK UNIFIED IDEOGRAPH:'DAFB:56059:絎
CJK UNIFIED IDEOGRAPH:'DAFC:56060:缾
CJK UNIFIED IDEOGRAPH:'DAFD:56061:缿
CJK UNIFIED IDEOGRAPH:'DAFE:56062:罥
CJK UNIFIED IDEOGRAPH:'DB40:56128:罦
CJK UNIFIED IDEOGRAPH:'DB41:56129:羢
CJK UNIFIED IDEOGRAPH:'DB42:56130:羠
CJK UNIFIED IDEOGRAPH:'DB43:56131:羡
CJK UNIFIED IDEOGRAPH:'DB44:56132:翗
CJK UNIFIED IDEOGRAPH:'DB45:56133:聑
CJK UNIFIED IDEOGRAPH:'DB46:56134:聏
CJK UNIFIED IDEOGRAPH:'DB47:56135:聐
CJK UNIFIED IDEOGRAPH:'DB48:56136:胾
CJK UNIFIED IDEOGRAPH:'DB49:56137:胔
CJK UNIFIED IDEOGRAPH:'DB4A:56138:腃
CJK UNIFIED IDEOGRAPH:'DB4B:56139:腊
CJK UNIFIED IDEOGRAPH:'DB4C:56140:腒
CJK UNIFIED IDEOGRAPH:'DB4D:56141:腏
CJK UNIFIED IDEOGRAPH:'DB4E:56142:腇
CJK UNIFIED IDEOGRAPH:'DB4F:56143:脽
CJK UNIFIED IDEOGRAPH:'DB50:56144:腍
CJK UNIFIED IDEOGRAPH:'DB51:56145:脺
CJK UNIFIED IDEOGRAPH:'DB52:56146:臦
CJK UNIFIED IDEOGRAPH:'DB53:56147:臮
CJK UNIFIED IDEOGRAPH:'DB54:56148:臷
CJK UNIFIED IDEOGRAPH:'DB55:56149:臸
CJK UNIFIED IDEOGRAPH:'DB56:56150:臹
CJK UNIFIED IDEOGRAPH:'DB57:56151:舄
CJK UNIFIED IDEOGRAPH:'DB58:56152:舼
CJK UNIFIED IDEOGRAPH:'DB59:56153:舽
CJK UNIFIED IDEOGRAPH:'DB5A:56154:舿
CJK UNIFIED IDEOGRAPH:'DB5B:56155:艵
CJK UNIFIED IDEOGRAPH:'DB5C:56156:茻
CJK UNIFIED IDEOGRAPH:'DB5D:56157:菏
CJK UNIFIED IDEOGRAPH:'DB5E:56158:菹
CJK UNIFIED IDEOGRAPH:'DB5F:56159:萣
CJK UNIFIED IDEOGRAPH:'DB60:56160:菀
CJK UNIFIED IDEOGRAPH:'DB61:56161:菨
CJK UNIFIED IDEOGRAPH:'DB62:56162:萒
CJK UNIFIED IDEOGRAPH:'DB63:56163:菧
CJK UNIFIED IDEOGRAPH:'DB64:56164:菤
CJK UNIFIED IDEOGRAPH:'DB65:56165:菼
CJK UNIFIED IDEOGRAPH:'DB66:56166:菶
CJK UNIFIED IDEOGRAPH:'DB67:56167:萐
CJK UNIFIED IDEOGRAPH:'DB68:56168:菆
CJK UNIFIED IDEOGRAPH:'DB69:56169:菈
CJK UNIFIED IDEOGRAPH:'DB6A:56170:菫
CJK UNIFIED IDEOGRAPH:'DB6B:56171:菣
CJK UNIFIED IDEOGRAPH:'DB6C:56172:莿
CJK UNIFIED IDEOGRAPH:'DB6D:56173:萁
CJK UNIFIED IDEOGRAPH:'DB6E:56174:菝
CJK UNIFIED IDEOGRAPH:'DB6F:56175:菥
CJK UNIFIED IDEOGRAPH:'DB70:56176:菘
CJK UNIFIED IDEOGRAPH:'DB71:56177:菿
CJK UNIFIED IDEOGRAPH:'DB72:56178:菡
CJK UNIFIED IDEOGRAPH:'DB73:56179:菋
CJK UNIFIED IDEOGRAPH:'DB74:56180:菎
CJK UNIFIED IDEOGRAPH:'DB75:56181:菖
CJK UNIFIED IDEOGRAPH:'DB76:56182:菵
CJK UNIFIED IDEOGRAPH:'DB77:56183:菉
CJK UNIFIED IDEOGRAPH:'DB78:56184:萉
CJK UNIFIED IDEOGRAPH:'DB79:56185:萏
CJK UNIFIED IDEOGRAPH:'DB7A:56186:菞
CJK UNIFIED IDEOGRAPH:'DB7B:56187:萑
CJK UNIFIED IDEOGRAPH:'DB7C:56188:萆
CJK UNIFIED IDEOGRAPH:'DB7D:56189:菂
CJK UNIFIED IDEOGRAPH:'DB7E:56190:菳
CJK UNIFIED IDEOGRAPH:'DBA1:56225:菕
CJK UNIFIED IDEOGRAPH:'DBA2:56226:菺
CJK UNIFIED IDEOGRAPH:'DBA3:56227:菇
CJK UNIFIED IDEOGRAPH:'DBA4:56228:菑
CJK UNIFIED IDEOGRAPH:'DBA5:56229:菪
CJK UNIFIED IDEOGRAPH:'DBA6:56230:萓
CJK UNIFIED IDEOGRAPH:'DBA7:56231:菃
CJK UNIFIED IDEOGRAPH:'DBA8:56232:菬
CJK UNIFIED IDEOGRAPH:'DBA9:56233:菮
CJK UNIFIED IDEOGRAPH:'DBAA:56234:菄
CJK UNIFIED IDEOGRAPH:'DBAB:56235:菻
CJK UNIFIED IDEOGRAPH:'DBAC:56236:菗
CJK UNIFIED IDEOGRAPH:'DBAD:56237:菢
CJK UNIFIED IDEOGRAPH:'DBAE:56238:萛
CJK UNIFIED IDEOGRAPH:'DBAF:56239:菛
CJK UNIFIED IDEOGRAPH:'DBB0:56240:菾
CJK UNIFIED IDEOGRAPH:'DBB1:56241:蛘
CJK UNIFIED IDEOGRAPH:'DBB2:56242:蛢
CJK UNIFIED IDEOGRAPH:'DBB3:56243:蛦
CJK UNIFIED IDEOGRAPH:'DBB4:56244:蛓
CJK UNIFIED IDEOGRAPH:'DBB5:56245:蛣
CJK UNIFIED IDEOGRAPH:'DBB6:56246:蛚
CJK UNIFIED IDEOGRAPH:'DBB7:56247:蛪
CJK UNIFIED IDEOGRAPH:'DBB8:56248:蛝
CJK UNIFIED IDEOGRAPH:'DBB9:56249:蛫
CJK UNIFIED IDEOGRAPH:'DBBA:56250:蛜
CJK UNIFIED IDEOGRAPH:'DBBB:56251:蛬
CJK UNIFIED IDEOGRAPH:'DBBC:56252:蛩
CJK UNIFIED IDEOGRAPH:'DBBD:56253:蛗
CJK UNIFIED IDEOGRAPH:'DBBE:56254:蛨
CJK UNIFIED IDEOGRAPH:'DBBF:56255:蛑
CJK UNIFIED IDEOGRAPH:'DBC0:56256:衈
CJK UNIFIED IDEOGRAPH:'DBC1:56257:衖
CJK UNIFIED IDEOGRAPH:'DBC2:56258:衕
CJK UNIFIED IDEOGRAPH:'DBC3:56259:袺
CJK UNIFIED IDEOGRAPH:'DBC4:56260:裗
CJK UNIFIED IDEOGRAPH:'DBC5:56261:袹
CJK UNIFIED IDEOGRAPH:'DBC6:56262:袸
CJK UNIFIED IDEOGRAPH:'DBC7:56263:裀
CJK UNIFIED IDEOGRAPH:'DBC8:56264:袾
CJK UNIFIED IDEOGRAPH:'DBC9:56265:袶
CJK UNIFIED IDEOGRAPH:'DBCA:56266:袼
CJK UNIFIED IDEOGRAPH:'DBCB:56267:袷
CJK UNIFIED IDEOGRAPH:'DBCC:56268:袽
CJK UNIFIED IDEOGRAPH:'DBCD:56269:袲
CJK UNIFIED IDEOGRAPH:'DBCE:56270:褁
CJK UNIFIED IDEOGRAPH:'DBCF:56271:裉
CJK UNIFIED IDEOGRAPH:'DBD0:56272:覕
CJK UNIFIED IDEOGRAPH:'DBD1:56273:覘
CJK UNIFIED IDEOGRAPH:'DBD2:56274:覗
CJK UNIFIED IDEOGRAPH:'DBD3:56275:觝
CJK UNIFIED IDEOGRAPH:'DBD4:56276:觚
CJK UNIFIED IDEOGRAPH:'DBD5:56277:觛
CJK UNIFIED IDEOGRAPH:'DBD6:56278:詎
CJK UNIFIED IDEOGRAPH:'DBD7:56279:詍
CJK UNIFIED IDEOGRAPH:'DBD8:56280:訹
CJK UNIFIED IDEOGRAPH:'DBD9:56281:詙
CJK UNIFIED IDEOGRAPH:'DBDA:56282:詀
CJK UNIFIED IDEOGRAPH:'DBDB:56283:詗
CJK UNIFIED IDEOGRAPH:'DBDC:56284:詘
CJK UNIFIED IDEOGRAPH:'DBDD:56285:詄
CJK UNIFIED IDEOGRAPH:'DBDE:56286:詅
CJK UNIFIED IDEOGRAPH:'DBDF:56287:詒
CJK UNIFIED IDEOGRAPH:'DBE0:56288:詈
CJK UNIFIED IDEOGRAPH:'DBE1:56289:詑
CJK UNIFIED IDEOGRAPH:'DBE2:56290:詊
CJK UNIFIED IDEOGRAPH:'DBE3:56291:詌
CJK UNIFIED IDEOGRAPH:'DBE4:56292:詏
CJK UNIFIED IDEOGRAPH:'DBE5:56293:豟
CJK UNIFIED IDEOGRAPH:'DBE6:56294:貁
CJK UNIFIED IDEOGRAPH:'DBE7:56295:貀
CJK UNIFIED IDEOGRAPH:'DBE8:56296:貺
CJK UNIFIED IDEOGRAPH:'DBE9:56297:貾
CJK UNIFIED IDEOGRAPH:'DBEA:56298:貰
CJK UNIFIED IDEOGRAPH:'DBEB:56299:貹
CJK UNIFIED IDEOGRAPH:'DBEC:56300:貵
CJK UNIFIED IDEOGRAPH:'DBED:56301:趄
CJK UNIFIED IDEOGRAPH:'DBEE:56302:趀
CJK UNIFIED IDEOGRAPH:'DBEF:56303:趉
CJK UNIFIED IDEOGRAPH:'DBF0:56304:跘
CJK UNIFIED IDEOGRAPH:'DBF1:56305:跓
CJK UNIFIED IDEOGRAPH:'DBF2:56306:跍
CJK UNIFIED IDEOGRAPH:'DBF3:56307:跇
CJK UNIFIED IDEOGRAPH:'DBF4:56308:跖
CJK UNIFIED IDEOGRAPH:'DBF5:56309:跜
CJK UNIFIED IDEOGRAPH:'DBF6:56310:跏
CJK UNIFIED IDEOGRAPH:'DBF7:56311:跕
CJK UNIFIED IDEOGRAPH:'DBF8:56312:跙
CJK UNIFIED IDEOGRAPH:'DBF9:56313:跈
CJK UNIFIED IDEOGRAPH:'DBFA:56314:跗
CJK UNIFIED IDEOGRAPH:'DBFB:56315:跅
CJK UNIFIED IDEOGRAPH:'DBFC:56316:軯
CJK UNIFIED IDEOGRAPH:'DBFD:56317:軷
CJK UNIFIED IDEOGRAPH:'DBFE:56318:軺
CJK UNIFIED IDEOGRAPH:'DC40:56384:軹
CJK UNIFIED IDEOGRAPH:'DC41:56385:軦
CJK UNIFIED IDEOGRAPH:'DC42:56386:軮
CJK UNIFIED IDEOGRAPH:'DC43:56387:軥
CJK UNIFIED IDEOGRAPH:'DC44:56388:軵
CJK UNIFIED IDEOGRAPH:'DC45:56389:軧
CJK UNIFIED IDEOGRAPH:'DC46:56390:軨
CJK UNIFIED IDEOGRAPH:'DC47:56391:軶
CJK UNIFIED IDEOGRAPH:'DC48:56392:軫
CJK UNIFIED IDEOGRAPH:'DC49:56393:軱
CJK UNIFIED IDEOGRAPH:'DC4A:56394:軬
CJK UNIFIED IDEOGRAPH:'DC4B:56395:軴
CJK UNIFIED IDEOGRAPH:'DC4C:56396:軩
CJK UNIFIED IDEOGRAPH:'DC4D:56397:逭
CJK UNIFIED IDEOGRAPH:'DC4E:56398:逴
CJK UNIFIED IDEOGRAPH:'DC4F:56399:逯
CJK UNIFIED IDEOGRAPH:'DC50:56400:鄆
CJK UNIFIED IDEOGRAPH:'DC51:56401:鄬
CJK UNIFIED IDEOGRAPH:'DC52:56402:鄄
CJK UNIFIED IDEOGRAPH:'DC53:56403:郿
CJK UNIFIED IDEOGRAPH:'DC54:56404:郼
CJK UNIFIED IDEOGRAPH:'DC55:56405:鄈
CJK UNIFIED IDEOGRAPH:'DC56:56406:郹
CJK UNIFIED IDEOGRAPH:'DC57:56407:郻
CJK UNIFIED IDEOGRAPH:'DC58:56408:鄁
CJK UNIFIED IDEOGRAPH:'DC59:56409:鄀
CJK UNIFIED IDEOGRAPH:'DC5A:56410:鄇
CJK UNIFIED IDEOGRAPH:'DC5B:56411:鄅
CJK UNIFIED IDEOGRAPH:'DC5C:56412:鄃
CJK UNIFIED IDEOGRAPH:'DC5D:56413:酡
CJK UNIFIED IDEOGRAPH:'DC5E:56414:酤
CJK UNIFIED IDEOGRAPH:'DC5F:56415:酟
CJK UNIFIED IDEOGRAPH:'DC60:56416:酢
CJK UNIFIED IDEOGRAPH:'DC61:56417:酠
CJK UNIFIED IDEOGRAPH:'DC62:56418:鈁
CJK UNIFIED IDEOGRAPH:'DC63:56419:鈊
CJK UNIFIED IDEOGRAPH:'DC64:56420:鈥
CJK UNIFIED IDEOGRAPH:'DC65:56421:鈃
CJK UNIFIED IDEOGRAPH:'DC66:56422:鈚
CJK UNIFIED IDEOGRAPH:'DC67:56423:鈦
CJK UNIFIED IDEOGRAPH:'DC68:56424:鈏
CJK UNIFIED IDEOGRAPH:'DC69:56425:鈌
CJK UNIFIED IDEOGRAPH:'DC6A:56426:鈀
CJK UNIFIED IDEOGRAPH:'DC6B:56427:鈒
CJK UNIFIED IDEOGRAPH:'DC6C:56428:釿
CJK UNIFIED IDEOGRAPH:'DC6D:56429:釽
CJK UNIFIED IDEOGRAPH:'DC6E:56430:鈆
CJK UNIFIED IDEOGRAPH:'DC6F:56431:鈄
CJK UNIFIED IDEOGRAPH:'DC70:56432:鈧
CJK UNIFIED IDEOGRAPH:'DC71:56433:鈂
CJK UNIFIED IDEOGRAPH:'DC72:56434:鈜
CJK UNIFIED IDEOGRAPH:'DC73:56435:鈤
CJK UNIFIED IDEOGRAPH:'DC74:56436:鈙
CJK UNIFIED IDEOGRAPH:'DC75:56437:鈗
CJK UNIFIED IDEOGRAPH:'DC76:56438:鈅
CJK UNIFIED IDEOGRAPH:'DC77:56439:鈖
CJK UNIFIED IDEOGRAPH:'DC78:56440:镻
CJK UNIFIED IDEOGRAPH:'DC79:56441:閍
CJK UNIFIED IDEOGRAPH:'DC7A:56442:閌
CJK UNIFIED IDEOGRAPH:'DC7B:56443:閐
CJK UNIFIED IDEOGRAPH:'DC7C:56444:隇
CJK UNIFIED IDEOGRAPH:'DC7D:56445:陾
CJK UNIFIED IDEOGRAPH:'DC7E:56446:隈
CJK UNIFIED IDEOGRAPH:'DCA1:56481:隉
CJK UNIFIED IDEOGRAPH:'DCA2:56482:隃
CJK UNIFIED IDEOGRAPH:'DCA3:56483:隀
CJK UNIFIED IDEOGRAPH:'DCA4:56484:雂
CJK UNIFIED IDEOGRAPH:'DCA5:56485:雈
CJK UNIFIED IDEOGRAPH:'DCA6:56486:雃
CJK UNIFIED IDEOGRAPH:'DCA7:56487:雱
CJK UNIFIED IDEOGRAPH:'DCA8:56488:雰
CJK UNIFIED IDEOGRAPH:'DCA9:56489:靬
CJK UNIFIED IDEOGRAPH:'DCAA:56490:靰
CJK UNIFIED IDEOGRAPH:'DCAB:56491:靮
CJK UNIFIED IDEOGRAPH:'DCAC:56492:頇
CJK UNIFIED IDEOGRAPH:'DCAD:56493:颩
CJK UNIFIED IDEOGRAPH:'DCAE:56494:飫
CJK UNIFIED IDEOGRAPH:'DCAF:56495:鳦
CJK UNIFIED IDEOGRAPH:'DCB0:56496:黹
CJK UNIFIED IDEOGRAPH:'DCB1:56497:亃
CJK UNIFIED IDEOGRAPH:'DCB2:56498:亄
CJK UNIFIED IDEOGRAPH:'DCB3:56499:亶
CJK UNIFIED IDEOGRAPH:'DCB4:56500:傽
CJK UNIFIED IDEOGRAPH:'DCB5:56501:傿
CJK UNIFIED IDEOGRAPH:'DCB6:56502:僆
CJK UNIFIED IDEOGRAPH:'DCB7:56503:傮
CJK UNIFIED IDEOGRAPH:'DCB8:56504:僄
CJK UNIFIED IDEOGRAPH:'DCB9:56505:僊
CJK UNIFIED IDEOGRAPH:'DCBA:56506:傴
CJK UNIFIED IDEOGRAPH:'DCBB:56507:僈
CJK UNIFIED IDEOGRAPH:'DCBC:56508:僂
CJK UNIFIED IDEOGRAPH:'DCBD:56509:傰
CJK UNIFIED IDEOGRAPH:'DCBE:56510:僁
CJK UNIFIED IDEOGRAPH:'DCBF:56511:傺
CJK UNIFIED IDEOGRAPH:'DCC0:56512:傱
CJK UNIFIED IDEOGRAPH:'DCC1:56513:僋
CJK UNIFIED IDEOGRAPH:'DCC2:56514:僉
CJK UNIFIED IDEOGRAPH:'DCC3:56515:傶
CJK UNIFIED IDEOGRAPH:'DCC4:56516:傸
CJK UNIFIED IDEOGRAPH:'DCC5:56517:凗
CJK UNIFIED IDEOGRAPH:'DCC6:56518:剺
CJK UNIFIED IDEOGRAPH:'DCC7:56519:剸
CJK UNIFIED IDEOGRAPH:'DCC8:56520:剻
CJK UNIFIED IDEOGRAPH:'DCC9:56521:剼
CJK UNIFIED IDEOGRAPH:'DCCA:56522:嗃
CJK UNIFIED IDEOGRAPH:'DCCB:56523:嗛
CJK UNIFIED IDEOGRAPH:'DCCC:56524:嗌
CJK UNIFIED IDEOGRAPH:'DCCD:56525:嗐
CJK UNIFIED IDEOGRAPH:'DCCE:56526:嗋
CJK UNIFIED IDEOGRAPH:'DCCF:56527:嗊
CJK UNIFIED IDEOGRAPH:'DCD0:56528:嗝
CJK UNIFIED IDEOGRAPH:'DCD1:56529:嗀
CJK UNIFIED IDEOGRAPH:'DCD2:56530:嗔
CJK UNIFIED IDEOGRAPH:'DCD3:56531:嗄
CJK UNIFIED IDEOGRAPH:'DCD4:56532:嗩
CJK UNIFIED IDEOGRAPH:'DCD5:56533:喿
CJK UNIFIED IDEOGRAPH:'DCD6:56534:嗒
CJK UNIFIED IDEOGRAPH:'DCD7:56535:喍
CJK UNIFIED IDEOGRAPH:'DCD8:56536:嗏
CJK UNIFIED IDEOGRAPH:'DCD9:56537:嗕
CJK UNIFIED IDEOGRAPH:'DCDA:56538:嗢
CJK UNIFIED IDEOGRAPH:'DCDB:56539:嗖
CJK UNIFIED IDEOGRAPH:'DCDC:56540:嗈
CJK UNIFIED IDEOGRAPH:'DCDD:56541:嗲
CJK UNIFIED IDEOGRAPH:'DCDE:56542:嗍
CJK UNIFIED IDEOGRAPH:'DCDF:56543:嗙
CJK UNIFIED IDEOGRAPH:'DCE0:56544:嗂
CJK UNIFIED IDEOGRAPH:'DCE1:56545:圔
CJK UNIFIED IDEOGRAPH:'DCE2:56546:塓
CJK UNIFIED IDEOGRAPH:'DCE3:56547:塨
CJK UNIFIED IDEOGRAPH:'DCE4:56548:塤
CJK UNIFIED IDEOGRAPH:'DCE5:56549:塏
CJK UNIFIED IDEOGRAPH:'DCE6:56550:塍
CJK UNIFIED IDEOGRAPH:'DCE7:56551:塉
CJK UNIFIED IDEOGRAPH:'DCE8:56552:塯
CJK UNIFIED IDEOGRAPH:'DCE9:56553:塕
CJK UNIFIED IDEOGRAPH:'DCEA:56554:塎
CJK UNIFIED IDEOGRAPH:'DCEB:56555:塝
CJK UNIFIED IDEOGRAPH:'DCEC:56556:塙
CJK UNIFIED IDEOGRAPH:'DCED:56557:塥
CJK UNIFIED IDEOGRAPH:'DCEE:56558:塛
CJK UNIFIED IDEOGRAPH:'DCEF:56559:堽
CJK UNIFIED IDEOGRAPH:'DCF0:56560:塣
CJK UNIFIED IDEOGRAPH:'DCF1:56561:塱
CJK UNIFIED IDEOGRAPH:'DCF2:56562:壼
CJK UNIFIED IDEOGRAPH:'DCF3:56563:嫇
CJK UNIFIED IDEOGRAPH:'DCF4:56564:嫄
CJK UNIFIED IDEOGRAPH:'DCF5:56565:嫋
CJK UNIFIED IDEOGRAPH:'DCF6:56566:媺
CJK UNIFIED IDEOGRAPH:'DCF7:56567:媸
CJK UNIFIED IDEOGRAPH:'DCF8:56568:媱
CJK UNIFIED IDEOGRAPH:'DCF9:56569:媵
CJK UNIFIED IDEOGRAPH:'DCFA:56570:媰
CJK UNIFIED IDEOGRAPH:'DCFB:56571:媿
CJK UNIFIED IDEOGRAPH:'DCFC:56572:嫈
CJK UNIFIED IDEOGRAPH:'DCFD:56573:媻
CJK UNIFIED IDEOGRAPH:'DCFE:56574:嫆
CJK UNIFIED IDEOGRAPH:'DD40:56640:媷
CJK UNIFIED IDEOGRAPH:'DD41:56641:嫀
CJK UNIFIED IDEOGRAPH:'DD42:56642:嫊
CJK UNIFIED IDEOGRAPH:'DD43:56643:媴
CJK UNIFIED IDEOGRAPH:'DD44:56644:媶
CJK UNIFIED IDEOGRAPH:'DD45:56645:嫍
CJK UNIFIED IDEOGRAPH:'DD46:56646:媹
CJK UNIFIED IDEOGRAPH:'DD47:56647:媐
CJK UNIFIED IDEOGRAPH:'DD48:56648:寖
CJK UNIFIED IDEOGRAPH:'DD49:56649:寘
CJK UNIFIED IDEOGRAPH:'DD4A:56650:寙
CJK UNIFIED IDEOGRAPH:'DD4B:56651:尟
CJK UNIFIED IDEOGRAPH:'DD4C:56652:尳
CJK UNIFIED IDEOGRAPH:'DD4D:56653:嵱
CJK UNIFIED IDEOGRAPH:'DD4E:56654:嵣
CJK UNIFIED IDEOGRAPH:'DD4F:56655:嵊
CJK UNIFIED IDEOGRAPH:'DD50:56656:嵥
CJK UNIFIED IDEOGRAPH:'DD51:56657:嵲
CJK UNIFIED IDEOGRAPH:'DD52:56658:嵬
CJK UNIFIED IDEOGRAPH:'DD53:56659:嵞
CJK UNIFIED IDEOGRAPH:'DD54:56660:嵨
CJK UNIFIED IDEOGRAPH:'DD55:56661:嵧
CJK UNIFIED IDEOGRAPH:'DD56:56662:嵢
CJK UNIFIED IDEOGRAPH:'DD57:56663:巰
CJK UNIFIED IDEOGRAPH:'DD58:56664:幏
CJK UNIFIED IDEOGRAPH:'DD59:56665:幎
CJK UNIFIED IDEOGRAPH:'DD5A:56666:幊
CJK UNIFIED IDEOGRAPH:'DD5B:56667:幍
CJK UNIFIED IDEOGRAPH:'DD5C:56668:幋
CJK UNIFIED IDEOGRAPH:'DD5D:56669:廅
CJK UNIFIED IDEOGRAPH:'DD5E:56670:廌
CJK UNIFIED IDEOGRAPH:'DD5F:56671:廆
CJK UNIFIED IDEOGRAPH:'DD60:56672:廋
CJK UNIFIED IDEOGRAPH:'DD61:56673:廇
CJK UNIFIED IDEOGRAPH:'DD62:56674:彀
CJK UNIFIED IDEOGRAPH:'DD63:56675:徯
CJK UNIFIED IDEOGRAPH:'DD64:56676:徭
CJK UNIFIED IDEOGRAPH:'DD65:56677:惷
CJK UNIFIED IDEOGRAPH:'DD66:56678:慉
CJK UNIFIED IDEOGRAPH:'DD67:56679:慊
CJK UNIFIED IDEOGRAPH:'DD68:56680:愫
CJK UNIFIED IDEOGRAPH:'DD69:56681:慅
CJK UNIFIED IDEOGRAPH:'DD6A:56682:愶
CJK UNIFIED IDEOGRAPH:'DD6B:56683:愲
CJK UNIFIED IDEOGRAPH:'DD6C:56684:愮
CJK UNIFIED IDEOGRAPH:'DD6D:56685:慆
CJK UNIFIED IDEOGRAPH:'DD6E:56686:愯
CJK UNIFIED IDEOGRAPH:'DD6F:56687:慏
CJK UNIFIED IDEOGRAPH:'DD70:56688:愩
CJK UNIFIED IDEOGRAPH:'DD71:56689:慀
CJK UNIFIED IDEOGRAPH:'DD72:56690:戠
CJK UNIFIED IDEOGRAPH:'DD73:56691:酨
CJK UNIFIED IDEOGRAPH:'DD74:56692:戣
CJK UNIFIED IDEOGRAPH:'DD75:56693:戥
CJK UNIFIED IDEOGRAPH:'DD76:56694:戤
CJK UNIFIED IDEOGRAPH:'DD77:56695:揅
CJK UNIFIED IDEOGRAPH:'DD78:56696:揱
CJK UNIFIED IDEOGRAPH:'DD79:56697:揫
CJK UNIFIED IDEOGRAPH:'DD7A:56698:搐
CJK UNIFIED IDEOGRAPH:'DD7B:56699:搒
CJK UNIFIED IDEOGRAPH:'DD7C:56700:搉
CJK UNIFIED IDEOGRAPH:'DD7D:56701:搠
CJK UNIFIED IDEOGRAPH:'DD7E:56702:搤
CJK UNIFIED IDEOGRAPH:'DDA1:56737:搳
CJK UNIFIED IDEOGRAPH:'DDA2:56738:摃
CJK UNIFIED IDEOGRAPH:'DDA3:56739:搟
CJK UNIFIED IDEOGRAPH:'DDA4:56740:搕
CJK UNIFIED IDEOGRAPH:'DDA5:56741:搘
CJK UNIFIED IDEOGRAPH:'DDA6:56742:搹
CJK UNIFIED IDEOGRAPH:'DDA7:56743:搷
CJK UNIFIED IDEOGRAPH:'DDA8:56744:搢
CJK UNIFIED IDEOGRAPH:'DDA9:56745:搣
CJK UNIFIED IDEOGRAPH:'DDAA:56746:搌
CJK UNIFIED IDEOGRAPH:'DDAB:56747:搦
CJK UNIFIED IDEOGRAPH:'DDAC:56748:搰
CJK UNIFIED IDEOGRAPH:'DDAD:56749:搨
CJK UNIFIED IDEOGRAPH:'DDAE:56750:摁
CJK UNIFIED IDEOGRAPH:'DDAF:56751:搵
CJK UNIFIED IDEOGRAPH:'DDB0:56752:搯
CJK UNIFIED IDEOGRAPH:'DDB1:56753:搊
CJK UNIFIED IDEOGRAPH:'DDB2:56754:搚
CJK UNIFIED IDEOGRAPH:'DDB3:56755:摀
CJK UNIFIED IDEOGRAPH:'DDB4:56756:搥
CJK UNIFIED IDEOGRAPH:'DDB5:56757:搧
CJK UNIFIED IDEOGRAPH:'DDB6:56758:搋
CJK UNIFIED IDEOGRAPH:'DDB7:56759:揧
CJK UNIFIED IDEOGRAPH:'DDB8:56760:搛
CJK UNIFIED IDEOGRAPH:'DDB9:56761:搮
CJK UNIFIED IDEOGRAPH:'DDBA:56762:搡
CJK UNIFIED IDEOGRAPH:'DDBB:56763:搎
CJK UNIFIED IDEOGRAPH:'DDBC:56764:敯
CJK UNIFIED IDEOGRAPH:'DDBD:56765:斒
CJK UNIFIED IDEOGRAPH:'DDBE:56766:旓
CJK UNIFIED IDEOGRAPH:'DDBF:56767:暆
CJK UNIFIED IDEOGRAPH:'DDC0:56768:暌
CJK UNIFIED IDEOGRAPH:'DDC1:56769:暕
CJK UNIFIED IDEOGRAPH:'DDC2:56770:暐
CJK UNIFIED IDEOGRAPH:'DDC3:56771:暋
CJK UNIFIED IDEOGRAPH:'DDC4:56772:暊
CJK UNIFIED IDEOGRAPH:'DDC5:56773:暙
CJK UNIFIED IDEOGRAPH:'DDC6:56774:暔
CJK UNIFIED IDEOGRAPH:'DDC7:56775:晸
CJK UNIFIED IDEOGRAPH:'DDC8:56776:朠
CJK UNIFIED IDEOGRAPH:'DDC9:56777:楦
CJK UNIFIED IDEOGRAPH:'DDCA:56778:楟
CJK UNIFIED IDEOGRAPH:'DDCB:56779:椸
CJK UNIFIED IDEOGRAPH:'DDCC:56780:楎
CJK UNIFIED IDEOGRAPH:'DDCD:56781:楢
CJK UNIFIED IDEOGRAPH:'DDCE:56782:楱
CJK UNIFIED IDEOGRAPH:'DDCF:56783:椿
CJK UNIFIED IDEOGRAPH:'DDD0:56784:楅
CJK UNIFIED IDEOGRAPH:'DDD1:56785:楪
CJK UNIFIED IDEOGRAPH:'DDD2:56786:椹
CJK UNIFIED IDEOGRAPH:'DDD3:56787:楂
CJK UNIFIED IDEOGRAPH:'DDD4:56788:楗
CJK UNIFIED IDEOGRAPH:'DDD5:56789:楙
CJK UNIFIED IDEOGRAPH:'DDD6:56790:楺
CJK UNIFIED IDEOGRAPH:'DDD7:56791:楈
CJK UNIFIED IDEOGRAPH:'DDD8:56792:楉
CJK UNIFIED IDEOGRAPH:'DDD9:56793:椵
CJK UNIFIED IDEOGRAPH:'DDDA:56794:楬
CJK UNIFIED IDEOGRAPH:'DDDB:56795:椳
CJK UNIFIED IDEOGRAPH:'DDDC:56796:椽
CJK UNIFIED IDEOGRAPH:'DDDD:56797:楥
CJK UNIFIED IDEOGRAPH:'DDDE:56798:棰
CJK UNIFIED IDEOGRAPH:'DDDF:56799:楸
CJK UNIFIED IDEOGRAPH:'DDE0:56800:椴
CJK UNIFIED IDEOGRAPH:'DDE1:56801:楩
CJK UNIFIED IDEOGRAPH:'DDE2:56802:楀
CJK UNIFIED IDEOGRAPH:'DDE3:56803:楯
CJK UNIFIED IDEOGRAPH:'DDE4:56804:楄
CJK UNIFIED IDEOGRAPH:'DDE5:56805:楶
CJK UNIFIED IDEOGRAPH:'DDE6:56806:楘
CJK UNIFIED IDEOGRAPH:'DDE7:56807:楁
CJK UNIFIED IDEOGRAPH:'DDE8:56808:楴
CJK UNIFIED IDEOGRAPH:'DDE9:56809:楌
CJK UNIFIED IDEOGRAPH:'DDEA:56810:椻
CJK UNIFIED IDEOGRAPH:'DDEB:56811:楋
CJK UNIFIED IDEOGRAPH:'DDEC:56812:椷
CJK UNIFIED IDEOGRAPH:'DDED:56813:楜
CJK UNIFIED IDEOGRAPH:'DDEE:56814:楏
CJK UNIFIED IDEOGRAPH:'DDEF:56815:楑
CJK UNIFIED IDEOGRAPH:'DDF0:56816:椲
CJK UNIFIED IDEOGRAPH:'DDF1:56817:楒
CJK UNIFIED IDEOGRAPH:'DDF2:56818:椯
CJK UNIFIED IDEOGRAPH:'DDF3:56819:楻
CJK UNIFIED IDEOGRAPH:'DDF4:56820:椼
CJK UNIFIED IDEOGRAPH:'DDF5:56821:歆
CJK UNIFIED IDEOGRAPH:'DDF6:56822:歅
CJK UNIFIED IDEOGRAPH:'DDF7:56823:歃
CJK UNIFIED IDEOGRAPH:'DDF8:56824:歂
CJK UNIFIED IDEOGRAPH:'DDF9:56825:歈
CJK UNIFIED IDEOGRAPH:'DDFA:56826:歁
CJK UNIFIED IDEOGRAPH:'DDFB:56827:殛
CJK COMPATIBILITY IDEOGRAPH:'DDFC:56828:嗀
CJK UNIFIED IDEOGRAPH:'DDFD:56829:毻
CJK UNIFIED IDEOGRAPH:'DDFE:56830:毼
CJK UNIFIED IDEOGRAPH:'DE40:56896:毹
CJK UNIFIED IDEOGRAPH:'DE41:56897:毷
CJK UNIFIED IDEOGRAPH:'DE42:56898:毸
CJK UNIFIED IDEOGRAPH:'DE43:56899:溛
CJK UNIFIED IDEOGRAPH:'DE44:56900:滖
CJK UNIFIED IDEOGRAPH:'DE45:56901:滈
CJK UNIFIED IDEOGRAPH:'DE46:56902:溏
CJK UNIFIED IDEOGRAPH:'DE47:56903:滀
CJK UNIFIED IDEOGRAPH:'DE48:56904:溟
CJK UNIFIED IDEOGRAPH:'DE49:56905:溓
CJK UNIFIED IDEOGRAPH:'DE4A:56906:溔
CJK UNIFIED IDEOGRAPH:'DE4B:56907:溠
CJK UNIFIED IDEOGRAPH:'DE4C:56908:溱
CJK UNIFIED IDEOGRAPH:'DE4D:56909:溹
CJK UNIFIED IDEOGRAPH:'DE4E:56910:滆
CJK UNIFIED IDEOGRAPH:'DE4F:56911:滒
CJK UNIFIED IDEOGRAPH:'DE50:56912:溽
CJK UNIFIED IDEOGRAPH:'DE51:56913:滁
CJK UNIFIED IDEOGRAPH:'DE52:56914:溞
CJK UNIFIED IDEOGRAPH:'DE53:56915:滉
CJK UNIFIED IDEOGRAPH:'DE54:56916:溷
CJK UNIFIED IDEOGRAPH:'DE55:56917:溰
CJK UNIFIED IDEOGRAPH:'DE56:56918:滍
CJK UNIFIED IDEOGRAPH:'DE57:56919:溦
CJK UNIFIED IDEOGRAPH:'DE58:56920:滏
CJK UNIFIED IDEOGRAPH:'DE59:56921:溲
CJK UNIFIED IDEOGRAPH:'DE5A:56922:溾
CJK UNIFIED IDEOGRAPH:'DE5B:56923:滃
CJK UNIFIED IDEOGRAPH:'DE5C:56924:滜
CJK UNIFIED IDEOGRAPH:'DE5D:56925:滘
CJK UNIFIED IDEOGRAPH:'DE5E:56926:溙
CJK UNIFIED IDEOGRAPH:'DE5F:56927:溒
CJK UNIFIED IDEOGRAPH:'DE60:56928:溎
CJK UNIFIED IDEOGRAPH:'DE61:56929:溍
CJK UNIFIED IDEOGRAPH:'DE62:56930:溤
CJK UNIFIED IDEOGRAPH:'DE63:56931:溡
CJK UNIFIED IDEOGRAPH:'DE64:56932:溿
CJK UNIFIED IDEOGRAPH:'DE65:56933:溳
CJK UNIFIED IDEOGRAPH:'DE66:56934:滐
CJK UNIFIED IDEOGRAPH:'DE67:56935:滊
CJK UNIFIED IDEOGRAPH:'DE68:56936:溗
CJK UNIFIED IDEOGRAPH:'DE69:56937:溮
CJK UNIFIED IDEOGRAPH:'DE6A:56938:溣
CJK UNIFIED IDEOGRAPH:'DE6B:56939:煇
CJK UNIFIED IDEOGRAPH:'DE6C:56940:煔
CJK UNIFIED IDEOGRAPH:'DE6D:56941:煒
CJK UNIFIED IDEOGRAPH:'DE6E:56942:煣
CJK UNIFIED IDEOGRAPH:'DE6F:56943:煠
CJK UNIFIED IDEOGRAPH:'DE70:56944:煁
CJK UNIFIED IDEOGRAPH:'DE71:56945:煝
CJK UNIFIED IDEOGRAPH:'DE72:56946:煢
CJK UNIFIED IDEOGRAPH:'DE73:56947:煲
CJK UNIFIED IDEOGRAPH:'DE74:56948:煸
CJK UNIFIED IDEOGRAPH:'DE75:56949:煪
CJK UNIFIED IDEOGRAPH:'DE76:56950:煡
CJK UNIFIED IDEOGRAPH:'DE77:56951:煂
CJK UNIFIED IDEOGRAPH:'DE78:56952:煘
CJK UNIFIED IDEOGRAPH:'DE79:56953:煃
CJK UNIFIED IDEOGRAPH:'DE7A:56954:煋
CJK UNIFIED IDEOGRAPH:'DE7B:56955:煰
CJK UNIFIED IDEOGRAPH:'DE7C:56956:煟
CJK UNIFIED IDEOGRAPH:'DE7D:56957:煐
CJK UNIFIED IDEOGRAPH:'DE7E:56958:煓
CJK UNIFIED IDEOGRAPH:'DEA1:56993:煄
CJK UNIFIED IDEOGRAPH:'DEA2:56994:煍
CJK UNIFIED IDEOGRAPH:'DEA3:56995:煚
CJK UNIFIED IDEOGRAPH:'DEA4:56996:牏
CJK UNIFIED IDEOGRAPH:'DEA5:56997:犍
CJK UNIFIED IDEOGRAPH:'DEA6:56998:犌
CJK UNIFIED IDEOGRAPH:'DEA7:56999:犑
CJK UNIFIED IDEOGRAPH:'DEA8:57000:犐
CJK UNIFIED IDEOGRAPH:'DEA9:57001:犎
CJK UNIFIED IDEOGRAPH:'DEAA:57002:猼
CJK UNIFIED IDEOGRAPH:'DEAB:57003:獂
CJK UNIFIED IDEOGRAPH:'DEAC:57004:猻
CJK UNIFIED IDEOGRAPH:'DEAD:57005:猺
CJK UNIFIED IDEOGRAPH:'DEAE:57006:獀
CJK UNIFIED IDEOGRAPH:'DEAF:57007:獊
CJK UNIFIED IDEOGRAPH:'DEB0:57008:獉
CJK UNIFIED IDEOGRAPH:'DEB1:57009:瑄
CJK UNIFIED IDEOGRAPH:'DEB2:57010:瑊
CJK UNIFIED IDEOGRAPH:'DEB3:57011:瑋
CJK UNIFIED IDEOGRAPH:'DEB4:57012:瑒
CJK UNIFIED IDEOGRAPH:'DEB5:57013:瑑
CJK UNIFIED IDEOGRAPH:'DEB6:57014:瑗
CJK UNIFIED IDEOGRAPH:'DEB7:57015:瑀
CJK UNIFIED IDEOGRAPH:'DEB8:57016:瑏
CJK UNIFIED IDEOGRAPH:'DEB9:57017:瑐
CJK UNIFIED IDEOGRAPH:'DEBA:57018:瑎
CJK UNIFIED IDEOGRAPH:'DEBB:57019:瑂
CJK UNIFIED IDEOGRAPH:'DEBC:57020:瑆
CJK UNIFIED IDEOGRAPH:'DEBD:57021:瑍
CJK UNIFIED IDEOGRAPH:'DEBE:57022:瑔
CJK UNIFIED IDEOGRAPH:'DEBF:57023:瓡
CJK UNIFIED IDEOGRAPH:'DEC0:57024:瓿
CJK UNIFIED IDEOGRAPH:'DEC1:57025:瓾
CJK UNIFIED IDEOGRAPH:'DEC2:57026:瓽
CJK UNIFIED IDEOGRAPH:'DEC3:57027:甝
CJK UNIFIED IDEOGRAPH:'DEC4:57028:畹
CJK UNIFIED IDEOGRAPH:'DEC5:57029:畷
CJK UNIFIED IDEOGRAPH:'DEC6:57030:榃
CJK UNIFIED IDEOGRAPH:'DEC7:57031:痯
CJK UNIFIED IDEOGRAPH:'DEC8:57032:瘏
CJK UNIFIED IDEOGRAPH:'DEC9:57033:瘃
CJK UNIFIED IDEOGRAPH:'DECA:57034:痷
CJK UNIFIED IDEOGRAPH:'DECB:57035:痾
CJK UNIFIED IDEOGRAPH:'DECC:57036:痼
CJK UNIFIED IDEOGRAPH:'DECD:57037:痹
CJK UNIFIED IDEOGRAPH:'DECE:57038:痸
CJK UNIFIED IDEOGRAPH:'DECF:57039:瘐
CJK UNIFIED IDEOGRAPH:'DED0:57040:痻
CJK UNIFIED IDEOGRAPH:'DED1:57041:痶
CJK UNIFIED IDEOGRAPH:'DED2:57042:痭
CJK UNIFIED IDEOGRAPH:'DED3:57043:痵
CJK UNIFIED IDEOGRAPH:'DED4:57044:痽
CJK UNIFIED IDEOGRAPH:'DED5:57045:皙
CJK UNIFIED IDEOGRAPH:'DED6:57046:皵
CJK UNIFIED IDEOGRAPH:'DED7:57047:盝
CJK UNIFIED IDEOGRAPH:'DED8:57048:睕
CJK UNIFIED IDEOGRAPH:'DED9:57049:睟
CJK UNIFIED IDEOGRAPH:'DEDA:57050:睠
CJK UNIFIED IDEOGRAPH:'DEDB:57051:睒
CJK UNIFIED IDEOGRAPH:'DEDC:57052:睖
CJK UNIFIED IDEOGRAPH:'DEDD:57053:睚
CJK UNIFIED IDEOGRAPH:'DEDE:57054:睩
CJK UNIFIED IDEOGRAPH:'DEDF:57055:睧
CJK UNIFIED IDEOGRAPH:'DEE0:57056:睔
CJK UNIFIED IDEOGRAPH:'DEE1:57057:睙
CJK UNIFIED IDEOGRAPH:'DEE2:57058:睭
CJK UNIFIED IDEOGRAPH:'DEE3:57059:矠
CJK UNIFIED IDEOGRAPH:'DEE4:57060:碇
CJK UNIFIED IDEOGRAPH:'DEE5:57061:碚
CJK UNIFIED IDEOGRAPH:'DEE6:57062:碔
CJK UNIFIED IDEOGRAPH:'DEE7:57063:碏
CJK UNIFIED IDEOGRAPH:'DEE8:57064:碄
CJK UNIFIED IDEOGRAPH:'DEE9:57065:碕
CJK UNIFIED IDEOGRAPH:'DEEA:57066:碅
CJK UNIFIED IDEOGRAPH:'DEEB:57067:碆
CJK UNIFIED IDEOGRAPH:'DEEC:57068:碡
CJK UNIFIED IDEOGRAPH:'DEED:57069:碃
CJK UNIFIED IDEOGRAPH:'DEEE:57070:硹
CJK UNIFIED IDEOGRAPH:'DEEF:57071:碙
CJK UNIFIED IDEOGRAPH:'DEF0:57072:碀
CJK UNIFIED IDEOGRAPH:'DEF1:57073:碖
CJK UNIFIED IDEOGRAPH:'DEF2:57074:硻
CJK UNIFIED IDEOGRAPH:'DEF3:57075:祼
CJK UNIFIED IDEOGRAPH:'DEF4:57076:禂
CJK UNIFIED IDEOGRAPH:'DEF5:57077:祽
CJK UNIFIED IDEOGRAPH:'DEF6:57078:祹
CJK UNIFIED IDEOGRAPH:'DEF7:57079:稑
CJK UNIFIED IDEOGRAPH:'DEF8:57080:稘
CJK UNIFIED IDEOGRAPH:'DEF9:57081:稙
CJK UNIFIED IDEOGRAPH:'DEFA:57082:稒
CJK UNIFIED IDEOGRAPH:'DEFB:57083:稗
CJK UNIFIED IDEOGRAPH:'DEFC:57084:稕
CJK UNIFIED IDEOGRAPH:'DEFD:57085:稢
CJK UNIFIED IDEOGRAPH:'DEFE:57086:稓
CJK UNIFIED IDEOGRAPH:'DF40:57152:稛
CJK UNIFIED IDEOGRAPH:'DF41:57153:稐
CJK UNIFIED IDEOGRAPH:'DF42:57154:窣
CJK UNIFIED IDEOGRAPH:'DF43:57155:窢
CJK UNIFIED IDEOGRAPH:'DF44:57156:窞
CJK UNIFIED IDEOGRAPH:'DF45:57157:竫
CJK UNIFIED IDEOGRAPH:'DF46:57158:筦
CJK UNIFIED IDEOGRAPH:'DF47:57159:筤
CJK UNIFIED IDEOGRAPH:'DF48:57160:筭
CJK UNIFIED IDEOGRAPH:'DF49:57161:筴
CJK UNIFIED IDEOGRAPH:'DF4A:57162:筩
CJK UNIFIED IDEOGRAPH:'DF4B:57163:筲
CJK UNIFIED IDEOGRAPH:'DF4C:57164:筥
CJK UNIFIED IDEOGRAPH:'DF4D:57165:筳
CJK UNIFIED IDEOGRAPH:'DF4E:57166:筱
CJK UNIFIED IDEOGRAPH:'DF4F:57167:筰
CJK UNIFIED IDEOGRAPH:'DF50:57168:筡
CJK UNIFIED IDEOGRAPH:'DF51:57169:筸
CJK UNIFIED IDEOGRAPH:'DF52:57170:筶
CJK UNIFIED IDEOGRAPH:'DF53:57171:筣
CJK UNIFIED IDEOGRAPH:'DF54:57172:粲
CJK UNIFIED IDEOGRAPH:'DF55:57173:粴
CJK UNIFIED IDEOGRAPH:'DF56:57174:粯
CJK UNIFIED IDEOGRAPH:'DF57:57175:綈
CJK UNIFIED IDEOGRAPH:'DF58:57176:綆
CJK UNIFIED IDEOGRAPH:'DF59:57177:綀
CJK UNIFIED IDEOGRAPH:'DF5A:57178:綍
CJK UNIFIED IDEOGRAPH:'DF5B:57179:絿
CJK UNIFIED IDEOGRAPH:'DF5C:57180:綅
CJK UNIFIED IDEOGRAPH:'DF5D:57181:絺
CJK UNIFIED IDEOGRAPH:'DF5E:57182:綎
CJK UNIFIED IDEOGRAPH:'DF5F:57183:絻
CJK UNIFIED IDEOGRAPH:'DF60:57184:綃
CJK UNIFIED IDEOGRAPH:'DF61:57185:絼
CJK UNIFIED IDEOGRAPH:'DF62:57186:綌
CJK UNIFIED IDEOGRAPH:'DF63:57187:綔
CJK UNIFIED IDEOGRAPH:'DF64:57188:綄
CJK UNIFIED IDEOGRAPH:'DF65:57189:絽
CJK UNIFIED IDEOGRAPH:'DF66:57190:綒
CJK UNIFIED IDEOGRAPH:'DF67:57191:罭
CJK UNIFIED IDEOGRAPH:'DF68:57192:罫
CJK UNIFIED IDEOGRAPH:'DF69:57193:罧
CJK UNIFIED IDEOGRAPH:'DF6A:57194:罨
CJK UNIFIED IDEOGRAPH:'DF6B:57195:罬
CJK UNIFIED IDEOGRAPH:'DF6C:57196:羦
CJK UNIFIED IDEOGRAPH:'DF6D:57197:羥
CJK UNIFIED IDEOGRAPH:'DF6E:57198:羧
CJK UNIFIED IDEOGRAPH:'DF6F:57199:翛
CJK UNIFIED IDEOGRAPH:'DF70:57200:翜
CJK UNIFIED IDEOGRAPH:'DF71:57201:耡
CJK UNIFIED IDEOGRAPH:'DF72:57202:腤
CJK UNIFIED IDEOGRAPH:'DF73:57203:腠
CJK UNIFIED IDEOGRAPH:'DF74:57204:腷
CJK UNIFIED IDEOGRAPH:'DF75:57205:腜
CJK UNIFIED IDEOGRAPH:'DF76:57206:腩
CJK UNIFIED IDEOGRAPH:'DF77:57207:腛
CJK UNIFIED IDEOGRAPH:'DF78:57208:腢
CJK UNIFIED IDEOGRAPH:'DF79:57209:腲
CJK UNIFIED IDEOGRAPH:'DF7A:57210:朡
CJK UNIFIED IDEOGRAPH:'DF7B:57211:腞
CJK UNIFIED IDEOGRAPH:'DF7C:57212:腶
CJK UNIFIED IDEOGRAPH:'DF7D:57213:腧
CJK UNIFIED IDEOGRAPH:'DF7E:57214:腯
CJK UNIFIED IDEOGRAPH:'DFA1:57249:腄
CJK UNIFIED IDEOGRAPH:'DFA2:57250:腡
CJK UNIFIED IDEOGRAPH:'DFA3:57251:舝
CJK UNIFIED IDEOGRAPH:'DFA4:57252:艉
CJK UNIFIED IDEOGRAPH:'DFA5:57253:艄
CJK UNIFIED IDEOGRAPH:'DFA6:57254:艀
CJK UNIFIED IDEOGRAPH:'DFA7:57255:艂
CJK UNIFIED IDEOGRAPH:'DFA8:57256:艅
CJK UNIFIED IDEOGRAPH:'DFA9:57257:蓱
CJK UNIFIED IDEOGRAPH:'DFAA:57258:萿
CJK UNIFIED IDEOGRAPH:'DFAB:57259:葖
CJK UNIFIED IDEOGRAPH:'DFAC:57260:葶
CJK UNIFIED IDEOGRAPH:'DFAD:57261:葹
CJK UNIFIED IDEOGRAPH:'DFAE:57262:蒏
CJK UNIFIED IDEOGRAPH:'DFAF:57263:蒍
CJK UNIFIED IDEOGRAPH:'DFB0:57264:葥
CJK UNIFIED IDEOGRAPH:'DFB1:57265:葑
CJK UNIFIED IDEOGRAPH:'DFB2:57266:葀
CJK UNIFIED IDEOGRAPH:'DFB3:57267:蒆
CJK UNIFIED IDEOGRAPH:'DFB4:57268:葧
CJK UNIFIED IDEOGRAPH:'DFB5:57269:萰
CJK UNIFIED IDEOGRAPH:'DFB6:57270:葍
CJK UNIFIED IDEOGRAPH:'DFB7:57271:葽
CJK UNIFIED IDEOGRAPH:'DFB8:57272:葚
CJK UNIFIED IDEOGRAPH:'DFB9:57273:葙
CJK UNIFIED IDEOGRAPH:'DFBA:57274:葴
CJK UNIFIED IDEOGRAPH:'DFBB:57275:葳
CJK UNIFIED IDEOGRAPH:'DFBC:57276:葝
CJK UNIFIED IDEOGRAPH:'DFBD:57277:蔇
CJK UNIFIED IDEOGRAPH:'DFBE:57278:葞
CJK UNIFIED IDEOGRAPH:'DFBF:57279:萷
CJK UNIFIED IDEOGRAPH:'DFC0:57280:萺
CJK UNIFIED IDEOGRAPH:'DFC1:57281:萴
CJK UNIFIED IDEOGRAPH:'DFC2:57282:葺
CJK UNIFIED IDEOGRAPH:'DFC3:57283:葃
CJK UNIFIED IDEOGRAPH:'DFC4:57284:葸
CJK UNIFIED IDEOGRAPH:'DFC5:57285:萲
CJK UNIFIED IDEOGRAPH:'DFC6:57286:葅
CJK UNIFIED IDEOGRAPH:'DFC7:57287:萩
CJK UNIFIED IDEOGRAPH:'DFC8:57288:菙
CJK UNIFIED IDEOGRAPH:'DFC9:57289:葋
CJK UNIFIED IDEOGRAPH:'DFCA:57290:萯
CJK UNIFIED IDEOGRAPH:'DFCB:57291:葂
CJK UNIFIED IDEOGRAPH:'DFCC:57292:萭
CJK UNIFIED IDEOGRAPH:'DFCD:57293:葟
CJK UNIFIED IDEOGRAPH:'DFCE:57294:葰
CJK UNIFIED IDEOGRAPH:'DFCF:57295:萹
CJK UNIFIED IDEOGRAPH:'DFD0:57296:葎
CJK UNIFIED IDEOGRAPH:'DFD1:57297:葌
CJK UNIFIED IDEOGRAPH:'DFD2:57298:葒
CJK UNIFIED IDEOGRAPH:'DFD3:57299:葯
CJK UNIFIED IDEOGRAPH:'DFD4:57300:蓅
CJK UNIFIED IDEOGRAPH:'DFD5:57301:蒎
CJK UNIFIED IDEOGRAPH:'DFD6:57302:萻
CJK UNIFIED IDEOGRAPH:'DFD7:57303:葇
CJK UNIFIED IDEOGRAPH:'DFD8:57304:萶
CJK UNIFIED IDEOGRAPH:'DFD9:57305:萳
CJK UNIFIED IDEOGRAPH:'DFDA:57306:葨
CJK UNIFIED IDEOGRAPH:'DFDB:57307:葾
CJK UNIFIED IDEOGRAPH:'DFDC:57308:葄
CJK UNIFIED IDEOGRAPH:'DFDD:57309:萫
CJK UNIFIED IDEOGRAPH:'DFDE:57310:葠
CJK UNIFIED IDEOGRAPH:'DFDF:57311:葔
CJK UNIFIED IDEOGRAPH:'DFE0:57312:葮
CJK UNIFIED IDEOGRAPH:'DFE1:57313:葐
CJK UNIFIED IDEOGRAPH:'DFE2:57314:蜋
CJK UNIFIED IDEOGRAPH:'DFE3:57315:蜄
CJK UNIFIED IDEOGRAPH:'DFE4:57316:蛷
CJK UNIFIED IDEOGRAPH:'DFE5:57317:蜌
CJK UNIFIED IDEOGRAPH:'DFE6:57318:蛺
CJK UNIFIED IDEOGRAPH:'DFE7:57319:蛖
CJK UNIFIED IDEOGRAPH:'DFE8:57320:蛵
CJK UNIFIED IDEOGRAPH:'DFE9:57321:蝍
CJK UNIFIED IDEOGRAPH:'DFEA:57322:蛸
CJK UNIFIED IDEOGRAPH:'DFEB:57323:蜎
CJK UNIFIED IDEOGRAPH:'DFEC:57324:蜉
CJK UNIFIED IDEOGRAPH:'DFED:57325:蜁
CJK UNIFIED IDEOGRAPH:'DFEE:57326:蛶
CJK UNIFIED IDEOGRAPH:'DFEF:57327:蜍
CJK UNIFIED IDEOGRAPH:'DFF0:57328:蜅
CJK UNIFIED IDEOGRAPH:'DFF1:57329:裖
CJK UNIFIED IDEOGRAPH:'DFF2:57330:裋
CJK UNIFIED IDEOGRAPH:'DFF3:57331:裍
CJK UNIFIED IDEOGRAPH:'DFF4:57332:裎
CJK UNIFIED IDEOGRAPH:'DFF5:57333:裞
CJK UNIFIED IDEOGRAPH:'DFF6:57334:裛
CJK UNIFIED IDEOGRAPH:'DFF7:57335:裚
CJK UNIFIED IDEOGRAPH:'DFF8:57336:裌
CJK UNIFIED IDEOGRAPH:'DFF9:57337:裐
CJK UNIFIED IDEOGRAPH:'DFFA:57338:覅
CJK UNIFIED IDEOGRAPH:'DFFB:57339:覛
CJK UNIFIED IDEOGRAPH:'DFFC:57340:觟
CJK UNIFIED IDEOGRAPH:'DFFD:57341:觥
CJK UNIFIED IDEOGRAPH:'DFFE:57342:觤
CJK UNIFIED IDEOGRAPH:'E040:57408:觡
CJK UNIFIED IDEOGRAPH:'E041:57409:觠
CJK UNIFIED IDEOGRAPH:'E042:57410:觢
CJK UNIFIED IDEOGRAPH:'E043:57411:觜
CJK UNIFIED IDEOGRAPH:'E044:57412:触
CJK UNIFIED IDEOGRAPH:'E045:57413:詶
CJK UNIFIED IDEOGRAPH:'E046:57414:誆
CJK UNIFIED IDEOGRAPH:'E047:57415:詿
CJK UNIFIED IDEOGRAPH:'E048:57416:詡
CJK UNIFIED IDEOGRAPH:'E049:57417:訿
CJK UNIFIED IDEOGRAPH:'E04A:57418:詷
CJK UNIFIED IDEOGRAPH:'E04B:57419:誂
CJK UNIFIED IDEOGRAPH:'E04C:57420:誄
CJK UNIFIED IDEOGRAPH:'E04D:57421:詵
CJK UNIFIED IDEOGRAPH:'E04E:57422:誃
CJK UNIFIED IDEOGRAPH:'E04F:57423:誁
CJK UNIFIED IDEOGRAPH:'E050:57424:詴
CJK UNIFIED IDEOGRAPH:'E051:57425:詺
CJK UNIFIED IDEOGRAPH:'E052:57426:谼
CJK UNIFIED IDEOGRAPH:'E053:57427:豋
CJK UNIFIED IDEOGRAPH:'E054:57428:豊
CJK UNIFIED IDEOGRAPH:'E055:57429:豥
CJK UNIFIED IDEOGRAPH:'E056:57430:豤
CJK UNIFIED IDEOGRAPH:'E057:57431:豦
CJK UNIFIED IDEOGRAPH:'E058:57432:貆
CJK UNIFIED IDEOGRAPH:'E059:57433:貄
CJK UNIFIED IDEOGRAPH:'E05A:57434:貅
CJK UNIFIED IDEOGRAPH:'E05B:57435:賌
CJK UNIFIED IDEOGRAPH:'E05C:57436:赨
CJK UNIFIED IDEOGRAPH:'E05D:57437:赩
CJK UNIFIED IDEOGRAPH:'E05E:57438:趑
CJK UNIFIED IDEOGRAPH:'E05F:57439:趌
CJK UNIFIED IDEOGRAPH:'E060:57440:趎
CJK UNIFIED IDEOGRAPH:'E061:57441:趏
CJK UNIFIED IDEOGRAPH:'E062:57442:趍
CJK UNIFIED IDEOGRAPH:'E063:57443:趓
CJK UNIFIED IDEOGRAPH:'E064:57444:趔
CJK UNIFIED IDEOGRAPH:'E065:57445:趐
CJK UNIFIED IDEOGRAPH:'E066:57446:趒
CJK UNIFIED IDEOGRAPH:'E067:57447:跰
CJK UNIFIED IDEOGRAPH:'E068:57448:跠
CJK UNIFIED IDEOGRAPH:'E069:57449:跬
CJK UNIFIED IDEOGRAPH:'E06A:57450:跱
CJK UNIFIED IDEOGRAPH:'E06B:57451:跮
CJK UNIFIED IDEOGRAPH:'E06C:57452:跐
CJK UNIFIED IDEOGRAPH:'E06D:57453:跩
CJK UNIFIED IDEOGRAPH:'E06E:57454:跣
CJK UNIFIED IDEOGRAPH:'E06F:57455:跢
CJK UNIFIED IDEOGRAPH:'E070:57456:跧
CJK UNIFIED IDEOGRAPH:'E071:57457:跲
CJK UNIFIED IDEOGRAPH:'E072:57458:跫
CJK UNIFIED IDEOGRAPH:'E073:57459:跴
CJK UNIFIED IDEOGRAPH:'E074:57460:輆
CJK UNIFIED IDEOGRAPH:'E075:57461:軿
CJK UNIFIED IDEOGRAPH:'E076:57462:輁
CJK UNIFIED IDEOGRAPH:'E077:57463:輀
CJK UNIFIED IDEOGRAPH:'E078:57464:輅
CJK UNIFIED IDEOGRAPH:'E079:57465:輇
CJK UNIFIED IDEOGRAPH:'E07A:57466:輈
CJK UNIFIED IDEOGRAPH:'E07B:57467:輂
CJK UNIFIED IDEOGRAPH:'E07C:57468:輋
CJK UNIFIED IDEOGRAPH:'E07D:57469:遒
CJK UNIFIED IDEOGRAPH:'E07E:57470:逿
CJK UNIFIED IDEOGRAPH:'E0A1:57505:遄
CJK UNIFIED IDEOGRAPH:'E0A2:57506:遉
CJK UNIFIED IDEOGRAPH:'E0A3:57507:逽
CJK UNIFIED IDEOGRAPH:'E0A4:57508:鄐
CJK UNIFIED IDEOGRAPH:'E0A5:57509:鄍
CJK UNIFIED IDEOGRAPH:'E0A6:57510:鄏
CJK UNIFIED IDEOGRAPH:'E0A7:57511:鄑
CJK UNIFIED IDEOGRAPH:'E0A8:57512:鄖
CJK UNIFIED IDEOGRAPH:'E0A9:57513:鄔
CJK UNIFIED IDEOGRAPH:'E0AA:57514:鄋
CJK UNIFIED IDEOGRAPH:'E0AB:57515:鄎
CJK UNIFIED IDEOGRAPH:'E0AC:57516:酮
CJK UNIFIED IDEOGRAPH:'E0AD:57517:酯
CJK UNIFIED IDEOGRAPH:'E0AE:57518:鉈
CJK UNIFIED IDEOGRAPH:'E0AF:57519:鉒
CJK UNIFIED IDEOGRAPH:'E0B0:57520:鈰
CJK UNIFIED IDEOGRAPH:'E0B1:57521:鈺
CJK UNIFIED IDEOGRAPH:'E0B2:57522:鉦
CJK UNIFIED IDEOGRAPH:'E0B3:57523:鈳
CJK UNIFIED IDEOGRAPH:'E0B4:57524:鉥
CJK UNIFIED IDEOGRAPH:'E0B5:57525:鉞
CJK UNIFIED IDEOGRAPH:'E0B6:57526:銃
CJK UNIFIED IDEOGRAPH:'E0B7:57527:鈮
CJK UNIFIED IDEOGRAPH:'E0B8:57528:鉊
CJK UNIFIED IDEOGRAPH:'E0B9:57529:鉆
CJK UNIFIED IDEOGRAPH:'E0BA:57530:鉭
CJK UNIFIED IDEOGRAPH:'E0BB:57531:鉬
CJK UNIFIED IDEOGRAPH:'E0BC:57532:鉏
CJK UNIFIED IDEOGRAPH:'E0BD:57533:鉠
CJK UNIFIED IDEOGRAPH:'E0BE:57534:鉧
CJK UNIFIED IDEOGRAPH:'E0BF:57535:鉯
CJK UNIFIED IDEOGRAPH:'E0C0:57536:鈶
CJK UNIFIED IDEOGRAPH:'E0C1:57537:鉡
CJK UNIFIED IDEOGRAPH:'E0C2:57538:鉰
CJK UNIFIED IDEOGRAPH:'E0C3:57539:鈱
CJK UNIFIED IDEOGRAPH:'E0C4:57540:鉔
CJK UNIFIED IDEOGRAPH:'E0C5:57541:鉣
CJK UNIFIED IDEOGRAPH:'E0C6:57542:鉐
CJK UNIFIED IDEOGRAPH:'E0C7:57543:鉲
CJK UNIFIED IDEOGRAPH:'E0C8:57544:鉎
CJK UNIFIED IDEOGRAPH:'E0C9:57545:鉓
CJK UNIFIED IDEOGRAPH:'E0CA:57546:鉌
CJK UNIFIED IDEOGRAPH:'E0CB:57547:鉖
CJK UNIFIED IDEOGRAPH:'E0CC:57548:鈲
CJK UNIFIED IDEOGRAPH:'E0CD:57549:閟
CJK UNIFIED IDEOGRAPH:'E0CE:57550:閜
CJK UNIFIED IDEOGRAPH:'E0CF:57551:閞
CJK UNIFIED IDEOGRAPH:'E0D0:57552:閛
CJK UNIFIED IDEOGRAPH:'E0D1:57553:隒
CJK UNIFIED IDEOGRAPH:'E0D2:57554:隓
CJK UNIFIED IDEOGRAPH:'E0D3:57555:隑
CJK UNIFIED IDEOGRAPH:'E0D4:57556:隗
CJK UNIFIED IDEOGRAPH:'E0D5:57557:雎
CJK UNIFIED IDEOGRAPH:'E0D6:57558:雺
CJK UNIFIED IDEOGRAPH:'E0D7:57559:雽
CJK UNIFIED IDEOGRAPH:'E0D8:57560:雸
CJK UNIFIED IDEOGRAPH:'E0D9:57561:雵
CJK UNIFIED IDEOGRAPH:'E0DA:57562:靳
CJK UNIFIED IDEOGRAPH:'E0DB:57563:靷
CJK UNIFIED IDEOGRAPH:'E0DC:57564:靸
CJK UNIFIED IDEOGRAPH:'E0DD:57565:靲
CJK UNIFIED IDEOGRAPH:'E0DE:57566:頏
CJK UNIFIED IDEOGRAPH:'E0DF:57567:頍
CJK UNIFIED IDEOGRAPH:'E0E0:57568:頎
CJK UNIFIED IDEOGRAPH:'E0E1:57569:颬
CJK UNIFIED IDEOGRAPH:'E0E2:57570:飶
CJK UNIFIED IDEOGRAPH:'E0E3:57571:飹
CJK UNIFIED IDEOGRAPH:'E0E4:57572:馯
CJK UNIFIED IDEOGRAPH:'E0E5:57573:馲
CJK UNIFIED IDEOGRAPH:'E0E6:57574:馰
CJK UNIFIED IDEOGRAPH:'E0E7:57575:馵
CJK UNIFIED IDEOGRAPH:'E0E8:57576:骭
CJK UNIFIED IDEOGRAPH:'E0E9:57577:骫
CJK UNIFIED IDEOGRAPH:'E0EA:57578:魛
CJK UNIFIED IDEOGRAPH:'E0EB:57579:鳪
CJK UNIFIED IDEOGRAPH:'E0EC:57580:鳭
CJK UNIFIED IDEOGRAPH:'E0ED:57581:鳧
CJK UNIFIED IDEOGRAPH:'E0EE:57582:麀
CJK UNIFIED IDEOGRAPH:'E0EF:57583:黽
CJK UNIFIED IDEOGRAPH:'E0F0:57584:僦
CJK UNIFIED IDEOGRAPH:'E0F1:57585:僔
CJK UNIFIED IDEOGRAPH:'E0F2:57586:僗
CJK UNIFIED IDEOGRAPH:'E0F3:57587:僨
CJK UNIFIED IDEOGRAPH:'E0F4:57588:僳
CJK UNIFIED IDEOGRAPH:'E0F5:57589:僛
CJK UNIFIED IDEOGRAPH:'E0F6:57590:僪
CJK UNIFIED IDEOGRAPH:'E0F7:57591:僝
CJK UNIFIED IDEOGRAPH:'E0F8:57592:僤
CJK UNIFIED IDEOGRAPH:'E0F9:57593:僓
CJK UNIFIED IDEOGRAPH:'E0FA:57594:僬
CJK UNIFIED IDEOGRAPH:'E0FB:57595:僰
CJK UNIFIED IDEOGRAPH:'E0FC:57596:僯
CJK UNIFIED IDEOGRAPH:'E0FD:57597:僣
CJK UNIFIED IDEOGRAPH:'E0FE:57598:僠
CJK UNIFIED IDEOGRAPH:'E140:57664:凘
CJK UNIFIED IDEOGRAPH:'E141:57665:劀
CJK UNIFIED IDEOGRAPH:'E142:57666:劁
CJK UNIFIED IDEOGRAPH:'E143:57667:勩
CJK UNIFIED IDEOGRAPH:'E144:57668:勫
CJK UNIFIED IDEOGRAPH:'E145:57669:匰
CJK UNIFIED IDEOGRAPH:'E146:57670:厬
CJK UNIFIED IDEOGRAPH:'E147:57671:嘧
CJK UNIFIED IDEOGRAPH:'E148:57672:嘕
CJK UNIFIED IDEOGRAPH:'E149:57673:嘌
CJK UNIFIED IDEOGRAPH:'E14A:57674:嘒
CJK UNIFIED IDEOGRAPH:'E14B:57675:嗼
CJK UNIFIED IDEOGRAPH:'E14C:57676:嘏
CJK UNIFIED IDEOGRAPH:'E14D:57677:嘜
CJK UNIFIED IDEOGRAPH:'E14E:57678:嘁
CJK UNIFIED IDEOGRAPH:'E14F:57679:嘓
CJK UNIFIED IDEOGRAPH:'E150:57680:嘂
CJK UNIFIED IDEOGRAPH:'E151:57681:嗺
CJK UNIFIED IDEOGRAPH:'E152:57682:嘝
CJK UNIFIED IDEOGRAPH:'E153:57683:嘄
CJK UNIFIED IDEOGRAPH:'E154:57684:嗿
CJK UNIFIED IDEOGRAPH:'E155:57685:嗹
CJK UNIFIED IDEOGRAPH:'E156:57686:墉
CJK UNIFIED IDEOGRAPH:'E157:57687:塼
CJK UNIFIED IDEOGRAPH:'E158:57688:墐
CJK UNIFIED IDEOGRAPH:'E159:57689:墘
CJK UNIFIED IDEOGRAPH:'E15A:57690:墆
CJK UNIFIED IDEOGRAPH:'E15B:57691:墁
CJK UNIFIED IDEOGRAPH:'E15C:57692:塿
CJK UNIFIED IDEOGRAPH:'E15D:57693:塴
CJK UNIFIED IDEOGRAPH:'E15E:57694:墋
CJK UNIFIED IDEOGRAPH:'E15F:57695:塺
CJK UNIFIED IDEOGRAPH:'E160:57696:墇
CJK UNIFIED IDEOGRAPH:'E161:57697:墑
CJK UNIFIED IDEOGRAPH:'E162:57698:墎
CJK UNIFIED IDEOGRAPH:'E163:57699:塶
CJK UNIFIED IDEOGRAPH:'E164:57700:墂
CJK UNIFIED IDEOGRAPH:'E165:57701:墈
CJK UNIFIED IDEOGRAPH:'E166:57702:塻
CJK UNIFIED IDEOGRAPH:'E167:57703:墔
CJK UNIFIED IDEOGRAPH:'E168:57704:墏
CJK UNIFIED IDEOGRAPH:'E169:57705:壾
CJK UNIFIED IDEOGRAPH:'E16A:57706:奫
CJK UNIFIED IDEOGRAPH:'E16B:57707:嫜
CJK UNIFIED IDEOGRAPH:'E16C:57708:嫮
CJK UNIFIED IDEOGRAPH:'E16D:57709:嫥
CJK UNIFIED IDEOGRAPH:'E16E:57710:嫕
CJK UNIFIED IDEOGRAPH:'E16F:57711:嫪
CJK UNIFIED IDEOGRAPH:'E170:57712:嫚
CJK UNIFIED IDEOGRAPH:'E171:57713:嫭
CJK UNIFIED IDEOGRAPH:'E172:57714:嫫
CJK UNIFIED IDEOGRAPH:'E173:57715:嫳
CJK UNIFIED IDEOGRAPH:'E174:57716:嫢
CJK UNIFIED IDEOGRAPH:'E175:57717:嫠
CJK UNIFIED IDEOGRAPH:'E176:57718:嫛
CJK UNIFIED IDEOGRAPH:'E177:57719:嫬
CJK UNIFIED IDEOGRAPH:'E178:57720:嫞
CJK UNIFIED IDEOGRAPH:'E179:57721:嫝
CJK UNIFIED IDEOGRAPH:'E17A:57722:嫙
CJK UNIFIED IDEOGRAPH:'E17B:57723:嫨
CJK UNIFIED IDEOGRAPH:'E17C:57724:嫟
CJK UNIFIED IDEOGRAPH:'E17D:57725:孷
CJK UNIFIED IDEOGRAPH:'E17E:57726:寠
CJK UNIFIED IDEOGRAPH:'E1A1:57761:寣
CJK UNIFIED IDEOGRAPH:'E1A2:57762:屣
CJK UNIFIED IDEOGRAPH:'E1A3:57763:嶂
CJK UNIFIED IDEOGRAPH:'E1A4:57764:嶀
CJK UNIFIED IDEOGRAPH:'E1A5:57765:嵽
CJK UNIFIED IDEOGRAPH:'E1A6:57766:嶆
CJK UNIFIED IDEOGRAPH:'E1A7:57767:嵺
CJK UNIFIED IDEOGRAPH:'E1A8:57768:嶁
CJK UNIFIED IDEOGRAPH:'E1A9:57769:嵷
CJK UNIFIED IDEOGRAPH:'E1AA:57770:嶊
CJK UNIFIED IDEOGRAPH:'E1AB:57771:嶉
CJK UNIFIED IDEOGRAPH:'E1AC:57772:嶈
CJK UNIFIED IDEOGRAPH:'E1AD:57773:嵾
CJK UNIFIED IDEOGRAPH:'E1AE:57774:嵼
CJK UNIFIED IDEOGRAPH:'E1AF:57775:嶍
CJK UNIFIED IDEOGRAPH:'E1B0:57776:嵹
CJK UNIFIED IDEOGRAPH:'E1B1:57777:嵿
CJK UNIFIED IDEOGRAPH:'E1B2:57778:幘
CJK UNIFIED IDEOGRAPH:'E1B3:57779:幙
CJK UNIFIED IDEOGRAPH:'E1B4:57780:幓
CJK UNIFIED IDEOGRAPH:'E1B5:57781:廘
CJK UNIFIED IDEOGRAPH:'E1B6:57782:廑
CJK UNIFIED IDEOGRAPH:'E1B7:57783:廗
CJK UNIFIED IDEOGRAPH:'E1B8:57784:廎
CJK UNIFIED IDEOGRAPH:'E1B9:57785:廜
CJK UNIFIED IDEOGRAPH:'E1BA:57786:廕
CJK UNIFIED IDEOGRAPH:'E1BB:57787:廙
CJK UNIFIED IDEOGRAPH:'E1BC:57788:廒
CJK UNIFIED IDEOGRAPH:'E1BD:57789:廔
CJK UNIFIED IDEOGRAPH:'E1BE:57790:彄
CJK UNIFIED IDEOGRAPH:'E1BF:57791:彃
CJK UNIFIED IDEOGRAPH:'E1C0:57792:彯
CJK UNIFIED IDEOGRAPH:'E1C1:57793:徶
CJK UNIFIED IDEOGRAPH:'E1C2:57794:愬
CJK UNIFIED IDEOGRAPH:'E1C3:57795:愨
CJK UNIFIED IDEOGRAPH:'E1C4:57796:慁
CJK UNIFIED IDEOGRAPH:'E1C5:57797:慞
CJK UNIFIED IDEOGRAPH:'E1C6:57798:慱
CJK UNIFIED IDEOGRAPH:'E1C7:57799:慳
CJK UNIFIED IDEOGRAPH:'E1C8:57800:慒
CJK UNIFIED IDEOGRAPH:'E1C9:57801:慓
CJK UNIFIED IDEOGRAPH:'E1CA:57802:慲
CJK UNIFIED IDEOGRAPH:'E1CB:57803:慬
CJK UNIFIED IDEOGRAPH:'E1CC:57804:憀
CJK UNIFIED IDEOGRAPH:'E1CD:57805:慴
CJK UNIFIED IDEOGRAPH:'E1CE:57806:慔
CJK UNIFIED IDEOGRAPH:'E1CF:57807:慺
CJK UNIFIED IDEOGRAPH:'E1D0:57808:慛
CJK UNIFIED IDEOGRAPH:'E1D1:57809:慥
CJK UNIFIED IDEOGRAPH:'E1D2:57810:愻
CJK UNIFIED IDEOGRAPH:'E1D3:57811:慪
CJK UNIFIED IDEOGRAPH:'E1D4:57812:慡
CJK UNIFIED IDEOGRAPH:'E1D5:57813:慖
CJK UNIFIED IDEOGRAPH:'E1D6:57814:戩
CJK UNIFIED IDEOGRAPH:'E1D7:57815:戧
CJK UNIFIED IDEOGRAPH:'E1D8:57816:戫
CJK UNIFIED IDEOGRAPH:'E1D9:57817:搫
CJK UNIFIED IDEOGRAPH:'E1DA:57818:摍
CJK UNIFIED IDEOGRAPH:'E1DB:57819:摛
CJK UNIFIED IDEOGRAPH:'E1DC:57820:摝
CJK UNIFIED IDEOGRAPH:'E1DD:57821:摴
CJK UNIFIED IDEOGRAPH:'E1DE:57822:摶
CJK UNIFIED IDEOGRAPH:'E1DF:57823:摲
CJK UNIFIED IDEOGRAPH:'E1E0:57824:摳
CJK UNIFIED IDEOGRAPH:'E1E1:57825:摽
CJK UNIFIED IDEOGRAPH:'E1E2:57826:摵
CJK UNIFIED IDEOGRAPH:'E1E3:57827:摦
CJK UNIFIED IDEOGRAPH:'E1E4:57828:撦
CJK UNIFIED IDEOGRAPH:'E1E5:57829:摎
CJK UNIFIED IDEOGRAPH:'E1E6:57830:撂
CJK UNIFIED IDEOGRAPH:'E1E7:57831:摞
CJK UNIFIED IDEOGRAPH:'E1E8:57832:摜
CJK UNIFIED IDEOGRAPH:'E1E9:57833:摋
CJK UNIFIED IDEOGRAPH:'E1EA:57834:摓
CJK UNIFIED IDEOGRAPH:'E1EB:57835:摠
CJK UNIFIED IDEOGRAPH:'E1EC:57836:摐
CJK UNIFIED IDEOGRAPH:'E1ED:57837:摿
CJK UNIFIED IDEOGRAPH:'E1EE:57838:搿
CJK UNIFIED IDEOGRAPH:'E1EF:57839:摬
CJK UNIFIED IDEOGRAPH:'E1F0:57840:摫
CJK UNIFIED IDEOGRAPH:'E1F1:57841:摙
CJK UNIFIED IDEOGRAPH:'E1F2:57842:摥
CJK UNIFIED IDEOGRAPH:'E1F3:57843:摷
CJK UNIFIED IDEOGRAPH:'E1F4:57844:敳
CJK UNIFIED IDEOGRAPH:'E1F5:57845:斠
CJK UNIFIED IDEOGRAPH:'E1F6:57846:暡
CJK UNIFIED IDEOGRAPH:'E1F7:57847:暠
CJK UNIFIED IDEOGRAPH:'E1F8:57848:暟
CJK UNIFIED IDEOGRAPH:'E1F9:57849:朅
CJK UNIFIED IDEOGRAPH:'E1FA:57850:朄
CJK UNIFIED IDEOGRAPH:'E1FB:57851:朢
CJK UNIFIED IDEOGRAPH:'E1FC:57852:榱
CJK UNIFIED IDEOGRAPH:'E1FD:57853:榶
CJK UNIFIED IDEOGRAPH:'E1FE:57854:槉
CJK UNIFIED IDEOGRAPH:'E240:57920:榠
CJK UNIFIED IDEOGRAPH:'E241:57921:槎
CJK UNIFIED IDEOGRAPH:'E242:57922:榖
CJK UNIFIED IDEOGRAPH:'E243:57923:榰
CJK UNIFIED IDEOGRAPH:'E244:57924:榬
CJK UNIFIED IDEOGRAPH:'E245:57925:榼
CJK UNIFIED IDEOGRAPH:'E246:57926:榑
CJK UNIFIED IDEOGRAPH:'E247:57927:榙
CJK UNIFIED IDEOGRAPH:'E248:57928:榎
CJK UNIFIED IDEOGRAPH:'E249:57929:榧
CJK UNIFIED IDEOGRAPH:'E24A:57930:榍
CJK UNIFIED IDEOGRAPH:'E24B:57931:榩
CJK UNIFIED IDEOGRAPH:'E24C:57932:榾
CJK UNIFIED IDEOGRAPH:'E24D:57933:榯
CJK UNIFIED IDEOGRAPH:'E24E:57934:榿
CJK UNIFIED IDEOGRAPH:'E24F:57935:槄
CJK UNIFIED IDEOGRAPH:'E250:57936:榽
CJK UNIFIED IDEOGRAPH:'E251:57937:榤
CJK UNIFIED IDEOGRAPH:'E252:57938:槔
CJK UNIFIED IDEOGRAPH:'E253:57939:榹
CJK UNIFIED IDEOGRAPH:'E254:57940:槊
CJK UNIFIED IDEOGRAPH:'E255:57941:榚
CJK UNIFIED IDEOGRAPH:'E256:57942:槏
CJK UNIFIED IDEOGRAPH:'E257:57943:榳
CJK UNIFIED IDEOGRAPH:'E258:57944:榓
CJK UNIFIED IDEOGRAPH:'E259:57945:榪
CJK UNIFIED IDEOGRAPH:'E25A:57946:榡
CJK UNIFIED IDEOGRAPH:'E25B:57947:榞
CJK UNIFIED IDEOGRAPH:'E25C:57948:槙
CJK UNIFIED IDEOGRAPH:'E25D:57949:榗
CJK UNIFIED IDEOGRAPH:'E25E:57950:榐
CJK UNIFIED IDEOGRAPH:'E25F:57951:槂
CJK UNIFIED IDEOGRAPH:'E260:57952:榵
CJK UNIFIED IDEOGRAPH:'E261:57953:榥
CJK UNIFIED IDEOGRAPH:'E262:57954:槆
CJK UNIFIED IDEOGRAPH:'E263:57955:歊
CJK UNIFIED IDEOGRAPH:'E264:57956:歍
CJK UNIFIED IDEOGRAPH:'E265:57957:歋
CJK UNIFIED IDEOGRAPH:'E266:57958:殞
CJK UNIFIED IDEOGRAPH:'E267:57959:殟
CJK UNIFIED IDEOGRAPH:'E268:57960:殠
CJK UNIFIED IDEOGRAPH:'E269:57961:毃
CJK UNIFIED IDEOGRAPH:'E26A:57962:毄
CJK UNIFIED IDEOGRAPH:'E26B:57963:毾
CJK UNIFIED IDEOGRAPH:'E26C:57964:滎
CJK UNIFIED IDEOGRAPH:'E26D:57965:滵
CJK UNIFIED IDEOGRAPH:'E26E:57966:滱
CJK UNIFIED IDEOGRAPH:'E26F:57967:漃
CJK UNIFIED IDEOGRAPH:'E270:57968:漥
CJK UNIFIED IDEOGRAPH:'E271:57969:滸
CJK UNIFIED IDEOGRAPH:'E272:57970:漷
CJK UNIFIED IDEOGRAPH:'E273:57971:滻
CJK UNIFIED IDEOGRAPH:'E274:57972:漮
CJK UNIFIED IDEOGRAPH:'E275:57973:漉
CJK UNIFIED IDEOGRAPH:'E276:57974:潎
CJK UNIFIED IDEOGRAPH:'E277:57975:漙
CJK UNIFIED IDEOGRAPH:'E278:57976:漚
CJK UNIFIED IDEOGRAPH:'E279:57977:漧
CJK UNIFIED IDEOGRAPH:'E27A:57978:漘
CJK UNIFIED IDEOGRAPH:'E27B:57979:漻
CJK UNIFIED IDEOGRAPH:'E27C:57980:漒
CJK UNIFIED IDEOGRAPH:'E27D:57981:滭
CJK UNIFIED IDEOGRAPH:'E27E:57982:漊
CJK UNIFIED IDEOGRAPH:'E2A1:58017:漶
CJK UNIFIED IDEOGRAPH:'E2A2:58018:潳
CJK UNIFIED IDEOGRAPH:'E2A3:58019:滹
CJK UNIFIED IDEOGRAPH:'E2A4:58020:滮
CJK UNIFIED IDEOGRAPH:'E2A5:58021:漭
CJK UNIFIED IDEOGRAPH:'E2A6:58022:潀
CJK UNIFIED IDEOGRAPH:'E2A7:58023:漰
CJK UNIFIED IDEOGRAPH:'E2A8:58024:漼
CJK UNIFIED IDEOGRAPH:'E2A9:58025:漵
CJK UNIFIED IDEOGRAPH:'E2AA:58026:滫
CJK UNIFIED IDEOGRAPH:'E2AB:58027:漇
CJK UNIFIED IDEOGRAPH:'E2AC:58028:漎
CJK UNIFIED IDEOGRAPH:'E2AD:58029:潃
CJK UNIFIED IDEOGRAPH:'E2AE:58030:漅
CJK UNIFIED IDEOGRAPH:'E2AF:58031:滽
CJK UNIFIED IDEOGRAPH:'E2B0:58032:滶
CJK UNIFIED IDEOGRAPH:'E2B1:58033:漹
CJK UNIFIED IDEOGRAPH:'E2B2:58034:漜
CJK UNIFIED IDEOGRAPH:'E2B3:58035:滼
CJK UNIFIED IDEOGRAPH:'E2B4:58036:漺
CJK UNIFIED IDEOGRAPH:'E2B5:58037:漟
CJK UNIFIED IDEOGRAPH:'E2B6:58038:漍
CJK UNIFIED IDEOGRAPH:'E2B7:58039:漞
CJK UNIFIED IDEOGRAPH:'E2B8:58040:漈
CJK UNIFIED IDEOGRAPH:'E2B9:58041:漡
CJK UNIFIED IDEOGRAPH:'E2BA:58042:熇
CJK UNIFIED IDEOGRAPH:'E2BB:58043:熐
CJK UNIFIED IDEOGRAPH:'E2BC:58044:熉
CJK UNIFIED IDEOGRAPH:'E2BD:58045:熀
CJK UNIFIED IDEOGRAPH:'E2BE:58046:熅
CJK UNIFIED IDEOGRAPH:'E2BF:58047:熂
CJK UNIFIED IDEOGRAPH:'E2C0:58048:熏
CJK UNIFIED IDEOGRAPH:'E2C1:58049:煻
CJK UNIFIED IDEOGRAPH:'E2C2:58050:熆
CJK UNIFIED IDEOGRAPH:'E2C3:58051:熁
CJK UNIFIED IDEOGRAPH:'E2C4:58052:熗
CJK UNIFIED IDEOGRAPH:'E2C5:58053:牄
CJK UNIFIED IDEOGRAPH:'E2C6:58054:牓
CJK UNIFIED IDEOGRAPH:'E2C7:58055:犗
CJK UNIFIED IDEOGRAPH:'E2C8:58056:犕
CJK UNIFIED IDEOGRAPH:'E2C9:58057:犓
CJK UNIFIED IDEOGRAPH:'E2CA:58058:獃
CJK UNIFIED IDEOGRAPH:'E2CB:58059:獍
CJK UNIFIED IDEOGRAPH:'E2CC:58060:獑
CJK UNIFIED IDEOGRAPH:'E2CD:58061:獌
CJK UNIFIED IDEOGRAPH:'E2CE:58062:瑢
CJK UNIFIED IDEOGRAPH:'E2CF:58063:瑳
CJK UNIFIED IDEOGRAPH:'E2D0:58064:瑱
CJK UNIFIED IDEOGRAPH:'E2D1:58065:瑵
CJK UNIFIED IDEOGRAPH:'E2D2:58066:瑲
CJK UNIFIED IDEOGRAPH:'E2D3:58067:瑧
CJK UNIFIED IDEOGRAPH:'E2D4:58068:瑮
CJK UNIFIED IDEOGRAPH:'E2D5:58069:甀
CJK UNIFIED IDEOGRAPH:'E2D6:58070:甂
CJK UNIFIED IDEOGRAPH:'E2D7:58071:甃
CJK UNIFIED IDEOGRAPH:'E2D8:58072:畽
CJK UNIFIED IDEOGRAPH:'E2D9:58073:疐
CJK UNIFIED IDEOGRAPH:'E2DA:58074:瘖
CJK UNIFIED IDEOGRAPH:'E2DB:58075:瘈
CJK UNIFIED IDEOGRAPH:'E2DC:58076:瘌
CJK UNIFIED IDEOGRAPH:'E2DD:58077:瘕
CJK UNIFIED IDEOGRAPH:'E2DE:58078:瘑
CJK UNIFIED IDEOGRAPH:'E2DF:58079:瘊
CJK UNIFIED IDEOGRAPH:'E2E0:58080:瘔
CJK UNIFIED IDEOGRAPH:'E2E1:58081:皸
CJK UNIFIED IDEOGRAPH:'E2E2:58082:瞁
CJK UNIFIED IDEOGRAPH:'E2E3:58083:睼
CJK UNIFIED IDEOGRAPH:'E2E4:58084:瞅
CJK UNIFIED IDEOGRAPH:'E2E5:58085:瞂
CJK UNIFIED IDEOGRAPH:'E2E6:58086:睮
CJK UNIFIED IDEOGRAPH:'E2E7:58087:瞀
CJK UNIFIED IDEOGRAPH:'E2E8:58088:睯
CJK UNIFIED IDEOGRAPH:'E2E9:58089:睾
CJK UNIFIED IDEOGRAPH:'E2EA:58090:瞃
CJK UNIFIED IDEOGRAPH:'E2EB:58091:碲
CJK UNIFIED IDEOGRAPH:'E2EC:58092:碪
CJK UNIFIED IDEOGRAPH:'E2ED:58093:碴
CJK UNIFIED IDEOGRAPH:'E2EE:58094:碭
CJK UNIFIED IDEOGRAPH:'E2EF:58095:碨
CJK UNIFIED IDEOGRAPH:'E2F0:58096:硾
CJK UNIFIED IDEOGRAPH:'E2F1:58097:碫
CJK UNIFIED IDEOGRAPH:'E2F2:58098:碞
CJK UNIFIED IDEOGRAPH:'E2F3:58099:碥
CJK UNIFIED IDEOGRAPH:'E2F4:58100:碠
CJK UNIFIED IDEOGRAPH:'E2F5:58101:碬
CJK UNIFIED IDEOGRAPH:'E2F6:58102:碢
CJK UNIFIED IDEOGRAPH:'E2F7:58103:碤
CJK UNIFIED IDEOGRAPH:'E2F8:58104:禘
CJK UNIFIED IDEOGRAPH:'E2F9:58105:禊
CJK UNIFIED IDEOGRAPH:'E2FA:58106:禋
CJK UNIFIED IDEOGRAPH:'E2FB:58107:禖
CJK UNIFIED IDEOGRAPH:'E2FC:58108:禕
CJK UNIFIED IDEOGRAPH:'E2FD:58109:禔
CJK UNIFIED IDEOGRAPH:'E2FE:58110:禓
CJK UNIFIED IDEOGRAPH:'E340:58176:禗
CJK UNIFIED IDEOGRAPH:'E341:58177:禈
CJK UNIFIED IDEOGRAPH:'E342:58178:禒
CJK UNIFIED IDEOGRAPH:'E343:58179:禐
CJK UNIFIED IDEOGRAPH:'E344:58180:稫
CJK UNIFIED IDEOGRAPH:'E345:58181:穊
CJK UNIFIED IDEOGRAPH:'E346:58182:稰
CJK UNIFIED IDEOGRAPH:'E347:58183:稯
CJK UNIFIED IDEOGRAPH:'E348:58184:稨
CJK UNIFIED IDEOGRAPH:'E349:58185:稦
CJK UNIFIED IDEOGRAPH:'E34A:58186:窨
CJK UNIFIED IDEOGRAPH:'E34B:58187:窫
CJK UNIFIED IDEOGRAPH:'E34C:58188:窬
CJK UNIFIED IDEOGRAPH:'E34D:58189:竮
CJK UNIFIED IDEOGRAPH:'E34E:58190:箈
CJK UNIFIED IDEOGRAPH:'E34F:58191:箜
CJK UNIFIED IDEOGRAPH:'E350:58192:箊
CJK UNIFIED IDEOGRAPH:'E351:58193:箑
CJK UNIFIED IDEOGRAPH:'E352:58194:箐
CJK UNIFIED IDEOGRAPH:'E353:58195:箖
CJK UNIFIED IDEOGRAPH:'E354:58196:箍
CJK UNIFIED IDEOGRAPH:'E355:58197:箌
CJK UNIFIED IDEOGRAPH:'E356:58198:箛
CJK UNIFIED IDEOGRAPH:'E357:58199:箎
CJK UNIFIED IDEOGRAPH:'E358:58200:箅
CJK UNIFIED IDEOGRAPH:'E359:58201:箘
CJK UNIFIED IDEOGRAPH:'E35A:58202:劄
CJK UNIFIED IDEOGRAPH:'E35B:58203:箙
CJK UNIFIED IDEOGRAPH:'E35C:58204:箤
CJK UNIFIED IDEOGRAPH:'E35D:58205:箂
CJK UNIFIED IDEOGRAPH:'E35E:58206:粻
CJK UNIFIED IDEOGRAPH:'E35F:58207:粿
CJK UNIFIED IDEOGRAPH:'E360:58208:粼
CJK UNIFIED IDEOGRAPH:'E361:58209:粺
CJK UNIFIED IDEOGRAPH:'E362:58210:綧
CJK UNIFIED IDEOGRAPH:'E363:58211:綷
CJK UNIFIED IDEOGRAPH:'E364:58212:緂
CJK UNIFIED IDEOGRAPH:'E365:58213:綣
CJK UNIFIED IDEOGRAPH:'E366:58214:綪
CJK UNIFIED IDEOGRAPH:'E367:58215:緁
CJK UNIFIED IDEOGRAPH:'E368:58216:緀
CJK UNIFIED IDEOGRAPH:'E369:58217:緅
CJK UNIFIED IDEOGRAPH:'E36A:58218:綝
CJK UNIFIED IDEOGRAPH:'E36B:58219:緎
CJK UNIFIED IDEOGRAPH:'E36C:58220:緄
CJK UNIFIED IDEOGRAPH:'E36D:58221:緆
CJK UNIFIED IDEOGRAPH:'E36E:58222:緋
CJK UNIFIED IDEOGRAPH:'E36F:58223:緌
CJK UNIFIED IDEOGRAPH:'E370:58224:綯
CJK UNIFIED IDEOGRAPH:'E371:58225:綹
CJK UNIFIED IDEOGRAPH:'E372:58226:綖
CJK UNIFIED IDEOGRAPH:'E373:58227:綼
CJK UNIFIED IDEOGRAPH:'E374:58228:綟
CJK UNIFIED IDEOGRAPH:'E375:58229:綦
CJK UNIFIED IDEOGRAPH:'E376:58230:綮
CJK UNIFIED IDEOGRAPH:'E377:58231:綩
CJK UNIFIED IDEOGRAPH:'E378:58232:綡
CJK UNIFIED IDEOGRAPH:'E379:58233:緉
CJK UNIFIED IDEOGRAPH:'E37A:58234:罳
CJK UNIFIED IDEOGRAPH:'E37B:58235:翢
CJK UNIFIED IDEOGRAPH:'E37C:58236:翣
CJK UNIFIED IDEOGRAPH:'E37D:58237:翥
CJK UNIFIED IDEOGRAPH:'E37E:58238:翞
CJK UNIFIED IDEOGRAPH:'E3A1:58273:耤
CJK UNIFIED IDEOGRAPH:'E3A2:58274:聝
CJK UNIFIED IDEOGRAPH:'E3A3:58275:聜
CJK UNIFIED IDEOGRAPH:'E3A4:58276:膉
CJK UNIFIED IDEOGRAPH:'E3A5:58277:膆
CJK UNIFIED IDEOGRAPH:'E3A6:58278:膃
CJK UNIFIED IDEOGRAPH:'E3A7:58279:膇
CJK UNIFIED IDEOGRAPH:'E3A8:58280:膍
CJK UNIFIED IDEOGRAPH:'E3A9:58281:膌
CJK UNIFIED IDEOGRAPH:'E3AA:58282:膋
CJK UNIFIED IDEOGRAPH:'E3AB:58283:舕
CJK UNIFIED IDEOGRAPH:'E3AC:58284:蒗
CJK UNIFIED IDEOGRAPH:'E3AD:58285:蒤
CJK UNIFIED IDEOGRAPH:'E3AE:58286:蒡
CJK UNIFIED IDEOGRAPH:'E3AF:58287:蒟
CJK UNIFIED IDEOGRAPH:'E3B0:58288:蒺
CJK UNIFIED IDEOGRAPH:'E3B1:58289:蓎
CJK UNIFIED IDEOGRAPH:'E3B2:58290:蓂
CJK UNIFIED IDEOGRAPH:'E3B3:58291:蒬
CJK UNIFIED IDEOGRAPH:'E3B4:58292:蒮
CJK UNIFIED IDEOGRAPH:'E3B5:58293:蒫
CJK UNIFIED IDEOGRAPH:'E3B6:58294:蒹
CJK UNIFIED IDEOGRAPH:'E3B7:58295:蒴
CJK UNIFIED IDEOGRAPH:'E3B8:58296:蓁
CJK UNIFIED IDEOGRAPH:'E3B9:58297:蓍
CJK UNIFIED IDEOGRAPH:'E3BA:58298:蒪
CJK UNIFIED IDEOGRAPH:'E3BB:58299:蒚
CJK UNIFIED IDEOGRAPH:'E3BC:58300:蒱
CJK UNIFIED IDEOGRAPH:'E3BD:58301:蓐
CJK UNIFIED IDEOGRAPH:'E3BE:58302:蒝
CJK UNIFIED IDEOGRAPH:'E3BF:58303:蒧
CJK UNIFIED IDEOGRAPH:'E3C0:58304:蒻
CJK UNIFIED IDEOGRAPH:'E3C1:58305:蒢
CJK UNIFIED IDEOGRAPH:'E3C2:58306:蒔
CJK UNIFIED IDEOGRAPH:'E3C3:58307:蓇
CJK UNIFIED IDEOGRAPH:'E3C4:58308:蓌
CJK UNIFIED IDEOGRAPH:'E3C5:58309:蒛
CJK UNIFIED IDEOGRAPH:'E3C6:58310:蒩
CJK UNIFIED IDEOGRAPH:'E3C7:58311:蒯
CJK UNIFIED IDEOGRAPH:'E3C8:58312:蒨
CJK UNIFIED IDEOGRAPH:'E3C9:58313:蓖
CJK UNIFIED IDEOGRAPH:'E3CA:58314:蒘
CJK UNIFIED IDEOGRAPH:'E3CB:58315:蒶
CJK UNIFIED IDEOGRAPH:'E3CC:58316:蓏
CJK UNIFIED IDEOGRAPH:'E3CD:58317:蒠
CJK UNIFIED IDEOGRAPH:'E3CE:58318:蓗
CJK UNIFIED IDEOGRAPH:'E3CF:58319:蓔
CJK UNIFIED IDEOGRAPH:'E3D0:58320:蓒
CJK UNIFIED IDEOGRAPH:'E3D1:58321:蓛
CJK UNIFIED IDEOGRAPH:'E3D2:58322:蒰
CJK UNIFIED IDEOGRAPH:'E3D3:58323:蒑
CJK UNIFIED IDEOGRAPH:'E3D4:58324:虡
CJK UNIFIED IDEOGRAPH:'E3D5:58325:蜳
CJK UNIFIED IDEOGRAPH:'E3D6:58326:蜣
CJK UNIFIED IDEOGRAPH:'E3D7:58327:蜨
CJK UNIFIED IDEOGRAPH:'E3D8:58328:蝫
CJK UNIFIED IDEOGRAPH:'E3D9:58329:蝀
CJK UNIFIED IDEOGRAPH:'E3DA:58330:蜮
CJK UNIFIED IDEOGRAPH:'E3DB:58331:蜞
CJK UNIFIED IDEOGRAPH:'E3DC:58332:蜡
CJK UNIFIED IDEOGRAPH:'E3DD:58333:蜙
CJK UNIFIED IDEOGRAPH:'E3DE:58334:蜛
CJK UNIFIED IDEOGRAPH:'E3DF:58335:蝃
CJK UNIFIED IDEOGRAPH:'E3E0:58336:蜬
CJK UNIFIED IDEOGRAPH:'E3E1:58337:蝁
CJK UNIFIED IDEOGRAPH:'E3E2:58338:蜾
CJK UNIFIED IDEOGRAPH:'E3E3:58339:蝆
CJK UNIFIED IDEOGRAPH:'E3E4:58340:蜠
CJK UNIFIED IDEOGRAPH:'E3E5:58341:蜲
CJK UNIFIED IDEOGRAPH:'E3E6:58342:蜪
CJK UNIFIED IDEOGRAPH:'E3E7:58343:蜭
CJK UNIFIED IDEOGRAPH:'E3E8:58344:蜼
CJK UNIFIED IDEOGRAPH:'E3E9:58345:蜒
CJK UNIFIED IDEOGRAPH:'E3EA:58346:蜺
CJK UNIFIED IDEOGRAPH:'E3EB:58347:蜱
CJK UNIFIED IDEOGRAPH:'E3EC:58348:蜵
CJK UNIFIED IDEOGRAPH:'E3ED:58349:蝂
CJK UNIFIED IDEOGRAPH:'E3EE:58350:蜦
CJK UNIFIED IDEOGRAPH:'E3EF:58351:蜧
CJK UNIFIED IDEOGRAPH:'E3F0:58352:蜸
CJK UNIFIED IDEOGRAPH:'E3F1:58353:蜤
CJK UNIFIED IDEOGRAPH:'E3F2:58354:蜚
CJK UNIFIED IDEOGRAPH:'E3F3:58355:蜰
CJK UNIFIED IDEOGRAPH:'E3F4:58356:蜑
CJK UNIFIED IDEOGRAPH:'E3F5:58357:裷
CJK UNIFIED IDEOGRAPH:'E3F6:58358:裧
CJK UNIFIED IDEOGRAPH:'E3F7:58359:裱
CJK UNIFIED IDEOGRAPH:'E3F8:58360:裲
CJK UNIFIED IDEOGRAPH:'E3F9:58361:裺
CJK UNIFIED IDEOGRAPH:'E3FA:58362:裾
CJK UNIFIED IDEOGRAPH:'E3FB:58363:裮
CJK UNIFIED IDEOGRAPH:'E3FC:58364:裼
CJK UNIFIED IDEOGRAPH:'E3FD:58365:裶
CJK UNIFIED IDEOGRAPH:'E3FE:58366:裻
CJK UNIFIED IDEOGRAPH:'E440:58432:裰
CJK UNIFIED IDEOGRAPH:'E441:58433:裬
CJK UNIFIED IDEOGRAPH:'E442:58434:裫
CJK UNIFIED IDEOGRAPH:'E443:58435:覝
CJK UNIFIED IDEOGRAPH:'E444:58436:覡
CJK UNIFIED IDEOGRAPH:'E445:58437:覟
CJK UNIFIED IDEOGRAPH:'E446:58438:覞
CJK UNIFIED IDEOGRAPH:'E447:58439:觩
CJK UNIFIED IDEOGRAPH:'E448:58440:觫
CJK UNIFIED IDEOGRAPH:'E449:58441:觨
CJK UNIFIED IDEOGRAPH:'E44A:58442:誫
CJK UNIFIED IDEOGRAPH:'E44B:58443:誙
CJK UNIFIED IDEOGRAPH:'E44C:58444:誋
CJK UNIFIED IDEOGRAPH:'E44D:58445:誒
CJK UNIFIED IDEOGRAPH:'E44E:58446:誏
CJK UNIFIED IDEOGRAPH:'E44F:58447:誖
CJK UNIFIED IDEOGRAPH:'E450:58448:谽
CJK UNIFIED IDEOGRAPH:'E451:58449:豨
CJK UNIFIED IDEOGRAPH:'E452:58450:豩
CJK UNIFIED IDEOGRAPH:'E453:58451:賕
CJK UNIFIED IDEOGRAPH:'E454:58452:賏
CJK UNIFIED IDEOGRAPH:'E455:58453:賗
CJK UNIFIED IDEOGRAPH:'E456:58454:趖
CJK UNIFIED IDEOGRAPH:'E457:58455:踉
CJK UNIFIED IDEOGRAPH:'E458:58456:踂
CJK UNIFIED IDEOGRAPH:'E459:58457:跿
CJK UNIFIED IDEOGRAPH:'E45A:58458:踍
CJK UNIFIED IDEOGRAPH:'E45B:58459:跽
CJK UNIFIED IDEOGRAPH:'E45C:58460:踊
CJK UNIFIED IDEOGRAPH:'E45D:58461:踃
CJK UNIFIED IDEOGRAPH:'E45E:58462:踇
CJK UNIFIED IDEOGRAPH:'E45F:58463:踆
CJK UNIFIED IDEOGRAPH:'E460:58464:踅
CJK UNIFIED IDEOGRAPH:'E461:58465:跾
CJK UNIFIED IDEOGRAPH:'E462:58466:踀
CJK UNIFIED IDEOGRAPH:'E463:58467:踄
CJK UNIFIED IDEOGRAPH:'E464:58468:輐
CJK UNIFIED IDEOGRAPH:'E465:58469:輑
CJK UNIFIED IDEOGRAPH:'E466:58470:輎
CJK UNIFIED IDEOGRAPH:'E467:58471:輍
CJK UNIFIED IDEOGRAPH:'E468:58472:鄣
CJK UNIFIED IDEOGRAPH:'E469:58473:鄜
CJK UNIFIED IDEOGRAPH:'E46A:58474:鄠
CJK UNIFIED IDEOGRAPH:'E46B:58475:鄢
CJK UNIFIED IDEOGRAPH:'E46C:58476:鄟
CJK UNIFIED IDEOGRAPH:'E46D:58477:鄝
CJK UNIFIED IDEOGRAPH:'E46E:58478:鄚
CJK UNIFIED IDEOGRAPH:'E46F:58479:鄤
CJK UNIFIED IDEOGRAPH:'E470:58480:鄡
CJK UNIFIED IDEOGRAPH:'E471:58481:鄛
CJK UNIFIED IDEOGRAPH:'E472:58482:酺
CJK UNIFIED IDEOGRAPH:'E473:58483:酲
CJK UNIFIED IDEOGRAPH:'E474:58484:酹
CJK UNIFIED IDEOGRAPH:'E475:58485:酳
CJK UNIFIED IDEOGRAPH:'E476:58486:銥
CJK UNIFIED IDEOGRAPH:'E477:58487:銤
CJK UNIFIED IDEOGRAPH:'E478:58488:鉶
CJK UNIFIED IDEOGRAPH:'E479:58489:銛
CJK UNIFIED IDEOGRAPH:'E47A:58490:鉺
CJK UNIFIED IDEOGRAPH:'E47B:58491:銠
CJK UNIFIED IDEOGRAPH:'E47C:58492:銔
CJK UNIFIED IDEOGRAPH:'E47D:58493:銪
CJK UNIFIED IDEOGRAPH:'E47E:58494:銍
CJK UNIFIED IDEOGRAPH:'E4A1:58529:銦
CJK UNIFIED IDEOGRAPH:'E4A2:58530:銚
CJK UNIFIED IDEOGRAPH:'E4A3:58531:銫
CJK UNIFIED IDEOGRAPH:'E4A4:58532:鉹
CJK UNIFIED IDEOGRAPH:'E4A5:58533:銗
CJK UNIFIED IDEOGRAPH:'E4A6:58534:鉿
CJK UNIFIED IDEOGRAPH:'E4A7:58535:銣
CJK UNIFIED IDEOGRAPH:'E4A8:58536:鋮
CJK UNIFIED IDEOGRAPH:'E4A9:58537:銎
CJK UNIFIED IDEOGRAPH:'E4AA:58538:銂
CJK UNIFIED IDEOGRAPH:'E4AB:58539:銕
CJK UNIFIED IDEOGRAPH:'E4AC:58540:銢
CJK UNIFIED IDEOGRAPH:'E4AD:58541:鉽
CJK UNIFIED IDEOGRAPH:'E4AE:58542:銈
CJK UNIFIED IDEOGRAPH:'E4AF:58543:銡
CJK UNIFIED IDEOGRAPH:'E4B0:58544:銊
CJK UNIFIED IDEOGRAPH:'E4B1:58545:銆
CJK UNIFIED IDEOGRAPH:'E4B2:58546:銌
CJK UNIFIED IDEOGRAPH:'E4B3:58547:銙
CJK UNIFIED IDEOGRAPH:'E4B4:58548:銧
CJK UNIFIED IDEOGRAPH:'E4B5:58549:鉾
CJK UNIFIED IDEOGRAPH:'E4B6:58550:銇
CJK UNIFIED IDEOGRAPH:'E4B7:58551:銩
CJK UNIFIED IDEOGRAPH:'E4B8:58552:銝
CJK UNIFIED IDEOGRAPH:'E4B9:58553:銋
CJK UNIFIED IDEOGRAPH:'E4BA:58554:鈭
CJK UNIFIED IDEOGRAPH:'E4BB:58555:隞
CJK UNIFIED IDEOGRAPH:'E4BC:58556:隡
CJK UNIFIED IDEOGRAPH:'E4BD:58557:雿
CJK UNIFIED IDEOGRAPH:'E4BE:58558:靘
CJK UNIFIED IDEOGRAPH:'E4BF:58559:靽
CJK UNIFIED IDEOGRAPH:'E4C0:58560:靺
CJK UNIFIED IDEOGRAPH:'E4C1:58561:靾
CJK UNIFIED IDEOGRAPH:'E4C2:58562:鞃
CJK UNIFIED IDEOGRAPH:'E4C3:58563:鞀
CJK UNIFIED IDEOGRAPH:'E4C4:58564:鞂
CJK UNIFIED IDEOGRAPH:'E4C5:58565:靻
CJK UNIFIED IDEOGRAPH:'E4C6:58566:鞄
CJK UNIFIED IDEOGRAPH:'E4C7:58567:鞁
CJK UNIFIED IDEOGRAPH:'E4C8:58568:靿
CJK UNIFIED IDEOGRAPH:'E4C9:58569:韎
CJK UNIFIED IDEOGRAPH:'E4CA:58570:韍
CJK UNIFIED IDEOGRAPH:'E4CB:58571:頖
CJK UNIFIED IDEOGRAPH:'E4CC:58572:颭
CJK UNIFIED IDEOGRAPH:'E4CD:58573:颮
CJK UNIFIED IDEOGRAPH:'E4CE:58574:餂
CJK UNIFIED IDEOGRAPH:'E4CF:58575:餀
CJK UNIFIED IDEOGRAPH:'E4D0:58576:餇
CJK UNIFIED IDEOGRAPH:'E4D1:58577:馝
CJK UNIFIED IDEOGRAPH:'E4D2:58578:馜
CJK UNIFIED IDEOGRAPH:'E4D3:58579:駃
CJK UNIFIED IDEOGRAPH:'E4D4:58580:馹
CJK UNIFIED IDEOGRAPH:'E4D5:58581:馻
CJK UNIFIED IDEOGRAPH:'E4D6:58582:馺
CJK UNIFIED IDEOGRAPH:'E4D7:58583:駂
CJK UNIFIED IDEOGRAPH:'E4D8:58584:馽
CJK UNIFIED IDEOGRAPH:'E4D9:58585:駇
CJK UNIFIED IDEOGRAPH:'E4DA:58586:骱
CJK UNIFIED IDEOGRAPH:'E4DB:58587:髣
CJK UNIFIED IDEOGRAPH:'E4DC:58588:髧
CJK UNIFIED IDEOGRAPH:'E4DD:58589:鬾
CJK UNIFIED IDEOGRAPH:'E4DE:58590:鬿
CJK UNIFIED IDEOGRAPH:'E4DF:58591:魠
CJK UNIFIED IDEOGRAPH:'E4E0:58592:魡
CJK UNIFIED IDEOGRAPH:'E4E1:58593:魟
CJK UNIFIED IDEOGRAPH:'E4E2:58594:鳱
CJK UNIFIED IDEOGRAPH:'E4E3:58595:鳲
CJK UNIFIED IDEOGRAPH:'E4E4:58596:鳵
CJK UNIFIED IDEOGRAPH:'E4E5:58597:麧
CJK UNIFIED IDEOGRAPH:'E4E6:58598:僿
CJK UNIFIED IDEOGRAPH:'E4E7:58599:儃
CJK UNIFIED IDEOGRAPH:'E4E8:58600:儰
CJK UNIFIED IDEOGRAPH:'E4E9:58601:僸
CJK UNIFIED IDEOGRAPH:'E4EA:58602:儆
CJK UNIFIED IDEOGRAPH:'E4EB:58603:儇
CJK UNIFIED IDEOGRAPH:'E4EC:58604:僶
CJK UNIFIED IDEOGRAPH:'E4ED:58605:僾
CJK UNIFIED IDEOGRAPH:'E4EE:58606:儋
CJK UNIFIED IDEOGRAPH:'E4EF:58607:儌
CJK UNIFIED IDEOGRAPH:'E4F0:58608:僽
CJK UNIFIED IDEOGRAPH:'E4F1:58609:儊
CJK UNIFIED IDEOGRAPH:'E4F2:58610:劋
CJK UNIFIED IDEOGRAPH:'E4F3:58611:劌
CJK UNIFIED IDEOGRAPH:'E4F4:58612:勱
CJK UNIFIED IDEOGRAPH:'E4F5:58613:勯
CJK UNIFIED IDEOGRAPH:'E4F6:58614:噈
CJK UNIFIED IDEOGRAPH:'E4F7:58615:噂
CJK UNIFIED IDEOGRAPH:'E4F8:58616:噌
CJK UNIFIED IDEOGRAPH:'E4F9:58617:嘵
CJK UNIFIED IDEOGRAPH:'E4FA:58618:噁
CJK UNIFIED IDEOGRAPH:'E4FB:58619:噊
CJK UNIFIED IDEOGRAPH:'E4FC:58620:噉
CJK UNIFIED IDEOGRAPH:'E4FD:58621:噆
CJK UNIFIED IDEOGRAPH:'E4FE:58622:噘
CJK UNIFIED IDEOGRAPH:'E540:58688:噚
CJK UNIFIED IDEOGRAPH:'E541:58689:噀
CJK UNIFIED IDEOGRAPH:'E542:58690:嘳
CJK UNIFIED IDEOGRAPH:'E543:58691:嘽
CJK UNIFIED IDEOGRAPH:'E544:58692:嘬
CJK UNIFIED IDEOGRAPH:'E545:58693:嘾
CJK UNIFIED IDEOGRAPH:'E546:58694:嘸
CJK UNIFIED IDEOGRAPH:'E547:58695:嘪
CJK UNIFIED IDEOGRAPH:'E548:58696:嘺
CJK UNIFIED IDEOGRAPH:'E549:58697:圚
CJK UNIFIED IDEOGRAPH:'E54A:58698:墫
CJK UNIFIED IDEOGRAPH:'E54B:58699:墝
CJK UNIFIED IDEOGRAPH:'E54C:58700:墱
CJK UNIFIED IDEOGRAPH:'E54D:58701:墠
CJK UNIFIED IDEOGRAPH:'E54E:58702:墣
CJK UNIFIED IDEOGRAPH:'E54F:58703:墯
CJK UNIFIED IDEOGRAPH:'E550:58704:墬
CJK UNIFIED IDEOGRAPH:'E551:58705:墥
CJK UNIFIED IDEOGRAPH:'E552:58706:墡
CJK UNIFIED IDEOGRAPH:'E553:58707:壿
CJK UNIFIED IDEOGRAPH:'E554:58708:嫿
CJK UNIFIED IDEOGRAPH:'E555:58709:嫴
CJK UNIFIED IDEOGRAPH:'E556:58710:嫽
CJK UNIFIED IDEOGRAPH:'E557:58711:嫷
CJK UNIFIED IDEOGRAPH:'E558:58712:嫶
CJK UNIFIED IDEOGRAPH:'E559:58713:嬃
CJK UNIFIED IDEOGRAPH:'E55A:58714:嫸
CJK UNIFIED IDEOGRAPH:'E55B:58715:嬂
CJK UNIFIED IDEOGRAPH:'E55C:58716:嫹
CJK UNIFIED IDEOGRAPH:'E55D:58717:嬁
CJK UNIFIED IDEOGRAPH:'E55E:58718:嬇
CJK UNIFIED IDEOGRAPH:'E55F:58719:嬅
CJK UNIFIED IDEOGRAPH:'E560:58720:嬏
CJK UNIFIED IDEOGRAPH:'E561:58721:屧
CJK UNIFIED IDEOGRAPH:'E562:58722:嶙
CJK UNIFIED IDEOGRAPH:'E563:58723:嶗
CJK UNIFIED IDEOGRAPH:'E564:58724:嶟
CJK UNIFIED IDEOGRAPH:'E565:58725:嶒
CJK UNIFIED IDEOGRAPH:'E566:58726:嶢
CJK UNIFIED IDEOGRAPH:'E567:58727:嶓
CJK UNIFIED IDEOGRAPH:'E568:58728:嶕
CJK UNIFIED IDEOGRAPH:'E569:58729:嶠
CJK UNIFIED IDEOGRAPH:'E56A:58730:嶜
CJK UNIFIED IDEOGRAPH:'E56B:58731:嶡
CJK UNIFIED IDEOGRAPH:'E56C:58732:嶚
CJK UNIFIED IDEOGRAPH:'E56D:58733:嶞
CJK UNIFIED IDEOGRAPH:'E56E:58734:幩
CJK UNIFIED IDEOGRAPH:'E56F:58735:幝
CJK UNIFIED IDEOGRAPH:'E570:58736:幠
CJK UNIFIED IDEOGRAPH:'E571:58737:幜
CJK UNIFIED IDEOGRAPH:'E572:58738:緳
CJK UNIFIED IDEOGRAPH:'E573:58739:廛
CJK UNIFIED IDEOGRAPH:'E574:58740:廞
CJK UNIFIED IDEOGRAPH:'E575:58741:廡
CJK UNIFIED IDEOGRAPH:'E576:58742:彉
CJK UNIFIED IDEOGRAPH:'E577:58743:徲
CJK UNIFIED IDEOGRAPH:'E578:58744:憋
CJK UNIFIED IDEOGRAPH:'E579:58745:憃
CJK UNIFIED IDEOGRAPH:'E57A:58746:慹
CJK UNIFIED IDEOGRAPH:'E57B:58747:憱
CJK UNIFIED IDEOGRAPH:'E57C:58748:憰
CJK UNIFIED IDEOGRAPH:'E57D:58749:憢
CJK UNIFIED IDEOGRAPH:'E57E:58750:憉
CJK UNIFIED IDEOGRAPH:'E5A1:58785:憛
CJK UNIFIED IDEOGRAPH:'E5A2:58786:憓
CJK UNIFIED IDEOGRAPH:'E5A3:58787:憯
CJK UNIFIED IDEOGRAPH:'E5A4:58788:憭
CJK UNIFIED IDEOGRAPH:'E5A5:58789:憟
CJK UNIFIED IDEOGRAPH:'E5A6:58790:憒
CJK UNIFIED IDEOGRAPH:'E5A7:58791:憪
CJK UNIFIED IDEOGRAPH:'E5A8:58792:憡
CJK UNIFIED IDEOGRAPH:'E5A9:58793:憍
CJK UNIFIED IDEOGRAPH:'E5AA:58794:慦
CJK UNIFIED IDEOGRAPH:'E5AB:58795:憳
CJK UNIFIED IDEOGRAPH:'E5AC:58796:戭
CJK UNIFIED IDEOGRAPH:'E5AD:58797:摮
CJK UNIFIED IDEOGRAPH:'E5AE:58798:摰
CJK UNIFIED IDEOGRAPH:'E5AF:58799:撖
CJK UNIFIED IDEOGRAPH:'E5B0:58800:撠
CJK UNIFIED IDEOGRAPH:'E5B1:58801:撅
CJK UNIFIED IDEOGRAPH:'E5B2:58802:撗
CJK UNIFIED IDEOGRAPH:'E5B3:58803:撜
CJK UNIFIED IDEOGRAPH:'E5B4:58804:撏
CJK UNIFIED IDEOGRAPH:'E5B5:58805:撋
CJK UNIFIED IDEOGRAPH:'E5B6:58806:撊
CJK UNIFIED IDEOGRAPH:'E5B7:58807:撌
CJK UNIFIED IDEOGRAPH:'E5B8:58808:撣
CJK UNIFIED IDEOGRAPH:'E5B9:58809:撟
CJK UNIFIED IDEOGRAPH:'E5BA:58810:摨
CJK UNIFIED IDEOGRAPH:'E5BB:58811:撱
CJK UNIFIED IDEOGRAPH:'E5BC:58812:撘
CJK UNIFIED IDEOGRAPH:'E5BD:58813:敶
CJK UNIFIED IDEOGRAPH:'E5BE:58814:敺
CJK UNIFIED IDEOGRAPH:'E5BF:58815:敹
CJK UNIFIED IDEOGRAPH:'E5C0:58816:敻
CJK UNIFIED IDEOGRAPH:'E5C1:58817:斲
CJK UNIFIED IDEOGRAPH:'E5C2:58818:斳
CJK UNIFIED IDEOGRAPH:'E5C3:58819:暵
CJK UNIFIED IDEOGRAPH:'E5C4:58820:暰
CJK UNIFIED IDEOGRAPH:'E5C5:58821:暩
CJK UNIFIED IDEOGRAPH:'E5C6:58822:暲
CJK UNIFIED IDEOGRAPH:'E5C7:58823:暷
CJK UNIFIED IDEOGRAPH:'E5C8:58824:暪
CJK UNIFIED IDEOGRAPH:'E5C9:58825:暯
CJK UNIFIED IDEOGRAPH:'E5CA:58826:樀
CJK UNIFIED IDEOGRAPH:'E5CB:58827:樆
CJK UNIFIED IDEOGRAPH:'E5CC:58828:樗
CJK UNIFIED IDEOGRAPH:'E5CD:58829:槥
CJK UNIFIED IDEOGRAPH:'E5CE:58830:槸
CJK UNIFIED IDEOGRAPH:'E5CF:58831:樕
CJK UNIFIED IDEOGRAPH:'E5D0:58832:槱
CJK UNIFIED IDEOGRAPH:'E5D1:58833:槤
CJK UNIFIED IDEOGRAPH:'E5D2:58834:樠
CJK UNIFIED IDEOGRAPH:'E5D3:58835:槿
CJK UNIFIED IDEOGRAPH:'E5D4:58836:槬
CJK UNIFIED IDEOGRAPH:'E5D5:58837:槢
CJK UNIFIED IDEOGRAPH:'E5D6:58838:樛
CJK UNIFIED IDEOGRAPH:'E5D7:58839:樝
CJK UNIFIED IDEOGRAPH:'E5D8:58840:槾
CJK UNIFIED IDEOGRAPH:'E5D9:58841:樧
CJK UNIFIED IDEOGRAPH:'E5DA:58842:槲
CJK UNIFIED IDEOGRAPH:'E5DB:58843:槮
CJK UNIFIED IDEOGRAPH:'E5DC:58844:樔
CJK UNIFIED IDEOGRAPH:'E5DD:58845:槷
CJK UNIFIED IDEOGRAPH:'E5DE:58846:槧
CJK UNIFIED IDEOGRAPH:'E5DF:58847:橀
CJK UNIFIED IDEOGRAPH:'E5E0:58848:樈
CJK UNIFIED IDEOGRAPH:'E5E1:58849:槦
CJK UNIFIED IDEOGRAPH:'E5E2:58850:槻
CJK UNIFIED IDEOGRAPH:'E5E3:58851:樍
CJK UNIFIED IDEOGRAPH:'E5E4:58852:槼
CJK UNIFIED IDEOGRAPH:'E5E5:58853:槫
CJK UNIFIED IDEOGRAPH:'E5E6:58854:樉
CJK UNIFIED IDEOGRAPH:'E5E7:58855:樄
CJK UNIFIED IDEOGRAPH:'E5E8:58856:樘
CJK UNIFIED IDEOGRAPH:'E5E9:58857:樥
CJK UNIFIED IDEOGRAPH:'E5EA:58858:樏
CJK UNIFIED IDEOGRAPH:'E5EB:58859:槶
CJK UNIFIED IDEOGRAPH:'E5EC:58860:樦
CJK UNIFIED IDEOGRAPH:'E5ED:58861:樇
CJK UNIFIED IDEOGRAPH:'E5EE:58862:槴
CJK UNIFIED IDEOGRAPH:'E5EF:58863:樖
CJK UNIFIED IDEOGRAPH:'E5F0:58864:歑
CJK UNIFIED IDEOGRAPH:'E5F1:58865:殥
CJK UNIFIED IDEOGRAPH:'E5F2:58866:殣
CJK UNIFIED IDEOGRAPH:'E5F3:58867:殢
CJK UNIFIED IDEOGRAPH:'E5F4:58868:殦
CJK UNIFIED IDEOGRAPH:'E5F5:58869:氁
CJK UNIFIED IDEOGRAPH:'E5F6:58870:氀
CJK UNIFIED IDEOGRAPH:'E5F7:58871:毿
CJK UNIFIED IDEOGRAPH:'E5F8:58872:氂
CJK UNIFIED IDEOGRAPH:'E5F9:58873:潁
CJK UNIFIED IDEOGRAPH:'E5FA:58874:漦
CJK UNIFIED IDEOGRAPH:'E5FB:58875:潾
CJK UNIFIED IDEOGRAPH:'E5FC:58876:澇
CJK UNIFIED IDEOGRAPH:'E5FD:58877:濆
CJK UNIFIED IDEOGRAPH:'E5FE:58878:澒
CJK UNIFIED IDEOGRAPH:'E640:58944:澍
CJK UNIFIED IDEOGRAPH:'E641:58945:澉
CJK UNIFIED IDEOGRAPH:'E642:58946:澌
CJK UNIFIED IDEOGRAPH:'E643:58947:潢
CJK UNIFIED IDEOGRAPH:'E644:58948:潏
CJK UNIFIED IDEOGRAPH:'E645:58949:澅
CJK UNIFIED IDEOGRAPH:'E646:58950:潚
CJK UNIFIED IDEOGRAPH:'E647:58951:澖
CJK UNIFIED IDEOGRAPH:'E648:58952:潶
CJK UNIFIED IDEOGRAPH:'E649:58953:潬
CJK UNIFIED IDEOGRAPH:'E64A:58954:澂
CJK UNIFIED IDEOGRAPH:'E64B:58955:潕
CJK UNIFIED IDEOGRAPH:'E64C:58956:潲
CJK UNIFIED IDEOGRAPH:'E64D:58957:潒
CJK UNIFIED IDEOGRAPH:'E64E:58958:潐
CJK UNIFIED IDEOGRAPH:'E64F:58959:潗
CJK UNIFIED IDEOGRAPH:'E650:58960:澔
CJK UNIFIED IDEOGRAPH:'E651:58961:澓
CJK UNIFIED IDEOGRAPH:'E652:58962:潝
CJK UNIFIED IDEOGRAPH:'E653:58963:漀
CJK UNIFIED IDEOGRAPH:'E654:58964:潡
CJK UNIFIED IDEOGRAPH:'E655:58965:潫
CJK UNIFIED IDEOGRAPH:'E656:58966:潽
CJK UNIFIED IDEOGRAPH:'E657:58967:潧
CJK UNIFIED IDEOGRAPH:'E658:58968:澐
CJK UNIFIED IDEOGRAPH:'E659:58969:潓
CJK UNIFIED IDEOGRAPH:'E65A:58970:澋
CJK UNIFIED IDEOGRAPH:'E65B:58971:潩
CJK UNIFIED IDEOGRAPH:'E65C:58972:潿
CJK UNIFIED IDEOGRAPH:'E65D:58973:澕
CJK UNIFIED IDEOGRAPH:'E65E:58974:潣
CJK UNIFIED IDEOGRAPH:'E65F:58975:潷
CJK UNIFIED IDEOGRAPH:'E660:58976:潪
CJK UNIFIED IDEOGRAPH:'E661:58977:潻
CJK UNIFIED IDEOGRAPH:'E662:58978:熲
CJK UNIFIED IDEOGRAPH:'E663:58979:熯
CJK UNIFIED IDEOGRAPH:'E664:58980:熛
CJK UNIFIED IDEOGRAPH:'E665:58981:熰
CJK UNIFIED IDEOGRAPH:'E666:58982:熠
CJK UNIFIED IDEOGRAPH:'E667:58983:熚
CJK UNIFIED IDEOGRAPH:'E668:58984:熩
CJK UNIFIED IDEOGRAPH:'E669:58985:熵
CJK UNIFIED IDEOGRAPH:'E66A:58986:熝
CJK UNIFIED IDEOGRAPH:'E66B:58987:熥
CJK UNIFIED IDEOGRAPH:'E66C:58988:熞
CJK UNIFIED IDEOGRAPH:'E66D:58989:熤
CJK UNIFIED IDEOGRAPH:'E66E:58990:熡
CJK UNIFIED IDEOGRAPH:'E66F:58991:熪
CJK UNIFIED IDEOGRAPH:'E670:58992:熜
CJK UNIFIED IDEOGRAPH:'E671:58993:熧
CJK UNIFIED IDEOGRAPH:'E672:58994:熳
CJK UNIFIED IDEOGRAPH:'E673:58995:犘
CJK UNIFIED IDEOGRAPH:'E674:58996:犚
CJK UNIFIED IDEOGRAPH:'E675:58997:獘
CJK UNIFIED IDEOGRAPH:'E676:58998:獒
CJK UNIFIED IDEOGRAPH:'E677:58999:獞
CJK UNIFIED IDEOGRAPH:'E678:59000:獟
CJK UNIFIED IDEOGRAPH:'E679:59001:獠
CJK UNIFIED IDEOGRAPH:'E67A:59002:獝
CJK UNIFIED IDEOGRAPH:'E67B:59003:獛
CJK UNIFIED IDEOGRAPH:'E67C:59004:獡
CJK UNIFIED IDEOGRAPH:'E67D:59005:獚
CJK UNIFIED IDEOGRAPH:'E67E:59006:獙
CJK UNIFIED IDEOGRAPH:'E6A1:59041:獢
CJK UNIFIED IDEOGRAPH:'E6A2:59042:璇
CJK UNIFIED IDEOGRAPH:'E6A3:59043:璉
CJK UNIFIED IDEOGRAPH:'E6A4:59044:璊
CJK UNIFIED IDEOGRAPH:'E6A5:59045:璆
CJK UNIFIED IDEOGRAPH:'E6A6:59046:璁
CJK UNIFIED IDEOGRAPH:'E6A7:59047:瑽
CJK UNIFIED IDEOGRAPH:'E6A8:59048:璅
CJK UNIFIED IDEOGRAPH:'E6A9:59049:璈
CJK UNIFIED IDEOGRAPH:'E6AA:59050:瑼
CJK UNIFIED IDEOGRAPH:'E6AB:59051:瑹
CJK UNIFIED IDEOGRAPH:'E6AC:59052:甈
CJK UNIFIED IDEOGRAPH:'E6AD:59053:甇
CJK UNIFIED IDEOGRAPH:'E6AE:59054:畾
CJK UNIFIED IDEOGRAPH:'E6AF:59055:瘥
CJK UNIFIED IDEOGRAPH:'E6B0:59056:瘞
CJK UNIFIED IDEOGRAPH:'E6B1:59057:瘙
CJK UNIFIED IDEOGRAPH:'E6B2:59058:瘝
CJK UNIFIED IDEOGRAPH:'E6B3:59059:瘜
CJK UNIFIED IDEOGRAPH:'E6B4:59060:瘣
CJK UNIFIED IDEOGRAPH:'E6B5:59061:瘚
CJK UNIFIED IDEOGRAPH:'E6B6:59062:瘨
CJK UNIFIED IDEOGRAPH:'E6B7:59063:瘛
CJK UNIFIED IDEOGRAPH:'E6B8:59064:皜
CJK UNIFIED IDEOGRAPH:'E6B9:59065:皝
CJK UNIFIED IDEOGRAPH:'E6BA:59066:皞
CJK UNIFIED IDEOGRAPH:'E6BB:59067:皛
CJK UNIFIED IDEOGRAPH:'E6BC:59068:瞍
CJK UNIFIED IDEOGRAPH:'E6BD:59069:瞏
CJK UNIFIED IDEOGRAPH:'E6BE:59070:瞉
CJK UNIFIED IDEOGRAPH:'E6BF:59071:瞈
CJK UNIFIED IDEOGRAPH:'E6C0:59072:磍
CJK UNIFIED IDEOGRAPH:'E6C1:59073:碻
CJK UNIFIED IDEOGRAPH:'E6C2:59074:磏
CJK UNIFIED IDEOGRAPH:'E6C3:59075:磌
CJK UNIFIED IDEOGRAPH:'E6C4:59076:磑
CJK UNIFIED IDEOGRAPH:'E6C5:59077:磎
CJK UNIFIED IDEOGRAPH:'E6C6:59078:磔
CJK UNIFIED IDEOGRAPH:'E6C7:59079:磈
CJK UNIFIED IDEOGRAPH:'E6C8:59080:磃
CJK UNIFIED IDEOGRAPH:'E6C9:59081:磄
CJK UNIFIED IDEOGRAPH:'E6CA:59082:磉
CJK UNIFIED IDEOGRAPH:'E6CB:59083:禚
CJK UNIFIED IDEOGRAPH:'E6CC:59084:禡
CJK UNIFIED IDEOGRAPH:'E6CD:59085:禠
CJK UNIFIED IDEOGRAPH:'E6CE:59086:禜
CJK UNIFIED IDEOGRAPH:'E6CF:59087:禢
CJK UNIFIED IDEOGRAPH:'E6D0:59088:禛
CJK UNIFIED IDEOGRAPH:'E6D1:59089:歶
CJK UNIFIED IDEOGRAPH:'E6D2:59090:稹
CJK UNIFIED IDEOGRAPH:'E6D3:59091:窲
CJK UNIFIED IDEOGRAPH:'E6D4:59092:窴
CJK UNIFIED IDEOGRAPH:'E6D5:59093:窳
CJK UNIFIED IDEOGRAPH:'E6D6:59094:箷
CJK UNIFIED IDEOGRAPH:'E6D7:59095:篋
CJK UNIFIED IDEOGRAPH:'E6D8:59096:箾
CJK UNIFIED IDEOGRAPH:'E6D9:59097:箬
CJK UNIFIED IDEOGRAPH:'E6DA:59098:篎
CJK UNIFIED IDEOGRAPH:'E6DB:59099:箯
CJK UNIFIED IDEOGRAPH:'E6DC:59100:箹
CJK UNIFIED IDEOGRAPH:'E6DD:59101:篊
CJK UNIFIED IDEOGRAPH:'E6DE:59102:箵
CJK UNIFIED IDEOGRAPH:'E6DF:59103:糅
CJK UNIFIED IDEOGRAPH:'E6E0:59104:糈
CJK UNIFIED IDEOGRAPH:'E6E1:59105:糌
CJK UNIFIED IDEOGRAPH:'E6E2:59106:糋
CJK UNIFIED IDEOGRAPH:'E6E3:59107:緷
CJK UNIFIED IDEOGRAPH:'E6E4:59108:緛
CJK UNIFIED IDEOGRAPH:'E6E5:59109:緪
CJK UNIFIED IDEOGRAPH:'E6E6:59110:緧
CJK UNIFIED IDEOGRAPH:'E6E7:59111:緗
CJK UNIFIED IDEOGRAPH:'E6E8:59112:緡
CJK UNIFIED IDEOGRAPH:'E6E9:59113:縃
CJK UNIFIED IDEOGRAPH:'E6EA:59114:緺
CJK UNIFIED IDEOGRAPH:'E6EB:59115:緦
CJK UNIFIED IDEOGRAPH:'E6EC:59116:緶
CJK UNIFIED IDEOGRAPH:'E6ED:59117:緱
CJK UNIFIED IDEOGRAPH:'E6EE:59118:緰
CJK UNIFIED IDEOGRAPH:'E6EF:59119:緮
CJK UNIFIED IDEOGRAPH:'E6F0:59120:緟
CJK UNIFIED IDEOGRAPH:'E6F1:59121:罶
CJK UNIFIED IDEOGRAPH:'E6F2:59122:羬
CJK UNIFIED IDEOGRAPH:'E6F3:59123:羰
CJK UNIFIED IDEOGRAPH:'E6F4:59124:羭
CJK UNIFIED IDEOGRAPH:'E6F5:59125:翭
CJK UNIFIED IDEOGRAPH:'E6F6:59126:翫
CJK UNIFIED IDEOGRAPH:'E6F7:59127:翪
CJK UNIFIED IDEOGRAPH:'E6F8:59128:翬
CJK UNIFIED IDEOGRAPH:'E6F9:59129:翦
CJK UNIFIED IDEOGRAPH:'E6FA:59130:翨
CJK UNIFIED IDEOGRAPH:'E6FB:59131:聤
CJK UNIFIED IDEOGRAPH:'E6FC:59132:聧
CJK UNIFIED IDEOGRAPH:'E6FD:59133:膣
CJK UNIFIED IDEOGRAPH:'E6FE:59134:膟
CJK UNIFIED IDEOGRAPH:'E740:59200:膞
CJK UNIFIED IDEOGRAPH:'E741:59201:膕
CJK UNIFIED IDEOGRAPH:'E742:59202:膢
CJK UNIFIED IDEOGRAPH:'E743:59203:膙
CJK UNIFIED IDEOGRAPH:'E744:59204:膗
CJK UNIFIED IDEOGRAPH:'E745:59205:舖
CJK UNIFIED IDEOGRAPH:'E746:59206:艏
CJK UNIFIED IDEOGRAPH:'E747:59207:艓
CJK UNIFIED IDEOGRAPH:'E748:59208:艒
CJK UNIFIED IDEOGRAPH:'E749:59209:艐
CJK UNIFIED IDEOGRAPH:'E74A:59210:艎
CJK UNIFIED IDEOGRAPH:'E74B:59211:艑
CJK UNIFIED IDEOGRAPH:'E74C:59212:蔤
CJK UNIFIED IDEOGRAPH:'E74D:59213:蔻
CJK UNIFIED IDEOGRAPH:'E74E:59214:蔏
CJK UNIFIED IDEOGRAPH:'E74F:59215:蔀
CJK UNIFIED IDEOGRAPH:'E750:59216:蔩
CJK UNIFIED IDEOGRAPH:'E751:59217:蔎
CJK UNIFIED IDEOGRAPH:'E752:59218:蔉
CJK UNIFIED IDEOGRAPH:'E753:59219:蔍
CJK UNIFIED IDEOGRAPH:'E754:59220:蔟
CJK UNIFIED IDEOGRAPH:'E755:59221:蔊
CJK UNIFIED IDEOGRAPH:'E756:59222:蔧
CJK UNIFIED IDEOGRAPH:'E757:59223:蔜
CJK UNIFIED IDEOGRAPH:'E758:59224:蓻
CJK UNIFIED IDEOGRAPH:'E759:59225:蔫
CJK UNIFIED IDEOGRAPH:'E75A:59226:蓺
CJK UNIFIED IDEOGRAPH:'E75B:59227:蔈
CJK UNIFIED IDEOGRAPH:'E75C:59228:蔌
CJK UNIFIED IDEOGRAPH:'E75D:59229:蓴
CJK UNIFIED IDEOGRAPH:'E75E:59230:蔪
CJK UNIFIED IDEOGRAPH:'E75F:59231:蓲
CJK UNIFIED IDEOGRAPH:'E760:59232:蔕
CJK UNIFIED IDEOGRAPH:'E761:59233:蓷
CJK UNIFIED IDEOGRAPH:'E762:59234:蓫
CJK UNIFIED IDEOGRAPH:'E763:59235:蓳
CJK UNIFIED IDEOGRAPH:'E764:59236:蓼
CJK UNIFIED IDEOGRAPH:'E765:59237:蔒
CJK UNIFIED IDEOGRAPH:'E766:59238:蓪
CJK UNIFIED IDEOGRAPH:'E767:59239:蓩
CJK UNIFIED IDEOGRAPH:'E768:59240:蔖
CJK UNIFIED IDEOGRAPH:'E769:59241:蓾
CJK UNIFIED IDEOGRAPH:'E76A:59242:蔨
CJK UNIFIED IDEOGRAPH:'E76B:59243:蔝
CJK UNIFIED IDEOGRAPH:'E76C:59244:蔮
CJK UNIFIED IDEOGRAPH:'E76D:59245:蔂
CJK UNIFIED IDEOGRAPH:'E76E:59246:蓽
CJK UNIFIED IDEOGRAPH:'E76F:59247:蔞
CJK UNIFIED IDEOGRAPH:'E770:59248:蓶
CJK UNIFIED IDEOGRAPH:'E771:59249:蔱
CJK UNIFIED IDEOGRAPH:'E772:59250:蔦
CJK UNIFIED IDEOGRAPH:'E773:59251:蓧
CJK UNIFIED IDEOGRAPH:'E774:59252:蓨
CJK UNIFIED IDEOGRAPH:'E775:59253:蓰
CJK UNIFIED IDEOGRAPH:'E776:59254:蓯
CJK UNIFIED IDEOGRAPH:'E777:59255:蓹
CJK UNIFIED IDEOGRAPH:'E778:59256:蔘
CJK UNIFIED IDEOGRAPH:'E779:59257:蔠
CJK UNIFIED IDEOGRAPH:'E77A:59258:蔰
CJK UNIFIED IDEOGRAPH:'E77B:59259:蔋
CJK UNIFIED IDEOGRAPH:'E77C:59260:蔙
CJK UNIFIED IDEOGRAPH:'E77D:59261:蔯
CJK UNIFIED IDEOGRAPH:'E77E:59262:虢
CJK UNIFIED IDEOGRAPH:'E7A1:59297:蝖
CJK UNIFIED IDEOGRAPH:'E7A2:59298:蝣
CJK UNIFIED IDEOGRAPH:'E7A3:59299:蝤
CJK UNIFIED IDEOGRAPH:'E7A4:59300:蝷
CJK UNIFIED IDEOGRAPH:'E7A5:59301:蟡
CJK UNIFIED IDEOGRAPH:'E7A6:59302:蝳
CJK UNIFIED IDEOGRAPH:'E7A7:59303:蝘
CJK UNIFIED IDEOGRAPH:'E7A8:59304:蝔
CJK UNIFIED IDEOGRAPH:'E7A9:59305:蝛
CJK UNIFIED IDEOGRAPH:'E7AA:59306:蝒
CJK UNIFIED IDEOGRAPH:'E7AB:59307:蝡
CJK UNIFIED IDEOGRAPH:'E7AC:59308:蝚
CJK UNIFIED IDEOGRAPH:'E7AD:59309:蝑
CJK UNIFIED IDEOGRAPH:'E7AE:59310:蝞
CJK UNIFIED IDEOGRAPH:'E7AF:59311:蝭
CJK UNIFIED IDEOGRAPH:'E7B0:59312:蝪
CJK UNIFIED IDEOGRAPH:'E7B1:59313:蝐
CJK UNIFIED IDEOGRAPH:'E7B2:59314:蝎
CJK UNIFIED IDEOGRAPH:'E7B3:59315:蝟
CJK UNIFIED IDEOGRAPH:'E7B4:59316:蝝
CJK UNIFIED IDEOGRAPH:'E7B5:59317:蝯
CJK UNIFIED IDEOGRAPH:'E7B6:59318:蝬
CJK UNIFIED IDEOGRAPH:'E7B7:59319:蝺
CJK UNIFIED IDEOGRAPH:'E7B8:59320:蝮
CJK UNIFIED IDEOGRAPH:'E7B9:59321:蝜
CJK UNIFIED IDEOGRAPH:'E7BA:59322:蝥
CJK UNIFIED IDEOGRAPH:'E7BB:59323:蝏
CJK UNIFIED IDEOGRAPH:'E7BC:59324:蝻
CJK UNIFIED IDEOGRAPH:'E7BD:59325:蝵
CJK UNIFIED IDEOGRAPH:'E7BE:59326:蝢
CJK UNIFIED IDEOGRAPH:'E7BF:59327:蝧
CJK UNIFIED IDEOGRAPH:'E7C0:59328:蝩
CJK UNIFIED IDEOGRAPH:'E7C1:59329:衚
CJK UNIFIED IDEOGRAPH:'E7C2:59330:褅
CJK UNIFIED IDEOGRAPH:'E7C3:59331:褌
CJK UNIFIED IDEOGRAPH:'E7C4:59332:褔
CJK UNIFIED IDEOGRAPH:'E7C5:59333:褋
CJK UNIFIED IDEOGRAPH:'E7C6:59334:褗
CJK UNIFIED IDEOGRAPH:'E7C7:59335:褘
CJK UNIFIED IDEOGRAPH:'E7C8:59336:褙
CJK UNIFIED IDEOGRAPH:'E7C9:59337:褆
CJK UNIFIED IDEOGRAPH:'E7CA:59338:褖
CJK UNIFIED IDEOGRAPH:'E7CB:59339:褑
CJK UNIFIED IDEOGRAPH:'E7CC:59340:褎
CJK UNIFIED IDEOGRAPH:'E7CD:59341:褉
CJK UNIFIED IDEOGRAPH:'E7CE:59342:覢
CJK UNIFIED IDEOGRAPH:'E7CF:59343:覤
CJK UNIFIED IDEOGRAPH:'E7D0:59344:覣
CJK UNIFIED IDEOGRAPH:'E7D1:59345:觭
CJK UNIFIED IDEOGRAPH:'E7D2:59346:觰
CJK UNIFIED IDEOGRAPH:'E7D3:59347:觬
CJK UNIFIED IDEOGRAPH:'E7D4:59348:諏
CJK UNIFIED IDEOGRAPH:'E7D5:59349:諆
CJK UNIFIED IDEOGRAPH:'E7D6:59350:誸
CJK UNIFIED IDEOGRAPH:'E7D7:59351:諓
CJK UNIFIED IDEOGRAPH:'E7D8:59352:諑
CJK UNIFIED IDEOGRAPH:'E7D9:59353:諔
CJK UNIFIED IDEOGRAPH:'E7DA:59354:諕
CJK UNIFIED IDEOGRAPH:'E7DB:59355:誻
CJK UNIFIED IDEOGRAPH:'E7DC:59356:諗
CJK UNIFIED IDEOGRAPH:'E7DD:59357:誾
CJK UNIFIED IDEOGRAPH:'E7DE:59358:諀
CJK UNIFIED IDEOGRAPH:'E7DF:59359:諅
CJK UNIFIED IDEOGRAPH:'E7E0:59360:諘
CJK UNIFIED IDEOGRAPH:'E7E1:59361:諃
CJK UNIFIED IDEOGRAPH:'E7E2:59362:誺
CJK UNIFIED IDEOGRAPH:'E7E3:59363:誽
CJK UNIFIED IDEOGRAPH:'E7E4:59364:諙
CJK UNIFIED IDEOGRAPH:'E7E5:59365:谾
CJK UNIFIED IDEOGRAPH:'E7E6:59366:豍
CJK UNIFIED IDEOGRAPH:'E7E7:59367:貏
CJK UNIFIED IDEOGRAPH:'E7E8:59368:賥
CJK UNIFIED IDEOGRAPH:'E7E9:59369:賟
CJK UNIFIED IDEOGRAPH:'E7EA:59370:賙
CJK UNIFIED IDEOGRAPH:'E7EB:59371:賨
CJK UNIFIED IDEOGRAPH:'E7EC:59372:賚
CJK UNIFIED IDEOGRAPH:'E7ED:59373:賝
CJK UNIFIED IDEOGRAPH:'E7EE:59374:賧
CJK UNIFIED IDEOGRAPH:'E7EF:59375:趠
CJK UNIFIED IDEOGRAPH:'E7F0:59376:趜
CJK UNIFIED IDEOGRAPH:'E7F1:59377:趡
CJK UNIFIED IDEOGRAPH:'E7F2:59378:趛
CJK UNIFIED IDEOGRAPH:'E7F3:59379:踠
CJK UNIFIED IDEOGRAPH:'E7F4:59380:踣
CJK UNIFIED IDEOGRAPH:'E7F5:59381:踥
CJK UNIFIED IDEOGRAPH:'E7F6:59382:踤
CJK UNIFIED IDEOGRAPH:'E7F7:59383:踮
CJK UNIFIED IDEOGRAPH:'E7F8:59384:踕
CJK UNIFIED IDEOGRAPH:'E7F9:59385:踛
CJK UNIFIED IDEOGRAPH:'E7FA:59386:踖
CJK UNIFIED IDEOGRAPH:'E7FB:59387:踑
CJK UNIFIED IDEOGRAPH:'E7FC:59388:踙
CJK UNIFIED IDEOGRAPH:'E7FD:59389:踦
CJK UNIFIED IDEOGRAPH:'E7FE:59390:踧
CJK UNIFIED IDEOGRAPH:'E840:59456:踔
CJK UNIFIED IDEOGRAPH:'E841:59457:踒
CJK UNIFIED IDEOGRAPH:'E842:59458:踘
CJK UNIFIED IDEOGRAPH:'E843:59459:踓
CJK UNIFIED IDEOGRAPH:'E844:59460:踜
CJK UNIFIED IDEOGRAPH:'E845:59461:踗
CJK UNIFIED IDEOGRAPH:'E846:59462:踚
CJK UNIFIED IDEOGRAPH:'E847:59463:輬
CJK UNIFIED IDEOGRAPH:'E848:59464:輤
CJK UNIFIED IDEOGRAPH:'E849:59465:輘
CJK UNIFIED IDEOGRAPH:'E84A:59466:輚
CJK UNIFIED IDEOGRAPH:'E84B:59467:輠
CJK UNIFIED IDEOGRAPH:'E84C:59468:輣
CJK UNIFIED IDEOGRAPH:'E84D:59469:輖
CJK UNIFIED IDEOGRAPH:'E84E:59470:輗
CJK UNIFIED IDEOGRAPH:'E84F:59471:遳
CJK UNIFIED IDEOGRAPH:'E850:59472:遰
CJK UNIFIED IDEOGRAPH:'E851:59473:遯
CJK UNIFIED IDEOGRAPH:'E852:59474:遧
CJK UNIFIED IDEOGRAPH:'E853:59475:遫
CJK UNIFIED IDEOGRAPH:'E854:59476:鄯
CJK UNIFIED IDEOGRAPH:'E855:59477:鄫
CJK UNIFIED IDEOGRAPH:'E856:59478:鄩
CJK UNIFIED IDEOGRAPH:'E857:59479:鄪
CJK UNIFIED IDEOGRAPH:'E858:59480:鄲
CJK UNIFIED IDEOGRAPH:'E859:59481:鄦
CJK UNIFIED IDEOGRAPH:'E85A:59482:鄮
CJK UNIFIED IDEOGRAPH:'E85B:59483:醅
CJK UNIFIED IDEOGRAPH:'E85C:59484:醆
CJK UNIFIED IDEOGRAPH:'E85D:59485:醊
CJK UNIFIED IDEOGRAPH:'E85E:59486:醁
CJK UNIFIED IDEOGRAPH:'E85F:59487:醂
CJK UNIFIED IDEOGRAPH:'E860:59488:醄
CJK UNIFIED IDEOGRAPH:'E861:59489:醀
CJK UNIFIED IDEOGRAPH:'E862:59490:鋐
CJK UNIFIED IDEOGRAPH:'E863:59491:鋃
CJK UNIFIED IDEOGRAPH:'E864:59492:鋄
CJK UNIFIED IDEOGRAPH:'E865:59493:鋀
CJK UNIFIED IDEOGRAPH:'E866:59494:鋙
CJK UNIFIED IDEOGRAPH:'E867:59495:銶
CJK UNIFIED IDEOGRAPH:'E868:59496:鋏
CJK UNIFIED IDEOGRAPH:'E869:59497:鋱
CJK UNIFIED IDEOGRAPH:'E86A:59498:鋟
CJK UNIFIED IDEOGRAPH:'E86B:59499:鋘
CJK UNIFIED IDEOGRAPH:'E86C:59500:鋩
CJK UNIFIED IDEOGRAPH:'E86D:59501:鋗
CJK UNIFIED IDEOGRAPH:'E86E:59502:鋝
CJK UNIFIED IDEOGRAPH:'E86F:59503:鋌
CJK UNIFIED IDEOGRAPH:'E870:59504:鋯
CJK UNIFIED IDEOGRAPH:'E871:59505:鋂
CJK UNIFIED IDEOGRAPH:'E872:59506:鋨
CJK UNIFIED IDEOGRAPH:'E873:59507:鋊
CJK UNIFIED IDEOGRAPH:'E874:59508:鋈
CJK UNIFIED IDEOGRAPH:'E875:59509:鋎
CJK UNIFIED IDEOGRAPH:'E876:59510:鋦
CJK UNIFIED IDEOGRAPH:'E877:59511:鋍
CJK UNIFIED IDEOGRAPH:'E878:59512:鋕
CJK UNIFIED IDEOGRAPH:'E879:59513:鋉
CJK UNIFIED IDEOGRAPH:'E87A:59514:鋠
CJK UNIFIED IDEOGRAPH:'E87B:59515:鋞
CJK UNIFIED IDEOGRAPH:'E87C:59516:鋧
CJK UNIFIED IDEOGRAPH:'E87D:59517:鋑
CJK UNIFIED IDEOGRAPH:'E87E:59518:鋓
CJK UNIFIED IDEOGRAPH:'E8A1:59553:銵
CJK UNIFIED IDEOGRAPH:'E8A2:59554:鋡
CJK UNIFIED IDEOGRAPH:'E8A3:59555:鋆
CJK UNIFIED IDEOGRAPH:'E8A4:59556:銴
CJK UNIFIED IDEOGRAPH:'E8A5:59557:镼
CJK UNIFIED IDEOGRAPH:'E8A6:59558:閬
CJK UNIFIED IDEOGRAPH:'E8A7:59559:閫
CJK UNIFIED IDEOGRAPH:'E8A8:59560:閮
CJK UNIFIED IDEOGRAPH:'E8A9:59561:閰
CJK UNIFIED IDEOGRAPH:'E8AA:59562:隤
CJK UNIFIED IDEOGRAPH:'E8AB:59563:隢
CJK UNIFIED IDEOGRAPH:'E8AC:59564:雓
CJK UNIFIED IDEOGRAPH:'E8AD:59565:霅
CJK UNIFIED IDEOGRAPH:'E8AE:59566:霈
CJK UNIFIED IDEOGRAPH:'E8AF:59567:霂
CJK UNIFIED IDEOGRAPH:'E8B0:59568:靚
CJK UNIFIED IDEOGRAPH:'E8B1:59569:鞊
CJK UNIFIED IDEOGRAPH:'E8B2:59570:鞎
CJK UNIFIED IDEOGRAPH:'E8B3:59571:鞈
CJK UNIFIED IDEOGRAPH:'E8B4:59572:韐
CJK UNIFIED IDEOGRAPH:'E8B5:59573:韏
CJK UNIFIED IDEOGRAPH:'E8B6:59574:頞
CJK UNIFIED IDEOGRAPH:'E8B7:59575:頝
CJK UNIFIED IDEOGRAPH:'E8B8:59576:頦
CJK UNIFIED IDEOGRAPH:'E8B9:59577:頩
CJK UNIFIED IDEOGRAPH:'E8BA:59578:頨
CJK UNIFIED IDEOGRAPH:'E8BB:59579:頠
CJK UNIFIED IDEOGRAPH:'E8BC:59580:頛
CJK UNIFIED IDEOGRAPH:'E8BD:59581:頧
CJK UNIFIED IDEOGRAPH:'E8BE:59582:颲
CJK UNIFIED IDEOGRAPH:'E8BF:59583:餈
CJK UNIFIED IDEOGRAPH:'E8C0:59584:飺
CJK UNIFIED IDEOGRAPH:'E8C1:59585:餑
CJK UNIFIED IDEOGRAPH:'E8C2:59586:餔
CJK UNIFIED IDEOGRAPH:'E8C3:59587:餖
CJK UNIFIED IDEOGRAPH:'E8C4:59588:餗
CJK UNIFIED IDEOGRAPH:'E8C5:59589:餕
CJK UNIFIED IDEOGRAPH:'E8C6:59590:駜
CJK UNIFIED IDEOGRAPH:'E8C7:59591:駍
CJK UNIFIED IDEOGRAPH:'E8C8:59592:駏
CJK UNIFIED IDEOGRAPH:'E8C9:59593:駓
CJK UNIFIED IDEOGRAPH:'E8CA:59594:駔
CJK UNIFIED IDEOGRAPH:'E8CB:59595:駎
CJK UNIFIED IDEOGRAPH:'E8CC:59596:駉
CJK UNIFIED IDEOGRAPH:'E8CD:59597:駖
CJK UNIFIED IDEOGRAPH:'E8CE:59598:駘
CJK UNIFIED IDEOGRAPH:'E8CF:59599:駋
CJK UNIFIED IDEOGRAPH:'E8D0:59600:駗
CJK UNIFIED IDEOGRAPH:'E8D1:59601:駌
CJK UNIFIED IDEOGRAPH:'E8D2:59602:骳
CJK UNIFIED IDEOGRAPH:'E8D3:59603:髬
CJK UNIFIED IDEOGRAPH:'E8D4:59604:髫
CJK UNIFIED IDEOGRAPH:'E8D5:59605:髳
CJK UNIFIED IDEOGRAPH:'E8D6:59606:髲
CJK UNIFIED IDEOGRAPH:'E8D7:59607:髱
CJK UNIFIED IDEOGRAPH:'E8D8:59608:魆
CJK UNIFIED IDEOGRAPH:'E8D9:59609:魃
CJK UNIFIED IDEOGRAPH:'E8DA:59610:魧
CJK UNIFIED IDEOGRAPH:'E8DB:59611:魴
CJK UNIFIED IDEOGRAPH:'E8DC:59612:魱
CJK UNIFIED IDEOGRAPH:'E8DD:59613:魦
CJK UNIFIED IDEOGRAPH:'E8DE:59614:魶
CJK UNIFIED IDEOGRAPH:'E8DF:59615:魵
CJK UNIFIED IDEOGRAPH:'E8E0:59616:魰
CJK UNIFIED IDEOGRAPH:'E8E1:59617:魨
CJK UNIFIED IDEOGRAPH:'E8E2:59618:魤
CJK UNIFIED IDEOGRAPH:'E8E3:59619:魬
CJK UNIFIED IDEOGRAPH:'E8E4:59620:鳼
CJK UNIFIED IDEOGRAPH:'E8E5:59621:鳺
CJK UNIFIED IDEOGRAPH:'E8E6:59622:鳽
CJK UNIFIED IDEOGRAPH:'E8E7:59623:鳿
CJK UNIFIED IDEOGRAPH:'E8E8:59624:鳷
CJK UNIFIED IDEOGRAPH:'E8E9:59625:鴇
CJK UNIFIED IDEOGRAPH:'E8EA:59626:鴀
CJK UNIFIED IDEOGRAPH:'E8EB:59627:鳹
CJK UNIFIED IDEOGRAPH:'E8EC:59628:鳻
CJK UNIFIED IDEOGRAPH:'E8ED:59629:鴈
CJK UNIFIED IDEOGRAPH:'E8EE:59630:鴅
CJK UNIFIED IDEOGRAPH:'E8EF:59631:鴄
CJK UNIFIED IDEOGRAPH:'E8F0:59632:麃
CJK UNIFIED IDEOGRAPH:'E8F1:59633:黓
CJK UNIFIED IDEOGRAPH:'E8F2:59634:鼏
CJK UNIFIED IDEOGRAPH:'E8F3:59635:鼐
CJK UNIFIED IDEOGRAPH:'E8F4:59636:儜
CJK UNIFIED IDEOGRAPH:'E8F5:59637:儓
CJK UNIFIED IDEOGRAPH:'E8F6:59638:儗
CJK UNIFIED IDEOGRAPH:'E8F7:59639:儚
CJK UNIFIED IDEOGRAPH:'E8F8:59640:儑
CJK UNIFIED IDEOGRAPH:'E8F9:59641:凞
CJK UNIFIED IDEOGRAPH:'E8FA:59642:匴
CJK UNIFIED IDEOGRAPH:'E8FB:59643:叡
CJK UNIFIED IDEOGRAPH:'E8FC:59644:噰
CJK UNIFIED IDEOGRAPH:'E8FD:59645:噠
CJK UNIFIED IDEOGRAPH:'E8FE:59646:噮
CJK UNIFIED IDEOGRAPH:'E940:59712:噳
CJK UNIFIED IDEOGRAPH:'E941:59713:噦
CJK UNIFIED IDEOGRAPH:'E942:59714:噣
CJK UNIFIED IDEOGRAPH:'E943:59715:噭
CJK UNIFIED IDEOGRAPH:'E944:59716:噲
CJK UNIFIED IDEOGRAPH:'E945:59717:噞
CJK UNIFIED IDEOGRAPH:'E946:59718:噷
CJK UNIFIED IDEOGRAPH:'E947:59719:圜
CJK UNIFIED IDEOGRAPH:'E948:59720:圛
CJK UNIFIED IDEOGRAPH:'E949:59721:壈
CJK UNIFIED IDEOGRAPH:'E94A:59722:墽
CJK UNIFIED IDEOGRAPH:'E94B:59723:壉
CJK UNIFIED IDEOGRAPH:'E94C:59724:墿
CJK UNIFIED IDEOGRAPH:'E94D:59725:墺
CJK UNIFIED IDEOGRAPH:'E94E:59726:壂
CJK UNIFIED IDEOGRAPH:'E94F:59727:墼
CJK UNIFIED IDEOGRAPH:'E950:59728:壆
CJK UNIFIED IDEOGRAPH:'E951:59729:嬗
CJK UNIFIED IDEOGRAPH:'E952:59730:嬙
CJK UNIFIED IDEOGRAPH:'E953:59731:嬛
CJK UNIFIED IDEOGRAPH:'E954:59732:嬡
CJK UNIFIED IDEOGRAPH:'E955:59733:嬔
CJK UNIFIED IDEOGRAPH:'E956:59734:嬓
CJK UNIFIED IDEOGRAPH:'E957:59735:嬐
CJK UNIFIED IDEOGRAPH:'E958:59736:嬖
CJK UNIFIED IDEOGRAPH:'E959:59737:嬨
CJK UNIFIED IDEOGRAPH:'E95A:59738:嬚
CJK UNIFIED IDEOGRAPH:'E95B:59739:嬠
CJK UNIFIED IDEOGRAPH:'E95C:59740:嬞
CJK UNIFIED IDEOGRAPH:'E95D:59741:寯
CJK UNIFIED IDEOGRAPH:'E95E:59742:嶬
CJK UNIFIED IDEOGRAPH:'E95F:59743:嶱
CJK UNIFIED IDEOGRAPH:'E960:59744:嶩
CJK UNIFIED IDEOGRAPH:'E961:59745:嶧
CJK UNIFIED IDEOGRAPH:'E962:59746:嶵
CJK UNIFIED IDEOGRAPH:'E963:59747:嶰
CJK UNIFIED IDEOGRAPH:'E964:59748:嶮
CJK UNIFIED IDEOGRAPH:'E965:59749:嶪
CJK UNIFIED IDEOGRAPH:'E966:59750:嶨
CJK UNIFIED IDEOGRAPH:'E967:59751:嶲
CJK UNIFIED IDEOGRAPH:'E968:59752:嶭
CJK UNIFIED IDEOGRAPH:'E969:59753:嶯
CJK UNIFIED IDEOGRAPH:'E96A:59754:嶴
CJK UNIFIED IDEOGRAPH:'E96B:59755:幧
CJK UNIFIED IDEOGRAPH:'E96C:59756:幨
CJK UNIFIED IDEOGRAPH:'E96D:59757:幦
CJK UNIFIED IDEOGRAPH:'E96E:59758:幯
CJK UNIFIED IDEOGRAPH:'E96F:59759:廩
CJK UNIFIED IDEOGRAPH:'E970:59760:廧
CJK UNIFIED IDEOGRAPH:'E971:59761:廦
CJK UNIFIED IDEOGRAPH:'E972:59762:廨
CJK UNIFIED IDEOGRAPH:'E973:59763:廥
CJK UNIFIED IDEOGRAPH:'E974:59764:彋
CJK UNIFIED IDEOGRAPH:'E975:59765:徼
CJK UNIFIED IDEOGRAPH:'E976:59766:憝
CJK UNIFIED IDEOGRAPH:'E977:59767:憨
CJK UNIFIED IDEOGRAPH:'E978:59768:憖
CJK UNIFIED IDEOGRAPH:'E979:59769:懅
CJK UNIFIED IDEOGRAPH:'E97A:59770:憴
CJK UNIFIED IDEOGRAPH:'E97B:59771:懆
CJK UNIFIED IDEOGRAPH:'E97C:59772:懁
CJK UNIFIED IDEOGRAPH:'E97D:59773:懌
CJK UNIFIED IDEOGRAPH:'E97E:59774:憺
CJK UNIFIED IDEOGRAPH:'E9A1:59809:憿
CJK UNIFIED IDEOGRAPH:'E9A2:59810:憸
CJK UNIFIED IDEOGRAPH:'E9A3:59811:憌
CJK UNIFIED IDEOGRAPH:'E9A4:59812:擗
CJK UNIFIED IDEOGRAPH:'E9A5:59813:擖
CJK UNIFIED IDEOGRAPH:'E9A6:59814:擐
CJK UNIFIED IDEOGRAPH:'E9A7:59815:擏
CJK UNIFIED IDEOGRAPH:'E9A8:59816:擉
CJK UNIFIED IDEOGRAPH:'E9A9:59817:撽
CJK UNIFIED IDEOGRAPH:'E9AA:59818:撉
CJK UNIFIED IDEOGRAPH:'E9AB:59819:擃
CJK UNIFIED IDEOGRAPH:'E9AC:59820:擛
CJK UNIFIED IDEOGRAPH:'E9AD:59821:擳
CJK UNIFIED IDEOGRAPH:'E9AE:59822:擙
CJK UNIFIED IDEOGRAPH:'E9AF:59823:攳
CJK UNIFIED IDEOGRAPH:'E9B0:59824:敿
CJK UNIFIED IDEOGRAPH:'E9B1:59825:敼
CJK UNIFIED IDEOGRAPH:'E9B2:59826:斢
CJK UNIFIED IDEOGRAPH:'E9B3:59827:曈
CJK UNIFIED IDEOGRAPH:'E9B4:59828:暾
CJK UNIFIED IDEOGRAPH:'E9B5:59829:曀
CJK UNIFIED IDEOGRAPH:'E9B6:59830:曊
CJK UNIFIED IDEOGRAPH:'E9B7:59831:曋
CJK UNIFIED IDEOGRAPH:'E9B8:59832:曏
CJK UNIFIED IDEOGRAPH:'E9B9:59833:暽
CJK UNIFIED IDEOGRAPH:'E9BA:59834:暻
CJK UNIFIED IDEOGRAPH:'E9BB:59835:暺
CJK UNIFIED IDEOGRAPH:'E9BC:59836:曌
CJK UNIFIED IDEOGRAPH:'E9BD:59837:朣
CJK UNIFIED IDEOGRAPH:'E9BE:59838:樴
CJK UNIFIED IDEOGRAPH:'E9BF:59839:橦
CJK UNIFIED IDEOGRAPH:'E9C0:59840:橉
CJK UNIFIED IDEOGRAPH:'E9C1:59841:橧
CJK UNIFIED IDEOGRAPH:'E9C2:59842:樲
CJK UNIFIED IDEOGRAPH:'E9C3:59843:橨
CJK UNIFIED IDEOGRAPH:'E9C4:59844:樾
CJK UNIFIED IDEOGRAPH:'E9C5:59845:橝
CJK UNIFIED IDEOGRAPH:'E9C6:59846:橭
CJK UNIFIED IDEOGRAPH:'E9C7:59847:橶
CJK UNIFIED IDEOGRAPH:'E9C8:59848:橛
CJK UNIFIED IDEOGRAPH:'E9C9:59849:橑
CJK UNIFIED IDEOGRAPH:'E9CA:59850:樨
CJK UNIFIED IDEOGRAPH:'E9CB:59851:橚
CJK UNIFIED IDEOGRAPH:'E9CC:59852:樻
CJK UNIFIED IDEOGRAPH:'E9CD:59853:樿
CJK UNIFIED IDEOGRAPH:'E9CE:59854:橁
CJK UNIFIED IDEOGRAPH:'E9CF:59855:橪
CJK UNIFIED IDEOGRAPH:'E9D0:59856:橤
CJK UNIFIED IDEOGRAPH:'E9D1:59857:橐
CJK UNIFIED IDEOGRAPH:'E9D2:59858:橏
CJK UNIFIED IDEOGRAPH:'E9D3:59859:橔
CJK UNIFIED IDEOGRAPH:'E9D4:59860:橯
CJK UNIFIED IDEOGRAPH:'E9D5:59861:橩
CJK UNIFIED IDEOGRAPH:'E9D6:59862:橠
CJK UNIFIED IDEOGRAPH:'E9D7:59863:樼
CJK UNIFIED IDEOGRAPH:'E9D8:59864:橞
CJK UNIFIED IDEOGRAPH:'E9D9:59865:橖
CJK UNIFIED IDEOGRAPH:'E9DA:59866:橕
CJK UNIFIED IDEOGRAPH:'E9DB:59867:橍
CJK UNIFIED IDEOGRAPH:'E9DC:59868:橎
CJK UNIFIED IDEOGRAPH:'E9DD:59869:橆
CJK UNIFIED IDEOGRAPH:'E9DE:59870:歕
CJK UNIFIED IDEOGRAPH:'E9DF:59871:歔
CJK UNIFIED IDEOGRAPH:'E9E0:59872:歖
CJK UNIFIED IDEOGRAPH:'E9E1:59873:殧
CJK UNIFIED IDEOGRAPH:'E9E2:59874:殪
CJK UNIFIED IDEOGRAPH:'E9E3:59875:殫
CJK UNIFIED IDEOGRAPH:'E9E4:59876:毈
CJK UNIFIED IDEOGRAPH:'E9E5:59877:毇
CJK UNIFIED IDEOGRAPH:'E9E6:59878:氄
CJK UNIFIED IDEOGRAPH:'E9E7:59879:氃
CJK UNIFIED IDEOGRAPH:'E9E8:59880:氆
CJK UNIFIED IDEOGRAPH:'E9E9:59881:澭
CJK UNIFIED IDEOGRAPH:'E9EA:59882:濋
CJK UNIFIED IDEOGRAPH:'E9EB:59883:澣
CJK UNIFIED IDEOGRAPH:'E9EC:59884:濇
CJK UNIFIED IDEOGRAPH:'E9ED:59885:澼
CJK UNIFIED IDEOGRAPH:'E9EE:59886:濎
CJK UNIFIED IDEOGRAPH:'E9EF:59887:濈
CJK UNIFIED IDEOGRAPH:'E9F0:59888:潞
CJK UNIFIED IDEOGRAPH:'E9F1:59889:濄
CJK UNIFIED IDEOGRAPH:'E9F2:59890:澽
CJK UNIFIED IDEOGRAPH:'E9F3:59891:澞
CJK UNIFIED IDEOGRAPH:'E9F4:59892:濊
CJK UNIFIED IDEOGRAPH:'E9F5:59893:澨
CJK UNIFIED IDEOGRAPH:'E9F6:59894:瀄
CJK UNIFIED IDEOGRAPH:'E9F7:59895:澥
CJK UNIFIED IDEOGRAPH:'E9F8:59896:澮
CJK UNIFIED IDEOGRAPH:'E9F9:59897:澺
CJK UNIFIED IDEOGRAPH:'E9FA:59898:澬
CJK UNIFIED IDEOGRAPH:'E9FB:59899:澪
CJK UNIFIED IDEOGRAPH:'E9FC:59900:濏
CJK UNIFIED IDEOGRAPH:'E9FD:59901:澿
CJK UNIFIED IDEOGRAPH:'E9FE:59902:澸
CJK UNIFIED IDEOGRAPH:'EA40:59968:澢
CJK UNIFIED IDEOGRAPH:'EA41:59969:濉
CJK UNIFIED IDEOGRAPH:'EA42:59970:澫
CJK UNIFIED IDEOGRAPH:'EA43:59971:濍
CJK UNIFIED IDEOGRAPH:'EA44:59972:澯
CJK UNIFIED IDEOGRAPH:'EA45:59973:澲
CJK UNIFIED IDEOGRAPH:'EA46:59974:澰
CJK UNIFIED IDEOGRAPH:'EA47:59975:燅
CJK UNIFIED IDEOGRAPH:'EA48:59976:燂
CJK UNIFIED IDEOGRAPH:'EA49:59977:熿
CJK UNIFIED IDEOGRAPH:'EA4A:59978:熸
CJK UNIFIED IDEOGRAPH:'EA4B:59979:燖
CJK UNIFIED IDEOGRAPH:'EA4C:59980:燀
CJK UNIFIED IDEOGRAPH:'EA4D:59981:燁
CJK UNIFIED IDEOGRAPH:'EA4E:59982:燋
CJK UNIFIED IDEOGRAPH:'EA4F:59983:燔
CJK UNIFIED IDEOGRAPH:'EA50:59984:燊
CJK UNIFIED IDEOGRAPH:'EA51:59985:燇
CJK UNIFIED IDEOGRAPH:'EA52:59986:燏
CJK UNIFIED IDEOGRAPH:'EA53:59987:熽
CJK UNIFIED IDEOGRAPH:'EA54:59988:燘
CJK UNIFIED IDEOGRAPH:'EA55:59989:熼
CJK UNIFIED IDEOGRAPH:'EA56:59990:燆
CJK UNIFIED IDEOGRAPH:'EA57:59991:燚
CJK UNIFIED IDEOGRAPH:'EA58:59992:燛
CJK UNIFIED IDEOGRAPH:'EA59:59993:犝
CJK UNIFIED IDEOGRAPH:'EA5A:59994:犞
CJK UNIFIED IDEOGRAPH:'EA5B:59995:獩
CJK UNIFIED IDEOGRAPH:'EA5C:59996:獦
CJK UNIFIED IDEOGRAPH:'EA5D:59997:獧
CJK UNIFIED IDEOGRAPH:'EA5E:59998:獬
CJK UNIFIED IDEOGRAPH:'EA5F:59999:獥
CJK UNIFIED IDEOGRAPH:'EA60:60000:獫
CJK UNIFIED IDEOGRAPH:'EA61:60001:獪
CJK UNIFIED IDEOGRAPH:'EA62:60002:瑿
CJK UNIFIED IDEOGRAPH:'EA63:60003:璚
CJK UNIFIED IDEOGRAPH:'EA64:60004:璠
CJK UNIFIED IDEOGRAPH:'EA65:60005:璔
CJK UNIFIED IDEOGRAPH:'EA66:60006:璒
CJK UNIFIED IDEOGRAPH:'EA67:60007:璕
CJK UNIFIED IDEOGRAPH:'EA68:60008:璡
CJK UNIFIED IDEOGRAPH:'EA69:60009:甋
CJK UNIFIED IDEOGRAPH:'EA6A:60010:疀
CJK UNIFIED IDEOGRAPH:'EA6B:60011:瘯
CJK UNIFIED IDEOGRAPH:'EA6C:60012:瘭
CJK UNIFIED IDEOGRAPH:'EA6D:60013:瘱
CJK UNIFIED IDEOGRAPH:'EA6E:60014:瘽
CJK UNIFIED IDEOGRAPH:'EA6F:60015:瘳
CJK UNIFIED IDEOGRAPH:'EA70:60016:瘼
CJK UNIFIED IDEOGRAPH:'EA71:60017:瘵
CJK UNIFIED IDEOGRAPH:'EA72:60018:瘲
CJK UNIFIED IDEOGRAPH:'EA73:60019:瘰
CJK UNIFIED IDEOGRAPH:'EA74:60020:皻
CJK UNIFIED IDEOGRAPH:'EA75:60021:盦
CJK UNIFIED IDEOGRAPH:'EA76:60022:瞚
CJK UNIFIED IDEOGRAPH:'EA77:60023:瞝
CJK UNIFIED IDEOGRAPH:'EA78:60024:瞡
CJK UNIFIED IDEOGRAPH:'EA79:60025:瞜
CJK UNIFIED IDEOGRAPH:'EA7A:60026:瞛
CJK UNIFIED IDEOGRAPH:'EA7B:60027:瞢
CJK UNIFIED IDEOGRAPH:'EA7C:60028:瞣
CJK UNIFIED IDEOGRAPH:'EA7D:60029:瞕
CJK UNIFIED IDEOGRAPH:'EA7E:60030:瞙
CJK UNIFIED IDEOGRAPH:'EAA1:60065:瞗
CJK UNIFIED IDEOGRAPH:'EAA2:60066:磝
CJK UNIFIED IDEOGRAPH:'EAA3:60067:磩
CJK UNIFIED IDEOGRAPH:'EAA4:60068:磥
CJK UNIFIED IDEOGRAPH:'EAA5:60069:磪
CJK UNIFIED IDEOGRAPH:'EAA6:60070:磞
CJK UNIFIED IDEOGRAPH:'EAA7:60071:磣
CJK UNIFIED IDEOGRAPH:'EAA8:60072:磛
CJK UNIFIED IDEOGRAPH:'EAA9:60073:磡
CJK UNIFIED IDEOGRAPH:'EAAA:60074:磢
CJK UNIFIED IDEOGRAPH:'EAAB:60075:磭
CJK UNIFIED IDEOGRAPH:'EAAC:60076:磟
CJK UNIFIED IDEOGRAPH:'EAAD:60077:磠
CJK UNIFIED IDEOGRAPH:'EAAE:60078:禤
CJK UNIFIED IDEOGRAPH:'EAAF:60079:穄
CJK UNIFIED IDEOGRAPH:'EAB0:60080:穈
CJK UNIFIED IDEOGRAPH:'EAB1:60081:穇
CJK UNIFIED IDEOGRAPH:'EAB2:60082:窶
CJK UNIFIED IDEOGRAPH:'EAB3:60083:窸
CJK UNIFIED IDEOGRAPH:'EAB4:60084:窵
CJK UNIFIED IDEOGRAPH:'EAB5:60085:窱
CJK UNIFIED IDEOGRAPH:'EAB6:60086:窷
CJK UNIFIED IDEOGRAPH:'EAB7:60087:篞
CJK UNIFIED IDEOGRAPH:'EAB8:60088:篣
CJK UNIFIED IDEOGRAPH:'EAB9:60089:篧
CJK UNIFIED IDEOGRAPH:'EABA:60090:篝
CJK UNIFIED IDEOGRAPH:'EABB:60091:篕
CJK UNIFIED IDEOGRAPH:'EABC:60092:篥
CJK UNIFIED IDEOGRAPH:'EABD:60093:篚
CJK UNIFIED IDEOGRAPH:'EABE:60094:篨
CJK UNIFIED IDEOGRAPH:'EABF:60095:篹
CJK UNIFIED IDEOGRAPH:'EAC0:60096:篔
CJK UNIFIED IDEOGRAPH:'EAC1:60097:篪
CJK UNIFIED IDEOGRAPH:'EAC2:60098:篢
CJK UNIFIED IDEOGRAPH:'EAC3:60099:篜
CJK UNIFIED IDEOGRAPH:'EAC4:60100:篫
CJK UNIFIED IDEOGRAPH:'EAC5:60101:篘
CJK UNIFIED IDEOGRAPH:'EAC6:60102:篟
CJK UNIFIED IDEOGRAPH:'EAC7:60103:糒
CJK UNIFIED IDEOGRAPH:'EAC8:60104:糔
CJK UNIFIED IDEOGRAPH:'EAC9:60105:糗
CJK UNIFIED IDEOGRAPH:'EACA:60106:糐
CJK UNIFIED IDEOGRAPH:'EACB:60107:糑
CJK UNIFIED IDEOGRAPH:'EACC:60108:縒
CJK UNIFIED IDEOGRAPH:'EACD:60109:縡
CJK UNIFIED IDEOGRAPH:'EACE:60110:縗
CJK UNIFIED IDEOGRAPH:'EACF:60111:縌
CJK UNIFIED IDEOGRAPH:'EAD0:60112:縟
CJK UNIFIED IDEOGRAPH:'EAD1:60113:縠
CJK UNIFIED IDEOGRAPH:'EAD2:60114:縓
CJK UNIFIED IDEOGRAPH:'EAD3:60115:縎
CJK UNIFIED IDEOGRAPH:'EAD4:60116:縜
CJK UNIFIED IDEOGRAPH:'EAD5:60117:縕
CJK UNIFIED IDEOGRAPH:'EAD6:60118:縚
CJK UNIFIED IDEOGRAPH:'EAD7:60119:縢
CJK UNIFIED IDEOGRAPH:'EAD8:60120:縋
CJK UNIFIED IDEOGRAPH:'EAD9:60121:縏
CJK UNIFIED IDEOGRAPH:'EADA:60122:縖
CJK UNIFIED IDEOGRAPH:'EADB:60123:縍
CJK UNIFIED IDEOGRAPH:'EADC:60124:縔
CJK UNIFIED IDEOGRAPH:'EADD:60125:縥
CJK UNIFIED IDEOGRAPH:'EADE:60126:縤
CJK UNIFIED IDEOGRAPH:'EADF:60127:罃
CJK UNIFIED IDEOGRAPH:'EAE0:60128:罻
CJK UNIFIED IDEOGRAPH:'EAE1:60129:罼
CJK UNIFIED IDEOGRAPH:'EAE2:60130:罺
CJK UNIFIED IDEOGRAPH:'EAE3:60131:羱
CJK UNIFIED IDEOGRAPH:'EAE4:60132:翯
CJK UNIFIED IDEOGRAPH:'EAE5:60133:耪
CJK UNIFIED IDEOGRAPH:'EAE6:60134:耩
CJK UNIFIED IDEOGRAPH:'EAE7:60135:聬
CJK UNIFIED IDEOGRAPH:'EAE8:60136:膱
CJK UNIFIED IDEOGRAPH:'EAE9:60137:膦
CJK UNIFIED IDEOGRAPH:'EAEA:60138:膮
CJK UNIFIED IDEOGRAPH:'EAEB:60139:膹
CJK UNIFIED IDEOGRAPH:'EAEC:60140:膵
CJK UNIFIED IDEOGRAPH:'EAED:60141:膫
CJK UNIFIED IDEOGRAPH:'EAEE:60142:膰
CJK UNIFIED IDEOGRAPH:'EAEF:60143:膬
CJK UNIFIED IDEOGRAPH:'EAF0:60144:膴
CJK UNIFIED IDEOGRAPH:'EAF1:60145:膲
CJK UNIFIED IDEOGRAPH:'EAF2:60146:膷
CJK UNIFIED IDEOGRAPH:'EAF3:60147:膧
CJK UNIFIED IDEOGRAPH:'EAF4:60148:臲
CJK UNIFIED IDEOGRAPH:'EAF5:60149:艕
CJK UNIFIED IDEOGRAPH:'EAF6:60150:艖
CJK UNIFIED IDEOGRAPH:'EAF7:60151:艗
CJK UNIFIED IDEOGRAPH:'EAF8:60152:蕖
CJK UNIFIED IDEOGRAPH:'EAF9:60153:蕅
CJK UNIFIED IDEOGRAPH:'EAFA:60154:蕫
CJK UNIFIED IDEOGRAPH:'EAFB:60155:蕍
CJK UNIFIED IDEOGRAPH:'EAFC:60156:蕓
CJK UNIFIED IDEOGRAPH:'EAFD:60157:蕡
CJK UNIFIED IDEOGRAPH:'EAFE:60158:蕘
CJK UNIFIED IDEOGRAPH:'EB40:60224:蕀
CJK UNIFIED IDEOGRAPH:'EB41:60225:蕆
CJK UNIFIED IDEOGRAPH:'EB42:60226:蕤
CJK UNIFIED IDEOGRAPH:'EB43:60227:蕁
CJK UNIFIED IDEOGRAPH:'EB44:60228:蕢
CJK UNIFIED IDEOGRAPH:'EB45:60229:蕄
CJK UNIFIED IDEOGRAPH:'EB46:60230:蕑
CJK UNIFIED IDEOGRAPH:'EB47:60231:蕇
CJK UNIFIED IDEOGRAPH:'EB48:60232:蕣
CJK UNIFIED IDEOGRAPH:'EB49:60233:蔾
CJK UNIFIED IDEOGRAPH:'EB4A:60234:蕛
CJK UNIFIED IDEOGRAPH:'EB4B:60235:蕱
CJK UNIFIED IDEOGRAPH:'EB4C:60236:蕎
CJK UNIFIED IDEOGRAPH:'EB4D:60237:蕮
CJK UNIFIED IDEOGRAPH:'EB4E:60238:蕵
CJK UNIFIED IDEOGRAPH:'EB4F:60239:蕕
CJK UNIFIED IDEOGRAPH:'EB50:60240:蕧
CJK UNIFIED IDEOGRAPH:'EB51:60241:蕠
CJK UNIFIED IDEOGRAPH:'EB52:60242:薌
CJK UNIFIED IDEOGRAPH:'EB53:60243:蕦
CJK UNIFIED IDEOGRAPH:'EB54:60244:蕝
CJK UNIFIED IDEOGRAPH:'EB55:60245:蕔
CJK UNIFIED IDEOGRAPH:'EB56:60246:蕥
CJK UNIFIED IDEOGRAPH:'EB57:60247:蕬
CJK UNIFIED IDEOGRAPH:'EB58:60248:虣
CJK UNIFIED IDEOGRAPH:'EB59:60249:虥
CJK UNIFIED IDEOGRAPH:'EB5A:60250:虤
CJK UNIFIED IDEOGRAPH:'EB5B:60251:螛
CJK UNIFIED IDEOGRAPH:'EB5C:60252:螏
CJK UNIFIED IDEOGRAPH:'EB5D:60253:螗
CJK UNIFIED IDEOGRAPH:'EB5E:60254:螓
CJK UNIFIED IDEOGRAPH:'EB5F:60255:螒
CJK UNIFIED IDEOGRAPH:'EB60:60256:螈
CJK UNIFIED IDEOGRAPH:'EB61:60257:螁
CJK UNIFIED IDEOGRAPH:'EB62:60258:螖
CJK UNIFIED IDEOGRAPH:'EB63:60259:螘
CJK UNIFIED IDEOGRAPH:'EB64:60260:蝹
CJK UNIFIED IDEOGRAPH:'EB65:60261:螇
CJK UNIFIED IDEOGRAPH:'EB66:60262:螣
CJK UNIFIED IDEOGRAPH:'EB67:60263:螅
CJK UNIFIED IDEOGRAPH:'EB68:60264:螐
CJK UNIFIED IDEOGRAPH:'EB69:60265:螑
CJK UNIFIED IDEOGRAPH:'EB6A:60266:螝
CJK UNIFIED IDEOGRAPH:'EB6B:60267:螄
CJK UNIFIED IDEOGRAPH:'EB6C:60268:螔
CJK UNIFIED IDEOGRAPH:'EB6D:60269:螜
CJK UNIFIED IDEOGRAPH:'EB6E:60270:螚
CJK UNIFIED IDEOGRAPH:'EB6F:60271:螉
CJK UNIFIED IDEOGRAPH:'EB70:60272:褞
CJK UNIFIED IDEOGRAPH:'EB71:60273:褦
CJK UNIFIED IDEOGRAPH:'EB72:60274:褰
CJK UNIFIED IDEOGRAPH:'EB73:60275:褭
CJK UNIFIED IDEOGRAPH:'EB74:60276:褮
CJK UNIFIED IDEOGRAPH:'EB75:60277:褧
CJK UNIFIED IDEOGRAPH:'EB76:60278:褱
CJK UNIFIED IDEOGRAPH:'EB77:60279:褢
CJK UNIFIED IDEOGRAPH:'EB78:60280:褩
CJK UNIFIED IDEOGRAPH:'EB79:60281:褣
CJK UNIFIED IDEOGRAPH:'EB7A:60282:褯
CJK UNIFIED IDEOGRAPH:'EB7B:60283:褬
CJK UNIFIED IDEOGRAPH:'EB7C:60284:褟
CJK UNIFIED IDEOGRAPH:'EB7D:60285:觱
CJK UNIFIED IDEOGRAPH:'EB7E:60286:諠
CJK UNIFIED IDEOGRAPH:'EBA1:60321:諢
CJK UNIFIED IDEOGRAPH:'EBA2:60322:諲
CJK UNIFIED IDEOGRAPH:'EBA3:60323:諴
CJK UNIFIED IDEOGRAPH:'EBA4:60324:諵
CJK UNIFIED IDEOGRAPH:'EBA5:60325:諝
CJK UNIFIED IDEOGRAPH:'EBA6:60326:謔
CJK UNIFIED IDEOGRAPH:'EBA7:60327:諤
CJK UNIFIED IDEOGRAPH:'EBA8:60328:諟
CJK UNIFIED IDEOGRAPH:'EBA9:60329:諰
CJK UNIFIED IDEOGRAPH:'EBAA:60330:諈
CJK UNIFIED IDEOGRAPH:'EBAB:60331:諞
CJK UNIFIED IDEOGRAPH:'EBAC:60332:諡
CJK UNIFIED IDEOGRAPH:'EBAD:60333:諨
CJK UNIFIED IDEOGRAPH:'EBAE:60334:諿
CJK UNIFIED IDEOGRAPH:'EBAF:60335:諯
CJK UNIFIED IDEOGRAPH:'EBB0:60336:諻
CJK UNIFIED IDEOGRAPH:'EBB1:60337:貑
CJK UNIFIED IDEOGRAPH:'EBB2:60338:貒
CJK UNIFIED IDEOGRAPH:'EBB3:60339:貐
CJK UNIFIED IDEOGRAPH:'EBB4:60340:賵
CJK UNIFIED IDEOGRAPH:'EBB5:60341:賮
CJK UNIFIED IDEOGRAPH:'EBB6:60342:賱
CJK UNIFIED IDEOGRAPH:'EBB7:60343:賰
CJK UNIFIED IDEOGRAPH:'EBB8:60344:賳
CJK UNIFIED IDEOGRAPH:'EBB9:60345:赬
CJK UNIFIED IDEOGRAPH:'EBBA:60346:赮
CJK UNIFIED IDEOGRAPH:'EBBB:60347:趥
CJK UNIFIED IDEOGRAPH:'EBBC:60348:趧
CJK UNIFIED IDEOGRAPH:'EBBD:60349:踳
CJK UNIFIED IDEOGRAPH:'EBBE:60350:踾
CJK UNIFIED IDEOGRAPH:'EBBF:60351:踸
CJK UNIFIED IDEOGRAPH:'EBC0:60352:蹀
CJK UNIFIED IDEOGRAPH:'EBC1:60353:蹅
CJK UNIFIED IDEOGRAPH:'EBC2:60354:踶
CJK UNIFIED IDEOGRAPH:'EBC3:60355:踼
CJK UNIFIED IDEOGRAPH:'EBC4:60356:踽
CJK UNIFIED IDEOGRAPH:'EBC5:60357:蹁
CJK UNIFIED IDEOGRAPH:'EBC6:60358:踰
CJK UNIFIED IDEOGRAPH:'EBC7:60359:踿
CJK UNIFIED IDEOGRAPH:'EBC8:60360:躽
CJK UNIFIED IDEOGRAPH:'EBC9:60361:輶
CJK UNIFIED IDEOGRAPH:'EBCA:60362:輮
CJK UNIFIED IDEOGRAPH:'EBCB:60363:輵
CJK UNIFIED IDEOGRAPH:'EBCC:60364:輲
CJK UNIFIED IDEOGRAPH:'EBCD:60365:輹
CJK UNIFIED IDEOGRAPH:'EBCE:60366:輷
CJK UNIFIED IDEOGRAPH:'EBCF:60367:輴
CJK UNIFIED IDEOGRAPH:'EBD0:60368:遶
CJK UNIFIED IDEOGRAPH:'EBD1:60369:遹
CJK UNIFIED IDEOGRAPH:'EBD2:60370:遻
CJK UNIFIED IDEOGRAPH:'EBD3:60371:邆
CJK UNIFIED IDEOGRAPH:'EBD4:60372:郺
CJK UNIFIED IDEOGRAPH:'EBD5:60373:鄳
CJK UNIFIED IDEOGRAPH:'EBD6:60374:鄵
CJK UNIFIED IDEOGRAPH:'EBD7:60375:鄶
CJK UNIFIED IDEOGRAPH:'EBD8:60376:醓
CJK UNIFIED IDEOGRAPH:'EBD9:60377:醐
CJK UNIFIED IDEOGRAPH:'EBDA:60378:醑
CJK UNIFIED IDEOGRAPH:'EBDB:60379:醍
CJK UNIFIED IDEOGRAPH:'EBDC:60380:醏
CJK UNIFIED IDEOGRAPH:'EBDD:60381:錧
CJK UNIFIED IDEOGRAPH:'EBDE:60382:錞
CJK UNIFIED IDEOGRAPH:'EBDF:60383:錈
CJK UNIFIED IDEOGRAPH:'EBE0:60384:錟
CJK UNIFIED IDEOGRAPH:'EBE1:60385:錆
CJK UNIFIED IDEOGRAPH:'EBE2:60386:錏
CJK UNIFIED IDEOGRAPH:'EBE3:60387:鍺
CJK UNIFIED IDEOGRAPH:'EBE4:60388:錸
CJK UNIFIED IDEOGRAPH:'EBE5:60389:錼
CJK UNIFIED IDEOGRAPH:'EBE6:60390:錛
CJK UNIFIED IDEOGRAPH:'EBE7:60391:錣
CJK UNIFIED IDEOGRAPH:'EBE8:60392:錒
CJK UNIFIED IDEOGRAPH:'EBE9:60393:錁
CJK UNIFIED IDEOGRAPH:'EBEA:60394:鍆
CJK UNIFIED IDEOGRAPH:'EBEB:60395:錭
CJK UNIFIED IDEOGRAPH:'EBEC:60396:錎
CJK UNIFIED IDEOGRAPH:'EBED:60397:錍
CJK UNIFIED IDEOGRAPH:'EBEE:60398:鋋
CJK UNIFIED IDEOGRAPH:'EBEF:60399:錝
CJK UNIFIED IDEOGRAPH:'EBF0:60400:鋺
CJK UNIFIED IDEOGRAPH:'EBF1:60401:錥
CJK UNIFIED IDEOGRAPH:'EBF2:60402:錓
CJK UNIFIED IDEOGRAPH:'EBF3:60403:鋹
CJK UNIFIED IDEOGRAPH:'EBF4:60404:鋷
CJK UNIFIED IDEOGRAPH:'EBF5:60405:錴
CJK UNIFIED IDEOGRAPH:'EBF6:60406:錂
CJK UNIFIED IDEOGRAPH:'EBF7:60407:錤
CJK UNIFIED IDEOGRAPH:'EBF8:60408:鋿
CJK UNIFIED IDEOGRAPH:'EBF9:60409:錩
CJK UNIFIED IDEOGRAPH:'EBFA:60410:錹
CJK UNIFIED IDEOGRAPH:'EBFB:60411:錵
CJK UNIFIED IDEOGRAPH:'EBFC:60412:錪
CJK UNIFIED IDEOGRAPH:'EBFD:60413:錔
CJK UNIFIED IDEOGRAPH:'EBFE:60414:錌
CJK UNIFIED IDEOGRAPH:'EC40:60480:錋
CJK UNIFIED IDEOGRAPH:'EC41:60481:鋾
CJK UNIFIED IDEOGRAPH:'EC42:60482:錉
CJK UNIFIED IDEOGRAPH:'EC43:60483:錀
CJK UNIFIED IDEOGRAPH:'EC44:60484:鋻
CJK UNIFIED IDEOGRAPH:'EC45:60485:錖
CJK UNIFIED IDEOGRAPH:'EC46:60486:閼
CJK UNIFIED IDEOGRAPH:'EC47:60487:闍
CJK UNIFIED IDEOGRAPH:'EC48:60488:閾
CJK UNIFIED IDEOGRAPH:'EC49:60489:閹
CJK UNIFIED IDEOGRAPH:'EC4A:60490:閺
CJK UNIFIED IDEOGRAPH:'EC4B:60491:閶
CJK UNIFIED IDEOGRAPH:'EC4C:60492:閿
CJK UNIFIED IDEOGRAPH:'EC4D:60493:閵
CJK UNIFIED IDEOGRAPH:'EC4E:60494:閽
CJK UNIFIED IDEOGRAPH:'EC4F:60495:隩
CJK UNIFIED IDEOGRAPH:'EC50:60496:雔
CJK UNIFIED IDEOGRAPH:'EC51:60497:霋
CJK UNIFIED IDEOGRAPH:'EC52:60498:霒
CJK UNIFIED IDEOGRAPH:'EC53:60499:霐
CJK UNIFIED IDEOGRAPH:'EC54:60500:鞙
CJK UNIFIED IDEOGRAPH:'EC55:60501:鞗
CJK UNIFIED IDEOGRAPH:'EC56:60502:鞔
CJK UNIFIED IDEOGRAPH:'EC57:60503:韰
CJK UNIFIED IDEOGRAPH:'EC58:60504:韸
CJK UNIFIED IDEOGRAPH:'EC59:60505:頵
CJK UNIFIED IDEOGRAPH:'EC5A:60506:頯
CJK UNIFIED IDEOGRAPH:'EC5B:60507:頲
CJK UNIFIED IDEOGRAPH:'EC5C:60508:餤
CJK UNIFIED IDEOGRAPH:'EC5D:60509:餟
CJK UNIFIED IDEOGRAPH:'EC5E:60510:餧
CJK UNIFIED IDEOGRAPH:'EC5F:60511:餩
CJK UNIFIED IDEOGRAPH:'EC60:60512:馞
CJK UNIFIED IDEOGRAPH:'EC61:60513:駮
CJK UNIFIED IDEOGRAPH:'EC62:60514:駬
CJK UNIFIED IDEOGRAPH:'EC63:60515:駥
CJK UNIFIED IDEOGRAPH:'EC64:60516:駤
CJK UNIFIED IDEOGRAPH:'EC65:60517:駰
CJK UNIFIED IDEOGRAPH:'EC66:60518:駣
CJK UNIFIED IDEOGRAPH:'EC67:60519:駪
CJK UNIFIED IDEOGRAPH:'EC68:60520:駩
CJK UNIFIED IDEOGRAPH:'EC69:60521:駧
CJK UNIFIED IDEOGRAPH:'EC6A:60522:骹
CJK UNIFIED IDEOGRAPH:'EC6B:60523:骿
CJK UNIFIED IDEOGRAPH:'EC6C:60524:骴
CJK UNIFIED IDEOGRAPH:'EC6D:60525:骻
CJK UNIFIED IDEOGRAPH:'EC6E:60526:髶
CJK UNIFIED IDEOGRAPH:'EC6F:60527:髺
CJK UNIFIED IDEOGRAPH:'EC70:60528:髹
CJK UNIFIED IDEOGRAPH:'EC71:60529:髷
CJK UNIFIED IDEOGRAPH:'EC72:60530:鬳
CJK UNIFIED IDEOGRAPH:'EC73:60531:鮀
CJK UNIFIED IDEOGRAPH:'EC74:60532:鮅
CJK UNIFIED IDEOGRAPH:'EC75:60533:鮇
CJK UNIFIED IDEOGRAPH:'EC76:60534:魼
CJK UNIFIED IDEOGRAPH:'EC77:60535:魾
CJK UNIFIED IDEOGRAPH:'EC78:60536:魻
CJK UNIFIED IDEOGRAPH:'EC79:60537:鮂
CJK UNIFIED IDEOGRAPH:'EC7A:60538:鮓
CJK UNIFIED IDEOGRAPH:'EC7B:60539:鮒
CJK UNIFIED IDEOGRAPH:'EC7C:60540:鮐
CJK UNIFIED IDEOGRAPH:'EC7D:60541:魺
CJK UNIFIED IDEOGRAPH:'EC7E:60542:鮕
CJK UNIFIED IDEOGRAPH:'ECA1:60577:魽
CJK UNIFIED IDEOGRAPH:'ECA2:60578:鮈
CJK UNIFIED IDEOGRAPH:'ECA3:60579:鴥
CJK UNIFIED IDEOGRAPH:'ECA4:60580:鴗
CJK UNIFIED IDEOGRAPH:'ECA5:60581:鴠
CJK UNIFIED IDEOGRAPH:'ECA6:60582:鴞
CJK UNIFIED IDEOGRAPH:'ECA7:60583:鴔
CJK UNIFIED IDEOGRAPH:'ECA8:60584:鴩
CJK UNIFIED IDEOGRAPH:'ECA9:60585:鴝
CJK UNIFIED IDEOGRAPH:'ECAA:60586:鴘
CJK UNIFIED IDEOGRAPH:'ECAB:60587:鴢
CJK UNIFIED IDEOGRAPH:'ECAC:60588:鴐
CJK UNIFIED IDEOGRAPH:'ECAD:60589:鴙
CJK UNIFIED IDEOGRAPH:'ECAE:60590:鴟
CJK UNIFIED IDEOGRAPH:'ECAF:60591:麈
CJK UNIFIED IDEOGRAPH:'ECB0:60592:麆
CJK UNIFIED IDEOGRAPH:'ECB1:60593:麇
CJK UNIFIED IDEOGRAPH:'ECB2:60594:麮
CJK UNIFIED IDEOGRAPH:'ECB3:60595:麭
CJK UNIFIED IDEOGRAPH:'ECB4:60596:黕
CJK UNIFIED IDEOGRAPH:'ECB5:60597:黖
CJK UNIFIED IDEOGRAPH:'ECB6:60598:黺
CJK UNIFIED IDEOGRAPH:'ECB7:60599:鼒
CJK UNIFIED IDEOGRAPH:'ECB8:60600:鼽
CJK UNIFIED IDEOGRAPH:'ECB9:60601:儦
CJK UNIFIED IDEOGRAPH:'ECBA:60602:儥
CJK UNIFIED IDEOGRAPH:'ECBB:60603:儢
CJK UNIFIED IDEOGRAPH:'ECBC:60604:儤
CJK UNIFIED IDEOGRAPH:'ECBD:60605:儠
CJK UNIFIED IDEOGRAPH:'ECBE:60606:儩
CJK UNIFIED IDEOGRAPH:'ECBF:60607:勴
CJK UNIFIED IDEOGRAPH:'ECC0:60608:嚓
CJK UNIFIED IDEOGRAPH:'ECC1:60609:嚌
CJK UNIFIED IDEOGRAPH:'ECC2:60610:嚍
CJK UNIFIED IDEOGRAPH:'ECC3:60611:嚆
CJK UNIFIED IDEOGRAPH:'ECC4:60612:嚄
CJK UNIFIED IDEOGRAPH:'ECC5:60613:嚃
CJK UNIFIED IDEOGRAPH:'ECC6:60614:噾
CJK UNIFIED IDEOGRAPH:'ECC7:60615:嚂
CJK UNIFIED IDEOGRAPH:'ECC8:60616:噿
CJK UNIFIED IDEOGRAPH:'ECC9:60617:嚁
CJK UNIFIED IDEOGRAPH:'ECCA:60618:壖
CJK UNIFIED IDEOGRAPH:'ECCB:60619:壔
CJK UNIFIED IDEOGRAPH:'ECCC:60620:壏
CJK UNIFIED IDEOGRAPH:'ECCD:60621:壒
CJK UNIFIED IDEOGRAPH:'ECCE:60622:嬭
CJK UNIFIED IDEOGRAPH:'ECCF:60623:嬥
CJK UNIFIED IDEOGRAPH:'ECD0:60624:嬲
CJK UNIFIED IDEOGRAPH:'ECD1:60625:嬣
CJK UNIFIED IDEOGRAPH:'ECD2:60626:嬬
CJK UNIFIED IDEOGRAPH:'ECD3:60627:嬧
CJK UNIFIED IDEOGRAPH:'ECD4:60628:嬦
CJK UNIFIED IDEOGRAPH:'ECD5:60629:嬯
CJK UNIFIED IDEOGRAPH:'ECD6:60630:嬮
CJK UNIFIED IDEOGRAPH:'ECD7:60631:孻
CJK UNIFIED IDEOGRAPH:'ECD8:60632:寱
CJK UNIFIED IDEOGRAPH:'ECD9:60633:寲
CJK UNIFIED IDEOGRAPH:'ECDA:60634:嶷
CJK UNIFIED IDEOGRAPH:'ECDB:60635:幬
CJK UNIFIED IDEOGRAPH:'ECDC:60636:幪
CJK UNIFIED IDEOGRAPH:'ECDD:60637:徾
CJK UNIFIED IDEOGRAPH:'ECDE:60638:徻
CJK UNIFIED IDEOGRAPH:'ECDF:60639:懃
CJK UNIFIED IDEOGRAPH:'ECE0:60640:憵
CJK UNIFIED IDEOGRAPH:'ECE1:60641:憼
CJK UNIFIED IDEOGRAPH:'ECE2:60642:懧
CJK UNIFIED IDEOGRAPH:'ECE3:60643:懠
CJK UNIFIED IDEOGRAPH:'ECE4:60644:懥
CJK UNIFIED IDEOGRAPH:'ECE5:60645:懤
CJK UNIFIED IDEOGRAPH:'ECE6:60646:懨
CJK UNIFIED IDEOGRAPH:'ECE7:60647:懞
CJK UNIFIED IDEOGRAPH:'ECE8:60648:擯
CJK UNIFIED IDEOGRAPH:'ECE9:60649:擩
CJK UNIFIED IDEOGRAPH:'ECEA:60650:擣
CJK UNIFIED IDEOGRAPH:'ECEB:60651:擫
CJK UNIFIED IDEOGRAPH:'ECEC:60652:擤
CJK UNIFIED IDEOGRAPH:'ECED:60653:擨
CJK UNIFIED IDEOGRAPH:'ECEE:60654:斁
CJK UNIFIED IDEOGRAPH:'ECEF:60655:斀
CJK UNIFIED IDEOGRAPH:'ECF0:60656:斶
CJK UNIFIED IDEOGRAPH:'ECF1:60657:旚
CJK UNIFIED IDEOGRAPH:'ECF2:60658:曒
CJK UNIFIED IDEOGRAPH:'ECF3:60659:檍
CJK UNIFIED IDEOGRAPH:'ECF4:60660:檖
CJK UNIFIED IDEOGRAPH:'ECF5:60661:檁
CJK UNIFIED IDEOGRAPH:'ECF6:60662:檥
CJK UNIFIED IDEOGRAPH:'ECF7:60663:檉
CJK UNIFIED IDEOGRAPH:'ECF8:60664:檟
CJK UNIFIED IDEOGRAPH:'ECF9:60665:檛
CJK UNIFIED IDEOGRAPH:'ECFA:60666:檡
CJK UNIFIED IDEOGRAPH:'ECFB:60667:檞
CJK UNIFIED IDEOGRAPH:'ECFC:60668:檇
CJK UNIFIED IDEOGRAPH:'ECFD:60669:檓
CJK UNIFIED IDEOGRAPH:'ECFE:60670:檎
CJK UNIFIED IDEOGRAPH:'ED40:60736:檕
CJK UNIFIED IDEOGRAPH:'ED41:60737:檃
CJK UNIFIED IDEOGRAPH:'ED42:60738:檨
CJK UNIFIED IDEOGRAPH:'ED43:60739:檤
CJK UNIFIED IDEOGRAPH:'ED44:60740:檑
CJK UNIFIED IDEOGRAPH:'ED45:60741:橿
CJK UNIFIED IDEOGRAPH:'ED46:60742:檦
CJK UNIFIED IDEOGRAPH:'ED47:60743:檚
CJK UNIFIED IDEOGRAPH:'ED48:60744:檅
CJK UNIFIED IDEOGRAPH:'ED49:60745:檌
CJK UNIFIED IDEOGRAPH:'ED4A:60746:檒
CJK UNIFIED IDEOGRAPH:'ED4B:60747:歛
CJK UNIFIED IDEOGRAPH:'ED4C:60748:殭
CJK UNIFIED IDEOGRAPH:'ED4D:60749:氉
CJK UNIFIED IDEOGRAPH:'ED4E:60750:濌
CJK UNIFIED IDEOGRAPH:'ED4F:60751:澩
CJK UNIFIED IDEOGRAPH:'ED50:60752:濴
CJK UNIFIED IDEOGRAPH:'ED51:60753:濔
CJK UNIFIED IDEOGRAPH:'ED52:60754:濣
CJK UNIFIED IDEOGRAPH:'ED53:60755:濜
CJK UNIFIED IDEOGRAPH:'ED54:60756:濭
CJK UNIFIED IDEOGRAPH:'ED55:60757:濧
CJK UNIFIED IDEOGRAPH:'ED56:60758:濦
CJK UNIFIED IDEOGRAPH:'ED57:60759:濞
CJK UNIFIED IDEOGRAPH:'ED58:60760:濲
CJK UNIFIED IDEOGRAPH:'ED59:60761:濝
CJK UNIFIED IDEOGRAPH:'ED5A:60762:濢
CJK UNIFIED IDEOGRAPH:'ED5B:60763:濨
CJK UNIFIED IDEOGRAPH:'ED5C:60764:燡
CJK UNIFIED IDEOGRAPH:'ED5D:60765:燱
CJK UNIFIED IDEOGRAPH:'ED5E:60766:燨
CJK UNIFIED IDEOGRAPH:'ED5F:60767:燲
CJK UNIFIED IDEOGRAPH:'ED60:60768:燤
CJK UNIFIED IDEOGRAPH:'ED61:60769:燰
CJK UNIFIED IDEOGRAPH:'ED62:60770:燢
CJK UNIFIED IDEOGRAPH:'ED63:60771:獳
CJK UNIFIED IDEOGRAPH:'ED64:60772:獮
CJK UNIFIED IDEOGRAPH:'ED65:60773:獯
CJK UNIFIED IDEOGRAPH:'ED66:60774:璗
CJK UNIFIED IDEOGRAPH:'ED67:60775:璲
CJK UNIFIED IDEOGRAPH:'ED68:60776:璫
CJK UNIFIED IDEOGRAPH:'ED69:60777:璐
CJK UNIFIED IDEOGRAPH:'ED6A:60778:璪
CJK UNIFIED IDEOGRAPH:'ED6B:60779:璭
CJK UNIFIED IDEOGRAPH:'ED6C:60780:璱
CJK UNIFIED IDEOGRAPH:'ED6D:60781:璥
CJK UNIFIED IDEOGRAPH:'ED6E:60782:璯
CJK UNIFIED IDEOGRAPH:'ED6F:60783:甐
CJK UNIFIED IDEOGRAPH:'ED70:60784:甑
CJK UNIFIED IDEOGRAPH:'ED71:60785:甒
CJK UNIFIED IDEOGRAPH:'ED72:60786:甏
CJK UNIFIED IDEOGRAPH:'ED73:60787:疄
CJK UNIFIED IDEOGRAPH:'ED74:60788:癃
CJK UNIFIED IDEOGRAPH:'ED75:60789:癈
CJK UNIFIED IDEOGRAPH:'ED76:60790:癉
CJK UNIFIED IDEOGRAPH:'ED77:60791:癇
CJK UNIFIED IDEOGRAPH:'ED78:60792:皤
CJK UNIFIED IDEOGRAPH:'ED79:60793:盩
CJK UNIFIED IDEOGRAPH:'ED7A:60794:瞵
CJK UNIFIED IDEOGRAPH:'ED7B:60795:瞫
CJK UNIFIED IDEOGRAPH:'ED7C:60796:瞲
CJK UNIFIED IDEOGRAPH:'ED7D:60797:瞷
CJK UNIFIED IDEOGRAPH:'ED7E:60798:瞶
CJK UNIFIED IDEOGRAPH:'EDA1:60833:瞴
CJK UNIFIED IDEOGRAPH:'EDA2:60834:瞱
CJK UNIFIED IDEOGRAPH:'EDA3:60835:瞨
CJK UNIFIED IDEOGRAPH:'EDA4:60836:矰
CJK UNIFIED IDEOGRAPH:'EDA5:60837:磳
CJK UNIFIED IDEOGRAPH:'EDA6:60838:磽
CJK UNIFIED IDEOGRAPH:'EDA7:60839:礂
CJK UNIFIED IDEOGRAPH:'EDA8:60840:磻
CJK UNIFIED IDEOGRAPH:'EDA9:60841:磼
CJK UNIFIED IDEOGRAPH:'EDAA:60842:磲
CJK UNIFIED IDEOGRAPH:'EDAB:60843:礅
CJK UNIFIED IDEOGRAPH:'EDAC:60844:磹
CJK UNIFIED IDEOGRAPH:'EDAD:60845:磾
CJK UNIFIED IDEOGRAPH:'EDAE:60846:礄
CJK UNIFIED IDEOGRAPH:'EDAF:60847:禫
CJK UNIFIED IDEOGRAPH:'EDB0:60848:禨
CJK UNIFIED IDEOGRAPH:'EDB1:60849:穜
CJK UNIFIED IDEOGRAPH:'EDB2:60850:穛
CJK UNIFIED IDEOGRAPH:'EDB3:60851:穖
CJK UNIFIED IDEOGRAPH:'EDB4:60852:穘
CJK UNIFIED IDEOGRAPH:'EDB5:60853:穔
CJK UNIFIED IDEOGRAPH:'EDB6:60854:穚
CJK UNIFIED IDEOGRAPH:'EDB7:60855:窾
CJK UNIFIED IDEOGRAPH:'EDB8:60856:竀
CJK UNIFIED IDEOGRAPH:'EDB9:60857:竁
CJK UNIFIED IDEOGRAPH:'EDBA:60858:簅
CJK UNIFIED IDEOGRAPH:'EDBB:60859:簏
CJK UNIFIED IDEOGRAPH:'EDBC:60860:篲
CJK UNIFIED IDEOGRAPH:'EDBD:60861:簀
CJK UNIFIED IDEOGRAPH:'EDBE:60862:篿
CJK UNIFIED IDEOGRAPH:'EDBF:60863:篻
CJK UNIFIED IDEOGRAPH:'EDC0:60864:簎
CJK UNIFIED IDEOGRAPH:'EDC1:60865:篴
CJK UNIFIED IDEOGRAPH:'EDC2:60866:簋
CJK UNIFIED IDEOGRAPH:'EDC3:60867:篳
CJK UNIFIED IDEOGRAPH:'EDC4:60868:簂
CJK UNIFIED IDEOGRAPH:'EDC5:60869:簉
CJK UNIFIED IDEOGRAPH:'EDC6:60870:簃
CJK UNIFIED IDEOGRAPH:'EDC7:60871:簁
CJK UNIFIED IDEOGRAPH:'EDC8:60872:篸
CJK UNIFIED IDEOGRAPH:'EDC9:60873:篽
CJK UNIFIED IDEOGRAPH:'EDCA:60874:簆
CJK UNIFIED IDEOGRAPH:'EDCB:60875:篰
CJK UNIFIED IDEOGRAPH:'EDCC:60876:篱
CJK UNIFIED IDEOGRAPH:'EDCD:60877:簐
CJK UNIFIED IDEOGRAPH:'EDCE:60878:簊
CJK UNIFIED IDEOGRAPH:'EDCF:60879:糨
CJK UNIFIED IDEOGRAPH:'EDD0:60880:縭
CJK UNIFIED IDEOGRAPH:'EDD1:60881:縼
CJK UNIFIED IDEOGRAPH:'EDD2:60882:繂
CJK UNIFIED IDEOGRAPH:'EDD3:60883:縳
CJK UNIFIED IDEOGRAPH:'EDD4:60884:顈
CJK UNIFIED IDEOGRAPH:'EDD5:60885:縸
CJK UNIFIED IDEOGRAPH:'EDD6:60886:縪
CJK UNIFIED IDEOGRAPH:'EDD7:60887:繉
CJK UNIFIED IDEOGRAPH:'EDD8:60888:繀
CJK UNIFIED IDEOGRAPH:'EDD9:60889:繇
CJK UNIFIED IDEOGRAPH:'EDDA:60890:縩
CJK UNIFIED IDEOGRAPH:'EDDB:60891:繌
CJK UNIFIED IDEOGRAPH:'EDDC:60892:縰
CJK UNIFIED IDEOGRAPH:'EDDD:60893:縻
CJK UNIFIED IDEOGRAPH:'EDDE:60894:縶
CJK UNIFIED IDEOGRAPH:'EDDF:60895:繄
CJK UNIFIED IDEOGRAPH:'EDE0:60896:縺
CJK UNIFIED IDEOGRAPH:'EDE1:60897:罅
CJK UNIFIED IDEOGRAPH:'EDE2:60898:罿
CJK UNIFIED IDEOGRAPH:'EDE3:60899:罾
CJK UNIFIED IDEOGRAPH:'EDE4:60900:罽
CJK UNIFIED IDEOGRAPH:'EDE5:60901:翴
CJK UNIFIED IDEOGRAPH:'EDE6:60902:翲
CJK UNIFIED IDEOGRAPH:'EDE7:60903:耬
CJK UNIFIED IDEOGRAPH:'EDE8:60904:膻
CJK UNIFIED IDEOGRAPH:'EDE9:60905:臄
CJK UNIFIED IDEOGRAPH:'EDEA:60906:臌
CJK UNIFIED IDEOGRAPH:'EDEB:60907:臊
CJK UNIFIED IDEOGRAPH:'EDEC:60908:臅
CJK UNIFIED IDEOGRAPH:'EDED:60909:臇
CJK UNIFIED IDEOGRAPH:'EDEE:60910:膼
CJK UNIFIED IDEOGRAPH:'EDEF:60911:臩
CJK UNIFIED IDEOGRAPH:'EDF0:60912:艛
CJK UNIFIED IDEOGRAPH:'EDF1:60913:艚
CJK UNIFIED IDEOGRAPH:'EDF2:60914:艜
CJK UNIFIED IDEOGRAPH:'EDF3:60915:薃
CJK UNIFIED IDEOGRAPH:'EDF4:60916:薀
CJK UNIFIED IDEOGRAPH:'EDF5:60917:薏
CJK UNIFIED IDEOGRAPH:'EDF6:60918:薧
CJK UNIFIED IDEOGRAPH:'EDF7:60919:薕
CJK UNIFIED IDEOGRAPH:'EDF8:60920:薠
CJK UNIFIED IDEOGRAPH:'EDF9:60921:薋
CJK UNIFIED IDEOGRAPH:'EDFA:60922:薣
CJK UNIFIED IDEOGRAPH:'EDFB:60923:蕻
CJK UNIFIED IDEOGRAPH:'EDFC:60924:薤
CJK UNIFIED IDEOGRAPH:'EDFD:60925:薚
CJK UNIFIED IDEOGRAPH:'EDFE:60926:薞
CJK UNIFIED IDEOGRAPH:'EE40:60992:蕷
CJK UNIFIED IDEOGRAPH:'EE41:60993:蕼
CJK UNIFIED IDEOGRAPH:'EE42:60994:薉
CJK UNIFIED IDEOGRAPH:'EE43:60995:薡
CJK UNIFIED IDEOGRAPH:'EE44:60996:蕺
CJK UNIFIED IDEOGRAPH:'EE45:60997:蕸
CJK UNIFIED IDEOGRAPH:'EE46:60998:蕗
CJK UNIFIED IDEOGRAPH:'EE47:60999:薎
CJK UNIFIED IDEOGRAPH:'EE48:61000:薖
CJK UNIFIED IDEOGRAPH:'EE49:61001:薆
CJK UNIFIED IDEOGRAPH:'EE4A:61002:薍
CJK UNIFIED IDEOGRAPH:'EE4B:61003:薙
CJK UNIFIED IDEOGRAPH:'EE4C:61004:薝
CJK UNIFIED IDEOGRAPH:'EE4D:61005:薁
CJK UNIFIED IDEOGRAPH:'EE4E:61006:薢
CJK UNIFIED IDEOGRAPH:'EE4F:61007:薂
CJK UNIFIED IDEOGRAPH:'EE50:61008:薈
CJK UNIFIED IDEOGRAPH:'EE51:61009:薅
CJK UNIFIED IDEOGRAPH:'EE52:61010:蕹
CJK UNIFIED IDEOGRAPH:'EE53:61011:蕶
CJK UNIFIED IDEOGRAPH:'EE54:61012:薘
CJK UNIFIED IDEOGRAPH:'EE55:61013:薐
CJK UNIFIED IDEOGRAPH:'EE56:61014:薟
CJK UNIFIED IDEOGRAPH:'EE57:61015:虨
CJK UNIFIED IDEOGRAPH:'EE58:61016:螾
CJK UNIFIED IDEOGRAPH:'EE59:61017:螪
CJK UNIFIED IDEOGRAPH:'EE5A:61018:螭
CJK UNIFIED IDEOGRAPH:'EE5B:61019:蟅
CJK UNIFIED IDEOGRAPH:'EE5C:61020:螰
CJK UNIFIED IDEOGRAPH:'EE5D:61021:螬
CJK UNIFIED IDEOGRAPH:'EE5E:61022:螹
CJK UNIFIED IDEOGRAPH:'EE5F:61023:螵
CJK UNIFIED IDEOGRAPH:'EE60:61024:螼
CJK UNIFIED IDEOGRAPH:'EE61:61025:螮
CJK UNIFIED IDEOGRAPH:'EE62:61026:蟉
CJK UNIFIED IDEOGRAPH:'EE63:61027:蟃
CJK UNIFIED IDEOGRAPH:'EE64:61028:蟂
CJK UNIFIED IDEOGRAPH:'EE65:61029:蟌
CJK UNIFIED IDEOGRAPH:'EE66:61030:螷
CJK UNIFIED IDEOGRAPH:'EE67:61031:螯
CJK UNIFIED IDEOGRAPH:'EE68:61032:蟄
CJK UNIFIED IDEOGRAPH:'EE69:61033:蟊
CJK UNIFIED IDEOGRAPH:'EE6A:61034:螴
CJK UNIFIED IDEOGRAPH:'EE6B:61035:螶
CJK UNIFIED IDEOGRAPH:'EE6C:61036:螿
CJK UNIFIED IDEOGRAPH:'EE6D:61037:螸
CJK UNIFIED IDEOGRAPH:'EE6E:61038:螽
CJK UNIFIED IDEOGRAPH:'EE6F:61039:蟞
CJK UNIFIED IDEOGRAPH:'EE70:61040:螲
CJK UNIFIED IDEOGRAPH:'EE71:61041:褵
CJK UNIFIED IDEOGRAPH:'EE72:61042:褳
CJK UNIFIED IDEOGRAPH:'EE73:61043:褼
CJK UNIFIED IDEOGRAPH:'EE74:61044:褾
CJK UNIFIED IDEOGRAPH:'EE75:61045:襁
CJK UNIFIED IDEOGRAPH:'EE76:61046:襒
CJK UNIFIED IDEOGRAPH:'EE77:61047:褷
CJK UNIFIED IDEOGRAPH:'EE78:61048:襂
CJK UNIFIED IDEOGRAPH:'EE79:61049:覭
CJK UNIFIED IDEOGRAPH:'EE7A:61050:覯
CJK UNIFIED IDEOGRAPH:'EE7B:61051:覮
CJK UNIFIED IDEOGRAPH:'EE7C:61052:觲
CJK UNIFIED IDEOGRAPH:'EE7D:61053:觳
CJK UNIFIED IDEOGRAPH:'EE7E:61054:謞
CJK UNIFIED IDEOGRAPH:'EEA1:61089:謘
CJK UNIFIED IDEOGRAPH:'EEA2:61090:謖
CJK UNIFIED IDEOGRAPH:'EEA3:61091:謑
CJK UNIFIED IDEOGRAPH:'EEA4:61092:謅
CJK UNIFIED IDEOGRAPH:'EEA5:61093:謋
CJK UNIFIED IDEOGRAPH:'EEA6:61094:謢
CJK UNIFIED IDEOGRAPH:'EEA7:61095:謏
CJK UNIFIED IDEOGRAPH:'EEA8:61096:謒
CJK UNIFIED IDEOGRAPH:'EEA9:61097:謕
CJK UNIFIED IDEOGRAPH:'EEAA:61098:謇
CJK UNIFIED IDEOGRAPH:'EEAB:61099:謍
CJK UNIFIED IDEOGRAPH:'EEAC:61100:謈
CJK UNIFIED IDEOGRAPH:'EEAD:61101:謆
CJK UNIFIED IDEOGRAPH:'EEAE:61102:謜
CJK UNIFIED IDEOGRAPH:'EEAF:61103:謓
CJK UNIFIED IDEOGRAPH:'EEB0:61104:謚
CJK UNIFIED IDEOGRAPH:'EEB1:61105:豏
CJK UNIFIED IDEOGRAPH:'EEB2:61106:豰
CJK UNIFIED IDEOGRAPH:'EEB3:61107:豲
CJK UNIFIED IDEOGRAPH:'EEB4:61108:豱
CJK UNIFIED IDEOGRAPH:'EEB5:61109:豯
CJK UNIFIED IDEOGRAPH:'EEB6:61110:貕
CJK UNIFIED IDEOGRAPH:'EEB7:61111:貔
CJK UNIFIED IDEOGRAPH:'EEB8:61112:賹
CJK UNIFIED IDEOGRAPH:'EEB9:61113:赯
CJK UNIFIED IDEOGRAPH:'EEBA:61114:蹎
CJK UNIFIED IDEOGRAPH:'EEBB:61115:蹍
CJK UNIFIED IDEOGRAPH:'EEBC:61116:蹓
CJK UNIFIED IDEOGRAPH:'EEBD:61117:蹐
CJK UNIFIED IDEOGRAPH:'EEBE:61118:蹌
CJK UNIFIED IDEOGRAPH:'EEBF:61119:蹇
CJK UNIFIED IDEOGRAPH:'EEC0:61120:轃
CJK UNIFIED IDEOGRAPH:'EEC1:61121:轀
CJK UNIFIED IDEOGRAPH:'EEC2:61122:邅
CJK UNIFIED IDEOGRAPH:'EEC3:61123:遾
CJK UNIFIED IDEOGRAPH:'EEC4:61124:鄸
CJK UNIFIED IDEOGRAPH:'EEC5:61125:醚
CJK UNIFIED IDEOGRAPH:'EEC6:61126:醢
CJK UNIFIED IDEOGRAPH:'EEC7:61127:醛
CJK UNIFIED IDEOGRAPH:'EEC8:61128:醙
CJK UNIFIED IDEOGRAPH:'EEC9:61129:醟
CJK UNIFIED IDEOGRAPH:'EECA:61130:醡
CJK UNIFIED IDEOGRAPH:'EECB:61131:醝
CJK UNIFIED IDEOGRAPH:'EECC:61132:醠
CJK UNIFIED IDEOGRAPH:'EECD:61133:鎡
CJK UNIFIED IDEOGRAPH:'EECE:61134:鎃
CJK UNIFIED IDEOGRAPH:'EECF:61135:鎯
CJK UNIFIED IDEOGRAPH:'EED0:61136:鍤
CJK UNIFIED IDEOGRAPH:'EED1:61137:鍖
CJK UNIFIED IDEOGRAPH:'EED2:61138:鍇
CJK UNIFIED IDEOGRAPH:'EED3:61139:鍼
CJK UNIFIED IDEOGRAPH:'EED4:61140:鍘
CJK UNIFIED IDEOGRAPH:'EED5:61141:鍜
CJK UNIFIED IDEOGRAPH:'EED6:61142:鍶
CJK UNIFIED IDEOGRAPH:'EED7:61143:鍉
CJK UNIFIED IDEOGRAPH:'EED8:61144:鍐
CJK UNIFIED IDEOGRAPH:'EED9:61145:鍑
CJK UNIFIED IDEOGRAPH:'EEDA:61146:鍠
CJK UNIFIED IDEOGRAPH:'EEDB:61147:鍭
CJK UNIFIED IDEOGRAPH:'EEDC:61148:鎏
CJK UNIFIED IDEOGRAPH:'EEDD:61149:鍌
CJK UNIFIED IDEOGRAPH:'EEDE:61150:鍪
CJK UNIFIED IDEOGRAPH:'EEDF:61151:鍹
CJK UNIFIED IDEOGRAPH:'EEE0:61152:鍗
CJK UNIFIED IDEOGRAPH:'EEE1:61153:鍕
CJK UNIFIED IDEOGRAPH:'EEE2:61154:鍒
CJK UNIFIED IDEOGRAPH:'EEE3:61155:鍏
CJK UNIFIED IDEOGRAPH:'EEE4:61156:鍱
CJK UNIFIED IDEOGRAPH:'EEE5:61157:鍷
CJK UNIFIED IDEOGRAPH:'EEE6:61158:鍻
CJK UNIFIED IDEOGRAPH:'EEE7:61159:鍡
CJK UNIFIED IDEOGRAPH:'EEE8:61160:鍞
CJK UNIFIED IDEOGRAPH:'EEE9:61161:鍣
CJK UNIFIED IDEOGRAPH:'EEEA:61162:鍧
CJK UNIFIED IDEOGRAPH:'EEEB:61163:鎀
CJK UNIFIED IDEOGRAPH:'EEEC:61164:鍎
CJK UNIFIED IDEOGRAPH:'EEED:61165:鍙
CJK UNIFIED IDEOGRAPH:'EEEE:61166:闇
CJK UNIFIED IDEOGRAPH:'EEEF:61167:闀
CJK UNIFIED IDEOGRAPH:'EEF0:61168:闉
CJK UNIFIED IDEOGRAPH:'EEF1:61169:闃
CJK UNIFIED IDEOGRAPH:'EEF2:61170:闅
CJK UNIFIED IDEOGRAPH:'EEF3:61171:閷
CJK UNIFIED IDEOGRAPH:'EEF4:61172:隮
CJK UNIFIED IDEOGRAPH:'EEF5:61173:隰
CJK UNIFIED IDEOGRAPH:'EEF6:61174:隬
CJK UNIFIED IDEOGRAPH:'EEF7:61175:霠
CJK UNIFIED IDEOGRAPH:'EEF8:61176:霟
CJK UNIFIED IDEOGRAPH:'EEF9:61177:霘
CJK UNIFIED IDEOGRAPH:'EEFA:61178:霝
CJK UNIFIED IDEOGRAPH:'EEFB:61179:霙
CJK UNIFIED IDEOGRAPH:'EEFC:61180:鞚
CJK UNIFIED IDEOGRAPH:'EEFD:61181:鞡
CJK UNIFIED IDEOGRAPH:'EEFE:61182:鞜
CJK UNIFIED IDEOGRAPH:'EF40:61248:鞞
CJK UNIFIED IDEOGRAPH:'EF41:61249:鞝
CJK UNIFIED IDEOGRAPH:'EF42:61250:韕
CJK UNIFIED IDEOGRAPH:'EF43:61251:韔
CJK UNIFIED IDEOGRAPH:'EF44:61252:韱
CJK UNIFIED IDEOGRAPH:'EF45:61253:顁
CJK UNIFIED IDEOGRAPH:'EF46:61254:顄
CJK UNIFIED IDEOGRAPH:'EF47:61255:顊
CJK UNIFIED IDEOGRAPH:'EF48:61256:顉
CJK UNIFIED IDEOGRAPH:'EF49:61257:顅
CJK UNIFIED IDEOGRAPH:'EF4A:61258:顃
CJK UNIFIED IDEOGRAPH:'EF4B:61259:餥
CJK UNIFIED IDEOGRAPH:'EF4C:61260:餫
CJK UNIFIED IDEOGRAPH:'EF4D:61261:餬
CJK UNIFIED IDEOGRAPH:'EF4E:61262:餪
CJK UNIFIED IDEOGRAPH:'EF4F:61263:餳
CJK UNIFIED IDEOGRAPH:'EF50:61264:餲
CJK UNIFIED IDEOGRAPH:'EF51:61265:餯
CJK UNIFIED IDEOGRAPH:'EF52:61266:餭
CJK UNIFIED IDEOGRAPH:'EF53:61267:餱
CJK UNIFIED IDEOGRAPH:'EF54:61268:餰
CJK UNIFIED IDEOGRAPH:'EF55:61269:馘
CJK UNIFIED IDEOGRAPH:'EF56:61270:馣
CJK UNIFIED IDEOGRAPH:'EF57:61271:馡
CJK UNIFIED IDEOGRAPH:'EF58:61272:騂
CJK UNIFIED IDEOGRAPH:'EF59:61273:駺
CJK UNIFIED IDEOGRAPH:'EF5A:61274:駴
CJK UNIFIED IDEOGRAPH:'EF5B:61275:駷
CJK UNIFIED IDEOGRAPH:'EF5C:61276:駹
CJK UNIFIED IDEOGRAPH:'EF5D:61277:駸
CJK UNIFIED IDEOGRAPH:'EF5E:61278:駶
CJK UNIFIED IDEOGRAPH:'EF5F:61279:駻
CJK UNIFIED IDEOGRAPH:'EF60:61280:駽
CJK UNIFIED IDEOGRAPH:'EF61:61281:駾
CJK UNIFIED IDEOGRAPH:'EF62:61282:駼
CJK UNIFIED IDEOGRAPH:'EF63:61283:騃
CJK UNIFIED IDEOGRAPH:'EF64:61284:骾
CJK UNIFIED IDEOGRAPH:'EF65:61285:髾
CJK UNIFIED IDEOGRAPH:'EF66:61286:髽
CJK UNIFIED IDEOGRAPH:'EF67:61287:鬁
CJK UNIFIED IDEOGRAPH:'EF68:61288:髼
CJK UNIFIED IDEOGRAPH:'EF69:61289:魈
CJK UNIFIED IDEOGRAPH:'EF6A:61290:鮚
CJK UNIFIED IDEOGRAPH:'EF6B:61291:鮨
CJK UNIFIED IDEOGRAPH:'EF6C:61292:鮞
CJK UNIFIED IDEOGRAPH:'EF6D:61293:鮛
CJK UNIFIED IDEOGRAPH:'EF6E:61294:鮦
CJK UNIFIED IDEOGRAPH:'EF6F:61295:鮡
CJK UNIFIED IDEOGRAPH:'EF70:61296:鮥
CJK UNIFIED IDEOGRAPH:'EF71:61297:鮤
CJK UNIFIED IDEOGRAPH:'EF72:61298:鮆
CJK UNIFIED IDEOGRAPH:'EF73:61299:鮢
CJK UNIFIED IDEOGRAPH:'EF74:61300:鮠
CJK UNIFIED IDEOGRAPH:'EF75:61301:鮯
CJK UNIFIED IDEOGRAPH:'EF76:61302:鴳
CJK UNIFIED IDEOGRAPH:'EF77:61303:鵁
CJK UNIFIED IDEOGRAPH:'EF78:61304:鵧
CJK UNIFIED IDEOGRAPH:'EF79:61305:鴶
CJK UNIFIED IDEOGRAPH:'EF7A:61306:鴮
CJK UNIFIED IDEOGRAPH:'EF7B:61307:鴯
CJK UNIFIED IDEOGRAPH:'EF7C:61308:鴱
CJK UNIFIED IDEOGRAPH:'EF7D:61309:鴸
CJK UNIFIED IDEOGRAPH:'EF7E:61310:鴰
CJK UNIFIED IDEOGRAPH:'EFA1:61345:鵅
CJK UNIFIED IDEOGRAPH:'EFA2:61346:鵂
CJK UNIFIED IDEOGRAPH:'EFA3:61347:鵃
CJK UNIFIED IDEOGRAPH:'EFA4:61348:鴾
CJK UNIFIED IDEOGRAPH:'EFA5:61349:鴷
CJK UNIFIED IDEOGRAPH:'EFA6:61350:鵀
CJK UNIFIED IDEOGRAPH:'EFA7:61351:鴽
CJK UNIFIED IDEOGRAPH:'EFA8:61352:翵
CJK UNIFIED IDEOGRAPH:'EFA9:61353:鴭
CJK UNIFIED IDEOGRAPH:'EFAA:61354:麊
CJK UNIFIED IDEOGRAPH:'EFAB:61355:麉
CJK UNIFIED IDEOGRAPH:'EFAC:61356:麍
CJK UNIFIED IDEOGRAPH:'EFAD:61357:麰
CJK UNIFIED IDEOGRAPH:'EFAE:61358:黈
CJK UNIFIED IDEOGRAPH:'EFAF:61359:黚
CJK UNIFIED IDEOGRAPH:'EFB0:61360:黻
CJK UNIFIED IDEOGRAPH:'EFB1:61361:黿
CJK UNIFIED IDEOGRAPH:'EFB2:61362:鼤
CJK UNIFIED IDEOGRAPH:'EFB3:61363:鼣
CJK UNIFIED IDEOGRAPH:'EFB4:61364:鼢
CJK UNIFIED IDEOGRAPH:'EFB5:61365:齔
CJK UNIFIED IDEOGRAPH:'EFB6:61366:龠
CJK UNIFIED IDEOGRAPH:'EFB7:61367:儱
CJK UNIFIED IDEOGRAPH:'EFB8:61368:儭
CJK UNIFIED IDEOGRAPH:'EFB9:61369:儮
CJK UNIFIED IDEOGRAPH:'EFBA:61370:嚘
CJK UNIFIED IDEOGRAPH:'EFBB:61371:嚜
CJK UNIFIED IDEOGRAPH:'EFBC:61372:嚗
CJK UNIFIED IDEOGRAPH:'EFBD:61373:嚚
CJK UNIFIED IDEOGRAPH:'EFBE:61374:嚝
CJK UNIFIED IDEOGRAPH:'EFBF:61375:嚙
CJK UNIFIED IDEOGRAPH:'EFC0:61376:奰
CJK UNIFIED IDEOGRAPH:'EFC1:61377:嬼
CJK UNIFIED IDEOGRAPH:'EFC2:61378:屩
CJK UNIFIED IDEOGRAPH:'EFC3:61379:屪
CJK UNIFIED IDEOGRAPH:'EFC4:61380:巀
CJK UNIFIED IDEOGRAPH:'EFC5:61381:幭
CJK UNIFIED IDEOGRAPH:'EFC6:61382:幮
CJK UNIFIED IDEOGRAPH:'EFC7:61383:懘
CJK UNIFIED IDEOGRAPH:'EFC8:61384:懟
CJK UNIFIED IDEOGRAPH:'EFC9:61385:懭
CJK UNIFIED IDEOGRAPH:'EFCA:61386:懮
CJK UNIFIED IDEOGRAPH:'EFCB:61387:懱
CJK UNIFIED IDEOGRAPH:'EFCC:61388:懪
CJK UNIFIED IDEOGRAPH:'EFCD:61389:懰
CJK UNIFIED IDEOGRAPH:'EFCE:61390:懫
CJK UNIFIED IDEOGRAPH:'EFCF:61391:懖
CJK UNIFIED IDEOGRAPH:'EFD0:61392:懩
CJK UNIFIED IDEOGRAPH:'EFD1:61393:擿
CJK UNIFIED IDEOGRAPH:'EFD2:61394:攄
CJK UNIFIED IDEOGRAPH:'EFD3:61395:擽
CJK UNIFIED IDEOGRAPH:'EFD4:61396:擸
CJK UNIFIED IDEOGRAPH:'EFD5:61397:攁
CJK UNIFIED IDEOGRAPH:'EFD6:61398:攃
CJK UNIFIED IDEOGRAPH:'EFD7:61399:擼
CJK UNIFIED IDEOGRAPH:'EFD8:61400:斔
CJK UNIFIED IDEOGRAPH:'EFD9:61401:旛
CJK UNIFIED IDEOGRAPH:'EFDA:61402:曚
CJK UNIFIED IDEOGRAPH:'EFDB:61403:曛
CJK UNIFIED IDEOGRAPH:'EFDC:61404:曘
CJK UNIFIED IDEOGRAPH:'EFDD:61405:櫅
CJK UNIFIED IDEOGRAPH:'EFDE:61406:檹
CJK UNIFIED IDEOGRAPH:'EFDF:61407:檽
CJK UNIFIED IDEOGRAPH:'EFE0:61408:櫡
CJK UNIFIED IDEOGRAPH:'EFE1:61409:櫆
CJK UNIFIED IDEOGRAPH:'EFE2:61410:檺
CJK UNIFIED IDEOGRAPH:'EFE3:61411:檶
CJK UNIFIED IDEOGRAPH:'EFE4:61412:檷
CJK UNIFIED IDEOGRAPH:'EFE5:61413:櫇
CJK UNIFIED IDEOGRAPH:'EFE6:61414:檴
CJK UNIFIED IDEOGRAPH:'EFE7:61415:檭
CJK UNIFIED IDEOGRAPH:'EFE8:61416:歞
CJK UNIFIED IDEOGRAPH:'EFE9:61417:毉
CJK UNIFIED IDEOGRAPH:'EFEA:61418:氋
CJK UNIFIED IDEOGRAPH:'EFEB:61419:瀇
CJK UNIFIED IDEOGRAPH:'EFEC:61420:瀌
CJK UNIFIED IDEOGRAPH:'EFED:61421:瀍
CJK UNIFIED IDEOGRAPH:'EFEE:61422:瀁
CJK UNIFIED IDEOGRAPH:'EFEF:61423:瀅
CJK UNIFIED IDEOGRAPH:'EFF0:61424:瀔
CJK UNIFIED IDEOGRAPH:'EFF1:61425:瀎
CJK UNIFIED IDEOGRAPH:'EFF2:61426:濿
CJK UNIFIED IDEOGRAPH:'EFF3:61427:瀀
CJK UNIFIED IDEOGRAPH:'EFF4:61428:濻
CJK UNIFIED IDEOGRAPH:'EFF5:61429:瀦
CJK UNIFIED IDEOGRAPH:'EFF6:61430:濼
CJK UNIFIED IDEOGRAPH:'EFF7:61431:濷
CJK UNIFIED IDEOGRAPH:'EFF8:61432:瀊
CJK UNIFIED IDEOGRAPH:'EFF9:61433:爁
CJK UNIFIED IDEOGRAPH:'EFFA:61434:燿
CJK UNIFIED IDEOGRAPH:'EFFB:61435:燹
CJK UNIFIED IDEOGRAPH:'EFFC:61436:爃
CJK UNIFIED IDEOGRAPH:'EFFD:61437:燽
CJK UNIFIED IDEOGRAPH:'EFFE:61438:獶
CJK UNIFIED IDEOGRAPH:'F040:61504:璸
CJK UNIFIED IDEOGRAPH:'F041:61505:瓀
CJK UNIFIED IDEOGRAPH:'F042:61506:璵
CJK UNIFIED IDEOGRAPH:'F043:61507:瓁
CJK UNIFIED IDEOGRAPH:'F044:61508:璾
CJK UNIFIED IDEOGRAPH:'F045:61509:璶
CJK UNIFIED IDEOGRAPH:'F046:61510:璻
CJK UNIFIED IDEOGRAPH:'F047:61511:瓂
CJK UNIFIED IDEOGRAPH:'F048:61512:甔
CJK UNIFIED IDEOGRAPH:'F049:61513:甓
CJK UNIFIED IDEOGRAPH:'F04A:61514:癜
CJK UNIFIED IDEOGRAPH:'F04B:61515:癤
CJK UNIFIED IDEOGRAPH:'F04C:61516:癙
CJK UNIFIED IDEOGRAPH:'F04D:61517:癐
CJK UNIFIED IDEOGRAPH:'F04E:61518:癓
CJK UNIFIED IDEOGRAPH:'F04F:61519:癗
CJK UNIFIED IDEOGRAPH:'F050:61520:癚
CJK UNIFIED IDEOGRAPH:'F051:61521:皦
CJK UNIFIED IDEOGRAPH:'F052:61522:皽
CJK UNIFIED IDEOGRAPH:'F053:61523:盬
CJK UNIFIED IDEOGRAPH:'F054:61524:矂
CJK UNIFIED IDEOGRAPH:'F055:61525:瞺
CJK UNIFIED IDEOGRAPH:'F056:61526:磿
CJK UNIFIED IDEOGRAPH:'F057:61527:礌
CJK UNIFIED IDEOGRAPH:'F058:61528:礓
CJK UNIFIED IDEOGRAPH:'F059:61529:礔
CJK UNIFIED IDEOGRAPH:'F05A:61530:礉
CJK UNIFIED IDEOGRAPH:'F05B:61531:礐
CJK UNIFIED IDEOGRAPH:'F05C:61532:礒
CJK UNIFIED IDEOGRAPH:'F05D:61533:礑
CJK UNIFIED IDEOGRAPH:'F05E:61534:禭
CJK UNIFIED IDEOGRAPH:'F05F:61535:禬
CJK UNIFIED IDEOGRAPH:'F060:61536:穟
CJK UNIFIED IDEOGRAPH:'F061:61537:簜
CJK UNIFIED IDEOGRAPH:'F062:61538:簩
CJK UNIFIED IDEOGRAPH:'F063:61539:簙
CJK UNIFIED IDEOGRAPH:'F064:61540:簠
CJK UNIFIED IDEOGRAPH:'F065:61541:簟
CJK UNIFIED IDEOGRAPH:'F066:61542:簭
CJK UNIFIED IDEOGRAPH:'F067:61543:簝
CJK UNIFIED IDEOGRAPH:'F068:61544:簦
CJK UNIFIED IDEOGRAPH:'F069:61545:簨
CJK UNIFIED IDEOGRAPH:'F06A:61546:簢
CJK UNIFIED IDEOGRAPH:'F06B:61547:簥
CJK UNIFIED IDEOGRAPH:'F06C:61548:簰
CJK UNIFIED IDEOGRAPH:'F06D:61549:繜
CJK UNIFIED IDEOGRAPH:'F06E:61550:繐
CJK UNIFIED IDEOGRAPH:'F06F:61551:繖
CJK UNIFIED IDEOGRAPH:'F070:61552:繣
CJK UNIFIED IDEOGRAPH:'F071:61553:繘
CJK UNIFIED IDEOGRAPH:'F072:61554:繢
CJK UNIFIED IDEOGRAPH:'F073:61555:繟
CJK UNIFIED IDEOGRAPH:'F074:61556:繑
CJK UNIFIED IDEOGRAPH:'F075:61557:繠
CJK UNIFIED IDEOGRAPH:'F076:61558:繗
CJK UNIFIED IDEOGRAPH:'F077:61559:繓
CJK UNIFIED IDEOGRAPH:'F078:61560:羵
CJK UNIFIED IDEOGRAPH:'F079:61561:羳
CJK UNIFIED IDEOGRAPH:'F07A:61562:翷
CJK UNIFIED IDEOGRAPH:'F07B:61563:翸
CJK UNIFIED IDEOGRAPH:'F07C:61564:聵
CJK UNIFIED IDEOGRAPH:'F07D:61565:臑
CJK UNIFIED IDEOGRAPH:'F07E:61566:臒
CJK UNIFIED IDEOGRAPH:'F0A1:61601:臐
CJK UNIFIED IDEOGRAPH:'F0A2:61602:艟
CJK UNIFIED IDEOGRAPH:'F0A3:61603:艞
CJK UNIFIED IDEOGRAPH:'F0A4:61604:薴
CJK UNIFIED IDEOGRAPH:'F0A5:61605:藆
CJK UNIFIED IDEOGRAPH:'F0A6:61606:藀
CJK UNIFIED IDEOGRAPH:'F0A7:61607:藃
CJK UNIFIED IDEOGRAPH:'F0A8:61608:藂
CJK UNIFIED IDEOGRAPH:'F0A9:61609:薳
CJK UNIFIED IDEOGRAPH:'F0AA:61610:薵
CJK UNIFIED IDEOGRAPH:'F0AB:61611:薽
CJK UNIFIED IDEOGRAPH:'F0AC:61612:藇
CJK UNIFIED IDEOGRAPH:'F0AD:61613:藄
CJK UNIFIED IDEOGRAPH:'F0AE:61614:薿
CJK UNIFIED IDEOGRAPH:'F0AF:61615:藋
CJK UNIFIED IDEOGRAPH:'F0B0:61616:藎
CJK UNIFIED IDEOGRAPH:'F0B1:61617:藈
CJK UNIFIED IDEOGRAPH:'F0B2:61618:藅
CJK UNIFIED IDEOGRAPH:'F0B3:61619:薱
CJK UNIFIED IDEOGRAPH:'F0B4:61620:薶
CJK UNIFIED IDEOGRAPH:'F0B5:61621:藒
CJK UNIFIED IDEOGRAPH:'F0B6:61622:蘤
CJK UNIFIED IDEOGRAPH:'F0B7:61623:薸
CJK UNIFIED IDEOGRAPH:'F0B8:61624:薷
CJK UNIFIED IDEOGRAPH:'F0B9:61625:薾
CJK UNIFIED IDEOGRAPH:'F0BA:61626:虩
CJK UNIFIED IDEOGRAPH:'F0BB:61627:蟧
CJK UNIFIED IDEOGRAPH:'F0BC:61628:蟦
CJK UNIFIED IDEOGRAPH:'F0BD:61629:蟢
CJK UNIFIED IDEOGRAPH:'F0BE:61630:蟛
CJK UNIFIED IDEOGRAPH:'F0BF:61631:蟫
CJK UNIFIED IDEOGRAPH:'F0C0:61632:蟪
CJK UNIFIED IDEOGRAPH:'F0C1:61633:蟥
CJK UNIFIED IDEOGRAPH:'F0C2:61634:蟟
CJK UNIFIED IDEOGRAPH:'F0C3:61635:蟳
CJK UNIFIED IDEOGRAPH:'F0C4:61636:蟤
CJK UNIFIED IDEOGRAPH:'F0C5:61637:蟔
CJK UNIFIED IDEOGRAPH:'F0C6:61638:蟜
CJK UNIFIED IDEOGRAPH:'F0C7:61639:蟓
CJK UNIFIED IDEOGRAPH:'F0C8:61640:蟭
CJK UNIFIED IDEOGRAPH:'F0C9:61641:蟘
CJK UNIFIED IDEOGRAPH:'F0CA:61642:蟣
CJK UNIFIED IDEOGRAPH:'F0CB:61643:螤
CJK UNIFIED IDEOGRAPH:'F0CC:61644:蟗
CJK UNIFIED IDEOGRAPH:'F0CD:61645:蟙
CJK UNIFIED IDEOGRAPH:'F0CE:61646:蠁
CJK UNIFIED IDEOGRAPH:'F0CF:61647:蟴
CJK UNIFIED IDEOGRAPH:'F0D0:61648:蟨
CJK UNIFIED IDEOGRAPH:'F0D1:61649:蟝
CJK UNIFIED IDEOGRAPH:'F0D2:61650:襓
CJK UNIFIED IDEOGRAPH:'F0D3:61651:襋
CJK UNIFIED IDEOGRAPH:'F0D4:61652:襏
CJK UNIFIED IDEOGRAPH:'F0D5:61653:襌
CJK UNIFIED IDEOGRAPH:'F0D6:61654:襆
CJK UNIFIED IDEOGRAPH:'F0D7:61655:襐
CJK UNIFIED IDEOGRAPH:'F0D8:61656:襑
CJK UNIFIED IDEOGRAPH:'F0D9:61657:襉
CJK UNIFIED IDEOGRAPH:'F0DA:61658:謪
CJK UNIFIED IDEOGRAPH:'F0DB:61659:謧
CJK UNIFIED IDEOGRAPH:'F0DC:61660:謣
CJK UNIFIED IDEOGRAPH:'F0DD:61661:謳
CJK UNIFIED IDEOGRAPH:'F0DE:61662:謰
CJK UNIFIED IDEOGRAPH:'F0DF:61663:謵
CJK UNIFIED IDEOGRAPH:'F0E0:61664:譇
CJK UNIFIED IDEOGRAPH:'F0E1:61665:謯
CJK UNIFIED IDEOGRAPH:'F0E2:61666:謼
CJK UNIFIED IDEOGRAPH:'F0E3:61667:謾
CJK UNIFIED IDEOGRAPH:'F0E4:61668:謱
CJK UNIFIED IDEOGRAPH:'F0E5:61669:謥
CJK UNIFIED IDEOGRAPH:'F0E6:61670:謷
CJK UNIFIED IDEOGRAPH:'F0E7:61671:謦
CJK UNIFIED IDEOGRAPH:'F0E8:61672:謶
CJK UNIFIED IDEOGRAPH:'F0E9:61673:謮
CJK UNIFIED IDEOGRAPH:'F0EA:61674:謤
CJK UNIFIED IDEOGRAPH:'F0EB:61675:謻
CJK UNIFIED IDEOGRAPH:'F0EC:61676:謽
CJK UNIFIED IDEOGRAPH:'F0ED:61677:謺
CJK UNIFIED IDEOGRAPH:'F0EE:61678:豂
CJK UNIFIED IDEOGRAPH:'F0EF:61679:豵
CJK UNIFIED IDEOGRAPH:'F0F0:61680:貙
CJK UNIFIED IDEOGRAPH:'F0F1:61681:貘
CJK UNIFIED IDEOGRAPH:'F0F2:61682:貗
CJK UNIFIED IDEOGRAPH:'F0F3:61683:賾
CJK UNIFIED IDEOGRAPH:'F0F4:61684:贄
CJK UNIFIED IDEOGRAPH:'F0F5:61685:贂
CJK UNIFIED IDEOGRAPH:'F0F6:61686:贀
CJK UNIFIED IDEOGRAPH:'F0F7:61687:蹜
CJK UNIFIED IDEOGRAPH:'F0F8:61688:蹢
CJK UNIFIED IDEOGRAPH:'F0F9:61689:蹠
CJK UNIFIED IDEOGRAPH:'F0FA:61690:蹗
CJK UNIFIED IDEOGRAPH:'F0FB:61691:蹖
CJK UNIFIED IDEOGRAPH:'F0FC:61692:蹞
CJK UNIFIED IDEOGRAPH:'F0FD:61693:蹥
CJK UNIFIED IDEOGRAPH:'F0FE:61694:蹧
CJK UNIFIED IDEOGRAPH:'F140:61760:蹛
CJK UNIFIED IDEOGRAPH:'F141:61761:蹚
CJK UNIFIED IDEOGRAPH:'F142:61762:蹡
CJK UNIFIED IDEOGRAPH:'F143:61763:蹝
CJK UNIFIED IDEOGRAPH:'F144:61764:蹩
CJK UNIFIED IDEOGRAPH:'F145:61765:蹔
CJK UNIFIED IDEOGRAPH:'F146:61766:轆
CJK UNIFIED IDEOGRAPH:'F147:61767:轇
CJK UNIFIED IDEOGRAPH:'F148:61768:轈
CJK UNIFIED IDEOGRAPH:'F149:61769:轋
CJK UNIFIED IDEOGRAPH:'F14A:61770:鄨
CJK UNIFIED IDEOGRAPH:'F14B:61771:鄺
CJK UNIFIED IDEOGRAPH:'F14C:61772:鄻
CJK UNIFIED IDEOGRAPH:'F14D:61773:鄾
CJK UNIFIED IDEOGRAPH:'F14E:61774:醨
CJK UNIFIED IDEOGRAPH:'F14F:61775:醥
CJK UNIFIED IDEOGRAPH:'F150:61776:醧
CJK UNIFIED IDEOGRAPH:'F151:61777:醯
CJK UNIFIED IDEOGRAPH:'F152:61778:醪
CJK UNIFIED IDEOGRAPH:'F153:61779:鎵
CJK UNIFIED IDEOGRAPH:'F154:61780:鎌
CJK UNIFIED IDEOGRAPH:'F155:61781:鎒
CJK UNIFIED IDEOGRAPH:'F156:61782:鎷
CJK UNIFIED IDEOGRAPH:'F157:61783:鎛
CJK UNIFIED IDEOGRAPH:'F158:61784:鎝
CJK UNIFIED IDEOGRAPH:'F159:61785:鎉
CJK UNIFIED IDEOGRAPH:'F15A:61786:鎧
CJK UNIFIED IDEOGRAPH:'F15B:61787:鎎
CJK UNIFIED IDEOGRAPH:'F15C:61788:鎪
CJK UNIFIED IDEOGRAPH:'F15D:61789:鎞
CJK UNIFIED IDEOGRAPH:'F15E:61790:鎦
CJK UNIFIED IDEOGRAPH:'F15F:61791:鎕
CJK UNIFIED IDEOGRAPH:'F160:61792:鎈
CJK UNIFIED IDEOGRAPH:'F161:61793:鎙
CJK UNIFIED IDEOGRAPH:'F162:61794:鎟
CJK UNIFIED IDEOGRAPH:'F163:61795:鎍
CJK UNIFIED IDEOGRAPH:'F164:61796:鎱
CJK UNIFIED IDEOGRAPH:'F165:61797:鎑
CJK UNIFIED IDEOGRAPH:'F166:61798:鎲
CJK UNIFIED IDEOGRAPH:'F167:61799:鎤
CJK UNIFIED IDEOGRAPH:'F168:61800:鎨
CJK UNIFIED IDEOGRAPH:'F169:61801:鎴
CJK UNIFIED IDEOGRAPH:'F16A:61802:鎣
CJK UNIFIED IDEOGRAPH:'F16B:61803:鎥
CJK UNIFIED IDEOGRAPH:'F16C:61804:闒
CJK UNIFIED IDEOGRAPH:'F16D:61805:闓
CJK UNIFIED IDEOGRAPH:'F16E:61806:闑
CJK UNIFIED IDEOGRAPH:'F16F:61807:隳
CJK UNIFIED IDEOGRAPH:'F170:61808:雗
CJK UNIFIED IDEOGRAPH:'F171:61809:雚
CJK UNIFIED IDEOGRAPH:'F172:61810:巂
CJK UNIFIED IDEOGRAPH:'F173:61811:雟
CJK UNIFIED IDEOGRAPH:'F174:61812:雘
CJK UNIFIED IDEOGRAPH:'F175:61813:雝
CJK UNIFIED IDEOGRAPH:'F176:61814:霣
CJK UNIFIED IDEOGRAPH:'F177:61815:霢
CJK UNIFIED IDEOGRAPH:'F178:61816:霥
CJK UNIFIED IDEOGRAPH:'F179:61817:鞬
CJK UNIFIED IDEOGRAPH:'F17A:61818:鞮
CJK UNIFIED IDEOGRAPH:'F17B:61819:鞨
CJK UNIFIED IDEOGRAPH:'F17C:61820:鞫
CJK UNIFIED IDEOGRAPH:'F17D:61821:鞤
CJK UNIFIED IDEOGRAPH:'F17E:61822:鞪
CJK UNIFIED IDEOGRAPH:'F1A1:61857:鞢
CJK UNIFIED IDEOGRAPH:'F1A2:61858:鞥
CJK UNIFIED IDEOGRAPH:'F1A3:61859:韗
CJK UNIFIED IDEOGRAPH:'F1A4:61860:韙
CJK UNIFIED IDEOGRAPH:'F1A5:61861:韖
CJK UNIFIED IDEOGRAPH:'F1A6:61862:韘
CJK UNIFIED IDEOGRAPH:'F1A7:61863:韺
CJK UNIFIED IDEOGRAPH:'F1A8:61864:顐
CJK UNIFIED IDEOGRAPH:'F1A9:61865:顑
CJK UNIFIED IDEOGRAPH:'F1AA:61866:顒
CJK UNIFIED IDEOGRAPH:'F1AB:61867:颸
CJK UNIFIED IDEOGRAPH:'F1AC:61868:饁
CJK UNIFIED IDEOGRAPH:'F1AD:61869:餼
CJK UNIFIED IDEOGRAPH:'F1AE:61870:餺
CJK UNIFIED IDEOGRAPH:'F1AF:61871:騏
CJK UNIFIED IDEOGRAPH:'F1B0:61872:騋
CJK UNIFIED IDEOGRAPH:'F1B1:61873:騉
CJK UNIFIED IDEOGRAPH:'F1B2:61874:騍
CJK UNIFIED IDEOGRAPH:'F1B3:61875:騄
CJK UNIFIED IDEOGRAPH:'F1B4:61876:騑
CJK UNIFIED IDEOGRAPH:'F1B5:61877:騊
CJK UNIFIED IDEOGRAPH:'F1B6:61878:騅
CJK UNIFIED IDEOGRAPH:'F1B7:61879:騇
CJK UNIFIED IDEOGRAPH:'F1B8:61880:騆
CJK UNIFIED IDEOGRAPH:'F1B9:61881:髀
CJK UNIFIED IDEOGRAPH:'F1BA:61882:髜
CJK UNIFIED IDEOGRAPH:'F1BB:61883:鬈
CJK UNIFIED IDEOGRAPH:'F1BC:61884:鬄
CJK UNIFIED IDEOGRAPH:'F1BD:61885:鬅
CJK UNIFIED IDEOGRAPH:'F1BE:61886:鬩
CJK UNIFIED IDEOGRAPH:'F1BF:61887:鬵
CJK UNIFIED IDEOGRAPH:'F1C0:61888:魊
CJK UNIFIED IDEOGRAPH:'F1C1:61889:魌
CJK UNIFIED IDEOGRAPH:'F1C2:61890:魋
CJK UNIFIED IDEOGRAPH:'F1C3:61891:鯇
CJK UNIFIED IDEOGRAPH:'F1C4:61892:鯆
CJK UNIFIED IDEOGRAPH:'F1C5:61893:鯃
CJK UNIFIED IDEOGRAPH:'F1C6:61894:鮿
CJK UNIFIED IDEOGRAPH:'F1C7:61895:鯁
CJK UNIFIED IDEOGRAPH:'F1C8:61896:鮵
CJK UNIFIED IDEOGRAPH:'F1C9:61897:鮸
CJK UNIFIED IDEOGRAPH:'F1CA:61898:鯓
CJK UNIFIED IDEOGRAPH:'F1CB:61899:鮶
CJK UNIFIED IDEOGRAPH:'F1CC:61900:鯄
CJK UNIFIED IDEOGRAPH:'F1CD:61901:鮹
CJK UNIFIED IDEOGRAPH:'F1CE:61902:鮽
CJK UNIFIED IDEOGRAPH:'F1CF:61903:鵜
CJK UNIFIED IDEOGRAPH:'F1D0:61904:鵓
CJK UNIFIED IDEOGRAPH:'F1D1:61905:鵏
CJK UNIFIED IDEOGRAPH:'F1D2:61906:鵊
CJK UNIFIED IDEOGRAPH:'F1D3:61907:鵛
CJK UNIFIED IDEOGRAPH:'F1D4:61908:鵋
CJK UNIFIED IDEOGRAPH:'F1D5:61909:鵙
CJK UNIFIED IDEOGRAPH:'F1D6:61910:鵖
CJK UNIFIED IDEOGRAPH:'F1D7:61911:鵌
CJK UNIFIED IDEOGRAPH:'F1D8:61912:鵗
CJK UNIFIED IDEOGRAPH:'F1D9:61913:鵒
CJK UNIFIED IDEOGRAPH:'F1DA:61914:鵔
CJK UNIFIED IDEOGRAPH:'F1DB:61915:鵟
CJK UNIFIED IDEOGRAPH:'F1DC:61916:鵘
CJK UNIFIED IDEOGRAPH:'F1DD:61917:鵚
CJK UNIFIED IDEOGRAPH:'F1DE:61918:麎
CJK UNIFIED IDEOGRAPH:'F1DF:61919:麌
CJK UNIFIED IDEOGRAPH:'F1E0:61920:黟
CJK UNIFIED IDEOGRAPH:'F1E1:61921:鼁
CJK UNIFIED IDEOGRAPH:'F1E2:61922:鼀
CJK UNIFIED IDEOGRAPH:'F1E3:61923:鼖
CJK UNIFIED IDEOGRAPH:'F1E4:61924:鼥
CJK UNIFIED IDEOGRAPH:'F1E5:61925:鼫
CJK UNIFIED IDEOGRAPH:'F1E6:61926:鼪
CJK UNIFIED IDEOGRAPH:'F1E7:61927:鼩
CJK UNIFIED IDEOGRAPH:'F1E8:61928:鼨
CJK UNIFIED IDEOGRAPH:'F1E9:61929:齌
CJK UNIFIED IDEOGRAPH:'F1EA:61930:齕
CJK UNIFIED IDEOGRAPH:'F1EB:61931:儴
CJK UNIFIED IDEOGRAPH:'F1EC:61932:儵
CJK UNIFIED IDEOGRAPH:'F1ED:61933:劖
CJK UNIFIED IDEOGRAPH:'F1EE:61934:勷
CJK UNIFIED IDEOGRAPH:'F1EF:61935:厴
CJK UNIFIED IDEOGRAPH:'F1F0:61936:嚫
CJK UNIFIED IDEOGRAPH:'F1F1:61937:嚭
CJK UNIFIED IDEOGRAPH:'F1F2:61938:嚦
CJK UNIFIED IDEOGRAPH:'F1F3:61939:嚧
CJK UNIFIED IDEOGRAPH:'F1F4:61940:嚪
CJK UNIFIED IDEOGRAPH:'F1F5:61941:嚬
CJK UNIFIED IDEOGRAPH:'F1F6:61942:壚
CJK UNIFIED IDEOGRAPH:'F1F7:61943:壝
CJK UNIFIED IDEOGRAPH:'F1F8:61944:壛
CJK UNIFIED IDEOGRAPH:'F1F9:61945:夒
CJK UNIFIED IDEOGRAPH:'F1FA:61946:嬽
CJK UNIFIED IDEOGRAPH:'F1FB:61947:嬾
CJK UNIFIED IDEOGRAPH:'F1FC:61948:嬿
CJK UNIFIED IDEOGRAPH:'F1FD:61949:巃
CJK UNIFIED IDEOGRAPH:'F1FE:61950:幰
CJK UNIFIED IDEOGRAPH:'F240:62016:徿
CJK UNIFIED IDEOGRAPH:'F241:62017:懻
CJK UNIFIED IDEOGRAPH:'F242:62018:攇
CJK UNIFIED IDEOGRAPH:'F243:62019:攐
CJK UNIFIED IDEOGRAPH:'F244:62020:攍
CJK UNIFIED IDEOGRAPH:'F245:62021:攉
CJK UNIFIED IDEOGRAPH:'F246:62022:攌
CJK UNIFIED IDEOGRAPH:'F247:62023:攎
CJK UNIFIED IDEOGRAPH:'F248:62024:斄
CJK UNIFIED IDEOGRAPH:'F249:62025:旞
CJK UNIFIED IDEOGRAPH:'F24A:62026:旝
CJK UNIFIED IDEOGRAPH:'F24B:62027:曞
CJK UNIFIED IDEOGRAPH:'F24C:62028:櫧
CJK UNIFIED IDEOGRAPH:'F24D:62029:櫠
CJK UNIFIED IDEOGRAPH:'F24E:62030:櫌
CJK UNIFIED IDEOGRAPH:'F24F:62031:櫑
CJK UNIFIED IDEOGRAPH:'F250:62032:櫙
CJK UNIFIED IDEOGRAPH:'F251:62033:櫋
CJK UNIFIED IDEOGRAPH:'F252:62034:櫟
CJK UNIFIED IDEOGRAPH:'F253:62035:櫜
CJK UNIFIED IDEOGRAPH:'F254:62036:櫐
CJK UNIFIED IDEOGRAPH:'F255:62037:櫫
CJK UNIFIED IDEOGRAPH:'F256:62038:櫏
CJK UNIFIED IDEOGRAPH:'F257:62039:櫍
CJK UNIFIED IDEOGRAPH:'F258:62040:櫞
CJK UNIFIED IDEOGRAPH:'F259:62041:歠
CJK UNIFIED IDEOGRAPH:'F25A:62042:殰
CJK UNIFIED IDEOGRAPH:'F25B:62043:氌
CJK UNIFIED IDEOGRAPH:'F25C:62044:瀙
CJK UNIFIED IDEOGRAPH:'F25D:62045:瀧
CJK UNIFIED IDEOGRAPH:'F25E:62046:瀠
CJK UNIFIED IDEOGRAPH:'F25F:62047:瀖
CJK UNIFIED IDEOGRAPH:'F260:62048:瀫
CJK UNIFIED IDEOGRAPH:'F261:62049:瀡
CJK UNIFIED IDEOGRAPH:'F262:62050:瀢
CJK UNIFIED IDEOGRAPH:'F263:62051:瀣
CJK UNIFIED IDEOGRAPH:'F264:62052:瀩
CJK UNIFIED IDEOGRAPH:'F265:62053:瀗
CJK UNIFIED IDEOGRAPH:'F266:62054:瀤
CJK UNIFIED IDEOGRAPH:'F267:62055:瀜
CJK UNIFIED IDEOGRAPH:'F268:62056:瀪
CJK UNIFIED IDEOGRAPH:'F269:62057:爌
CJK UNIFIED IDEOGRAPH:'F26A:62058:爊
CJK UNIFIED IDEOGRAPH:'F26B:62059:爇
CJK UNIFIED IDEOGRAPH:'F26C:62060:爂
CJK UNIFIED IDEOGRAPH:'F26D:62061:爅
CJK UNIFIED IDEOGRAPH:'F26E:62062:犥
CJK UNIFIED IDEOGRAPH:'F26F:62063:犦
CJK UNIFIED IDEOGRAPH:'F270:62064:犤
CJK UNIFIED IDEOGRAPH:'F271:62065:犣
CJK UNIFIED IDEOGRAPH:'F272:62066:犡
CJK UNIFIED IDEOGRAPH:'F273:62067:瓋
CJK UNIFIED IDEOGRAPH:'F274:62068:瓅
CJK UNIFIED IDEOGRAPH:'F275:62069:璷
CJK UNIFIED IDEOGRAPH:'F276:62070:瓃
CJK UNIFIED IDEOGRAPH:'F277:62071:甖
CJK UNIFIED IDEOGRAPH:'F278:62072:癠
CJK UNIFIED IDEOGRAPH:'F279:62073:矉
CJK UNIFIED IDEOGRAPH:'F27A:62074:矊
CJK UNIFIED IDEOGRAPH:'F27B:62075:矄
CJK UNIFIED IDEOGRAPH:'F27C:62076:矱
CJK UNIFIED IDEOGRAPH:'F27D:62077:礝
CJK UNIFIED IDEOGRAPH:'F27E:62078:礛
CJK UNIFIED IDEOGRAPH:'F2A1:62113:礡
CJK UNIFIED IDEOGRAPH:'F2A2:62114:礜
CJK UNIFIED IDEOGRAPH:'F2A3:62115:礗
CJK UNIFIED IDEOGRAPH:'F2A4:62116:礞
CJK UNIFIED IDEOGRAPH:'F2A5:62117:禰
CJK UNIFIED IDEOGRAPH:'F2A6:62118:穧
CJK UNIFIED IDEOGRAPH:'F2A7:62119:穨
CJK UNIFIED IDEOGRAPH:'F2A8:62120:簳
CJK UNIFIED IDEOGRAPH:'F2A9:62121:簼
CJK UNIFIED IDEOGRAPH:'F2AA:62122:簹
CJK UNIFIED IDEOGRAPH:'F2AB:62123:簬
CJK UNIFIED IDEOGRAPH:'F2AC:62124:簻
CJK UNIFIED IDEOGRAPH:'F2AD:62125:糬
CJK UNIFIED IDEOGRAPH:'F2AE:62126:糪
CJK UNIFIED IDEOGRAPH:'F2AF:62127:繶
CJK UNIFIED IDEOGRAPH:'F2B0:62128:繵
CJK UNIFIED IDEOGRAPH:'F2B1:62129:繸
CJK UNIFIED IDEOGRAPH:'F2B2:62130:繰
CJK UNIFIED IDEOGRAPH:'F2B3:62131:繷
CJK UNIFIED IDEOGRAPH:'F2B4:62132:繯
CJK UNIFIED IDEOGRAPH:'F2B5:62133:繺
CJK UNIFIED IDEOGRAPH:'F2B6:62134:繲
CJK UNIFIED IDEOGRAPH:'F2B7:62135:繴
CJK UNIFIED IDEOGRAPH:'F2B8:62136:繨
CJK UNIFIED IDEOGRAPH:'F2B9:62137:罋
CJK UNIFIED IDEOGRAPH:'F2BA:62138:罊
CJK UNIFIED IDEOGRAPH:'F2BB:62139:羃
CJK UNIFIED IDEOGRAPH:'F2BC:62140:羆
CJK UNIFIED IDEOGRAPH:'F2BD:62141:羷
CJK UNIFIED IDEOGRAPH:'F2BE:62142:翽
CJK UNIFIED IDEOGRAPH:'F2BF:62143:翾
CJK UNIFIED IDEOGRAPH:'F2C0:62144:聸
CJK UNIFIED IDEOGRAPH:'F2C1:62145:臗
CJK UNIFIED IDEOGRAPH:'F2C2:62146:臕
CJK UNIFIED IDEOGRAPH:'F2C3:62147:艤
CJK UNIFIED IDEOGRAPH:'F2C4:62148:艡
CJK UNIFIED IDEOGRAPH:'F2C5:62149:艣
CJK UNIFIED IDEOGRAPH:'F2C6:62150:藫
CJK UNIFIED IDEOGRAPH:'F2C7:62151:藱
CJK UNIFIED IDEOGRAPH:'F2C8:62152:藭
CJK UNIFIED IDEOGRAPH:'F2C9:62153:藙
CJK UNIFIED IDEOGRAPH:'F2CA:62154:藡
CJK UNIFIED IDEOGRAPH:'F2CB:62155:藨
CJK UNIFIED IDEOGRAPH:'F2CC:62156:藚
CJK UNIFIED IDEOGRAPH:'F2CD:62157:藗
CJK UNIFIED IDEOGRAPH:'F2CE:62158:藬
CJK UNIFIED IDEOGRAPH:'F2CF:62159:藲
CJK UNIFIED IDEOGRAPH:'F2D0:62160:藸
CJK UNIFIED IDEOGRAPH:'F2D1:62161:藘
CJK UNIFIED IDEOGRAPH:'F2D2:62162:藟
CJK UNIFIED IDEOGRAPH:'F2D3:62163:藣
CJK UNIFIED IDEOGRAPH:'F2D4:62164:藜
CJK UNIFIED IDEOGRAPH:'F2D5:62165:藑
CJK UNIFIED IDEOGRAPH:'F2D6:62166:藰
CJK UNIFIED IDEOGRAPH:'F2D7:62167:藦
CJK UNIFIED IDEOGRAPH:'F2D8:62168:藯
CJK UNIFIED IDEOGRAPH:'F2D9:62169:藞
CJK UNIFIED IDEOGRAPH:'F2DA:62170:藢
CJK UNIFIED IDEOGRAPH:'F2DB:62171:蠀
CJK UNIFIED IDEOGRAPH:'F2DC:62172:蟺
CJK UNIFIED IDEOGRAPH:'F2DD:62173:蠃
CJK UNIFIED IDEOGRAPH:'F2DE:62174:蟶
CJK UNIFIED IDEOGRAPH:'F2DF:62175:蟷
CJK UNIFIED IDEOGRAPH:'F2E0:62176:蠉
CJK UNIFIED IDEOGRAPH:'F2E1:62177:蠌
CJK UNIFIED IDEOGRAPH:'F2E2:62178:蠋
CJK UNIFIED IDEOGRAPH:'F2E3:62179:蠆
CJK UNIFIED IDEOGRAPH:'F2E4:62180:蟼
CJK UNIFIED IDEOGRAPH:'F2E5:62181:蠈
CJK UNIFIED IDEOGRAPH:'F2E6:62182:蟿
CJK UNIFIED IDEOGRAPH:'F2E7:62183:蠊
CJK UNIFIED IDEOGRAPH:'F2E8:62184:蠂
CJK UNIFIED IDEOGRAPH:'F2E9:62185:襢
CJK UNIFIED IDEOGRAPH:'F2EA:62186:襚
CJK UNIFIED IDEOGRAPH:'F2EB:62187:襛
CJK UNIFIED IDEOGRAPH:'F2EC:62188:襗
CJK UNIFIED IDEOGRAPH:'F2ED:62189:襡
CJK UNIFIED IDEOGRAPH:'F2EE:62190:襜
CJK UNIFIED IDEOGRAPH:'F2EF:62191:襘
CJK UNIFIED IDEOGRAPH:'F2F0:62192:襝
CJK UNIFIED IDEOGRAPH:'F2F1:62193:襙
CJK UNIFIED IDEOGRAPH:'F2F2:62194:覈
CJK UNIFIED IDEOGRAPH:'F2F3:62195:覷
CJK UNIFIED IDEOGRAPH:'F2F4:62196:覶
CJK UNIFIED IDEOGRAPH:'F2F5:62197:觶
CJK UNIFIED IDEOGRAPH:'F2F6:62198:譐
CJK UNIFIED IDEOGRAPH:'F2F7:62199:譈
CJK UNIFIED IDEOGRAPH:'F2F8:62200:譊
CJK UNIFIED IDEOGRAPH:'F2F9:62201:譀
CJK UNIFIED IDEOGRAPH:'F2FA:62202:譓
CJK UNIFIED IDEOGRAPH:'F2FB:62203:譖
CJK UNIFIED IDEOGRAPH:'F2FC:62204:譔
CJK UNIFIED IDEOGRAPH:'F2FD:62205:譋
CJK UNIFIED IDEOGRAPH:'F2FE:62206:譕
CJK UNIFIED IDEOGRAPH:'F340:62272:譑
CJK UNIFIED IDEOGRAPH:'F341:62273:譂
CJK UNIFIED IDEOGRAPH:'F342:62274:譒
CJK UNIFIED IDEOGRAPH:'F343:62275:譗
CJK UNIFIED IDEOGRAPH:'F344:62276:豃
CJK UNIFIED IDEOGRAPH:'F345:62277:豷
CJK UNIFIED IDEOGRAPH:'F346:62278:豶
CJK UNIFIED IDEOGRAPH:'F347:62279:貚
CJK UNIFIED IDEOGRAPH:'F348:62280:贆
CJK UNIFIED IDEOGRAPH:'F349:62281:贇
CJK UNIFIED IDEOGRAPH:'F34A:62282:贉
CJK UNIFIED IDEOGRAPH:'F34B:62283:趬
CJK UNIFIED IDEOGRAPH:'F34C:62284:趪
CJK UNIFIED IDEOGRAPH:'F34D:62285:趭
CJK UNIFIED IDEOGRAPH:'F34E:62286:趫
CJK UNIFIED IDEOGRAPH:'F34F:62287:蹭
CJK UNIFIED IDEOGRAPH:'F350:62288:蹸
CJK UNIFIED IDEOGRAPH:'F351:62289:蹳
CJK UNIFIED IDEOGRAPH:'F352:62290:蹪
CJK UNIFIED IDEOGRAPH:'F353:62291:蹯
CJK UNIFIED IDEOGRAPH:'F354:62292:蹻
CJK UNIFIED IDEOGRAPH:'F355:62293:軂
CJK UNIFIED IDEOGRAPH:'F356:62294:轒
CJK UNIFIED IDEOGRAPH:'F357:62295:轑
CJK UNIFIED IDEOGRAPH:'F358:62296:轏
CJK UNIFIED IDEOGRAPH:'F359:62297:轐
CJK UNIFIED IDEOGRAPH:'F35A:62298:轓
CJK UNIFIED IDEOGRAPH:'F35B:62299:辴
CJK UNIFIED IDEOGRAPH:'F35C:62300:酀
CJK UNIFIED IDEOGRAPH:'F35D:62301:鄿
CJK UNIFIED IDEOGRAPH:'F35E:62302:醰
CJK UNIFIED IDEOGRAPH:'F35F:62303:醭
CJK UNIFIED IDEOGRAPH:'F360:62304:鏞
CJK UNIFIED IDEOGRAPH:'F361:62305:鏇
CJK UNIFIED IDEOGRAPH:'F362:62306:鏏
CJK UNIFIED IDEOGRAPH:'F363:62307:鏂
CJK UNIFIED IDEOGRAPH:'F364:62308:鏚
CJK UNIFIED IDEOGRAPH:'F365:62309:鏐
CJK UNIFIED IDEOGRAPH:'F366:62310:鏹
CJK UNIFIED IDEOGRAPH:'F367:62311:鏬
CJK UNIFIED IDEOGRAPH:'F368:62312:鏌
CJK UNIFIED IDEOGRAPH:'F369:62313:鏙
CJK UNIFIED IDEOGRAPH:'F36A:62314:鎩
CJK UNIFIED IDEOGRAPH:'F36B:62315:鏦
CJK UNIFIED IDEOGRAPH:'F36C:62316:鏊
CJK UNIFIED IDEOGRAPH:'F36D:62317:鏔
CJK UNIFIED IDEOGRAPH:'F36E:62318:鏮
CJK UNIFIED IDEOGRAPH:'F36F:62319:鏣
CJK UNIFIED IDEOGRAPH:'F370:62320:鏕
CJK UNIFIED IDEOGRAPH:'F371:62321:鏄
CJK UNIFIED IDEOGRAPH:'F372:62322:鏎
CJK UNIFIED IDEOGRAPH:'F373:62323:鏀
CJK UNIFIED IDEOGRAPH:'F374:62324:鏒
CJK UNIFIED IDEOGRAPH:'F375:62325:鏧
CJK UNIFIED IDEOGRAPH:'F376:62326:镽
CJK UNIFIED IDEOGRAPH:'F377:62327:闚
CJK UNIFIED IDEOGRAPH:'F378:62328:闛
CJK UNIFIED IDEOGRAPH:'F379:62329:雡
CJK UNIFIED IDEOGRAPH:'F37A:62330:霩
CJK UNIFIED IDEOGRAPH:'F37B:62331:霫
CJK UNIFIED IDEOGRAPH:'F37C:62332:霬
CJK UNIFIED IDEOGRAPH:'F37D:62333:霨
CJK UNIFIED IDEOGRAPH:'F37E:62334:霦
CJK UNIFIED IDEOGRAPH:'F3A1:62369:鞳
CJK UNIFIED IDEOGRAPH:'F3A2:62370:鞷
CJK UNIFIED IDEOGRAPH:'F3A3:62371:鞶
CJK UNIFIED IDEOGRAPH:'F3A4:62372:韝
CJK UNIFIED IDEOGRAPH:'F3A5:62373:韞
CJK UNIFIED IDEOGRAPH:'F3A6:62374:韟
CJK UNIFIED IDEOGRAPH:'F3A7:62375:顜
CJK UNIFIED IDEOGRAPH:'F3A8:62376:顙
CJK UNIFIED IDEOGRAPH:'F3A9:62377:顝
CJK UNIFIED IDEOGRAPH:'F3AA:62378:顗
CJK UNIFIED IDEOGRAPH:'F3AB:62379:颿
CJK UNIFIED IDEOGRAPH:'F3AC:62380:颽
CJK UNIFIED IDEOGRAPH:'F3AD:62381:颻
CJK UNIFIED IDEOGRAPH:'F3AE:62382:颾
CJK UNIFIED IDEOGRAPH:'F3AF:62383:饈
CJK UNIFIED IDEOGRAPH:'F3B0:62384:饇
CJK UNIFIED IDEOGRAPH:'F3B1:62385:饃
CJK UNIFIED IDEOGRAPH:'F3B2:62386:馦
CJK UNIFIED IDEOGRAPH:'F3B3:62387:馧
CJK UNIFIED IDEOGRAPH:'F3B4:62388:騚
CJK UNIFIED IDEOGRAPH:'F3B5:62389:騕
CJK UNIFIED IDEOGRAPH:'F3B6:62390:騥
CJK UNIFIED IDEOGRAPH:'F3B7:62391:騝
CJK UNIFIED IDEOGRAPH:'F3B8:62392:騤
CJK UNIFIED IDEOGRAPH:'F3B9:62393:騛
CJK UNIFIED IDEOGRAPH:'F3BA:62394:騢
CJK UNIFIED IDEOGRAPH:'F3BB:62395:騠
CJK UNIFIED IDEOGRAPH:'F3BC:62396:騧
CJK UNIFIED IDEOGRAPH:'F3BD:62397:騣
CJK UNIFIED IDEOGRAPH:'F3BE:62398:騞
CJK UNIFIED IDEOGRAPH:'F3BF:62399:騜
CJK UNIFIED IDEOGRAPH:'F3C0:62400:騔
CJK UNIFIED IDEOGRAPH:'F3C1:62401:髂
CJK UNIFIED IDEOGRAPH:'F3C2:62402:鬋
CJK UNIFIED IDEOGRAPH:'F3C3:62403:鬊
CJK UNIFIED IDEOGRAPH:'F3C4:62404:鬎
CJK UNIFIED IDEOGRAPH:'F3C5:62405:鬌
CJK UNIFIED IDEOGRAPH:'F3C6:62406:鬷
CJK UNIFIED IDEOGRAPH:'F3C7:62407:鯪
CJK UNIFIED IDEOGRAPH:'F3C8:62408:鯫
CJK UNIFIED IDEOGRAPH:'F3C9:62409:鯠
CJK UNIFIED IDEOGRAPH:'F3CA:62410:鯞
CJK UNIFIED IDEOGRAPH:'F3CB:62411:鯤
CJK UNIFIED IDEOGRAPH:'F3CC:62412:鯦
CJK UNIFIED IDEOGRAPH:'F3CD:62413:鯢
CJK UNIFIED IDEOGRAPH:'F3CE:62414:鯰
CJK UNIFIED IDEOGRAPH:'F3CF:62415:鯔
CJK UNIFIED IDEOGRAPH:'F3D0:62416:鯗
CJK UNIFIED IDEOGRAPH:'F3D1:62417:鯬
CJK UNIFIED IDEOGRAPH:'F3D2:62418:鯜
CJK UNIFIED IDEOGRAPH:'F3D3:62419:鯙
CJK UNIFIED IDEOGRAPH:'F3D4:62420:鯥
CJK UNIFIED IDEOGRAPH:'F3D5:62421:鯕
CJK UNIFIED IDEOGRAPH:'F3D6:62422:鯡
CJK UNIFIED IDEOGRAPH:'F3D7:62423:鯚
CJK UNIFIED IDEOGRAPH:'F3D8:62424:鵷
CJK UNIFIED IDEOGRAPH:'F3D9:62425:鶁
CJK UNIFIED IDEOGRAPH:'F3DA:62426:鶊
CJK UNIFIED IDEOGRAPH:'F3DB:62427:鶄
CJK UNIFIED IDEOGRAPH:'F3DC:62428:鶈
CJK UNIFIED IDEOGRAPH:'F3DD:62429:鵱
CJK UNIFIED IDEOGRAPH:'F3DE:62430:鶀
CJK UNIFIED IDEOGRAPH:'F3DF:62431:鵸
CJK UNIFIED IDEOGRAPH:'F3E0:62432:鶆
CJK UNIFIED IDEOGRAPH:'F3E1:62433:鶋
CJK UNIFIED IDEOGRAPH:'F3E2:62434:鶌
CJK UNIFIED IDEOGRAPH:'F3E3:62435:鵽
CJK UNIFIED IDEOGRAPH:'F3E4:62436:鵫
CJK UNIFIED IDEOGRAPH:'F3E5:62437:鵴
CJK UNIFIED IDEOGRAPH:'F3E6:62438:鵵
CJK UNIFIED IDEOGRAPH:'F3E7:62439:鵰
CJK UNIFIED IDEOGRAPH:'F3E8:62440:鵩
CJK UNIFIED IDEOGRAPH:'F3E9:62441:鶅
CJK UNIFIED IDEOGRAPH:'F3EA:62442:鵳
CJK UNIFIED IDEOGRAPH:'F3EB:62443:鵻
CJK UNIFIED IDEOGRAPH:'F3EC:62444:鶂
CJK UNIFIED IDEOGRAPH:'F3ED:62445:鵯
CJK UNIFIED IDEOGRAPH:'F3EE:62446:鵹
CJK UNIFIED IDEOGRAPH:'F3EF:62447:鵿
CJK UNIFIED IDEOGRAPH:'F3F0:62448:鶇
CJK UNIFIED IDEOGRAPH:'F3F1:62449:鵨
CJK UNIFIED IDEOGRAPH:'F3F2:62450:麔
CJK UNIFIED IDEOGRAPH:'F3F3:62451:麑
CJK UNIFIED IDEOGRAPH:'F3F4:62452:黀
CJK UNIFIED IDEOGRAPH:'F3F5:62453:黼
CJK UNIFIED IDEOGRAPH:'F3F6:62454:鼭
CJK UNIFIED IDEOGRAPH:'F3F7:62455:齀
CJK UNIFIED IDEOGRAPH:'F3F8:62456:齁
CJK UNIFIED IDEOGRAPH:'F3F9:62457:齍
CJK UNIFIED IDEOGRAPH:'F3FA:62458:齖
CJK UNIFIED IDEOGRAPH:'F3FB:62459:齗
CJK UNIFIED IDEOGRAPH:'F3FC:62460:齘
CJK UNIFIED IDEOGRAPH:'F3FD:62461:匷
CJK UNIFIED IDEOGRAPH:'F3FE:62462:嚲
CJK UNIFIED IDEOGRAPH:'F440:62528:嚵
CJK UNIFIED IDEOGRAPH:'F441:62529:嚳
CJK UNIFIED IDEOGRAPH:'F442:62530:壣
CJK UNIFIED IDEOGRAPH:'F443:62531:孅
CJK UNIFIED IDEOGRAPH:'F444:62532:巆
CJK UNIFIED IDEOGRAPH:'F445:62533:巇
CJK UNIFIED IDEOGRAPH:'F446:62534:廮
CJK UNIFIED IDEOGRAPH:'F447:62535:廯
CJK UNIFIED IDEOGRAPH:'F448:62536:忀
CJK UNIFIED IDEOGRAPH:'F449:62537:忁
CJK UNIFIED IDEOGRAPH:'F44A:62538:懹
CJK UNIFIED IDEOGRAPH:'F44B:62539:攗
CJK UNIFIED IDEOGRAPH:'F44C:62540:攖
CJK UNIFIED IDEOGRAPH:'F44D:62541:攕
CJK UNIFIED IDEOGRAPH:'F44E:62542:攓
CJK UNIFIED IDEOGRAPH:'F44F:62543:旟
CJK UNIFIED IDEOGRAPH:'F450:62544:曨
CJK UNIFIED IDEOGRAPH:'F451:62545:曣
CJK UNIFIED IDEOGRAPH:'F452:62546:曤
CJK UNIFIED IDEOGRAPH:'F453:62547:櫳
CJK UNIFIED IDEOGRAPH:'F454:62548:櫰
CJK UNIFIED IDEOGRAPH:'F455:62549:櫪
CJK UNIFIED IDEOGRAPH:'F456:62550:櫨
CJK UNIFIED IDEOGRAPH:'F457:62551:櫹
CJK UNIFIED IDEOGRAPH:'F458:62552:櫱
CJK UNIFIED IDEOGRAPH:'F459:62553:櫮
CJK UNIFIED IDEOGRAPH:'F45A:62554:櫯
CJK UNIFIED IDEOGRAPH:'F45B:62555:瀼
CJK UNIFIED IDEOGRAPH:'F45C:62556:瀵
CJK UNIFIED IDEOGRAPH:'F45D:62557:瀯
CJK UNIFIED IDEOGRAPH:'F45E:62558:瀷
CJK UNIFIED IDEOGRAPH:'F45F:62559:瀴
CJK UNIFIED IDEOGRAPH:'F460:62560:瀱
CJK UNIFIED IDEOGRAPH:'F461:62561:灂
CJK UNIFIED IDEOGRAPH:'F462:62562:瀸
CJK UNIFIED IDEOGRAPH:'F463:62563:瀿
CJK UNIFIED IDEOGRAPH:'F464:62564:瀺
CJK UNIFIED IDEOGRAPH:'F465:62565:瀹
CJK UNIFIED IDEOGRAPH:'F466:62566:灀
CJK UNIFIED IDEOGRAPH:'F467:62567:瀻
CJK UNIFIED IDEOGRAPH:'F468:62568:瀳
CJK UNIFIED IDEOGRAPH:'F469:62569:灁
CJK UNIFIED IDEOGRAPH:'F46A:62570:爓
CJK UNIFIED IDEOGRAPH:'F46B:62571:爔
CJK UNIFIED IDEOGRAPH:'F46C:62572:犨
CJK UNIFIED IDEOGRAPH:'F46D:62573:獽
CJK UNIFIED IDEOGRAPH:'F46E:62574:獼
CJK UNIFIED IDEOGRAPH:'F46F:62575:璺
CJK UNIFIED IDEOGRAPH:'F470:62576:皫
CJK UNIFIED IDEOGRAPH:'F471:62577:皪
CJK UNIFIED IDEOGRAPH:'F472:62578:皾
CJK UNIFIED IDEOGRAPH:'F473:62579:盭
CJK UNIFIED IDEOGRAPH:'F474:62580:矌
CJK UNIFIED IDEOGRAPH:'F475:62581:矎
CJK UNIFIED IDEOGRAPH:'F476:62582:矏
CJK UNIFIED IDEOGRAPH:'F477:62583:矍
CJK UNIFIED IDEOGRAPH:'F478:62584:矲
CJK UNIFIED IDEOGRAPH:'F479:62585:礥
CJK UNIFIED IDEOGRAPH:'F47A:62586:礣
CJK UNIFIED IDEOGRAPH:'F47B:62587:礧
CJK UNIFIED IDEOGRAPH:'F47C:62588:礨
CJK UNIFIED IDEOGRAPH:'F47D:62589:礤
CJK UNIFIED IDEOGRAPH:'F47E:62590:礩
CJK UNIFIED IDEOGRAPH:'F4A1:62625:禲
CJK UNIFIED IDEOGRAPH:'F4A2:62626:穮
CJK UNIFIED IDEOGRAPH:'F4A3:62627:穬
CJK UNIFIED IDEOGRAPH:'F4A4:62628:穭
CJK UNIFIED IDEOGRAPH:'F4A5:62629:竷
CJK UNIFIED IDEOGRAPH:'F4A6:62630:籉
CJK UNIFIED IDEOGRAPH:'F4A7:62631:籈
CJK UNIFIED IDEOGRAPH:'F4A8:62632:籊
CJK UNIFIED IDEOGRAPH:'F4A9:62633:籇
CJK UNIFIED IDEOGRAPH:'F4AA:62634:籅
CJK UNIFIED IDEOGRAPH:'F4AB:62635:糮
CJK UNIFIED IDEOGRAPH:'F4AC:62636:繻
CJK UNIFIED IDEOGRAPH:'F4AD:62637:繾
CJK UNIFIED IDEOGRAPH:'F4AE:62638:纁
CJK UNIFIED IDEOGRAPH:'F4AF:62639:纀
CJK UNIFIED IDEOGRAPH:'F4B0:62640:羺
CJK UNIFIED IDEOGRAPH:'F4B1:62641:翿
CJK UNIFIED IDEOGRAPH:'F4B2:62642:聹
CJK UNIFIED IDEOGRAPH:'F4B3:62643:臛
CJK UNIFIED IDEOGRAPH:'F4B4:62644:臙
CJK UNIFIED IDEOGRAPH:'F4B5:62645:舋
CJK UNIFIED IDEOGRAPH:'F4B6:62646:艨
CJK UNIFIED IDEOGRAPH:'F4B7:62647:艩
CJK UNIFIED IDEOGRAPH:'F4B8:62648:蘢
CJK UNIFIED IDEOGRAPH:'F4B9:62649:藿
CJK UNIFIED IDEOGRAPH:'F4BA:62650:蘁
CJK UNIFIED IDEOGRAPH:'F4BB:62651:藾
CJK UNIFIED IDEOGRAPH:'F4BC:62652:蘛
CJK UNIFIED IDEOGRAPH:'F4BD:62653:蘀
CJK UNIFIED IDEOGRAPH:'F4BE:62654:藶
CJK UNIFIED IDEOGRAPH:'F4BF:62655:蘄
CJK UNIFIED IDEOGRAPH:'F4C0:62656:蘉
CJK UNIFIED IDEOGRAPH:'F4C1:62657:蘅
CJK UNIFIED IDEOGRAPH:'F4C2:62658:蘌
CJK UNIFIED IDEOGRAPH:'F4C3:62659:藽
CJK UNIFIED IDEOGRAPH:'F4C4:62660:蠙
CJK UNIFIED IDEOGRAPH:'F4C5:62661:蠐
CJK UNIFIED IDEOGRAPH:'F4C6:62662:蠑
CJK UNIFIED IDEOGRAPH:'F4C7:62663:蠗
CJK UNIFIED IDEOGRAPH:'F4C8:62664:蠓
CJK UNIFIED IDEOGRAPH:'F4C9:62665:蠖
CJK UNIFIED IDEOGRAPH:'F4CA:62666:襣
CJK UNIFIED IDEOGRAPH:'F4CB:62667:襦
CJK UNIFIED IDEOGRAPH:'F4CC:62668:覹
CJK UNIFIED IDEOGRAPH:'F4CD:62669:觷
CJK UNIFIED IDEOGRAPH:'F4CE:62670:譠
CJK UNIFIED IDEOGRAPH:'F4CF:62671:譪
CJK UNIFIED IDEOGRAPH:'F4D0:62672:譝
CJK UNIFIED IDEOGRAPH:'F4D1:62673:譨
CJK UNIFIED IDEOGRAPH:'F4D2:62674:譣
CJK UNIFIED IDEOGRAPH:'F4D3:62675:譥
CJK UNIFIED IDEOGRAPH:'F4D4:62676:譧
CJK UNIFIED IDEOGRAPH:'F4D5:62677:譭
CJK UNIFIED IDEOGRAPH:'F4D6:62678:趮
CJK UNIFIED IDEOGRAPH:'F4D7:62679:躆
CJK UNIFIED IDEOGRAPH:'F4D8:62680:躈
CJK UNIFIED IDEOGRAPH:'F4D9:62681:躄
CJK UNIFIED IDEOGRAPH:'F4DA:62682:轙
CJK UNIFIED IDEOGRAPH:'F4DB:62683:轖
CJK UNIFIED IDEOGRAPH:'F4DC:62684:轗
CJK UNIFIED IDEOGRAPH:'F4DD:62685:轕
CJK UNIFIED IDEOGRAPH:'F4DE:62686:轘
CJK UNIFIED IDEOGRAPH:'F4DF:62687:轚
CJK UNIFIED IDEOGRAPH:'F4E0:62688:邍
CJK UNIFIED IDEOGRAPH:'F4E1:62689:酃
CJK UNIFIED IDEOGRAPH:'F4E2:62690:酁
CJK UNIFIED IDEOGRAPH:'F4E3:62691:醷
CJK UNIFIED IDEOGRAPH:'F4E4:62692:醵
CJK UNIFIED IDEOGRAPH:'F4E5:62693:醲
CJK UNIFIED IDEOGRAPH:'F4E6:62694:醳
CJK UNIFIED IDEOGRAPH:'F4E7:62695:鐋
CJK UNIFIED IDEOGRAPH:'F4E8:62696:鐓
CJK UNIFIED IDEOGRAPH:'F4E9:62697:鏻
CJK UNIFIED IDEOGRAPH:'F4EA:62698:鐠
CJK UNIFIED IDEOGRAPH:'F4EB:62699:鐏
CJK UNIFIED IDEOGRAPH:'F4EC:62700:鐔
CJK UNIFIED IDEOGRAPH:'F4ED:62701:鏾
CJK UNIFIED IDEOGRAPH:'F4EE:62702:鐕
CJK UNIFIED IDEOGRAPH:'F4EF:62703:鐐
CJK UNIFIED IDEOGRAPH:'F4F0:62704:鐨
CJK UNIFIED IDEOGRAPH:'F4F1:62705:鐙
CJK UNIFIED IDEOGRAPH:'F4F2:62706:鐍
CJK UNIFIED IDEOGRAPH:'F4F3:62707:鏵
CJK UNIFIED IDEOGRAPH:'F4F4:62708:鐀
CJK UNIFIED IDEOGRAPH:'F4F5:62709:鏷
CJK UNIFIED IDEOGRAPH:'F4F6:62710:鐇
CJK UNIFIED IDEOGRAPH:'F4F7:62711:鐎
CJK UNIFIED IDEOGRAPH:'F4F8:62712:鐖
CJK UNIFIED IDEOGRAPH:'F4F9:62713:鐒
CJK UNIFIED IDEOGRAPH:'F4FA:62714:鏺
CJK UNIFIED IDEOGRAPH:'F4FB:62715:鐉
CJK UNIFIED IDEOGRAPH:'F4FC:62716:鏸
CJK UNIFIED IDEOGRAPH:'F4FD:62717:鐊
CJK UNIFIED IDEOGRAPH:'F4FE:62718:鏿
CJK UNIFIED IDEOGRAPH:'F540:62784:鏼
CJK UNIFIED IDEOGRAPH:'F541:62785:鐌
CJK UNIFIED IDEOGRAPH:'F542:62786:鏶
CJK UNIFIED IDEOGRAPH:'F543:62787:鐑
CJK UNIFIED IDEOGRAPH:'F544:62788:鐆
CJK UNIFIED IDEOGRAPH:'F545:62789:闞
CJK UNIFIED IDEOGRAPH:'F546:62790:闠
CJK UNIFIED IDEOGRAPH:'F547:62791:闟
CJK UNIFIED IDEOGRAPH:'F548:62792:霮
CJK UNIFIED IDEOGRAPH:'F549:62793:霯
CJK UNIFIED IDEOGRAPH:'F54A:62794:鞹
CJK UNIFIED IDEOGRAPH:'F54B:62795:鞻
CJK UNIFIED IDEOGRAPH:'F54C:62796:韽
CJK UNIFIED IDEOGRAPH:'F54D:62797:韾
CJK UNIFIED IDEOGRAPH:'F54E:62798:顠
CJK UNIFIED IDEOGRAPH:'F54F:62799:顢
CJK UNIFIED IDEOGRAPH:'F550:62800:顣
CJK UNIFIED IDEOGRAPH:'F551:62801:顟
CJK UNIFIED IDEOGRAPH:'F552:62802:飁
CJK UNIFIED IDEOGRAPH:'F553:62803:飂
CJK UNIFIED IDEOGRAPH:'F554:62804:饐
CJK UNIFIED IDEOGRAPH:'F555:62805:饎
CJK UNIFIED IDEOGRAPH:'F556:62806:饙
CJK UNIFIED IDEOGRAPH:'F557:62807:饌
CJK UNIFIED IDEOGRAPH:'F558:62808:饋
CJK UNIFIED IDEOGRAPH:'F559:62809:饓
CJK UNIFIED IDEOGRAPH:'F55A:62810:騲
CJK UNIFIED IDEOGRAPH:'F55B:62811:騴
CJK UNIFIED IDEOGRAPH:'F55C:62812:騱
CJK UNIFIED IDEOGRAPH:'F55D:62813:騬
CJK UNIFIED IDEOGRAPH:'F55E:62814:騪
CJK UNIFIED IDEOGRAPH:'F55F:62815:騶
CJK UNIFIED IDEOGRAPH:'F560:62816:騩
CJK UNIFIED IDEOGRAPH:'F561:62817:騮
CJK UNIFIED IDEOGRAPH:'F562:62818:騸
CJK UNIFIED IDEOGRAPH:'F563:62819:騭
CJK UNIFIED IDEOGRAPH:'F564:62820:髇
CJK UNIFIED IDEOGRAPH:'F565:62821:髊
CJK UNIFIED IDEOGRAPH:'F566:62822:髆
CJK UNIFIED IDEOGRAPH:'F567:62823:鬐
CJK UNIFIED IDEOGRAPH:'F568:62824:鬒
CJK UNIFIED IDEOGRAPH:'F569:62825:鬑
CJK UNIFIED IDEOGRAPH:'F56A:62826:鰋
CJK UNIFIED IDEOGRAPH:'F56B:62827:鰈
CJK UNIFIED IDEOGRAPH:'F56C:62828:鯷
CJK UNIFIED IDEOGRAPH:'F56D:62829:鰅
CJK UNIFIED IDEOGRAPH:'F56E:62830:鰒
CJK UNIFIED IDEOGRAPH:'F56F:62831:鯸
CJK UNIFIED IDEOGRAPH:'F570:62832:鱀
CJK UNIFIED IDEOGRAPH:'F571:62833:鰇
CJK UNIFIED IDEOGRAPH:'F572:62834:鰎
CJK UNIFIED IDEOGRAPH:'F573:62835:鰆
CJK UNIFIED IDEOGRAPH:'F574:62836:鰗
CJK UNIFIED IDEOGRAPH:'F575:62837:鰔
CJK UNIFIED IDEOGRAPH:'F576:62838:鰉
CJK UNIFIED IDEOGRAPH:'F577:62839:鶟
CJK UNIFIED IDEOGRAPH:'F578:62840:鶙
CJK UNIFIED IDEOGRAPH:'F579:62841:鶤
CJK UNIFIED IDEOGRAPH:'F57A:62842:鶝
CJK UNIFIED IDEOGRAPH:'F57B:62843:鶒
CJK UNIFIED IDEOGRAPH:'F57C:62844:鶘
CJK UNIFIED IDEOGRAPH:'F57D:62845:鶐
CJK UNIFIED IDEOGRAPH:'F57E:62846:鶛
CJK UNIFIED IDEOGRAPH:'F5A1:62881:鶠
CJK UNIFIED IDEOGRAPH:'F5A2:62882:鶔
CJK UNIFIED IDEOGRAPH:'F5A3:62883:鶜
CJK UNIFIED IDEOGRAPH:'F5A4:62884:鶪
CJK UNIFIED IDEOGRAPH:'F5A5:62885:鶗
CJK UNIFIED IDEOGRAPH:'F5A6:62886:鶡
CJK UNIFIED IDEOGRAPH:'F5A7:62887:鶚
CJK UNIFIED IDEOGRAPH:'F5A8:62888:鶢
CJK UNIFIED IDEOGRAPH:'F5A9:62889:鶨
CJK UNIFIED IDEOGRAPH:'F5AA:62890:鶞
CJK UNIFIED IDEOGRAPH:'F5AB:62891:鶣
CJK UNIFIED IDEOGRAPH:'F5AC:62892:鶿
CJK UNIFIED IDEOGRAPH:'F5AD:62893:鶩
CJK UNIFIED IDEOGRAPH:'F5AE:62894:鶖
CJK UNIFIED IDEOGRAPH:'F5AF:62895:鶦
CJK UNIFIED IDEOGRAPH:'F5B0:62896:鶧
CJK UNIFIED IDEOGRAPH:'F5B1:62897:麙
CJK UNIFIED IDEOGRAPH:'F5B2:62898:麛
CJK UNIFIED IDEOGRAPH:'F5B3:62899:麚
CJK UNIFIED IDEOGRAPH:'F5B4:62900:黥
CJK UNIFIED IDEOGRAPH:'F5B5:62901:黤
CJK UNIFIED IDEOGRAPH:'F5B6:62902:黧
CJK UNIFIED IDEOGRAPH:'F5B7:62903:黦
CJK UNIFIED IDEOGRAPH:'F5B8:62904:鼰
CJK UNIFIED IDEOGRAPH:'F5B9:62905:鼮
CJK UNIFIED IDEOGRAPH:'F5BA:62906:齛
CJK UNIFIED IDEOGRAPH:'F5BB:62907:齠
CJK UNIFIED IDEOGRAPH:'F5BC:62908:齞
CJK UNIFIED IDEOGRAPH:'F5BD:62909:齝
CJK UNIFIED IDEOGRAPH:'F5BE:62910:齙
CJK UNIFIED IDEOGRAPH:'F5BF:62911:龑
CJK UNIFIED IDEOGRAPH:'F5C0:62912:儺
CJK UNIFIED IDEOGRAPH:'F5C1:62913:儹
CJK UNIFIED IDEOGRAPH:'F5C2:62914:劘
CJK UNIFIED IDEOGRAPH:'F5C3:62915:劗
CJK UNIFIED IDEOGRAPH:'F5C4:62916:囃
CJK UNIFIED IDEOGRAPH:'F5C5:62917:嚽
CJK UNIFIED IDEOGRAPH:'F5C6:62918:嚾
CJK UNIFIED IDEOGRAPH:'F5C7:62919:孈
CJK UNIFIED IDEOGRAPH:'F5C8:62920:孇
CJK UNIFIED IDEOGRAPH:'F5C9:62921:巋
CJK UNIFIED IDEOGRAPH:'F5CA:62922:巏
CJK UNIFIED IDEOGRAPH:'F5CB:62923:廱
CJK UNIFIED IDEOGRAPH:'F5CC:62924:懽
CJK UNIFIED IDEOGRAPH:'F5CD:62925:攛
CJK UNIFIED IDEOGRAPH:'F5CE:62926:欂
CJK UNIFIED IDEOGRAPH:'F5CF:62927:櫼
CJK UNIFIED IDEOGRAPH:'F5D0:62928:欃
CJK UNIFIED IDEOGRAPH:'F5D1:62929:櫸
CJK UNIFIED IDEOGRAPH:'F5D2:62930:欀
CJK UNIFIED IDEOGRAPH:'F5D3:62931:灃
CJK UNIFIED IDEOGRAPH:'F5D4:62932:灄
CJK UNIFIED IDEOGRAPH:'F5D5:62933:灊
CJK UNIFIED IDEOGRAPH:'F5D6:62934:灈
CJK UNIFIED IDEOGRAPH:'F5D7:62935:灉
CJK UNIFIED IDEOGRAPH:'F5D8:62936:灅
CJK UNIFIED IDEOGRAPH:'F5D9:62937:灆
CJK UNIFIED IDEOGRAPH:'F5DA:62938:爝
CJK UNIFIED IDEOGRAPH:'F5DB:62939:爚
CJK UNIFIED IDEOGRAPH:'F5DC:62940:爙
CJK UNIFIED IDEOGRAPH:'F5DD:62941:獾
CJK UNIFIED IDEOGRAPH:'F5DE:62942:甗
CJK UNIFIED IDEOGRAPH:'F5DF:62943:癪
CJK UNIFIED IDEOGRAPH:'F5E0:62944:矐
CJK UNIFIED IDEOGRAPH:'F5E1:62945:礭
CJK UNIFIED IDEOGRAPH:'F5E2:62946:礱
CJK UNIFIED IDEOGRAPH:'F5E3:62947:礯
CJK UNIFIED IDEOGRAPH:'F5E4:62948:籔
CJK UNIFIED IDEOGRAPH:'F5E5:62949:籓
CJK UNIFIED IDEOGRAPH:'F5E6:62950:糲
CJK UNIFIED IDEOGRAPH:'F5E7:62951:纊
CJK UNIFIED IDEOGRAPH:'F5E8:62952:纇
CJK UNIFIED IDEOGRAPH:'F5E9:62953:纈
CJK UNIFIED IDEOGRAPH:'F5EA:62954:纋
CJK UNIFIED IDEOGRAPH:'F5EB:62955:纆
CJK UNIFIED IDEOGRAPH:'F5EC:62956:纍
CJK UNIFIED IDEOGRAPH:'F5ED:62957:罍
CJK UNIFIED IDEOGRAPH:'F5EE:62958:羻
CJK UNIFIED IDEOGRAPH:'F5EF:62959:耰
CJK UNIFIED IDEOGRAPH:'F5F0:62960:臝
CJK UNIFIED IDEOGRAPH:'F5F1:62961:蘘
CJK UNIFIED IDEOGRAPH:'F5F2:62962:蘪
CJK UNIFIED IDEOGRAPH:'F5F3:62963:蘦
CJK UNIFIED IDEOGRAPH:'F5F4:62964:蘟
CJK UNIFIED IDEOGRAPH:'F5F5:62965:蘣
CJK UNIFIED IDEOGRAPH:'F5F6:62966:蘜
CJK UNIFIED IDEOGRAPH:'F5F7:62967:蘙
CJK UNIFIED IDEOGRAPH:'F5F8:62968:蘧
CJK UNIFIED IDEOGRAPH:'F5F9:62969:蘮
CJK UNIFIED IDEOGRAPH:'F5FA:62970:蘡
CJK UNIFIED IDEOGRAPH:'F5FB:62971:蘠
CJK UNIFIED IDEOGRAPH:'F5FC:62972:蘩
CJK UNIFIED IDEOGRAPH:'F5FD:62973:蘞
CJK UNIFIED IDEOGRAPH:'F5FE:62974:蘥
CJK UNIFIED IDEOGRAPH:'F640:63040:蠩
CJK UNIFIED IDEOGRAPH:'F641:63041:蠝
CJK UNIFIED IDEOGRAPH:'F642:63042:蠛
CJK UNIFIED IDEOGRAPH:'F643:63043:蠠
CJK UNIFIED IDEOGRAPH:'F644:63044:蠤
CJK UNIFIED IDEOGRAPH:'F645:63045:蠜
CJK UNIFIED IDEOGRAPH:'F646:63046:蠫
CJK UNIFIED IDEOGRAPH:'F647:63047:衊
CJK UNIFIED IDEOGRAPH:'F648:63048:襭
CJK UNIFIED IDEOGRAPH:'F649:63049:襩
CJK UNIFIED IDEOGRAPH:'F64A:63050:襮
CJK UNIFIED IDEOGRAPH:'F64B:63051:襫
CJK UNIFIED IDEOGRAPH:'F64C:63052:觺
CJK UNIFIED IDEOGRAPH:'F64D:63053:譹
CJK UNIFIED IDEOGRAPH:'F64E:63054:譸
CJK UNIFIED IDEOGRAPH:'F64F:63055:譅
CJK UNIFIED IDEOGRAPH:'F650:63056:譺
CJK UNIFIED IDEOGRAPH:'F651:63057:譻
CJK UNIFIED IDEOGRAPH:'F652:63058:贐
CJK UNIFIED IDEOGRAPH:'F653:63059:贔
CJK UNIFIED IDEOGRAPH:'F654:63060:趯
CJK UNIFIED IDEOGRAPH:'F655:63061:躎
CJK UNIFIED IDEOGRAPH:'F656:63062:躌
CJK UNIFIED IDEOGRAPH:'F657:63063:轞
CJK UNIFIED IDEOGRAPH:'F658:63064:轛
CJK UNIFIED IDEOGRAPH:'F659:63065:轝
CJK UNIFIED IDEOGRAPH:'F65A:63066:酆
CJK UNIFIED IDEOGRAPH:'F65B:63067:酄
CJK UNIFIED IDEOGRAPH:'F65C:63068:酅
CJK UNIFIED IDEOGRAPH:'F65D:63069:醹
CJK UNIFIED IDEOGRAPH:'F65E:63070:鐿
CJK UNIFIED IDEOGRAPH:'F65F:63071:鐻
CJK UNIFIED IDEOGRAPH:'F660:63072:鐶
CJK UNIFIED IDEOGRAPH:'F661:63073:鐩
CJK UNIFIED IDEOGRAPH:'F662:63074:鐽
CJK UNIFIED IDEOGRAPH:'F663:63075:鐼
CJK UNIFIED IDEOGRAPH:'F664:63076:鐰
CJK UNIFIED IDEOGRAPH:'F665:63077:鐹
CJK UNIFIED IDEOGRAPH:'F666:63078:鐪
CJK UNIFIED IDEOGRAPH:'F667:63079:鐷
CJK UNIFIED IDEOGRAPH:'F668:63080:鐬
CJK UNIFIED IDEOGRAPH:'F669:63081:鑀
CJK UNIFIED IDEOGRAPH:'F66A:63082:鐱
CJK UNIFIED IDEOGRAPH:'F66B:63083:闥
CJK UNIFIED IDEOGRAPH:'F66C:63084:闤
CJK UNIFIED IDEOGRAPH:'F66D:63085:闣
CJK UNIFIED IDEOGRAPH:'F66E:63086:霵
CJK UNIFIED IDEOGRAPH:'F66F:63087:霺
CJK UNIFIED IDEOGRAPH:'F670:63088:鞿
CJK UNIFIED IDEOGRAPH:'F671:63089:韡
CJK UNIFIED IDEOGRAPH:'F672:63090:顤
CJK UNIFIED IDEOGRAPH:'F673:63091:飉
CJK UNIFIED IDEOGRAPH:'F674:63092:飆
CJK UNIFIED IDEOGRAPH:'F675:63093:飀
CJK UNIFIED IDEOGRAPH:'F676:63094:饘
CJK UNIFIED IDEOGRAPH:'F677:63095:饖
CJK UNIFIED IDEOGRAPH:'F678:63096:騹
CJK UNIFIED IDEOGRAPH:'F679:63097:騽
CJK UNIFIED IDEOGRAPH:'F67A:63098:驆
CJK UNIFIED IDEOGRAPH:'F67B:63099:驄
CJK UNIFIED IDEOGRAPH:'F67C:63100:驂
CJK UNIFIED IDEOGRAPH:'F67D:63101:驁
CJK UNIFIED IDEOGRAPH:'F67E:63102:騺
CJK UNIFIED IDEOGRAPH:'F6A1:63137:騿
CJK UNIFIED IDEOGRAPH:'F6A2:63138:髍
CJK UNIFIED IDEOGRAPH:'F6A3:63139:鬕
CJK UNIFIED IDEOGRAPH:'F6A4:63140:鬗
CJK UNIFIED IDEOGRAPH:'F6A5:63141:鬘
CJK UNIFIED IDEOGRAPH:'F6A6:63142:鬖
CJK UNIFIED IDEOGRAPH:'F6A7:63143:鬺
CJK UNIFIED IDEOGRAPH:'F6A8:63144:魒
CJK UNIFIED IDEOGRAPH:'F6A9:63145:鰫
CJK UNIFIED IDEOGRAPH:'F6AA:63146:鰝
CJK UNIFIED IDEOGRAPH:'F6AB:63147:鰜
CJK UNIFIED IDEOGRAPH:'F6AC:63148:鰬
CJK UNIFIED IDEOGRAPH:'F6AD:63149:鰣
CJK UNIFIED IDEOGRAPH:'F6AE:63150:鰨
CJK UNIFIED IDEOGRAPH:'F6AF:63151:鰩
CJK UNIFIED IDEOGRAPH:'F6B0:63152:鰤
CJK UNIFIED IDEOGRAPH:'F6B1:63153:鰡
CJK UNIFIED IDEOGRAPH:'F6B2:63154:鶷
CJK UNIFIED IDEOGRAPH:'F6B3:63155:鶶
CJK UNIFIED IDEOGRAPH:'F6B4:63156:鶼
CJK UNIFIED IDEOGRAPH:'F6B5:63157:鷁
CJK UNIFIED IDEOGRAPH:'F6B6:63158:鷇
CJK UNIFIED IDEOGRAPH:'F6B7:63159:鷊
CJK UNIFIED IDEOGRAPH:'F6B8:63160:鷏
CJK UNIFIED IDEOGRAPH:'F6B9:63161:鶾
CJK UNIFIED IDEOGRAPH:'F6BA:63162:鷅
CJK UNIFIED IDEOGRAPH:'F6BB:63163:鷃
CJK UNIFIED IDEOGRAPH:'F6BC:63164:鶻
CJK UNIFIED IDEOGRAPH:'F6BD:63165:鶵
CJK UNIFIED IDEOGRAPH:'F6BE:63166:鷎
CJK UNIFIED IDEOGRAPH:'F6BF:63167:鶹
CJK UNIFIED IDEOGRAPH:'F6C0:63168:鶺
CJK UNIFIED IDEOGRAPH:'F6C1:63169:鶬
CJK UNIFIED IDEOGRAPH:'F6C2:63170:鷈
CJK UNIFIED IDEOGRAPH:'F6C3:63171:鶱
CJK UNIFIED IDEOGRAPH:'F6C4:63172:鶭
CJK UNIFIED IDEOGRAPH:'F6C5:63173:鷌
CJK UNIFIED IDEOGRAPH:'F6C6:63174:鶳
CJK UNIFIED IDEOGRAPH:'F6C7:63175:鷍
CJK UNIFIED IDEOGRAPH:'F6C8:63176:鶲
CJK UNIFIED IDEOGRAPH:'F6C9:63177:鹺
CJK UNIFIED IDEOGRAPH:'F6CA:63178:麜
CJK UNIFIED IDEOGRAPH:'F6CB:63179:黫
CJK UNIFIED IDEOGRAPH:'F6CC:63180:黮
CJK UNIFIED IDEOGRAPH:'F6CD:63181:黭
CJK UNIFIED IDEOGRAPH:'F6CE:63182:鼛
CJK UNIFIED IDEOGRAPH:'F6CF:63183:鼘
CJK UNIFIED IDEOGRAPH:'F6D0:63184:鼚
CJK UNIFIED IDEOGRAPH:'F6D1:63185:鼱
CJK UNIFIED IDEOGRAPH:'F6D2:63186:齎
CJK UNIFIED IDEOGRAPH:'F6D3:63187:齥
CJK UNIFIED IDEOGRAPH:'F6D4:63188:齤
CJK UNIFIED IDEOGRAPH:'F6D5:63189:龒
CJK UNIFIED IDEOGRAPH:'F6D6:63190:亹
CJK UNIFIED IDEOGRAPH:'F6D7:63191:囆
CJK UNIFIED IDEOGRAPH:'F6D8:63192:囅
CJK UNIFIED IDEOGRAPH:'F6D9:63193:囋
CJK UNIFIED IDEOGRAPH:'F6DA:63194:奱
CJK UNIFIED IDEOGRAPH:'F6DB:63195:孋
CJK UNIFIED IDEOGRAPH:'F6DC:63196:孌
CJK UNIFIED IDEOGRAPH:'F6DD:63197:巕
CJK UNIFIED IDEOGRAPH:'F6DE:63198:巑
CJK UNIFIED IDEOGRAPH:'F6DF:63199:廲
CJK UNIFIED IDEOGRAPH:'F6E0:63200:攡
CJK UNIFIED IDEOGRAPH:'F6E1:63201:攠
CJK UNIFIED IDEOGRAPH:'F6E2:63202:攦
CJK UNIFIED IDEOGRAPH:'F6E3:63203:攢
CJK UNIFIED IDEOGRAPH:'F6E4:63204:欋
CJK UNIFIED IDEOGRAPH:'F6E5:63205:欈
CJK UNIFIED IDEOGRAPH:'F6E6:63206:欉
CJK UNIFIED IDEOGRAPH:'F6E7:63207:氍
CJK UNIFIED IDEOGRAPH:'F6E8:63208:灕
CJK UNIFIED IDEOGRAPH:'F6E9:63209:灖
CJK UNIFIED IDEOGRAPH:'F6EA:63210:灗
CJK UNIFIED IDEOGRAPH:'F6EB:63211:灒
CJK UNIFIED IDEOGRAPH:'F6EC:63212:爞
CJK UNIFIED IDEOGRAPH:'F6ED:63213:爟
CJK UNIFIED IDEOGRAPH:'F6EE:63214:犩
CJK UNIFIED IDEOGRAPH:'F6EF:63215:獿
CJK UNIFIED IDEOGRAPH:'F6F0:63216:瓘
CJK UNIFIED IDEOGRAPH:'F6F1:63217:瓕
CJK UNIFIED IDEOGRAPH:'F6F2:63218:瓙
CJK UNIFIED IDEOGRAPH:'F6F3:63219:瓗
CJK UNIFIED IDEOGRAPH:'F6F4:63220:癭
CJK UNIFIED IDEOGRAPH:'F6F5:63221:皭
CJK UNIFIED IDEOGRAPH:'F6F6:63222:礵
CJK UNIFIED IDEOGRAPH:'F6F7:63223:禴
CJK UNIFIED IDEOGRAPH:'F6F8:63224:穰
CJK UNIFIED IDEOGRAPH:'F6F9:63225:穱
CJK UNIFIED IDEOGRAPH:'F6FA:63226:籗
CJK UNIFIED IDEOGRAPH:'F6FB:63227:籜
CJK UNIFIED IDEOGRAPH:'F6FC:63228:籙
CJK UNIFIED IDEOGRAPH:'F6FD:63229:籛
CJK UNIFIED IDEOGRAPH:'F6FE:63230:籚
CJK UNIFIED IDEOGRAPH:'F740:63296:糴
CJK UNIFIED IDEOGRAPH:'F741:63297:糱
CJK UNIFIED IDEOGRAPH:'F742:63298:纑
CJK UNIFIED IDEOGRAPH:'F743:63299:罏
CJK UNIFIED IDEOGRAPH:'F744:63300:羇
CJK UNIFIED IDEOGRAPH:'F745:63301:臞
CJK UNIFIED IDEOGRAPH:'F746:63302:艫
CJK UNIFIED IDEOGRAPH:'F747:63303:蘴
CJK UNIFIED IDEOGRAPH:'F748:63304:蘵
CJK UNIFIED IDEOGRAPH:'F749:63305:蘳
CJK UNIFIED IDEOGRAPH:'F74A:63306:蘬
CJK UNIFIED IDEOGRAPH:'F74B:63307:蘲
CJK UNIFIED IDEOGRAPH:'F74C:63308:蘶
CJK UNIFIED IDEOGRAPH:'F74D:63309:蠬
CJK UNIFIED IDEOGRAPH:'F74E:63310:蠨
CJK UNIFIED IDEOGRAPH:'F74F:63311:蠦
CJK UNIFIED IDEOGRAPH:'F750:63312:蠪
CJK UNIFIED IDEOGRAPH:'F751:63313:蠥
CJK UNIFIED IDEOGRAPH:'F752:63314:襱
CJK UNIFIED IDEOGRAPH:'F753:63315:覿
CJK UNIFIED IDEOGRAPH:'F754:63316:覾
CJK UNIFIED IDEOGRAPH:'F755:63317:觻
CJK UNIFIED IDEOGRAPH:'F756:63318:譾
CJK UNIFIED IDEOGRAPH:'F757:63319:讄
CJK UNIFIED IDEOGRAPH:'F758:63320:讂
CJK UNIFIED IDEOGRAPH:'F759:63321:讆
CJK UNIFIED IDEOGRAPH:'F75A:63322:讅
CJK UNIFIED IDEOGRAPH:'F75B:63323:譿
CJK UNIFIED IDEOGRAPH:'F75C:63324:贕
CJK UNIFIED IDEOGRAPH:'F75D:63325:躕
CJK UNIFIED IDEOGRAPH:'F75E:63326:躔
CJK UNIFIED IDEOGRAPH:'F75F:63327:躚
CJK UNIFIED IDEOGRAPH:'F760:63328:躒
CJK UNIFIED IDEOGRAPH:'F761:63329:躐
CJK UNIFIED IDEOGRAPH:'F762:63330:躖
CJK UNIFIED IDEOGRAPH:'F763:63331:躗
CJK UNIFIED IDEOGRAPH:'F764:63332:轠
CJK UNIFIED IDEOGRAPH:'F765:63333:轢
CJK UNIFIED IDEOGRAPH:'F766:63334:酇
CJK UNIFIED IDEOGRAPH:'F767:63335:鑌
CJK UNIFIED IDEOGRAPH:'F768:63336:鑐
CJK UNIFIED IDEOGRAPH:'F769:63337:鑊
CJK UNIFIED IDEOGRAPH:'F76A:63338:鑋
CJK UNIFIED IDEOGRAPH:'F76B:63339:鑏
CJK UNIFIED IDEOGRAPH:'F76C:63340:鑇
CJK UNIFIED IDEOGRAPH:'F76D:63341:鑅
CJK UNIFIED IDEOGRAPH:'F76E:63342:鑈
CJK UNIFIED IDEOGRAPH:'F76F:63343:鑉
CJK UNIFIED IDEOGRAPH:'F770:63344:鑆
CJK UNIFIED IDEOGRAPH:'F771:63345:霿
CJK UNIFIED IDEOGRAPH:'F772:63346:韣
CJK UNIFIED IDEOGRAPH:'F773:63347:顪
CJK UNIFIED IDEOGRAPH:'F774:63348:顩
CJK UNIFIED IDEOGRAPH:'F775:63349:飋
CJK UNIFIED IDEOGRAPH:'F776:63350:饔
CJK UNIFIED IDEOGRAPH:'F777:63351:饛
CJK UNIFIED IDEOGRAPH:'F778:63352:驎
CJK UNIFIED IDEOGRAPH:'F779:63353:驓
CJK UNIFIED IDEOGRAPH:'F77A:63354:驔
CJK UNIFIED IDEOGRAPH:'F77B:63355:驌
CJK UNIFIED IDEOGRAPH:'F77C:63356:驏
CJK UNIFIED IDEOGRAPH:'F77D:63357:驈
CJK UNIFIED IDEOGRAPH:'F77E:63358:驊
CJK UNIFIED IDEOGRAPH:'F7A1:63393:驉
CJK UNIFIED IDEOGRAPH:'F7A2:63394:驒
CJK UNIFIED IDEOGRAPH:'F7A3:63395:驐
CJK UNIFIED IDEOGRAPH:'F7A4:63396:髐
CJK UNIFIED IDEOGRAPH:'F7A5:63397:鬙
CJK UNIFIED IDEOGRAPH:'F7A6:63398:鬫
CJK UNIFIED IDEOGRAPH:'F7A7:63399:鬻
CJK UNIFIED IDEOGRAPH:'F7A8:63400:魖
CJK UNIFIED IDEOGRAPH:'F7A9:63401:魕
CJK UNIFIED IDEOGRAPH:'F7AA:63402:鱆
CJK UNIFIED IDEOGRAPH:'F7AB:63403:鱈
CJK UNIFIED IDEOGRAPH:'F7AC:63404:鰿
CJK UNIFIED IDEOGRAPH:'F7AD:63405:鱄
CJK UNIFIED IDEOGRAPH:'F7AE:63406:鰹
CJK UNIFIED IDEOGRAPH:'F7AF:63407:鰳
CJK UNIFIED IDEOGRAPH:'F7B0:63408:鱁
CJK UNIFIED IDEOGRAPH:'F7B1:63409:鰼
CJK UNIFIED IDEOGRAPH:'F7B2:63410:鰷
CJK UNIFIED IDEOGRAPH:'F7B3:63411:鰴
CJK UNIFIED IDEOGRAPH:'F7B4:63412:鰲
CJK UNIFIED IDEOGRAPH:'F7B5:63413:鰽
CJK UNIFIED IDEOGRAPH:'F7B6:63414:鰶
CJK UNIFIED IDEOGRAPH:'F7B7:63415:鷛
CJK UNIFIED IDEOGRAPH:'F7B8:63416:鷒
CJK UNIFIED IDEOGRAPH:'F7B9:63417:鷞
CJK UNIFIED IDEOGRAPH:'F7BA:63418:鷚
CJK UNIFIED IDEOGRAPH:'F7BB:63419:鷋
CJK UNIFIED IDEOGRAPH:'F7BC:63420:鷐
CJK UNIFIED IDEOGRAPH:'F7BD:63421:鷜
CJK UNIFIED IDEOGRAPH:'F7BE:63422:鷑
CJK UNIFIED IDEOGRAPH:'F7BF:63423:鷟
CJK UNIFIED IDEOGRAPH:'F7C0:63424:鷩
CJK UNIFIED IDEOGRAPH:'F7C1:63425:鷙
CJK UNIFIED IDEOGRAPH:'F7C2:63426:鷘
CJK UNIFIED IDEOGRAPH:'F7C3:63427:鷖
CJK UNIFIED IDEOGRAPH:'F7C4:63428:鷵
CJK UNIFIED IDEOGRAPH:'F7C5:63429:鷕
CJK UNIFIED IDEOGRAPH:'F7C6:63430:鷝
CJK UNIFIED IDEOGRAPH:'F7C7:63431:麶
CJK UNIFIED IDEOGRAPH:'F7C8:63432:黰
CJK UNIFIED IDEOGRAPH:'F7C9:63433:鼵
CJK UNIFIED IDEOGRAPH:'F7CA:63434:鼳
CJK UNIFIED IDEOGRAPH:'F7CB:63435:鼲
CJK UNIFIED IDEOGRAPH:'F7CC:63436:齂
CJK UNIFIED IDEOGRAPH:'F7CD:63437:齫
CJK UNIFIED IDEOGRAPH:'F7CE:63438:龕
CJK UNIFIED IDEOGRAPH:'F7CF:63439:龢
CJK UNIFIED IDEOGRAPH:'F7D0:63440:儽
CJK UNIFIED IDEOGRAPH:'F7D1:63441:劙
CJK UNIFIED IDEOGRAPH:'F7D2:63442:壨
CJK UNIFIED IDEOGRAPH:'F7D3:63443:壧
CJK UNIFIED IDEOGRAPH:'F7D4:63444:奲
CJK UNIFIED IDEOGRAPH:'F7D5:63445:孍
CJK UNIFIED IDEOGRAPH:'F7D6:63446:巘
CJK UNIFIED IDEOGRAPH:'F7D7:63447:蠯
CJK UNIFIED IDEOGRAPH:'F7D8:63448:彏
CJK UNIFIED IDEOGRAPH:'F7D9:63449:戁
CJK UNIFIED IDEOGRAPH:'F7DA:63450:戃
CJK UNIFIED IDEOGRAPH:'F7DB:63451:戄
CJK UNIFIED IDEOGRAPH:'F7DC:63452:攩
CJK UNIFIED IDEOGRAPH:'F7DD:63453:攥
CJK UNIFIED IDEOGRAPH:'F7DE:63454:斖
CJK UNIFIED IDEOGRAPH:'F7DF:63455:曫
CJK UNIFIED IDEOGRAPH:'F7E0:63456:欑
CJK UNIFIED IDEOGRAPH:'F7E1:63457:欒
CJK UNIFIED IDEOGRAPH:'F7E2:63458:欏
CJK UNIFIED IDEOGRAPH:'F7E3:63459:毊
CJK UNIFIED IDEOGRAPH:'F7E4:63460:灛
CJK UNIFIED IDEOGRAPH:'F7E5:63461:灚
CJK UNIFIED IDEOGRAPH:'F7E6:63462:爢
CJK UNIFIED IDEOGRAPH:'F7E7:63463:玂
CJK UNIFIED IDEOGRAPH:'F7E8:63464:玁
CJK UNIFIED IDEOGRAPH:'F7E9:63465:玃
CJK UNIFIED IDEOGRAPH:'F7EA:63466:癰
CJK UNIFIED IDEOGRAPH:'F7EB:63467:矔
CJK UNIFIED IDEOGRAPH:'F7EC:63468:籧
CJK UNIFIED IDEOGRAPH:'F7ED:63469:籦
CJK UNIFIED IDEOGRAPH:'F7EE:63470:纕
CJK UNIFIED IDEOGRAPH:'F7EF:63471:艬
CJK UNIFIED IDEOGRAPH:'F7F0:63472:蘺
CJK UNIFIED IDEOGRAPH:'F7F1:63473:虀
CJK UNIFIED IDEOGRAPH:'F7F2:63474:蘹
CJK UNIFIED IDEOGRAPH:'F7F3:63475:蘼
CJK UNIFIED IDEOGRAPH:'F7F4:63476:蘱
CJK UNIFIED IDEOGRAPH:'F7F5:63477:蘻
CJK UNIFIED IDEOGRAPH:'F7F6:63478:蘾
CJK UNIFIED IDEOGRAPH:'F7F7:63479:蠰
CJK UNIFIED IDEOGRAPH:'F7F8:63480:蠲
CJK UNIFIED IDEOGRAPH:'F7F9:63481:蠮
CJK UNIFIED IDEOGRAPH:'F7FA:63482:蠳
CJK UNIFIED IDEOGRAPH:'F7FB:63483:襶
CJK UNIFIED IDEOGRAPH:'F7FC:63484:襴
CJK UNIFIED IDEOGRAPH:'F7FD:63485:襳
CJK UNIFIED IDEOGRAPH:'F7FE:63486:觾
CJK UNIFIED IDEOGRAPH:'F840:63552:讌
CJK UNIFIED IDEOGRAPH:'F841:63553:讎
CJK UNIFIED IDEOGRAPH:'F842:63554:讋
CJK UNIFIED IDEOGRAPH:'F843:63555:讈
CJK UNIFIED IDEOGRAPH:'F844:63556:豅
CJK UNIFIED IDEOGRAPH:'F845:63557:贙
CJK UNIFIED IDEOGRAPH:'F846:63558:躘
CJK UNIFIED IDEOGRAPH:'F847:63559:轤
CJK UNIFIED IDEOGRAPH:'F848:63560:轣
CJK UNIFIED IDEOGRAPH:'F849:63561:醼
CJK UNIFIED IDEOGRAPH:'F84A:63562:鑢
CJK UNIFIED IDEOGRAPH:'F84B:63563:鑕
CJK UNIFIED IDEOGRAPH:'F84C:63564:鑝
CJK UNIFIED IDEOGRAPH:'F84D:63565:鑗
CJK UNIFIED IDEOGRAPH:'F84E:63566:鑞
CJK UNIFIED IDEOGRAPH:'F84F:63567:韄
CJK UNIFIED IDEOGRAPH:'F850:63568:韅
CJK UNIFIED IDEOGRAPH:'F851:63569:頀
CJK UNIFIED IDEOGRAPH:'F852:63570:驖
CJK UNIFIED IDEOGRAPH:'F853:63571:驙
CJK UNIFIED IDEOGRAPH:'F854:63572:鬞
CJK UNIFIED IDEOGRAPH:'F855:63573:鬟
CJK UNIFIED IDEOGRAPH:'F856:63574:鬠
CJK UNIFIED IDEOGRAPH:'F857:63575:鱒
CJK UNIFIED IDEOGRAPH:'F858:63576:鱘
CJK UNIFIED IDEOGRAPH:'F859:63577:鱐
CJK UNIFIED IDEOGRAPH:'F85A:63578:鱊
CJK UNIFIED IDEOGRAPH:'F85B:63579:鱍
CJK UNIFIED IDEOGRAPH:'F85C:63580:鱋
CJK UNIFIED IDEOGRAPH:'F85D:63581:鱕
CJK UNIFIED IDEOGRAPH:'F85E:63582:鱙
CJK UNIFIED IDEOGRAPH:'F85F:63583:鱌
CJK UNIFIED IDEOGRAPH:'F860:63584:鱎
CJK UNIFIED IDEOGRAPH:'F861:63585:鷻
CJK UNIFIED IDEOGRAPH:'F862:63586:鷷
CJK UNIFIED IDEOGRAPH:'F863:63587:鷯
CJK UNIFIED IDEOGRAPH:'F864:63588:鷣
CJK UNIFIED IDEOGRAPH:'F865:63589:鷫
CJK UNIFIED IDEOGRAPH:'F866:63590:鷸
CJK UNIFIED IDEOGRAPH:'F867:63591:鷤
CJK UNIFIED IDEOGRAPH:'F868:63592:鷶
CJK UNIFIED IDEOGRAPH:'F869:63593:鷡
CJK UNIFIED IDEOGRAPH:'F86A:63594:鷮
CJK UNIFIED IDEOGRAPH:'F86B:63595:鷦
CJK UNIFIED IDEOGRAPH:'F86C:63596:鷲
CJK UNIFIED IDEOGRAPH:'F86D:63597:鷰
CJK UNIFIED IDEOGRAPH:'F86E:63598:鷢
CJK UNIFIED IDEOGRAPH:'F86F:63599:鷬
CJK UNIFIED IDEOGRAPH:'F870:63600:鷴
CJK UNIFIED IDEOGRAPH:'F871:63601:鷳
CJK UNIFIED IDEOGRAPH:'F872:63602:鷨
CJK UNIFIED IDEOGRAPH:'F873:63603:鷭
CJK UNIFIED IDEOGRAPH:'F874:63604:黂
CJK UNIFIED IDEOGRAPH:'F875:63605:黐
CJK UNIFIED IDEOGRAPH:'F876:63606:黲
CJK UNIFIED IDEOGRAPH:'F877:63607:黳
CJK UNIFIED IDEOGRAPH:'F878:63608:鼆
CJK UNIFIED IDEOGRAPH:'F879:63609:鼜
CJK UNIFIED IDEOGRAPH:'F87A:63610:鼸
CJK UNIFIED IDEOGRAPH:'F87B:63611:鼷
CJK UNIFIED IDEOGRAPH:'F87C:63612:鼶
CJK UNIFIED IDEOGRAPH:'F87D:63613:齃
CJK UNIFIED IDEOGRAPH:'F87E:63614:齏
CJK UNIFIED IDEOGRAPH:'F8A1:63649:齱
CJK UNIFIED IDEOGRAPH:'F8A2:63650:齰
CJK UNIFIED IDEOGRAPH:'F8A3:63651:齮
CJK UNIFIED IDEOGRAPH:'F8A4:63652:齯
CJK UNIFIED IDEOGRAPH:'F8A5:63653:囓
CJK UNIFIED IDEOGRAPH:'F8A6:63654:囍
CJK UNIFIED IDEOGRAPH:'F8A7:63655:孎
CJK UNIFIED IDEOGRAPH:'F8A8:63656:屭
CJK UNIFIED IDEOGRAPH:'F8A9:63657:攭
CJK UNIFIED IDEOGRAPH:'F8AA:63658:曭
CJK UNIFIED IDEOGRAPH:'F8AB:63659:曮
CJK UNIFIED IDEOGRAPH:'F8AC:63660:欓
CJK UNIFIED IDEOGRAPH:'F8AD:63661:灟
CJK UNIFIED IDEOGRAPH:'F8AE:63662:灡
CJK UNIFIED IDEOGRAPH:'F8AF:63663:灝
CJK UNIFIED IDEOGRAPH:'F8B0:63664:灠
CJK UNIFIED IDEOGRAPH:'F8B1:63665:爣
CJK UNIFIED IDEOGRAPH:'F8B2:63666:瓛
CJK UNIFIED IDEOGRAPH:'F8B3:63667:瓥
CJK UNIFIED IDEOGRAPH:'F8B4:63668:矕
CJK UNIFIED IDEOGRAPH:'F8B5:63669:礸
CJK UNIFIED IDEOGRAPH:'F8B6:63670:禷
CJK UNIFIED IDEOGRAPH:'F8B7:63671:禶
CJK UNIFIED IDEOGRAPH:'F8B8:63672:籪
CJK UNIFIED IDEOGRAPH:'F8B9:63673:纗
CJK UNIFIED IDEOGRAPH:'F8BA:63674:羉
CJK UNIFIED IDEOGRAPH:'F8BB:63675:艭
CJK UNIFIED IDEOGRAPH:'F8BC:63676:虃
CJK UNIFIED IDEOGRAPH:'F8BD:63677:蠸
CJK UNIFIED IDEOGRAPH:'F8BE:63678:蠷
CJK UNIFIED IDEOGRAPH:'F8BF:63679:蠵
CJK UNIFIED IDEOGRAPH:'F8C0:63680:衋
CJK UNIFIED IDEOGRAPH:'F8C1:63681:讔
CJK UNIFIED IDEOGRAPH:'F8C2:63682:讕
CJK UNIFIED IDEOGRAPH:'F8C3:63683:躞
CJK UNIFIED IDEOGRAPH:'F8C4:63684:躟
CJK UNIFIED IDEOGRAPH:'F8C5:63685:躠
CJK UNIFIED IDEOGRAPH:'F8C6:63686:躝
CJK UNIFIED IDEOGRAPH:'F8C7:63687:醾
CJK UNIFIED IDEOGRAPH:'F8C8:63688:醽
CJK UNIFIED IDEOGRAPH:'F8C9:63689:釂
CJK UNIFIED IDEOGRAPH:'F8CA:63690:鑫
CJK UNIFIED IDEOGRAPH:'F8CB:63691:鑨
CJK UNIFIED IDEOGRAPH:'F8CC:63692:鑩
CJK UNIFIED IDEOGRAPH:'F8CD:63693:雥
CJK UNIFIED IDEOGRAPH:'F8CE:63694:靆
CJK UNIFIED IDEOGRAPH:'F8CF:63695:靃
CJK UNIFIED IDEOGRAPH:'F8D0:63696:靇
CJK UNIFIED IDEOGRAPH:'F8D1:63697:韇
CJK UNIFIED IDEOGRAPH:'F8D2:63698:韥
CJK UNIFIED IDEOGRAPH:'F8D3:63699:驞
CJK UNIFIED IDEOGRAPH:'F8D4:63700:髕
CJK UNIFIED IDEOGRAPH:'F8D5:63701:魙
CJK UNIFIED IDEOGRAPH:'F8D6:63702:鱣
CJK UNIFIED IDEOGRAPH:'F8D7:63703:鱧
CJK UNIFIED IDEOGRAPH:'F8D8:63704:鱦
CJK UNIFIED IDEOGRAPH:'F8D9:63705:鱢
CJK UNIFIED IDEOGRAPH:'F8DA:63706:鱞
CJK UNIFIED IDEOGRAPH:'F8DB:63707:鱠
CJK UNIFIED IDEOGRAPH:'F8DC:63708:鸂
CJK UNIFIED IDEOGRAPH:'F8DD:63709:鷾
CJK UNIFIED IDEOGRAPH:'F8DE:63710:鸇
CJK UNIFIED IDEOGRAPH:'F8DF:63711:鸃
CJK UNIFIED IDEOGRAPH:'F8E0:63712:鸆
CJK UNIFIED IDEOGRAPH:'F8E1:63713:鸅
CJK UNIFIED IDEOGRAPH:'F8E2:63714:鸀
CJK UNIFIED IDEOGRAPH:'F8E3:63715:鸁
CJK UNIFIED IDEOGRAPH:'F8E4:63716:鸉
CJK UNIFIED IDEOGRAPH:'F8E5:63717:鷿
CJK UNIFIED IDEOGRAPH:'F8E6:63718:鷽
CJK UNIFIED IDEOGRAPH:'F8E7:63719:鸄
CJK UNIFIED IDEOGRAPH:'F8E8:63720:麠
CJK UNIFIED IDEOGRAPH:'F8E9:63721:鼞
CJK UNIFIED IDEOGRAPH:'F8EA:63722:齆
CJK UNIFIED IDEOGRAPH:'F8EB:63723:齴
CJK UNIFIED IDEOGRAPH:'F8EC:63724:齵
CJK UNIFIED IDEOGRAPH:'F8ED:63725:齶
CJK UNIFIED IDEOGRAPH:'F8EE:63726:囔
CJK UNIFIED IDEOGRAPH:'F8EF:63727:攮
CJK UNIFIED IDEOGRAPH:'F8F0:63728:斸
CJK UNIFIED IDEOGRAPH:'F8F1:63729:欘
CJK UNIFIED IDEOGRAPH:'F8F2:63730:欙
CJK UNIFIED IDEOGRAPH:'F8F3:63731:欗
CJK UNIFIED IDEOGRAPH:'F8F4:63732:欚
CJK UNIFIED IDEOGRAPH:'F8F5:63733:灢
CJK UNIFIED IDEOGRAPH:'F8F6:63734:爦
CJK UNIFIED IDEOGRAPH:'F8F7:63735:犪
CJK UNIFIED IDEOGRAPH:'F8F8:63736:矘
CJK UNIFIED IDEOGRAPH:'F8F9:63737:矙
CJK UNIFIED IDEOGRAPH:'F8FA:63738:礹
CJK UNIFIED IDEOGRAPH:'F8FB:63739:籩
CJK UNIFIED IDEOGRAPH:'F8FC:63740:籫
CJK UNIFIED IDEOGRAPH:'F8FD:63741:糶
CJK UNIFIED IDEOGRAPH:'F8FE:63742:纚
CJK UNIFIED IDEOGRAPH:'F940:63808:纘
CJK UNIFIED IDEOGRAPH:'F941:63809:纛
CJK UNIFIED IDEOGRAPH:'F942:63810:纙
CJK UNIFIED IDEOGRAPH:'F943:63811:臠
CJK UNIFIED IDEOGRAPH:'F944:63812:臡
CJK UNIFIED IDEOGRAPH:'F945:63813:虆
CJK UNIFIED IDEOGRAPH:'F946:63814:虇
CJK UNIFIED IDEOGRAPH:'F947:63815:虈
CJK UNIFIED IDEOGRAPH:'F948:63816:襹
CJK UNIFIED IDEOGRAPH:'F949:63817:襺
CJK UNIFIED IDEOGRAPH:'F94A:63818:襼
CJK UNIFIED IDEOGRAPH:'F94B:63819:襻
CJK UNIFIED IDEOGRAPH:'F94C:63820:觿
CJK UNIFIED IDEOGRAPH:'F94D:63821:讘
CJK UNIFIED IDEOGRAPH:'F94E:63822:讙
CJK UNIFIED IDEOGRAPH:'F94F:63823:躥
CJK UNIFIED IDEOGRAPH:'F950:63824:躤
CJK UNIFIED IDEOGRAPH:'F951:63825:躣
CJK UNIFIED IDEOGRAPH:'F952:63826:鑮
CJK UNIFIED IDEOGRAPH:'F953:63827:鑭
CJK UNIFIED IDEOGRAPH:'F954:63828:鑯
CJK UNIFIED IDEOGRAPH:'F955:63829:鑱
CJK UNIFIED IDEOGRAPH:'F956:63830:鑳
CJK UNIFIED IDEOGRAPH:'F957:63831:靉
CJK UNIFIED IDEOGRAPH:'F958:63832:顲
CJK UNIFIED IDEOGRAPH:'F959:63833:饟
CJK UNIFIED IDEOGRAPH:'F95A:63834:鱨
CJK UNIFIED IDEOGRAPH:'F95B:63835:鱮
CJK UNIFIED IDEOGRAPH:'F95C:63836:鱭
CJK UNIFIED IDEOGRAPH:'F95D:63837:鸋
CJK UNIFIED IDEOGRAPH:'F95E:63838:鸍
CJK UNIFIED IDEOGRAPH:'F95F:63839:鸐
CJK UNIFIED IDEOGRAPH:'F960:63840:鸏
CJK UNIFIED IDEOGRAPH:'F961:63841:鸒
CJK UNIFIED IDEOGRAPH:'F962:63842:鸑
CJK UNIFIED IDEOGRAPH:'F963:63843:麡
CJK UNIFIED IDEOGRAPH:'F964:63844:黵
CJK UNIFIED IDEOGRAPH:'F965:63845:鼉
CJK UNIFIED IDEOGRAPH:'F966:63846:齇
CJK UNIFIED IDEOGRAPH:'F967:63847:齸
CJK UNIFIED IDEOGRAPH:'F968:63848:齻
CJK UNIFIED IDEOGRAPH:'F969:63849:齺
CJK UNIFIED IDEOGRAPH:'F96A:63850:齹
CJK UNIFIED IDEOGRAPH:'F96B:63851:圞
CJK UNIFIED IDEOGRAPH:'F96C:63852:灦
CJK UNIFIED IDEOGRAPH:'F96D:63853:籯
CJK UNIFIED IDEOGRAPH:'F96E:63854:蠼
CJK UNIFIED IDEOGRAPH:'F96F:63855:趲
CJK UNIFIED IDEOGRAPH:'F970:63856:躦
CJK UNIFIED IDEOGRAPH:'F971:63857:釃
CJK UNIFIED IDEOGRAPH:'F972:63858:鑴
CJK UNIFIED IDEOGRAPH:'F973:63859:鑸
CJK UNIFIED IDEOGRAPH:'F974:63860:鑶
CJK UNIFIED IDEOGRAPH:'F975:63861:鑵
CJK UNIFIED IDEOGRAPH:'F976:63862:驠
CJK UNIFIED IDEOGRAPH:'F977:63863:鱴
CJK UNIFIED IDEOGRAPH:'F978:63864:鱳
CJK UNIFIED IDEOGRAPH:'F979:63865:鱱
CJK UNIFIED IDEOGRAPH:'F97A:63866:鱵
CJK UNIFIED IDEOGRAPH:'F97B:63867:鸔
CJK UNIFIED IDEOGRAPH:'F97C:63868:鸓
CJK UNIFIED IDEOGRAPH:'F97D:63869:黶
CJK UNIFIED IDEOGRAPH:'F97E:63870:鼊
CJK UNIFIED IDEOGRAPH:'F9A1:63905:龤
CJK UNIFIED IDEOGRAPH:'F9A2:63906:灨
CJK UNIFIED IDEOGRAPH:'F9A3:63907:灥
CJK UNIFIED IDEOGRAPH:'F9A4:63908:糷
CJK UNIFIED IDEOGRAPH:'F9A5:63909:虪
CJK UNIFIED IDEOGRAPH:'F9A6:63910:蠾
CJK UNIFIED IDEOGRAPH:'F9A7:63911:蠽
CJK UNIFIED IDEOGRAPH:'F9A8:63912:蠿
CJK UNIFIED IDEOGRAPH:'F9A9:63913:讞
CJK UNIFIED IDEOGRAPH:'F9AA:63914:貜
CJK UNIFIED IDEOGRAPH:'F9AB:63915:躩
CJK UNIFIED IDEOGRAPH:'F9AC:63916:軉
CJK UNIFIED IDEOGRAPH:'F9AD:63917:靋
CJK UNIFIED IDEOGRAPH:'F9AE:63918:顳
CJK UNIFIED IDEOGRAPH:'F9AF:63919:顴
CJK UNIFIED IDEOGRAPH:'F9B0:63920:飌
CJK UNIFIED IDEOGRAPH:'F9B1:63921:饡
CJK UNIFIED IDEOGRAPH:'F9B2:63922:馫
CJK UNIFIED IDEOGRAPH:'F9B3:63923:驤
CJK UNIFIED IDEOGRAPH:'F9B4:63924:驦
CJK UNIFIED IDEOGRAPH:'F9B5:63925:驧
CJK UNIFIED IDEOGRAPH:'F9B6:63926:鬤
CJK UNIFIED IDEOGRAPH:'F9B7:63927:鸕
CJK UNIFIED IDEOGRAPH:'F9B8:63928:鸗
CJK UNIFIED IDEOGRAPH:'F9B9:63929:齈
CJK UNIFIED IDEOGRAPH:'F9BA:63930:戇
CJK UNIFIED IDEOGRAPH:'F9BB:63931:欞
CJK UNIFIED IDEOGRAPH:'F9BC:63932:爧
CJK UNIFIED IDEOGRAPH:'F9BD:63933:虌
CJK UNIFIED IDEOGRAPH:'F9BE:63934:躨
CJK UNIFIED IDEOGRAPH:'F9BF:63935:钂
CJK UNIFIED IDEOGRAPH:'F9C0:63936:钀
CJK UNIFIED IDEOGRAPH:'F9C1:63937:钁
CJK UNIFIED IDEOGRAPH:'F9C2:63938:驩
CJK UNIFIED IDEOGRAPH:'F9C3:63939:驨
CJK UNIFIED IDEOGRAPH:'F9C4:63940:鬮
CJK UNIFIED IDEOGRAPH:'F9C5:63941:鸙
CJK UNIFIED IDEOGRAPH:'F9C6:63942:爩
CJK UNIFIED IDEOGRAPH:'F9C7:63943:虋
CJK UNIFIED IDEOGRAPH:'F9C8:63944:讟
CJK UNIFIED IDEOGRAPH:'F9C9:63945:钃
CJK UNIFIED IDEOGRAPH:'F9CA:63946:鱹
CJK UNIFIED IDEOGRAPH:'F9CB:63947:麷
CJK UNIFIED IDEOGRAPH:'F9CC:63948:癵
CJK UNIFIED IDEOGRAPH:'F9CD:63949:驫
CJK UNIFIED IDEOGRAPH:'F9CE:63950:鱺
CJK UNIFIED IDEOGRAPH:'F9CF:63951:鸝
CJK UNIFIED IDEOGRAPH:'F9D0:63952:灩
CJK UNIFIED IDEOGRAPH:'F9D1:63953:灪
CJK UNIFIED IDEOGRAPH:'F9D2:63954:麤
CJK UNIFIED IDEOGRAPH:'F9D3:63955:齾
CJK UNIFIED IDEOGRAPH:'F9D4:63956:齉
CJK UNIFIED IDEOGRAPH:'F9D5:63957:龘
CJK UNIFIED IDEOGRAPH:'F9D6:63958:碁
CJK UNIFIED IDEOGRAPH:'F9D7:63959:銹
CJK UNIFIED IDEOGRAPH:'F9D8:63960:裏
CJK UNIFIED IDEOGRAPH:'F9D9:63961:墻
CJK UNIFIED IDEOGRAPH:'F9DA:63962:恒
CJK UNIFIED IDEOGRAPH:'F9DB:63963:粧
CJK UNIFIED IDEOGRAPH:'F9DC:63964:嫺
BOX DRAWINGS DOUBLE DOWN AND RIGHT:'F9DD:63965:╔
BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL:'F9DE:63966:╦
BOX DRAWINGS DOUBLE DOWN AND LEFT:'F9DF:63967:╗
BOX DRAWINGS DOUBLE VERTICAL AND RIGHT:'F9E0:63968:╠
BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL:'F9E1:63969:╬
BOX DRAWINGS DOUBLE VERTICAL AND LEFT:'F9E2:63970:╣
BOX DRAWINGS DOUBLE UP AND RIGHT:'F9E3:63971:╚
BOX DRAWINGS DOUBLE UP AND HORIZONTAL:'F9E4:63972:╩
BOX DRAWINGS DOUBLE UP AND LEFT:'F9E5:63973:╝
BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE:'F9E6:63974:╒
BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE:'F9E7:63975:╤
BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE:'F9E8:63976:╕
BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE:'F9E9:63977:╞
BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE:'F9EA:63978:╪
BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE:'F9EB:63979:╡
BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE:'F9EC:63980:╘
BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE:'F9ED:63981:╧
BOX DRAWINGS UP SINGLE AND LEFT DOUBLE:'F9EE:63982:╛
BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE:'F9EF:63983:╓
BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE:'F9F0:63984:╥
BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE:'F9F1:63985:╖
BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE:'F9F2:63986:╟
BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE:'F9F3:63987:╫
BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE:'F9F4:63988:╢
BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE:'F9F5:63989:╙
BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE:'F9F6:63990:╨
BOX DRAWINGS UP DOUBLE AND LEFT SINGLE:'F9F7:63991:╜
BOX DRAWINGS DOUBLE VERTICAL:'F9F8:63992:║
BOX DRAWINGS DOUBLE HORIZONTAL:'F9F9:63993:═
BOX DRAWINGS LIGHT ARC DOWN AND RIGHT:'F9FA:63994:╭
BOX DRAWINGS LIGHT ARC DOWN AND LEFT:'F9FB:63995:╮
BOX DRAWINGS LIGHT ARC UP AND RIGHT:'F9FC:63996:╰
BOX DRAWINGS LIGHT ARC UP AND LEFT:'F9FD:63997:╯
DARK SHADE:'F9FE:63998:▓
</ansicpg950>
<ansicpg1250>
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
PER MILLE SIGN:'89:8240:‰
LATIN CAPITAL LETTER S WITH CARON:'8A:352:Š
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
LATIN CAPITAL LETTER S WITH ACUTE:'8C:346:Ś
LATIN CAPITAL LETTER T WITH CARON:'8D:356:Ť
LATIN CAPITAL LETTER Z WITH CARON:'8E:381:Ž
LATIN CAPITAL LETTER Z WITH ACUTE:'8F:377:Ź
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
TRADE MARK SIGN:'99:8482:™
LATIN SMALL LETTER S WITH CARON:'9A:353:š
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
LATIN SMALL LETTER S WITH ACUTE:'9C:347:ś
LATIN SMALL LETTER T WITH CARON:'9D:357:ť
LATIN SMALL LETTER Z WITH CARON:'9E:382:ž
LATIN SMALL LETTER Z WITH ACUTE:'9F:378:ź
NO-BREAK SPACE:'A0:160: 
CARON (MANDARIN CHINESE THIRD TONE):'A1:711:ˇ
BREVE:'A2:728:˘
LATIN CAPITAL LETTER L WITH STROKE:'A3:321:Ł
CURRENCY SIGN:'A4:164:¤
LATIN CAPITAL LETTER A WITH OGONEK:'A5:260:Ą
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
DIAERESIS:'A8:168:¨
COPYRIGHT SIGN:'A9:169:©
LATIN CAPITAL LETTER S WITH CEDILLA:'AA:350:Ş
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
LATIN CAPITAL LETTER Z WITH DOT ABOVE:'AF:379:Ż
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
OGONEK:'B2:731:˛
LATIN SMALL LETTER L WITH STROKE:'B3:322:ł
ACUTE ACCENT:'B4:180:´
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
CEDILLA:'B8:184:¸
LATIN SMALL LETTER A WITH OGONEK:'B9:261:ą
LATIN SMALL LETTER S WITH CEDILLA:'BA:351:ş
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
LATIN CAPITAL LETTER L WITH CARON:'BC:317:Ľ
DOUBLE ACUTE ACCENT:'BD:733:˝
LATIN SMALL LETTER L WITH CARON:'BE:318:ľ
LATIN SMALL LETTER Z WITH DOT ABOVE:'BF:380:ż
LATIN CAPITAL LETTER R WITH ACUTE:'C0:340:Ŕ
LATIN CAPITAL LETTER A WITH ACUTE:'C1:193:Á
LATIN CAPITAL LETTER A WITH CIRCUMFLEX:'C2:194:Â
LATIN CAPITAL LETTER A WITH BREVE:'C3:258:Ă
LATIN CAPITAL LETTER A WITH DIAERESIS:'C4:196:Ä
LATIN CAPITAL LETTER L WITH ACUTE:'C5:313:Ĺ
LATIN CAPITAL LETTER C WITH ACUTE:'C6:262:Ć
LATIN CAPITAL LETTER C WITH CEDILLA:'C7:199:Ç
LATIN CAPITAL LETTER C WITH CARON:'C8:268:Č
LATIN CAPITAL LETTER E WITH ACUTE:'C9:201:É
LATIN CAPITAL LETTER E WITH OGONEK:'CA:280:Ę
LATIN CAPITAL LETTER E WITH DIAERESIS:'CB:203:Ë
LATIN CAPITAL LETTER E WITH CARON:'CC:282:Ě
LATIN CAPITAL LETTER I WITH ACUTE:'CD:205:Í
LATIN CAPITAL LETTER I WITH CIRCUMFLEX:'CE:206:Î
LATIN CAPITAL LETTER D WITH CARON:'CF:270:Ď
LATIN CAPITAL LETTER D WITH STROKE:'D0:272:Đ
LATIN CAPITAL LETTER N WITH ACUTE:'D1:323:Ń
LATIN CAPITAL LETTER N WITH CARON:'D2:327:Ň
LATIN CAPITAL LETTER O WITH ACUTE:'D3:211:Ó
LATIN CAPITAL LETTER O WITH CIRCUMFLEX:'D4:212:Ô
LATIN CAPITAL LETTER O WITH DOUBLE ACUTE:'D5:336:Ő
LATIN CAPITAL LETTER O WITH DIAERESIS:'D6:214:Ö
MULTIPLICATION SIGN:'D7:215:×
LATIN CAPITAL LETTER R WITH CARON:'D8:344:Ř
LATIN CAPITAL LETTER U WITH RING ABOVE:'D9:366:Ů
LATIN CAPITAL LETTER U WITH ACUTE:'DA:218:Ú
LATIN CAPITAL LETTER U WITH DOUBLE ACUTE:'DB:368:Ű
LATIN CAPITAL LETTER U WITH DIAERESIS:'DC:220:Ü
LATIN CAPITAL LETTER Y WITH ACUTE:'DD:221:Ý
LATIN CAPITAL LETTER T WITH CEDILLA:'DE:354:Ţ
LATIN SMALL LETTER SHARP S (GERMAN):'DF:223:ß
LATIN SMALL LETTER R WITH ACUTE:'E0:341:ŕ
LATIN SMALL LETTER A WITH ACUTE:'E1:225:á
LATIN SMALL LETTER A WITH CIRCUMFLEX:'E2:226:â
LATIN SMALL LETTER A WITH BREVE:'E3:259:ă
LATIN SMALL LETTER A WITH DIAERESIS:'E4:228:ä
LATIN SMALL LETTER L WITH ACUTE:'E5:314:ĺ
LATIN SMALL LETTER C WITH ACUTE:'E6:263:ć
LATIN SMALL LETTER C WITH CEDILLA:'E7:231:ç
LATIN SMALL LETTER C WITH CARON:'E8:269:č
LATIN SMALL LETTER E WITH ACUTE:'E9:233:é
LATIN SMALL LETTER E WITH OGONEK:'EA:281:ę
LATIN SMALL LETTER E WITH DIAERESIS:'EB:235:ë
LATIN SMALL LETTER E WITH CARON:'EC:283:ě
LATIN SMALL LETTER I WITH ACUTE:'ED:237:í
LATIN SMALL LETTER I WITH CIRCUMFLEX:'EE:238:î
LATIN SMALL LETTER D WITH CARON:'EF:271:ď
LATIN SMALL LETTER D WITH STROKE:'F0:273:đ
LATIN SMALL LETTER N WITH ACUTE:'F1:324:ń
LATIN SMALL LETTER N WITH CARON:'F2:328:ň
LATIN SMALL LETTER O WITH ACUTE:'F3:243:ó
LATIN SMALL LETTER O WITH CIRCUMFLEX:'F4:244:ô
LATIN SMALL LETTER O WITH DOUBLE ACUTE:'F5:337:ő
LATIN SMALL LETTER O WITH DIAERESIS:'F6:246:ö
DIVISION SIGN:'F7:247:÷
LATIN SMALL LETTER R WITH CARON:'F8:345:ř
LATIN SMALL LETTER U WITH RING ABOVE:'F9:367:ů
LATIN SMALL LETTER U WITH ACUTE:'FA:250:ú
LATIN SMALL LETTER U WITH DOUBLE ACUTE:'FB:369:ű
LATIN SMALL LETTER U WITH DIAERESIS:'FC:252:ü
LATIN SMALL LETTER Y WITH ACUTE:'FD:253:ý
LATIN SMALL LETTER T WITH CEDILLA:'FE:355:ţ
DOT ABOVE (MANDARIN CHINESE LIGHT TONE):'FF:729:˙
</ansicpg1250>
<ansicpg1251>
CYRILLIC CAPITAL LETTER DJE (SERBOCROATIAN):'80:1026:Ђ
CYRILLIC CAPITAL LETTER GJE:'81:1027:Ѓ
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
CYRILLIC SMALL LETTER GJE:'83:1107:ѓ
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
PER MILLE SIGN:'89:8240:‰
CYRILLIC CAPITAL LETTER LJE:'8A:1033:Љ
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
CYRILLIC CAPITAL LETTER NJE:'8C:1034:Њ
CYRILLIC CAPITAL LETTER KJE:'8D:1036:Ќ
CYRILLIC CAPITAL LETTER TSHE (SERBOCROATIAN):'8E:1035:Ћ
CYRILLIC CAPITAL LETTER DZHE:'8F:1039:Џ
CYRILLIC SMALL LETTER DJE (SERBOCROATIAN):'90:1106:ђ
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
TRADE MARK SIGN:'99:8482:™
CYRILLIC SMALL LETTER LJE:'9A:1113:љ
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
CYRILLIC SMALL LETTER NJE:'9C:1114:њ
CYRILLIC SMALL LETTER KJE:'9D:1116:ќ
CYRILLIC SMALL LETTER TSHE (SERBOCROATIAN):'9E:1115:ћ
CYRILLIC SMALL LETTER DZHE:'9F:1119:џ
NO-BREAK SPACE:'A0:160: 
CYRILLIC CAPITAL LETTER SHORT U (BYELORUSSIAN):'A1:1038:Ў
CYRILLIC SMALL LETTER SHORT U (BYELORUSSIAN):'A2:1118:ў
CYRILLIC CAPITAL LETTER JE:'A3:1032:Ј
CURRENCY SIGN:'A4:164:¤
CYRILLIC CAPITAL LETTER GHE WITH UPTURN:'A5:1168:Ґ
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
CYRILLIC CAPITAL LETTER IO:'A8:1025:Ё
COPYRIGHT SIGN:'A9:169:©
CYRILLIC CAPITAL LETTER UKRAINIAN IE:'AA:1028:Є
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
CYRILLIC CAPITAL LETTER YI (UKRAINIAN):'AF:1031:Ї
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I:'B2:1030:І
CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I:'B3:1110:і
CYRILLIC SMALL LETTER GHE WITH UPTURN:'B4:1169:ґ
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
CYRILLIC SMALL LETTER IO:'B8:1105:ё
NUMERO SIGN:'B9:8470:№
CYRILLIC SMALL LETTER UKRAINIAN IE:'BA:1108:є
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
CYRILLIC SMALL LETTER JE:'BC:1112:ј
CYRILLIC CAPITAL LETTER DZE:'BD:1029:Ѕ
CYRILLIC SMALL LETTER DZE:'BE:1109:ѕ
CYRILLIC SMALL LETTER YI (UKRAINIAN):'BF:1111:ї
CYRILLIC CAPITAL LETTER A:'C0:1040:А
CYRILLIC CAPITAL LETTER BE:'C1:1041:Б
CYRILLIC CAPITAL LETTER VE:'C2:1042:В
CYRILLIC CAPITAL LETTER GHE:'C3:1043:Г
CYRILLIC CAPITAL LETTER DE:'C4:1044:Д
CYRILLIC CAPITAL LETTER IE:'C5:1045:Е
CYRILLIC CAPITAL LETTER ZHE:'C6:1046:Ж
CYRILLIC CAPITAL LETTER ZE:'C7:1047:З
CYRILLIC CAPITAL LETTER I:'C8:1048:И
CYRILLIC CAPITAL LETTER SHORT I:'C9:1049:Й
CYRILLIC CAPITAL LETTER KA:'CA:1050:К
CYRILLIC CAPITAL LETTER EL:'CB:1051:Л
CYRILLIC CAPITAL LETTER EM:'CC:1052:М
CYRILLIC CAPITAL LETTER EN:'CD:1053:Н
CYRILLIC CAPITAL LETTER O:'CE:1054:О
CYRILLIC CAPITAL LETTER PE:'CF:1055:П
CYRILLIC CAPITAL LETTER ER:'D0:1056:Р
CYRILLIC CAPITAL LETTER ES:'D1:1057:С
CYRILLIC CAPITAL LETTER TE:'D2:1058:Т
CYRILLIC CAPITAL LETTER U:'D3:1059:У
CYRILLIC CAPITAL LETTER EF:'D4:1060:Ф
CYRILLIC CAPITAL LETTER HA:'D5:1061:Х
CYRILLIC CAPITAL LETTER TSE:'D6:1062:Ц
CYRILLIC CAPITAL LETTER CHE:'D7:1063:Ч
CYRILLIC CAPITAL LETTER SHA:'D8:1064:Ш
CYRILLIC CAPITAL LETTER SHCHA:'D9:1065:Щ
CYRILLIC CAPITAL LETTER HARD SIGN:'DA:1066:Ъ
CYRILLIC CAPITAL LETTER YERU:'DB:1067:Ы
CYRILLIC CAPITAL LETTER SOFT SIGN:'DC:1068:Ь
CYRILLIC CAPITAL LETTER E:'DD:1069:Э
CYRILLIC CAPITAL LETTER YU:'DE:1070:Ю
CYRILLIC CAPITAL LETTER YA:'DF:1071:Я
CYRILLIC SMALL LETTER A:'E0:1072:а
CYRILLIC SMALL LETTER BE:'E1:1073:б
CYRILLIC SMALL LETTER VE:'E2:1074:в
CYRILLIC SMALL LETTER GHE:'E3:1075:г
CYRILLIC SMALL LETTER DE:'E4:1076:д
CYRILLIC SMALL LETTER IE:'E5:1077:е
CYRILLIC SMALL LETTER ZHE:'E6:1078:ж
CYRILLIC SMALL LETTER ZE:'E7:1079:з
CYRILLIC SMALL LETTER I:'E8:1080:и
CYRILLIC SMALL LETTER SHORT I:'E9:1081:й
CYRILLIC SMALL LETTER KA:'EA:1082:к
CYRILLIC SMALL LETTER EL:'EB:1083:л
CYRILLIC SMALL LETTER EM:'EC:1084:м
CYRILLIC SMALL LETTER EN:'ED:1085:н
CYRILLIC SMALL LETTER O:'EE:1086:о
CYRILLIC SMALL LETTER PE:'EF:1087:п
CYRILLIC SMALL LETTER ER:'F0:1088:р
CYRILLIC SMALL LETTER ES:'F1:1089:с
CYRILLIC SMALL LETTER TE:'F2:1090:т
CYRILLIC SMALL LETTER U:'F3:1091:у
CYRILLIC SMALL LETTER EF:'F4:1092:ф
CYRILLIC SMALL LETTER HA:'F5:1093:х
CYRILLIC SMALL LETTER TSE:'F6:1094:ц
CYRILLIC SMALL LETTER CHE:'F7:1095:ч
CYRILLIC SMALL LETTER SHA:'F8:1096:ш
CYRILLIC SMALL LETTER SHCHA:'F9:1097:щ
CYRILLIC SMALL LETTER HARD SIGN:'FA:1098:ъ
CYRILLIC SMALL LETTER YERU:'FB:1099:ы
CYRILLIC SMALL LETTER SOFT SIGN:'FC:1100:ь
CYRILLIC SMALL LETTER E:'FD:1101:э
CYRILLIC SMALL LETTER YU:'FE:1102:ю
CYRILLIC SMALL LETTER YA:'FF:1103:я
</ansicpg1251>
<ansicpg1252>
LATIN SMALL LETTER Y WITH DIAERESIS:'00:00:ÿ
EURO SIGN:'80:8364:€
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
LATIN SMALL LETTER F WITH HOOK:'83:402:ƒ
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
MODIFIER LETTER CIRCUMFLEX ACCENT:'88:710:ˆ
PER MILLE SIGN:'89:8240:‰
LATIN CAPITAL LETTER S WITH CARON:'8A:352:Š
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
LATIN CAPITAL LIGATURE OE:'8C:338:Œ
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
SMALL TILDE:'98:732:˜
TRADE MARK SIGN:'99:8482:™
LATIN SMALL LETTER S WITH CARON:'9A:353:š
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
LATIN SMALL LIGATURE OE:'9C:339:œ
LATIN CAPITAL LETTER Y WITH DIAERESIS:'9F:376:Ÿ
NO-BREAK SPACE:'A0:160: 
INVERTED EXCLAMATION MARK:'A1:161:¡
CENT SIGN:'A2:162:¢
POUND SIGN:'A3:163:£
CURRENCY SIGN:'A4:164:¤
YEN SIGN:'A5:165:¥
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
DIAERESIS:'A8:168:¨
COPYRIGHT SIGN:'A9:169:©
FEMININE ORDINAL INDICATOR:'AA:170:ª
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
MACRON:'AF:175:¯
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
SUPERSCRIPT TWO:'B2:178:²
SUPERSCRIPT THREE:'B3:179:³
ACUTE ACCENT:'B4:180:´
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
CEDILLA:'B8:184:¸
SUPERSCRIPT ONE:'B9:185:¹
MASCULINE ORDINAL INDICATOR:'BA:186:º
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
VULGAR FRACTION ONE QUARTER:'BC:188:¼
VULGAR FRACTION ONE HALF:'BD:189:½
VULGAR FRACTION THREE QUARTERS:'BE:190:¾
INVERTED QUESTION MARK:'BF:191:¿
LATIN CAPITAL LETTER A WITH GRAVE:'C0:192:À
LATIN CAPITAL LETTER A WITH ACUTE:'C1:193:Á
LATIN CAPITAL LETTER A WITH CIRCUMFLEX:'C2:194:Â
LATIN CAPITAL LETTER A WITH TILDE:'C3:195:Ã
LATIN CAPITAL LETTER A WITH DIAERESIS:'C4:196:Ä
LATIN CAPITAL LETTER A WITH RING ABOVE:'C5:197:Å
LATIN CAPITAL LETTER AE:'C6:198:Æ
LATIN CAPITAL LETTER C WITH CEDILLA:'C7:199:Ç
LATIN CAPITAL LETTER E WITH GRAVE:'C8:200:È
LATIN CAPITAL LETTER E WITH ACUTE:'C9:201:É
LATIN CAPITAL LETTER E WITH CIRCUMFLEX:'CA:202:Ê
LATIN CAPITAL LETTER E WITH DIAERESIS:'CB:203:Ë
LATIN CAPITAL LETTER I WITH GRAVE:'CC:204:Ì
LATIN CAPITAL LETTER I WITH ACUTE:'CD:205:Í
LATIN CAPITAL LETTER I WITH CIRCUMFLEX:'CE:206:Î
LATIN CAPITAL LETTER I WITH DIAERESIS:'CF:207:Ï
LATIN CAPITAL LETTER ETH (ICELANDIC):'D0:208:Ð
LATIN CAPITAL LETTER N WITH TILDE:'D1:209:Ñ
LATIN CAPITAL LETTER O WITH GRAVE:'D2:210:Ò
LATIN CAPITAL LETTER O WITH ACUTE:'D3:211:Ó
LATIN CAPITAL LETTER O WITH CIRCUMFLEX:'D4:212:Ô
LATIN CAPITAL LETTER O WITH TILDE:'D5:213:Õ
LATIN CAPITAL LETTER O WITH DIAERESIS:'D6:214:Ö
MULTIPLICATION SIGN:'D7:215:×
LATIN CAPITAL LETTER O WITH STROKE:'D8:216:Ø
LATIN CAPITAL LETTER U WITH GRAVE:'D9:217:Ù
LATIN CAPITAL LETTER U WITH ACUTE:'DA:218:Ú
LATIN CAPITAL LETTER U WITH CIRCUMFLEX:'DB:219:Û
LATIN CAPITAL LETTER U WITH DIAERESIS:'DC:220:Ü
LATIN CAPITAL LETTER Y WITH ACUTE:'DD:221:Ý
LATIN CAPITAL LETTER THORN (ICELANDIC):'DE:222:Þ
LATIN SMALL LETTER SHARP S (GERMAN):'DF:223:ß
LATIN SMALL LETTER A WITH GRAVE:'E0:224:à
LATIN SMALL LETTER A WITH ACUTE:'E1:225:á
LATIN SMALL LETTER A WITH CIRCUMFLEX:'E2:226:â
LATIN SMALL LETTER A WITH TILDE:'E3:227:ã
LATIN SMALL LETTER A WITH DIAERESIS:'E4:228:ä
LATIN SMALL LETTER A WITH RING ABOVE:'E5:229:å
LATIN SMALL LETTER AE:'E6:230:æ
LATIN SMALL LETTER C WITH CEDILLA:'E7:231:ç
LATIN SMALL LETTER E WITH GRAVE:'E8:232:è
LATIN SMALL LETTER E WITH ACUTE:'E9:233:é
LATIN SMALL LETTER E WITH CIRCUMFLEX:'EA:234:ê
LATIN SMALL LETTER E WITH DIAERESIS:'EB:235:ë
LATIN SMALL LETTER I WITH GRAVE:'EC:236:ì
LATIN SMALL LETTER I WITH ACUTE:'ED:237:í
LATIN SMALL LETTER I WITH CIRCUMFLEX:'EE:238:î
LATIN SMALL LETTER I WITH DIAERESIS:'EF:239:ï
LATIN SMALL LETTER ETH (ICELANDIC):'F0:240:ð
LATIN SMALL LETTER N WITH TILDE:'F1:241:ñ
LATIN SMALL LETTER O WITH GRAVE:'F2:242:ò
LATIN SMALL LETTER O WITH ACUTE:'F3:243:ó
LATIN SMALL LETTER O WITH CIRCUMFLEX:'F4:244:ô
LATIN SMALL LETTER O WITH TILDE:'F5:245:õ
LATIN SMALL LETTER O WITH DIAERESIS:'F6:246:ö
DIVISION SIGN:'F7:247:÷
LATIN SMALL LETTER O WITH STROKE:'F8:248:ø
LATIN SMALL LETTER U WITH GRAVE:'F9:249:ù
LATIN SMALL LETTER U WITH ACUTE:'FA:250:ú
LATIN SMALL LETTER U WITH CIRCUMFLEX:'FB:251:û
LATIN SMALL LETTER U WITH DIAERESIS:'FC:252:ü
LATIN SMALL LETTER Y WITH ACUTE:'FD:253:ý
LATIN SMALL LETTER THORN (ICELANDIC):'FE:254:þ
LATIN SMALL LETTER Y WITH DIAERESIS:'FF:255:ÿ
MY UNDEFINED SYMBOL:\'8D:141:<udef_symbol num="141"/>
MY UNDEFINED SYMBOL:\'8E:142:<udef_symbol num="142"/>
MY UNDEFINED SYMBOL:\'8F:143:<udef_symbol num="143"/>
MY UNDEFINED SYMBOL:\'90:144:<udef_symbol num="144"/>
MY UNDEFINED SYMBOL:\'9D:157:<udef_symbol num="157"/>
MY UNDEFINED SYMBOL:\'9E:158:<udef_symbol num="158"/>
</ansicpg1252>
<ansicpg1253>
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
LATIN SMALL LETTER F WITH HOOK:'83:402:ƒ
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
PER MILLE SIGN:'89:8240:‰
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
TRADE MARK SIGN:'99:8482:™
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
NO-BREAK SPACE:'A0:160: 
GREEK DIALYTIKA TONOS:'A1:901:΅
GREEK CAPITAL LETTER ALPHA WITH TONOS:'A2:902:Ά
POUND SIGN:'A3:163:£
CURRENCY SIGN:'A4:164:¤
YEN SIGN:'A5:165:¥
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
DIAERESIS:'A8:168:¨
COPYRIGHT SIGN:'A9:169:©
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
HORIZONTAL BAR:'AF:8213:―
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
SUPERSCRIPT TWO:'B2:178:²
SUPERSCRIPT THREE:'B3:179:³
GREEK TONOS:'B4:900:΄
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
GREEK CAPITAL LETTER EPSILON WITH TONOS:'B8:904:Έ
GREEK CAPITAL LETTER ETA WITH TONOS:'B9:905:Ή
GREEK CAPITAL LETTER IOTA WITH TONOS:'BA:906:Ί
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
GREEK CAPITAL LETTER OMICRON WITH TONOS:'BC:908:Ό
VULGAR FRACTION ONE HALF:'BD:189:½
GREEK CAPITAL LETTER UPSILON WITH TONOS:'BE:910:Ύ
GREEK CAPITAL LETTER OMEGA WITH TONOS:'BF:911:Ώ
GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS:'C0:912:ΐ
GREEK CAPITAL LETTER ALPHA:'C1:913:Α
GREEK CAPITAL LETTER BETA:'C2:914:Β
GREEK CAPITAL LETTER GAMMA:'C3:915:Γ
GREEK CAPITAL LETTER DELTA:'C4:916:Δ
GREEK CAPITAL LETTER EPSILON:'C5:917:Ε
GREEK CAPITAL LETTER ZETA:'C6:918:Ζ
GREEK CAPITAL LETTER ETA:'C7:919:Η
GREEK CAPITAL LETTER THETA:'C8:920:Θ
GREEK CAPITAL LETTER IOTA:'C9:921:Ι
GREEK CAPITAL LETTER KAPPA:'CA:922:Κ
GREEK CAPITAL LETTER LAMDA:'CB:923:Λ
GREEK CAPITAL LETTER MU:'CC:924:Μ
GREEK CAPITAL LETTER NU:'CD:925:Ν
GREEK CAPITAL LETTER XI:'CE:926:Ξ
GREEK CAPITAL LETTER OMICRON:'CF:927:Ο
GREEK CAPITAL LETTER PI:'D0:928:Π
GREEK CAPITAL LETTER RHO:'D1:929:Ρ
GREEK CAPITAL LETTER SIGMA:'D3:931:Σ
GREEK CAPITAL LETTER TAU:'D4:932:Τ
GREEK CAPITAL LETTER UPSILON:'D5:933:Υ
GREEK CAPITAL LETTER PHI:'D6:934:Φ
GREEK CAPITAL LETTER CHI:'D7:935:Χ
GREEK CAPITAL LETTER PSI:'D8:936:Ψ
GREEK CAPITAL LETTER OMEGA:'D9:937:Ω
GREEK CAPITAL LETTER IOTA WITH DIALYTIKA:'DA:938:Ϊ
GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA:'DB:939:Ϋ
GREEK SMALL LETTER ALPHA WITH TONOS:'DC:940:ά
GREEK SMALL LETTER EPSILON WITH TONOS:'DD:941:έ
GREEK SMALL LETTER ETA WITH TONOS:'DE:942:ή
GREEK SMALL LETTER IOTA WITH TONOS:'DF:943:ί
GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS:'E0:944:ΰ
GREEK SMALL LETTER ALPHA:'E1:945:α
GREEK SMALL LETTER BETA:'E2:946:β
GREEK SMALL LETTER GAMMA:'E3:947:γ
GREEK SMALL LETTER DELTA:'E4:948:δ
GREEK SMALL LETTER EPSILON:'E5:949:ε
GREEK SMALL LETTER ZETA:'E6:950:ζ
GREEK SMALL LETTER ETA:'E7:951:η
GREEK SMALL LETTER THETA:'E8:952:θ
GREEK SMALL LETTER IOTA:'E9:953:ι
GREEK SMALL LETTER KAPPA:'EA:954:κ
GREEK SMALL LETTER LAMDA:'EB:955:λ
GREEK SMALL LETTER MU:'EC:956:μ
GREEK SMALL LETTER NU:'ED:957:ν
GREEK SMALL LETTER XI:'EE:958:ξ
GREEK SMALL LETTER OMICRON:'EF:959:ο
GREEK SMALL LETTER PI:'F0:960:π
GREEK SMALL LETTER RHO:'F1:961:ρ
GREEK SMALL LETTER FINAL SIGMA:'F2:962:ς
GREEK SMALL LETTER SIGMA:'F3:963:σ
GREEK SMALL LETTER TAU:'F4:964:τ
GREEK SMALL LETTER UPSILON:'F5:965:υ
GREEK SMALL LETTER PHI:'F6:966:φ
GREEK SMALL LETTER CHI:'F7:967:χ
GREEK SMALL LETTER PSI:'F8:968:ψ
GREEK SMALL LETTER OMEGA:'F9:969:ω
GREEK SMALL LETTER IOTA WITH DIALYTIKA:'FA:970:ϊ
GREEK SMALL LETTER UPSILON WITH DIALYTIKA:'FB:971:ϋ
GREEK SMALL LETTER OMICRON WITH TONOS:'FC:972:ό
GREEK SMALL LETTER UPSILON WITH TONOS:'FD:973:ύ
GREEK SMALL LETTER OMEGA WITH TONOS:'FE:974:ώ
</ansicpg1253>
<ansicpg1254>
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
LATIN SMALL LETTER F WITH HOOK:'83:402:ƒ
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
MODIFIER LETTER CIRCUMFLEX ACCENT:'88:710:ˆ
PER MILLE SIGN:'89:8240:‰
LATIN CAPITAL LETTER S WITH CARON:'8A:352:Š
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
LATIN CAPITAL LIGATURE OE:'8C:338:Œ
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
SMALL TILDE:'98:732:˜
TRADE MARK SIGN:'99:8482:™
LATIN SMALL LETTER S WITH CARON:'9A:353:š
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
LATIN SMALL LIGATURE OE:'9C:339:œ
LATIN CAPITAL LETTER Y WITH DIAERESIS:'9F:376:Ÿ
NO-BREAK SPACE:'A0:160: 
INVERTED EXCLAMATION MARK:'A1:161:¡
CENT SIGN:'A2:162:¢
POUND SIGN:'A3:163:£
CURRENCY SIGN:'A4:164:¤
YEN SIGN:'A5:165:¥
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
DIAERESIS:'A8:168:¨
COPYRIGHT SIGN:'A9:169:©
FEMININE ORDINAL INDICATOR:'AA:170:ª
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
MACRON:'AF:175:¯
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
SUPERSCRIPT TWO:'B2:178:²
SUPERSCRIPT THREE:'B3:179:³
ACUTE ACCENT:'B4:180:´
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
CEDILLA:'B8:184:¸
SUPERSCRIPT ONE:'B9:185:¹
MASCULINE ORDINAL INDICATOR:'BA:186:º
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
VULGAR FRACTION ONE QUARTER:'BC:188:¼
VULGAR FRACTION ONE HALF:'BD:189:½
VULGAR FRACTION THREE QUARTERS:'BE:190:¾
INVERTED QUESTION MARK:'BF:191:¿
LATIN CAPITAL LETTER A WITH GRAVE:'C0:192:À
LATIN CAPITAL LETTER A WITH ACUTE:'C1:193:Á
LATIN CAPITAL LETTER A WITH CIRCUMFLEX:'C2:194:Â
LATIN CAPITAL LETTER A WITH TILDE:'C3:195:Ã
LATIN CAPITAL LETTER A WITH DIAERESIS:'C4:196:Ä
LATIN CAPITAL LETTER A WITH RING ABOVE:'C5:197:Å
LATIN CAPITAL LETTER AE:'C6:198:Æ
LATIN CAPITAL LETTER C WITH CEDILLA:'C7:199:Ç
LATIN CAPITAL LETTER E WITH GRAVE:'C8:200:È
LATIN CAPITAL LETTER E WITH ACUTE:'C9:201:É
LATIN CAPITAL LETTER E WITH CIRCUMFLEX:'CA:202:Ê
LATIN CAPITAL LETTER E WITH DIAERESIS:'CB:203:Ë
LATIN CAPITAL LETTER I WITH GRAVE:'CC:204:Ì
LATIN CAPITAL LETTER I WITH ACUTE:'CD:205:Í
LATIN CAPITAL LETTER I WITH CIRCUMFLEX:'CE:206:Î
LATIN CAPITAL LETTER I WITH DIAERESIS:'CF:207:Ï
LATIN CAPITAL LETTER G WITH BREVE:'D0:286:Ğ
LATIN CAPITAL LETTER N WITH TILDE:'D1:209:Ñ
LATIN CAPITAL LETTER O WITH GRAVE:'D2:210:Ò
LATIN CAPITAL LETTER O WITH ACUTE:'D3:211:Ó
LATIN CAPITAL LETTER O WITH CIRCUMFLEX:'D4:212:Ô
LATIN CAPITAL LETTER O WITH TILDE:'D5:213:Õ
LATIN CAPITAL LETTER O WITH DIAERESIS:'D6:214:Ö
MULTIPLICATION SIGN:'D7:215:×
LATIN CAPITAL LETTER O WITH STROKE:'D8:216:Ø
LATIN CAPITAL LETTER U WITH GRAVE:'D9:217:Ù
LATIN CAPITAL LETTER U WITH ACUTE:'DA:218:Ú
LATIN CAPITAL LETTER U WITH CIRCUMFLEX:'DB:219:Û
LATIN CAPITAL LETTER U WITH DIAERESIS:'DC:220:Ü
LATIN CAPITAL LETTER I WITH DOT ABOVE:'DD:304:İ
LATIN CAPITAL LETTER S WITH CEDILLA:'DE:350:Ş
LATIN SMALL LETTER SHARP S (GERMAN):'DF:223:ß
LATIN SMALL LETTER A WITH GRAVE:'E0:224:à
LATIN SMALL LETTER A WITH ACUTE:'E1:225:á
LATIN SMALL LETTER A WITH CIRCUMFLEX:'E2:226:â
LATIN SMALL LETTER A WITH TILDE:'E3:227:ã
LATIN SMALL LETTER A WITH DIAERESIS:'E4:228:ä
LATIN SMALL LETTER A WITH RING ABOVE:'E5:229:å
LATIN SMALL LETTER AE:'E6:230:æ
LATIN SMALL LETTER C WITH CEDILLA:'E7:231:ç
LATIN SMALL LETTER E WITH GRAVE:'E8:232:è
LATIN SMALL LETTER E WITH ACUTE:'E9:233:é
LATIN SMALL LETTER E WITH CIRCUMFLEX:'EA:234:ê
LATIN SMALL LETTER E WITH DIAERESIS:'EB:235:ë
LATIN SMALL LETTER I WITH GRAVE:'EC:236:ì
LATIN SMALL LETTER I WITH ACUTE:'ED:237:í
LATIN SMALL LETTER I WITH CIRCUMFLEX:'EE:238:î
LATIN SMALL LETTER I WITH DIAERESIS:'EF:239:ï
LATIN SMALL LETTER G WITH BREVE:'F0:287:ğ
LATIN SMALL LETTER N WITH TILDE:'F1:241:ñ
LATIN SMALL LETTER O WITH GRAVE:'F2:242:ò
LATIN SMALL LETTER O WITH ACUTE:'F3:243:ó
LATIN SMALL LETTER O WITH CIRCUMFLEX:'F4:244:ô
LATIN SMALL LETTER O WITH TILDE:'F5:245:õ
LATIN SMALL LETTER O WITH DIAERESIS:'F6:246:ö
DIVISION SIGN:'F7:247:÷
LATIN SMALL LETTER O WITH STROKE:'F8:248:ø
LATIN SMALL LETTER U WITH GRAVE:'F9:249:ù
LATIN SMALL LETTER U WITH ACUTE:'FA:250:ú
LATIN SMALL LETTER U WITH CIRCUMFLEX:'FB:251:û
LATIN SMALL LETTER U WITH DIAERESIS:'FC:252:ü
LATIN SMALL LETTER DOTLESS I:'FD:305:ı
LATIN SMALL LETTER S WITH CEDILLA:'FE:351:ş
LATIN SMALL LETTER Y WITH DIAERESIS:'FF:255:ÿ
</ansicpg1254>
<ansicpg1255>
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
LATIN SMALL LETTER F WITH HOOK:'83:402:ƒ
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
PER MILLE SIGN:'89:8240:‰
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
TRADE MARK SIGN:'99:8482:™
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
NO-BREAK SPACE:'A0:160: 
CENT SIGN:'A2:162:¢
POUND SIGN:'A3:163:£
CURRENCY SIGN:'A4:164:¤
YEN SIGN:'A5:165:¥
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
DIAERESIS:'A8:168:¨
COPYRIGHT SIGN:'A9:169:©
MULTIPLICATION SIGN:'AA:215:×
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
OVERLINE:'AF:8254:‾
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
SUPERSCRIPT TWO:'B2:178:²
SUPERSCRIPT THREE:'B3:179:³
ACUTE ACCENT:'B4:180:´
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
CEDILLA:'B8:184:¸
SUPERSCRIPT ONE:'B9:185:¹
DIVISION SIGN:'BA:247:÷
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
VULGAR FRACTION ONE QUARTER:'BC:188:¼
VULGAR FRACTION ONE HALF:'BD:189:½
VULGAR FRACTION THREE QUARTERS:'BE:190:¾
DOUBLE LOW LINE:'DF:8215:‗
HEBREW LETTER ALEF:'E0:1488:א
HEBREW LETTER BET:'E1:1489:ב
HEBREW LETTER GIMEL:'E2:1490:ג
HEBREW LETTER DALET:'E3:1491:ד
HEBREW LETTER HE:'E4:1492:ה
HEBREW LETTER VAV:'E5:1493:ו
HEBREW LETTER ZAYIN:'E6:1494:ז
HEBREW LETTER HET:'E7:1495:ח
HEBREW LETTER TET:'E8:1496:ט
HEBREW LETTER YOD:'E9:1497:י
HEBREW LETTER FINAL KAF:'EA:1498:ך
HEBREW LETTER KAF:'EB:1499:כ
HEBREW LETTER LAMED:'EC:1500:ל
HEBREW LETTER FINAL MEM:'ED:1501:ם
HEBREW LETTER MEM:'EE:1502:מ
HEBREW LETTER FINAL NUN:'EF:1503:ן
HEBREW LETTER NUN:'F0:1504:נ
HEBREW LETTER SAMEKH:'F1:1505:ס
HEBREW LETTER AYIN:'F2:1506:ע
HEBREW LETTER FINAL PE:'F3:1507:ף
HEBREW LETTER PE:'F4:1508:פ
HEBREW LETTER FINAL TSADI:'F5:1509:ץ
HEBREW LETTER TSADI:'F6:1510:צ
HEBREW LETTER QOF:'F7:1511:ק
HEBREW LETTER RESH:'F8:1512:ר
HEBREW LETTER SHIN:'F9:1513:ש
HEBREW LETTER TAV:'FA:1514:ת
LEFT-TO-RIGHT MARK:'FD:8206:‎
RIGHT-TO-LEFT MARK:'FE:8207:‏
NUL:'00:0:�
</ansicpg1255>
<ansicpg1256>
ARABIC COMMA:'80:1548:،
ARABIC-INDIC DIGIT ZERO:'81:1632:٠
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
ARABIC-INDIC DIGIT ONE:'83:1633:١
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
ARABIC-INDIC DIGIT TWO:'88:1634:٢
ARABIC-INDIC DIGIT THREE:'89:1635:٣
ARABIC-INDIC DIGIT FOUR:'8A:1636:٤
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
ARABIC-INDIC DIGIT FIVE:'8C:1637:٥
ARABIC-INDIC DIGIT SIX:'8D:1638:٦
ARABIC-INDIC DIGIT SEVEN:'8E:1639:٧
ARABIC-INDIC DIGIT EIGHT:'8F:1640:٨
ARABIC-INDIC DIGIT NINE:'90:1641:٩
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
ARABIC SEMICOLON:'98:1563:؛
TRADE MARK SIGN:'99:8482:™
ARABIC QUESTION MARK:'9A:1567:؟
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
ARABIC LETTER HAMZA:'9C:1569:ء
ARABIC LETTER ALEF WITH MADDA ABOVE:'9D:1570:آ
ARABIC LETTER ALEF WITH HAMZA ABOVE:'9E:1571:أ
LATIN CAPITAL LETTER Y WITH DIAERESIS:'9F:376:Ÿ
NO-BREAK SPACE:'A0:160: 
ARABIC LETTER WAW WITH HAMZA ABOVE:'A1:1572:ؤ
ARABIC LETTER ALEF WITH HAMZA BELOW:'A2:1573:إ
POUND SIGN:'A3:163:£
CURRENCY SIGN:'A4:164:¤
ARABIC LETTER YEH WITH HAMZA ABOVE:'A5:1574:ئ
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
ARABIC LETTER ALEF:'A8:1575:ا
COPYRIGHT SIGN:'A9:169:©
ARABIC LETTER BEH:'AA:1576:ب
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
ARABIC LETTER PEH:'AF:1662:پ
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
ARABIC LETTER TEH MARBUTA:'B2:1577:ة
ARABIC LETTER TEH:'B3:1578:ت
ARABIC LETTER THEH:'B4:1579:ث
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
ARABIC LETTER JEEM:'B8:1580:ج
ARABIC LETTER TCHEH:'B9:1670:چ
ARABIC LETTER HAH:'BA:1581:ح
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
ARABIC LETTER KHAH:'BC:1582:خ
ARABIC LETTER DAL:'BD:1583:د
ARABIC LETTER THAL:'BE:1584:ذ
ARABIC LETTER REH:'BF:1585:ر
LATIN CAPITAL LETTER A WITH GRAVE:'C0:192:À
ARABIC LETTER ZAIN:'C1:1586:ز
LATIN CAPITAL LETTER A WITH CIRCUMFLEX:'C2:194:Â
ARABIC LETTER JEH:'C3:1688:ژ
ARABIC LETTER SEEN:'C4:1587:س
ARABIC LETTER SHEEN:'C5:1588:ش
ARABIC LETTER SAD:'C6:1589:ص
LATIN CAPITAL LETTER C WITH CEDILLA:'C7:199:Ç
LATIN CAPITAL LETTER E WITH GRAVE:'C8:200:È
LATIN CAPITAL LETTER E WITH ACUTE:'C9:201:É
LATIN CAPITAL LETTER E WITH CIRCUMFLEX:'CA:202:Ê
LATIN CAPITAL LETTER E WITH DIAERESIS:'CB:203:Ë
ARABIC LETTER DAD:'CC:1590:ض
ARABIC LETTER TAH:'CD:1591:ط
LATIN CAPITAL LETTER I WITH CIRCUMFLEX:'CE:206:Î
LATIN CAPITAL LETTER I WITH DIAERESIS:'CF:207:Ï
ARABIC LETTER ZAH:'D0:1592:ظ
ARABIC LETTER AIN:'D1:1593:ع
ARABIC LETTER GHAIN:'D2:1594:غ
ARABIC TATWEEL:'D3:1600:ـ
LATIN CAPITAL LETTER O WITH CIRCUMFLEX:'D4:212:Ô
ARABIC LETTER FEH:'D5:1601:ف
ARABIC LETTER QAF:'D6:1602:ق
MULTIPLICATION SIGN:'D7:215:×
ARABIC LETTER KAF:'D8:1603:ك
LATIN CAPITAL LETTER U WITH GRAVE:'D9:217:Ù
ARABIC LETTER GAF:'DA:1711:گ
LATIN CAPITAL LETTER U WITH CIRCUMFLEX:'DB:219:Û
LATIN CAPITAL LETTER U WITH DIAERESIS:'DC:220:Ü
ARABIC LETTER LAM:'DD:1604:ل
ARABIC LETTER MEEM:'DE:1605:م
ARABIC LETTER NOON:'DF:1606:ن
LATIN SMALL LETTER A WITH GRAVE:'E0:224:à
ARABIC LETTER HEH:'E1:1607:ه
LATIN SMALL LETTER A WITH CIRCUMFLEX:'E2:226:â
ARABIC LETTER HAH WITH HAMZA ABOVE:'E3:1665:ځ
ARABIC LETTER WAW:'E4:1608:و
ARABIC LETTER ALEF MAKSURA:'E5:1609:ى
ARABIC LETTER YEH:'E6:1610:ي
LATIN SMALL LETTER C WITH CEDILLA:'E7:231:ç
LATIN SMALL LETTER E WITH GRAVE:'E8:232:è
LATIN SMALL LETTER E WITH ACUTE:'E9:233:é
LATIN SMALL LETTER E WITH CIRCUMFLEX:'EA:234:ê
LATIN SMALL LETTER E WITH DIAERESIS:'EB:235:ë
ARABIC FATHATAN:'EC:1611:ً
ARABIC DAMMATAN:'ED:1612:ٌ
LATIN SMALL LETTER I WITH CIRCUMFLEX:'EE:238:î
LATIN SMALL LETTER I WITH DIAERESIS:'EF:239:ï
ARABIC KASRATAN:'F0:1613:ٍ
ARABIC FATHA:'F1:1614:َ
ARABIC DAMMA:'F2:1615:ُ
ARABIC KASRA:'F3:1616:ِ
LATIN SMALL LETTER O WITH CIRCUMFLEX:'F4:244:ô
ARABIC SHADDA:'F5:1617:ّ
ARABIC SUKUN:'F6:1618:ْ
DIVISION SIGN:'F7:247:÷
LATIN SMALL LETTER U WITH GRAVE:'F9:249:ù
LATIN SMALL LETTER U WITH CIRCUMFLEX:'FB:251:û
LATIN SMALL LETTER U WITH DIAERESIS:'FC:252:ü
LEFT-TO-RIGHT MARK:'FD:8206:‎
RIGHT-TO-LEFT MARK:'FE:8207:‏
LATIN SMALL LETTER Y WITH DIAERESIS:'FF:255:ÿ
</ansicpg1256>
<ansicpg1257>
SINGLE LOW-9 QUOTATION MARK:'82:8218:‚
DOUBLE LOW-9 QUOTATION MARK:'84:8222:„
HORIZONTAL ELLIPSIS:'85:8230:…
DAGGER:'86:8224:†
DOUBLE DAGGER:'87:8225:‡
PER MILLE SIGN:'89:8240:‰
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'8B:8249:‹
LEFT SINGLE QUOTATION MARK:'91:8216:‘
RIGHT SINGLE QUOTATION MARK:'92:8217:’
LEFT DOUBLE QUOTATION MARK:'93:8220:“
RIGHT DOUBLE QUOTATION MARK:'94:8221:”
BULLET:'95:8226:•
EN DASH:'96:8211:–
EM DASH:'97:8212:—
TRADE MARK SIGN:'99:8482:™
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'9B:8250:›
NO-BREAK SPACE:'A0:160: 
CENT SIGN:'A2:162:¢
POUND SIGN:'A3:163:£
CURRENCY SIGN:'A4:164:¤
BROKEN BAR:'A6:166:¦
SECTION SIGN:'A7:167:§
LATIN CAPITAL LETTER O WITH STROKE:'A8:216:Ø
COPYRIGHT SIGN:'A9:169:©
LATIN CAPITAL LETTER R WITH CEDILLA:'AA:342:Ŗ
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'AB:171:«
NOT SIGN:'AC:172:¬
SOFT HYPHEN:'AD:173:­
REGISTERED SIGN:'AE:174:®
LATIN CAPITAL LETTER AE:'AF:198:Æ
DEGREE SIGN:'B0:176:°
PLUS-MINUS SIGN:'B1:177:±
SUPERSCRIPT TWO:'B2:178:²
SUPERSCRIPT THREE:'B3:179:³
MICRO SIGN:'B5:181:µ
PILCROW SIGN:'B6:182:¶
MIDDLE DOT:'B7:183:·
LATIN SMALL LETTER O WITH STROKE:'B8:248:ø
SUPERSCRIPT ONE:'B9:185:¹
LATIN SMALL LETTER R WITH CEDILLA:'BA:343:ŗ
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'BB:187:»
VULGAR FRACTION ONE QUARTER:'BC:188:¼
VULGAR FRACTION ONE HALF:'BD:189:½
VULGAR FRACTION THREE QUARTERS:'BE:190:¾
LATIN SMALL LETTER AE:'BF:230:æ
LATIN CAPITAL LETTER A WITH OGONEK:'C0:260:Ą
LATIN CAPITAL LETTER I WITH OGONEK:'C1:302:Į
LATIN CAPITAL LETTER A WITH MACRON:'C2:256:Ā
LATIN CAPITAL LETTER C WITH ACUTE:'C3:262:Ć
LATIN CAPITAL LETTER A WITH DIAERESIS:'C4:196:Ä
LATIN CAPITAL LETTER A WITH RING ABOVE:'C5:197:Å
LATIN CAPITAL LETTER E WITH OGONEK:'C6:280:Ę
LATIN CAPITAL LETTER E WITH MACRON:'C7:274:Ē
LATIN CAPITAL LETTER C WITH CARON:'C8:268:Č
LATIN CAPITAL LETTER E WITH ACUTE:'C9:201:É
LATIN CAPITAL LETTER Z WITH ACUTE:'CA:377:Ź
LATIN CAPITAL LETTER E WITH DOT ABOVE:'CB:278:Ė
LATIN CAPITAL LETTER G WITH CEDILLA:'CC:290:Ģ
LATIN CAPITAL LETTER K WITH CEDILLA:'CD:310:Ķ
LATIN CAPITAL LETTER I WITH MACRON:'CE:298:Ī
LATIN CAPITAL LETTER L WITH CEDILLA:'CF:315:Ļ
LATIN CAPITAL LETTER S WITH CARON:'D0:352:Š
LATIN CAPITAL LETTER N WITH ACUTE:'D1:323:Ń
LATIN CAPITAL LETTER N WITH CEDILLA:'D2:325:Ņ
LATIN CAPITAL LETTER O WITH ACUTE:'D3:211:Ó
LATIN CAPITAL LETTER O WITH MACRON:'D4:332:Ō
LATIN CAPITAL LETTER O WITH TILDE:'D5:213:Õ
LATIN CAPITAL LETTER O WITH DIAERESIS:'D6:214:Ö
MULTIPLICATION SIGN:'D7:215:×
LATIN CAPITAL LETTER U WITH OGONEK:'D8:370:Ų
LATIN CAPITAL LETTER L WITH STROKE:'D9:321:Ł
LATIN CAPITAL LETTER S WITH ACUTE:'DA:346:Ś
LATIN CAPITAL LETTER U WITH MACRON:'DB:362:Ū
LATIN CAPITAL LETTER U WITH DIAERESIS:'DC:220:Ü
LATIN CAPITAL LETTER Z WITH DOT ABOVE:'DD:379:Ż
LATIN CAPITAL LETTER Z WITH CARON:'DE:381:Ž
LATIN SMALL LETTER SHARP S (GERMAN):'DF:223:ß
LATIN SMALL LETTER A WITH OGONEK:'E0:261:ą
LATIN SMALL LETTER I WITH OGONEK:'E1:303:į
LATIN SMALL LETTER A WITH MACRON:'E2:257:ā
LATIN SMALL LETTER C WITH ACUTE:'E3:263:ć
LATIN SMALL LETTER A WITH DIAERESIS:'E4:228:ä
LATIN SMALL LETTER A WITH RING ABOVE:'E5:229:å
LATIN SMALL LETTER E WITH OGONEK:'E6:281:ę
LATIN SMALL LETTER E WITH MACRON:'E7:275:ē
LATIN SMALL LETTER C WITH CARON:'E8:269:č
LATIN SMALL LETTER E WITH ACUTE:'E9:233:é
LATIN SMALL LETTER Z WITH ACUTE:'EA:378:ź
LATIN SMALL LETTER E WITH DOT ABOVE:'EB:279:ė
LATIN SMALL LETTER G WITH CEDILLA:'EC:291:ģ
LATIN SMALL LETTER K WITH CEDILLA:'ED:311:ķ
LATIN SMALL LETTER I WITH MACRON:'EE:299:ī
LATIN SMALL LETTER L WITH CEDILLA:'EF:316:ļ
LATIN SMALL LETTER S WITH CARON:'F0:353:š
LATIN SMALL LETTER N WITH ACUTE:'F1:324:ń
LATIN SMALL LETTER N WITH CEDILLA:'F2:326:ņ
LATIN SMALL LETTER O WITH ACUTE:'F3:243:ó
LATIN SMALL LETTER O WITH MACRON:'F4:333:ō
LATIN SMALL LETTER O WITH TILDE:'F5:245:õ
LATIN SMALL LETTER O WITH DIAERESIS:'F6:246:ö
DIVISION SIGN:'F7:247:÷
LATIN SMALL LETTER U WITH OGONEK:'F8:371:ų
LATIN SMALL LETTER L WITH STROKE:'F9:322:ł
LATIN SMALL LETTER S WITH ACUTE:'FA:347:ś
LATIN SMALL LETTER U WITH MACRON:'FB:363:ū
LATIN SMALL LETTER U WITH DIAERESIS:'FC:252:ü
LATIN SMALL LETTER Z WITH DOT ABOVE:'FD:380:ż
LATIN SMALL LETTER Z WITH CARON:'FE:382:ž
</ansicpg1257>
#mac_roman
<ansicpg10000>
LATIN CAPITAL LETTER A WITH DIAERESIS:'80:196:Ä
LATIN CAPITAL LETTER A WITH RING ABOVE:'81:197:Å
LATIN CAPITAL LETTER C WITH CEDILLA:'82:199:Ç
LATIN CAPITAL LETTER E WITH ACUTE:'83:201:É
LATIN CAPITAL LETTER N WITH TILDE:'84:209:Ñ
LATIN CAPITAL LETTER O WITH DIAERESIS:'85:214:Ö
LATIN CAPITAL LETTER U WITH DIAERESIS:'86:220:Ü
LATIN SMALL LETTER A WITH ACUTE:'87:225:á
LATIN SMALL LETTER A WITH GRAVE:'88:224:à
LATIN SMALL LETTER A WITH CIRCUMFLEX:'89:226:â
LATIN SMALL LETTER A WITH DIAERESIS:'8A:228:ä
LATIN SMALL LETTER A WITH TILDE:'8B:227:ã
LATIN SMALL LETTER A WITH RING ABOVE:'8C:229:å
LATIN SMALL LETTER C WITH CEDILLA:'8D:231:ç
LATIN SMALL LETTER E WITH ACUTE:'8E:233:é
LATIN SMALL LETTER E WITH GRAVE:'8F:232:è
LATIN SMALL LETTER E WITH CIRCUMFLEX:'90:234:ê
LATIN SMALL LETTER E WITH DIAERESIS:'91:235:ë
LATIN SMALL LETTER I WITH ACUTE:'92:237:í
LATIN SMALL LETTER I WITH GRAVE:'93:236:ì
LATIN SMALL LETTER I WITH CIRCUMFLEX:'94:238:î
LATIN SMALL LETTER I WITH DIAERESIS:'95:239:ï
LATIN SMALL LETTER N WITH TILDE:'96:241:ñ
LATIN SMALL LETTER O WITH ACUTE:'97:243:ó
LATIN SMALL LETTER O WITH GRAVE:'98:242:ò
LATIN SMALL LETTER O WITH CIRCUMFLEX:'99:244:ô
LATIN SMALL LETTER O WITH DIAERESIS:'9A:246:ö
LATIN SMALL LETTER O WITH TILDE:'9B:245:õ
LATIN SMALL LETTER U WITH ACUTE:'9C:250:ú
LATIN SMALL LETTER U WITH GRAVE:'9D:249:ù
LATIN SMALL LETTER U WITH CIRCUMFLEX:'9E:251:û
LATIN SMALL LETTER U WITH DIAERESIS:'9F:252:ü
DAGGER:'A0:8224:†
DEGREE SIGN:'A1:176:°
CENT SIGN:'A2:162:¢
POUND SIGN:'A3:163:£
SECTION SIGN:'A4:167:§
BULLET:'A5:8226:•
PILCROW SIGN:'A6:182:¶
LATIN SMALL LETTER SHARP S:'A7:223:ß
REGISTERED SIGN:'A8:174:®
COPYRIGHT SIGN:'A9:169:©
TRADE MARK SIGN:'AA:8482:™
ACUTE ACCENT:'AB:180:´
DIAERESIS:'AC:168:¨
NOT EQUAL TO:'AD:8800:≠
LATIN CAPITAL LETTER AE:'AE:198:Æ
LATIN CAPITAL LETTER O WITH STROKE:'AF:216:Ø
INFINITY:'B0:8734:∞
PLUS-MINUS SIGN:'B1:177:±
LESS-THAN OR EQUAL TO:'B2:8804:≤
GREATER-THAN OR EQUAL TO:'B3:8805:≥
YEN SIGN:'B4:165:¥
MICRO SIGN:'B5:181:µ
PARTIAL DIFFERENTIAL:'B6:8706:∂
BULLET:'B7:8226:•
N-ARY PRODUCT:'B8:8719:∏
GREEK SMALL LETTER PI:'B9:960:π
INTEGRAL:'BA:8747:∫
FEMININE ORDINAL INDICATOR:'BB:170:ª
MASCULINE ORDINAL INDICATOR:'BC:186:º
GREEK CAPITAL LETTER OMEGA:'BD:937:Ω
LATIN SMALL LETTER AE:'BE:230:æ
LATIN SMALL LETTER O WITH STROKE:'BF:248:ø
INVERTED QUESTION MARK:'C0:191:¿
INVERTED EXCLAMATION MARK:'C1:161:¡
NOT SIGN:'C2:172:¬
SQUARE ROOT:'C3:8730:√
LATIN SMALL LETTER F WITH HOOK:'C4:402:ƒ
ALMOST EQUAL TO:'C5:8776:≈
INCREMENT:'C6:8710:∆
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK:'C7:171:«
RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK:'C8:187:»
HORIZONTAL ELLIPSIS:'C9:8230:…
NO-BREAK SPACE:'CA:160: 
LATIN CAPITAL LETTER A WITH GRAVE:'CB:192:À
LATIN CAPITAL LETTER A WITH TILDE:'CC:195:Ã
LATIN CAPITAL LETTER O WITH TILDE:'CD:213:Õ
LATIN CAPITAL LIGATURE OE:'CE:338:Œ
LATIN SMALL LIGATURE OE:'CF:339:œ
EN DASH:'D0:8211:–
EM DASH:'D1:8212:—
LEFT DOUBLE QUOTATION MARK:'D2:8220:“
RIGHT DOUBLE QUOTATION MARK:'D3:8221:”
LEFT SINGLE QUOTATION MARK:'D4:8216:‘
RIGHT SINGLE QUOTATION MARK:'D5:8217:’
DIVISION SIGN:'D6:247:÷
LOZENGE:'D7:9674:◊
LATIN SMALL LETTER Y WITH DIAERESIS:'D8:255:ÿ
LATIN CAPITAL LETTER Y WITH DIAERESIS:'D9:376:Ÿ
FRACTION SLASH:'DA:8260:⁄
EURO SIGN:'DB:8364:€
SINGLE LEFT-POINTING ANGLE QUOTATION MARK:'DC:8249:‹
SINGLE RIGHT-POINTING ANGLE QUOTATION MARK:'DD:8250:›
LATIN SMALL LIGATURE FI:'DE:64257:fi
LATIN SMALL LIGATURE FL:'DF:64258:fl
DOUBLE DAGGER:'E0:8225:‡
MIDDLE DOT:'E1:183:·
SINGLE LOW-9 QUOTATION MARK:'E2:8218:‚
DOUBLE LOW-9 QUOTATION MARK:'E3:8222:„
PER MILLE SIGN:'E4:8240:‰
LATIN CAPITAL LETTER A WITH CIRCUMFLEX:'E5:194:Â
LATIN CAPITAL LETTER E WITH CIRCUMFLEX:'E6:202:Ê
LATIN CAPITAL LETTER A WITH ACUTE:'E7:193:Á
LATIN CAPITAL LETTER E WITH DIAERESIS:'E8:203:Ë
LATIN CAPITAL LETTER E WITH GRAVE:'E9:200:È
LATIN CAPITAL LETTER I WITH ACUTE:'EA:205:Í
LATIN CAPITAL LETTER I WITH CIRCUMFLEX:'EB:206:Î
LATIN CAPITAL LETTER I WITH DIAERESIS:'EC:207:Ï
LATIN CAPITAL LETTER I WITH GRAVE:'ED:204:Ì
LATIN CAPITAL LETTER O WITH ACUTE:'EE:211:Ó
LATIN CAPITAL LETTER O WITH CIRCUMFLEX:'EF:212:Ô
APPLE LOGO:'F0:63743:
LATIN CAPITAL LETTER O WITH GRAVE:'F1:210:Ò
LATIN CAPITAL LETTER U WITH ACUTE:'F2:218:Ú
LATIN CAPITAL LETTER U WITH CIRCUMFLEX:'F3:219:Û
LATIN CAPITAL LETTER U WITH GRAVE:'F4:217:Ù
LATIN SMALL LETTER DOTLESS I:'F5:305:ı
MODIFIER LETTER CIRCUMFLEX ACCENT:'F6:710:ˆ
SMALL TILDE:'F7:732:˜
MACRON:'F8:175:¯
BREVE:'F9:728:˘
DOT ABOVE:'FA:729:˙
RING ABOVE:'FB:730:˚
CEDILLA:'FC:184:¸
DOUBLE ACUTE ACCENT:'FD:733:˝
OGONEK:'FE:731:˛
CARON:'FF:711:ˇ
</ansicpg10000>
<caps_hex>
LATIN SMALL LETTER A:'61:97:'41
LATIN SMALL LETTER B:'62:98:'42
LATIN SMALL LETTER C:'63:99:'43
LATIN SMALL LETTER D:'64:100:'44
LATIN SMALL LETTER E:'65:101:'45
LATIN SMALL LETTER F:'66:102:'46
LATIN SMALL LETTER G:'67:103:'47
LATIN SMALL LETTER H:'68:104:'48
LATIN SMALL LETTER I:'69:105:'49
LATIN SMALL LETTER J:'6A:106:'4a
LATIN SMALL LETTER K:'6B:107:'4b
LATIN SMALL LETTER L:'6C:108:'4c
LATIN SMALL LETTER M:'6D:109:'4d
LATIN SMALL LETTER N:'6E:110:'4e
LATIN SMALL LETTER O:'6F:111:'4f
LATIN SMALL LETTER P:'70:112:'50
LATIN SMALL LETTER Q:'71:113:'51
LATIN SMALL LETTER R:'72:114:'52
LATIN SMALL LETTER S:'73:115:'53
LATIN SMALL LETTER T:'74:116:'54
LATIN SMALL LETTER U:'75:117:'55
LATIN SMALL LETTER V:'76:118:'56
LATIN SMALL LETTER W:'77:119:'57
LATIN SMALL LETTER X:'78:120:'58
LATIN SMALL LETTER Y:'79:121:'59
LATIN SMALL LETTER Z:'7A:122:'5a
NO UNICODE VALUE:'87:135:\'E7
NO UNICODE VALUE:'8E:142:\'83
NO UNICODE VALUE:'EA:234:\'92
NO UNICODE VALUE:'97:151:\'EE
NO UNICODE VALUE:'9C:156:\'F2
NO UNICODE VALUE:'88:136:\'CB
NO UNICODE VALUE:'8F:143:\'E9
NO UNICODE VALUE:'93:147:\'ED
NO UNICODE VALUE:'98:152:\'F1
NO UNICODE VALUE:'9D:157:\'F4
NO UNICODE VALUE:'89:137:\'D5
NO UNICODE VALUE:'90:144:\'E6
NO UNICODE VALUE:'94:148:\'EB
NO UNICODE VALUE:'99:153:\'EF
NO UNICODE VALUE:'9E:158:\'F3
NO UNICODE VALUE:'BF:191:\'AF
NO UNICODE VALUE:'96:150:\'84
NO UNICODE VALUE:'9B:155:\'CD
NO UNICODE VALUE:'8B:139:\'CC
NO UNICODE VALUE:'8A:138:\'80
NO UNICODE VALUE:'91:145:\'E8
NO UNICODE VALUE:'95:149:\'EC
NO UNICODE VALUE:'9A:154:\'85
NO UNICODE VALUE:'9F:159:\'86
NO UNICODE VALUE:'8D:141:\'82
NO UNICODE VALUE:'8C:140:\'81
</caps_hex>
<caps_letters>
LATIN SMALL LETTER A:a:97:A
LATIN SMALL LETTER B:b:98:B
LATIN SMALL LETTER C:c:99:C
LATIN SMALL LETTER D:d:100:D
LATIN SMALL LETTER E:e:101:E
LATIN SMALL LETTER F:f:102:F
LATIN SMALL LETTER G:g:103:G
LATIN SMALL LETTER H:h:104:H
LATIN SMALL LETTER I:i:105:I
LATIN SMALL LETTER J:j:106:J
LATIN SMALL LETTER K:k:107:K
LATIN SMALL LETTER L:l:108:L
LATIN SMALL LETTER M:m:109:M
LATIN SMALL LETTER N:n:110:N
LATIN SMALL LETTER O:o:111:O
LATIN SMALL LETTER P:p:112:P
LATIN SMALL LETTER Q:q:113:Q
LATIN SMALL LETTER R:r:114:R
LATIN SMALL LETTER S:s:115:S
LATIN SMALL LETTER T:t:116:T
LATIN SMALL LETTER U:u:117:U
LATIN SMALL LETTER V:v:118:V
LATIN SMALL LETTER W:w:119:W
LATIN SMALL LETTER X:x:120:X
LATIN SMALL LETTER Y:y:121:Y
LATIN SMALL LETTER Z:z:122:Z
</caps_letters>
<SYMBOL>
MY UNDEFINED SYMBOL:'C3:00:<udef_symbol num="195"/>
SPACE:'20:32:
EXCLAMATION MARK:'21:33:!
FOR ALL:'22:8704:∀
NUMBER SIGN:'23:35:#
THERE EXISTS:'24:8707:∃
PERCENTAGE SIGN:'25:37:%
AMPERSAND:'26:38:&
CONTAINS AS A MEMBER:'27:8715:∋
LEFT PARENTHESIS:'28:40:(
RIGHT PARENTHESIS:'29:41:)
ASTERISK OPERATOR:'2A:8727:∗
PLUS SIGN:'2B:43:+
COMMA:'2C:44:,
MINUS SIGN:'2D:8722:−
FULL STOP:'2E:46:.
DIVISION SLASH:'2F:8725:∕
DIGIT ZERO:'30:48:0
DIGIT ONE:'31:49:1
DIGIT TWO:'32:50:2
DIGIT THREE:'33:51:3
DIGIT FOUR:'34:52:4
DIGIT FIVE:'35:53:5
DIGIT SIX:'36:54:6
DIGIT SEVEN:'37:55:7
DIGIT EIGHT:'38:56:8
DIGIT NINE:'39:57:9
RATIO:'3A:8758:∶
SEMICOLON:'3B:59:;
LESS-THAN SIGN:'3C:60:<
EQUALS SIGN TO:'3D:61:=
GREATER-THAN SIGN:'3E:62:>
QUESTION MARK:'3F:63:?
APPROXIMATELY EQUAL TO:'40:8773:≅
GREEK CAPITAL LETTER ALPHA:'41:913:Α
GREEK CAPITAL LETTER BETA:'42:914:Β
GREEK CAPITAL LETTER CHI:'43:935:Χ
GREEK CAPITAL LETTER DELTA:'44:916:Δ
GREEK CAPITAL LETTER EPSILON:'45:917:Ε
GREEK CAPITAL LETTER PHI:'46:934:Φ
GREEK CAPITAL LETTER GAMMA:'47:915:Γ
GREEK CAPITAL LETTER ETA:'48:919:Η
GREEK CAPITAL LETTER IOTA:'49:921:Ι
GREEK THETA SYMBOL:'4A:977:ϑ
GREEK CAPITAL LETTER KAPPA:'4B:922:Κ
GREEK CAPITAL LETTER LAMBDA:'4C:923:Λ
GREEK CAPITAL LETTER MU:'4D:924:Μ
GREEK CAPITAL LETTER NU:'4E:925:Ν
GREEK CAPITAL LETTER OMICRON:'4F:927:Ο
GREEK CAPITAL LETTER PI:'50:928:Π
GREEK CAPITAL LETTER THETA:'51:920:Θ
GREEK CAPITAL LETTER RHO:'52:929:Ρ
GREEK CAPITAL LETTER SIGMA:'53:931:Σ
GREEK CAPITAL LETTER TAU:'54:932:Τ
GREEK CAPITAL LETTER UPSILON:'55:933:Υ
GREEK LETTER STIGMA:'56:986:Ϛ
GREEK CAPITAL LETTER OMEGA:'57:937:Ω
GREEK CAPITAL LETTER XI:'58:926:Ξ
GREEK CAPITAL LETTER PSI:'59:936:Ψ
GREEK CAPITAL LETTER ZETA:'5A:918:Ζ
LEFT SQUARE BRACKET:'5B:91:[
THEREFORE:'5C:8756:∴
RIGHT SQUARE BRACKET:'5D:93:]
UP TACK:'5E:8869:⊥
MODIFIER LETTER LOW MACRON:'5F:717:ˍ
MODIFIER LETTER MACRON:'60:713:ˉ
GREEK SMALL LETTER ALPHA:'61:945:α
GREEK SMALL LETTER BETA:'62:946:β
GREEK SMALL LETTER CHI:'63:967:χ
GREEK SMALL LETTER DELTA:'64:948:δ
GREEK SMALL LETTER EPSILON:'65:949:ε
GREEK PHI SYMBOL:'66:981:ϕ
GREEK SMALL LETTER GAMMA:'67:947:γ
GREEK SMALL LETTER ETA:'68:951:η
GREEK SMALL LETTER IOTA:'69:953:ι
GREEK SMALL LETTER PHI:'6A:966:φ
GREEK SMALL LETTER KAPPA:'6B:954:κ
GREEK SMALL LETTER LAMDA:'6C:955:λ
GREEK SMALL LETTER MU:'6D:956:μ
GREEK SMALL LETTER NU:'6E:957:ν
GREEK SMALL LETTER OMICRON:'6F:959:ο
GREEK SMALL LETTER PI:'70:960:π
GREEK SMALL LETTER THETA:'71:952:θ
GREEK SMALL LETTER RHO:'72:961:ρ
GREEK SMALL LETTER SIGMA:'73:963:σ
GREEK SMALL LETTER TAU:'74:964:τ
GREEK SMALL LETTER UPSILON:'75:965:υ
GREEK PI SYMBOL:'76:982:ϖ
GREEK SMALL LETTER OMEGA:'77:969:ω
GREEK SMALL LETTER XI:'78:958:ξ
GREEK SMALL LETTER PHI:'79:966:φ
GREEK SMALL LETTER ZETA:'7A:950:ζ
LEFT CURLY BRACKET:'7B:123:{
DIVIDES:'7C:8739:∣
RIGHT CURLY BRACKET:'7D:125:}
TILDE OPERATOR:'7E:8764:∼
GREEK UPSILON WITH HOOK SYMBOL:'A1:978:ϒ
COMBINING ACUTE TONE MARK:'A2:833:́
LESS THAN OR EQUAL TO:'A3:8804:≤
DIVISION SLASH:'A4:8725:∕
INFINITY:'A5:8734:∞
LATIN SMALL LETTER F WITH HOOK:'A6:402:ƒ
BLACK CLUB SUIT:'A7:9827:♣
BLACK DIAMOND SUIT:'A8:9830:♦
BLACK HEART SUIT:'A9:9829:♥
BLACK SPADE SUIT:'AA:9824:♠
LEFT RIGHT ARROW:'AB:8596:↔
LEFTWARDS ARROW:'AC:8592:←
UPWARDS ARROW:'AD:8593:↑
RIGHTWARDS ARROW:'AE:8594:→
DOWNWARDS ARROW:'AF:8595:↓
DEGREE SIGN:'B0:176:°
PLUS OR MINUS SIGN:'B1:177:±
DOUBLE ACUTE ACCENT:'B2:733:˝
GREATER THAN OR EQUAL TO:'B3:8805:≥
MULTIPLICATION SIGN:'B4:215:×
PROPORTIONAL TO:'B5:8733:∝
PARTIAL DIFFERENTIAL:'B6:8706:∂
BULLET:'B7:183:·
DIVISION:'B8:247:÷
NOT EQUAL TO:'B9:8800:≠
IDENTICAL TO:'BA:8801:≡
ALMOST EQUAL TO:'BB:8776:≈
MIDLINE HORIZONTAL ELLIPSIS:'BC:8943:⋯
DIVIDES:'BD:8739:∣
BOX DRAWINGS LIGHT HORIZONTAL:'BE:9472:─
DOWNWARDS ARROW WITH TIP LEFTWARDS:'BF:8626:↲
CIRCLED TIMES:'C4:8855:⊗
CIRCLED PLUS:'C5:8853:⊕
EMPTY SET:'C6:8709:∅
INTERSECTION:'C7:8745:∩
UNION:'C8:8746:∪
SUPERSET OF:'C9:8835:⊃
SUPERSET OF OR EQUAL TO:'CA:8839:⊇
NOT A SUBSET OF:'CB:8836:⊄
SUBSET OF:'CC:8834:⊂
SUBSET OR EQUAL TO:'CD:8838:⊆
ELEMENT OF:'CE:8712:∈
NOT AN ELEMENT OF:'CF:8713:∉
ANGLE:'D0:8736:∠
WHITE DOWN POINTING TRIANGLE:'D1:9661:▽
REGISTERED SIGN:'D2:174:®
COPYRIGHT:'D3:169:©
TRADEMARK SYMBOL:'D4:8482:™
N-ARY PRODUCT:'D5:8719:∏
SQUARE ROOT:'D6:8730:√
BULLET OPERATOR:'D7:8729:∙
NOT SIGN:'D8:172:¬
LOGICAL AND:'D9:8743:∧
LOGICAL OR:'DA:8744:∨
LEFT RIGHT DOUBLE ARROW:'DB:8660:⇔
LEFTWARDS DOUBLE ARROW:'DC:8656:⇐
UPWARDS DOUBLE ARROW:'DD:8657:⇑
RIGHTWARDS DOUBLE ARROW:'DE:8658:⇒
DOWNWARDS DOUBLE ARROW:'DF:8659:⇓
BETWEEN:'E0:8812:≬
MATHEMATICAL LEFT ANGLE BRACKET:'E1:10216:⟨
REGISTERED SIGN:'E2:174:®
COPYRIGHT:'E3:169:©
TRADEMARK SYMBOL:'E4:8482:™
N-ARY SUMMATION:'E5:8721:∑
LARGE LEFT PARENTHESIS PART1:'E6:0:<udef_symbol num="0xE6" description="left_paraenthesis part 1"/>
LARGE LEFT PARENTHESIS PART2:'E7:0:<udef_symbol num="0xE7" description="left_parenthesis part 2"/>
LARGE LEFT PARENTHESIS PART3:'E8:0:<udef_symbol num="0xE8" description="left_paranethesis part 3"/>
LARGE LEFT SQUARE BRACKET PART1:'E9:0:<udef_symbol num="0xE9" description="left_square_bracket part 1"/>
LARGE LEFT SQUARE BRACKET PART2:'EA:0:<udef_symbol num="0xEA" description="left_square_bracket part 2"/>
LARGE LEFT SQUARE BRACKET PART3:'EB:0:<udef_symbol num="0xEB" description="left_square_bracket part 3"/>
LARGE LEFT BRACKET PART1:'EC:0:<udef_symbol num="0xEC" description="left_bracket part 1"/>
LARGE LEFT BRACKET PART2:'ED:0:<udef_symbol num="0xED" description="left_bracket part 2"/>
LARGE LEFT BRACKET PART3:'EE:0:<udef_symbol num="0xEE" description="left_bracket part 3"/>
DIVIDES:'EF:8739:∣
MATHEMATICAL RIGHT ANGLE BRACKET:'F1:10217:⟩
INTEGRAL:'F2:8747:∫
LARGE INTEGRAL PART 1:'F3:0:<udef_symbol num="0xF3" description="integral part 1"/>
LARGE INTEGRAL PART 2:'F4:0:<udef_symbol num="0xF4" description="integral part 2"/>
LARGE INTEGRAL PART 3:'F5:0:<udef_symbol num="0xF5" description="integral part 3"/>
LARGE RIGHT PARENTHESIS PART1:'F6:0:<udef_symbol num="0xF6" description="right_parenthesis part 1"/>
LARGE RIGHT PARENTHESIS PART2:'F7:0:<udef_symbol num="0xF7" description="right_parenthesis part 2"/>
LARGE RIGHT PARENTHESIS PART3:'F8:0:<udef_symbol num="0xF8" description="right_parenthesis part 3"/>
LARGE RIGHT SQUARE BRACKET PART1:'F9:0:<udef_symbol num="0xF9" description="right_square_bracket part 1"/>
LARGE RIGHT SQUARE BRACKET PART2:'FA:0:<udef_symbol num="0xFA" description="right_square_bracket part 2"/>
LARGE RIGHT SQUARE BRACKETPART3:'FB:0:<udef_symbol num="0xFB" description="right_square_bracket part 3"/>
LARGE RIGHT BRACKET PART1:'FC:0:<udef_symbol num="0xFC" description="right_bracket part 1"/>
LARGE RIGHT BRACKETPART2:'FD:0:<udef_symbol num="0xFD" description="right_bracket part 2"/>
LARGE RIGHT BRACKETPART3:'FE:0:<udef_symbol num="0xFE" description="right_bracket part 3"/>
DOUBLE ACUTE ACCENT:'B2:733:˝
MY UNDEFINED SYMBOL:'7F:127:<udef_symbol num="0x7f"/>
MY UNDEFINED SYMBOL:'80:128:<udef_symbol num="0x80"/>
MY UNDEFINED SYMBOL:'81:129:<udef_symbol num="0x81"/>
MY UNDEFINED SYMBOL:'82:130:<udef_symbol num="130"/>
MY UNDEFINED SYMBOL:'83:131:<udef_symbol num="131"/>
MY UNDEFINED SYMBOL:'84:132:<udef_symbol num="132"/>
MY UNDEFINED SYMBOL:'85:133:<udef_symbol num="133"/>
MY UNDEFINED SYMBOL:'86:134:<udef_symbol num="134"/>
MY UNDEFINED SYMBOL:'87:135:<udef_symbol num="135"/>
MY UNDEFINED SYMBOL:'88:136:<udef_symbol num="136"/>
MY UNDEFINED SYMBOL:'89:137:<udef_symbol num="137"/>
MY UNDEFINED SYMBOL:'8A:138:<udef_symbol num="138"/>
MY UNDEFINED SYMBOL:'8B:139:<udef_symbol num="139"/>
MY UNDEFINED SYMBOL:'8C:140:<udef_symbol num="140"/>
MY UNDEFINED SYMBOL:'8D:141:<udef_symbol num="141"/>
MY UNDEFINED SYMBOL:'8E:142:<udef_symbol num="142"/>
MY UNDEFINED SYMBOL:'8F:143:<udef_symbol num="143"/>
MY UNDEFINED SYMBOL:'90:144:<udef_symbol num="144"/>
MY UNDEFINED SYMBOL:'91:145:<udef_symbol num="145"/>
MY UNDEFINED SYMBOL:'92:146:<udef_symbol num="146"/>
MY UNDEFINED SYMBOL:'93:147:<udef_symbol num="147"/>
MY UNDEFINED SYMBOL:'94:148:<udef_symbol num="148"/>
MY UNDEFINED SYMBOL:'95:149:<udef_symbol num="149"/>
MY UNDEFINED SYMBOL:'96:150:<udef_symbol num="150"/>
MY UNDEFINED SYMBOL:'97:151:<udef_symbol num="151"/>
MY UNDEFINED SYMBOL:'98:152:<udef_symbol num="152"/>
MY UNDEFINED SYMBOL:'99:153:<udef_symbol num="153"/>
MY UNDEFINED SYMBOL:'9A:154:<udef_symbol num="154"/>
MY UNDEFINED SYMBOL:'9B:155:<udef_symbol num="155"/>
MY UNDEFINED SYMBOL:'9C:156:<udef_symbol num="156"/>
MY UNDEFINED SYMBOL:'9D:157:<udef_symbol num="157"/>
MY UNDEFINED SYMBOL:'9E:158:<udef_symbol num="158"/>
MY UNDEFINED SYMBOL:'9F:159:<udef_symbol num="159"/>
MY UNDEFINED SYMBOL:'A0:160:<udef_symbol num="160"/>
MY UNDEFINED SYMBOL:'F0:240:<udef_symbol num="240"/>
</SYMBOL>
<ascii_to_hex>
SPACE: :32:\'20
EXCLAMATION MARK:!:33:\'21
QUOTATION MARK:":34:\'22
NUMBER SIGN:#:35:\'23
DOLLAR SIGN:$:36:\'24
PERCENT SIGN:%:37:\'25
AMPERSAND:&:38:\'26
APOSTROPHE:':39:\'27
LEFT PARENTHESIS:(:40:\'28
RIGHT PARENTHESIS:):41:\'29
ASTERISK:*:42:\'2A
PLUS SIGN:+:43:\'2B
COMMA:,:44:\'2C
HYPHEN-MINUS:-:45:\'2D
FULL STOP:.:46:\'2E
SOLIDUS:/:47:\'2F
DIGIT ZERO:0:48:\'30
DIGIT ONE:1:49:\'31
DIGIT TWO:2:50:\'32
DIGIT THREE:3:51:\'33
DIGIT FOUR:4:52:\'34
DIGIT FIVE:5:53:\'35
DIGIT SIX:6:54:\'36
DIGIT SEVEN:7:55:\'37
DIGIT EIGHT:8:56:\'38
DIGIT NINE:9:57:\'39
COLON:\\colon:58:\'3A
SEMICOLON:;:59:\'3B
EQUALS SIGN:=:61:\'3D
QUESTION MARK:?:63:\'3F
LATIN CAPITAL LETTER A:A:65:\'41
LATIN CAPITAL LETTER B:B:66:\'42
LATIN CAPITAL LETTER C:C:67:\'43
LATIN CAPITAL LETTER D:D:68:\'44
LATIN CAPITAL LETTER E:E:69:\'45
LATIN CAPITAL LETTER F:F:70:\'46
LATIN CAPITAL LETTER G:G:71:\'47
LATIN CAPITAL LETTER H:H:72:\'48
LATIN CAPITAL LETTER I:I:73:\'49
LATIN CAPITAL LETTER J:J:74:\'4A
LATIN CAPITAL LETTER K:K:75:\'4B
LATIN CAPITAL LETTER L:L:76:\'4C
LATIN CAPITAL LETTER M:M:77:\'4D
LATIN CAPITAL LETTER N:N:78:\'4E
LATIN CAPITAL LETTER O:O:79:\'4F
LATIN CAPITAL LETTER P:P:80:\'50
LATIN CAPITAL LETTER Q:Q:81:\'51
LATIN CAPITAL LETTER R:R:82:\'52
LATIN CAPITAL LETTER S:S:83:\'53
LATIN CAPITAL LETTER T:T:84:\'54
LATIN CAPITAL LETTER U:U:85:\'55
LATIN CAPITAL LETTER V:V:86:\'56
LATIN CAPITAL LETTER W:W:87:\'57
LATIN CAPITAL LETTER X:X:88:\'58
LATIN CAPITAL LETTER Y:Y:89:\'59
LATIN CAPITAL LETTER Z:Z:90:\'5A
LEFT SQUARE BRACKET:[:91:\'5B
REVERSE SOLIDUS:\\:92:\'5C
RIGHT SQUARE BRACKET:]:93:\'5D
LATIN SMALL LETTER A:a:97:\'61
LATIN SMALL LETTER B:b:98:\'62
LATIN SMALL LETTER C:c:99:\'63
LATIN SMALL LETTER D:d:100:\'64
LATIN SMALL LETTER E:e:101:\'65
LATIN SMALL LETTER F:f:102:\'66
LATIN SMALL LETTER G:g:103:\'67
LATIN SMALL LETTER H:h:104:\'68
LATIN SMALL LETTER I:i:105:\'69
LATIN SMALL LETTER J:j:106:\'6A
LATIN SMALL LETTER K:k:107:\'6B
LATIN SMALL LETTER L:l:108:\'6C
LATIN SMALL LETTER M:m:109:\'6D
LATIN SMALL LETTER N:n:110:\'6E
LATIN SMALL LETTER O:o:111:\'6F
LATIN SMALL LETTER P:p:112:\'70
LATIN SMALL LETTER Q:q:113:\'71
LATIN SMALL LETTER R:r:114:\'72
LATIN SMALL LETTER S:s:115:\'73
LATIN SMALL LETTER T:t:116:\'74
LATIN SMALL LETTER U:u:117:\'75
LATIN SMALL LETTER V:v:118:\'76
LATIN SMALL LETTER W:w:119:\'77
LATIN SMALL LETTER X:x:120:\'78
LATIN SMALL LETTER Y:y:121:\'79
LATIN SMALL LETTER Z:z:122:\'7A
LEFT CURLY BRACKET:{:123:\'7B
VERTICAL LINE:|:124:\'7C
RIGHT CURLY BRACKET:}:125:\'7D
TILDE:~:126:\'7E
</ascii_to_hex>
<wingdings>
SPACE:'20:32: 
LOWER RIGHT PENCIL:'21:9998:✎
BLACK SCISSORS:'22:9986:✂
UPPER BLADE SCISSORS:'23:9985:✁
PROPOSE "LOWER LEFT SPECTACLES":'24:none:<udef_symbol num="0x24" description="lower_left_spectacles"/>
PROPOSE "BELL":'25:none:<udef_symbol num="0x25" description="bell"/>
PROPOSE "OPEN BOOK":'26:none:<udef_symbol num="0x26" description="open_book"/>
PROPOSE "LIGHTED CANDLE":'27:none:<udef_symbol num="0x27" description="lighted_candle"/>
BLACK TELEPHONE:'28:9742:☎
TELEPHONE LOCATION SIGN:'29:9990:✆
ENVELOPE:'2A:9993:✉
ENVELOPE:'2B:9993:✉
PROPOSE "MAIL FLAG DOWN":'2C:none:<udef_symbol num="0x2C" description="mail_flag_down"/>
PROPOSE "MAIL FLAG UP":'2D:none:<udef_symbol num="0x2D" description="mail_flag_up"/>
PROPOSE "MAIL FULL":'2E:none:<udef_symbol num="0x2E" description="mail_full"/>
PROPOSE "MAIL EMPTY":'2F:none:<udef_symbol num="0x2F" description="mail_empty"/>
PROPOSE "FOLDER CLOSE":'30:none:<udef_symbol num="0x30" description="folder_close"/>
PROPOSE "FOLDER OPEN":'31:none:<udef_symbol num="0x31" description="folder_open"/>
PROPOSE "DOCUMENT FOLDED":'32:none:<udef_symbol num="0x32" description="document_folded"/>
PROPOSE "DOCUMENT":'33:none:<udef_symbol num="0x33" description="document"/>
PROPOSE "MULTIPLE DOCUMENTS":'34:none:<udef_symbol num="0x34" description="multiple_documents"/>
PROPOSE "FILE CABINET":'35:none:<udef_symbol num="0x35" description="file_cabinet"/>
HOURGLASS:'36:8987:⌛
KEYBOARD:'37:9000:⌨
PROPOSE "MOUSE":'38:none:<udef_symbol num="0x38" description="mouse"/>
PROPOSE "QUICKCAM CAMERA":'39:none:<udef_symbol num="0x39" description="quickcam_camera"/>
PROPOSE "COMPUTER":'3A:none:<udef_symbol num="0x3A" description="computer"/>
PROPOSE "HARD DRIVE":'3B:none:<udef_symbol num="0x3B" description="hard_drive"/>
PROPOSE "THREE AND A HALF FLOPPY":'3C:none:<udef_symbol num="0x3c" description = "three_and_a_half_floppy"/>
PROPOSE "FIVE AND A QUARTER FLOPPY":'3D:none:<udef_symbol num="0x3D" description="five_and_a_quarter_floppy"/>
TAPE DRIVE:'3E:9991:✇
WRITING HAND:'3F:9997:✍
WRITING HAND:'40:9997:✍
VICTORY HAND:'41:9996:✌
PROPOSE "PICKING HAND(OR OMMAT)":'42:none:<udef_symbol num="0x42" description="picking_hand_or_ommat"/>
PROPOSE "WHITE UP POINTING THUMB":'43:none:<udef_symbol num="0x43" description="white_up_pointing_thumb"/>
PROPOSE "WHITE DOWN POINTING THUMB":'44:none:<udef_symbol num="0x44" description="white_down_pointing_thumb"/>
WHITE LEFT POINTING INDEX:'45:9756:☜
WHITE RIGHT POINTING INDEX:'46:9758:☞
WHITE UP POINTING INDEX:'47:9757:☝
WHITE DOWN POINTING INDEX:'48:9759:☟
PROPOSE "WHITE PALM":'49:none:<udef_symbol num="0x49" description="white_palm"/>
WHITE SMILING FACE:'4A:9786:☺
WHITE SMILING FACE:'4B:9786:☺
WHITE FROWNING FACE:'4C:9785:☹
PROPOSE "BLACK BOMB WITH FUSE":'4D:none:<udef_symbol num="0x4D" description="black_bomb_with_fuse"/>
SKULL AND CROSSBONES:'4E:9760:☠
PROPOSE "WHITE BILLOWING SQUARE FLAG":'4F:none:<udef_symbol num="0x4F" description="white_billowing_square_flag"/>
PROPOSE "WHITE BILLOWING TRIANGLE FLAG":'50:none:<udef_symbol num="0x50" description="white_billowing_triangle_flag"/>
AIRPLANE:'51:9992:✈
WHITE SUN WITH RAYS:'52:9788:☼
PROPOSE "INK BLOT":'53:none:<udef_symbol num="0x53" description="ink_blot"/>
SNOWFLAKE:'54:10052:❄
SHADOWED WHITE LATIN CROSS:'55:10014:✞
SHADOWED WHITE LATIN CROSS:'56:10014:✞
LATIN CROSS:'57:10013:✝
MALTESE CROSS:'58:10016:✠
STAR OF DAVID:'59:10017:✡
STAR AND CRESCENT:'5A:9770:☪
YIN YANG:'5B:9775:☯
DEVANAGARI OM CORRECT:'5C:2384:ॐ
WHEEL OF DHARMA:'5D:9784:☸
ARIES:'5E:9800:♈
TAURUS:'5F:9801:♉
GEMINI:'60:9802:♊
CANCER:'61:9803:♋
LEO:'62:9804:♌
VIRGO:'63:9805:♍
LIBRA:'64:9806:♎
SCORPIUS:'65:9807:♏
SAGITTARIUS:'66:9808:♐
CAPRICORN:'67:9809:♑
AQUARIUS:'68:9810:♒
PISCES:'69:9811:♓
AMPERSAND:'6A:38:&
AMPERSAND:'6B:38:&
BLACK CIRCLE:'6C:9679:●
SHADOWED WHITE CIRCLE:'6D:10061:❍
BLACK SQUARE:'6E:9632:■
WHITE SQUARE:'6F:9633:□
WHITE SQUARE:'70:9633:□
LOWER RIGHT SHADOWED WHITE SQUARE:'71:10065:❑
UPPER RIGHT SHADOWED WHITE SQUARE:'72:10066:❒
LOZENGE:'73:9674:◊
LOZENGE:'74:9674:◊
BLACK DIAMOND:'75:9670:◆
BLACK DIAMOND MINUS WHITE X:'76:10070:❖
BLACK DIAMOND:'77:9670:◆
X IN A RECTANGLE BOX:'78:8999:⌧
APL FUNCTIONAL SYMBOL QUAD UP CARET:'79:9043:⍓
PLACE OF INTEREST SIGN:'7A:8984:⌘
WHITE FLORETTE:'7B:10048:❀
BLACK FLORETTE:'7C:10047:✿
HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT:'7D:10077:❝
HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT:'7E:10078:❞
"UNUSED":'7F:none:<udef_symbol num="0x7F"/>
CIRCLED DIGIT ZERO:'80:9450:⓪
CIRCLED DIGIT ONE:'81:9312:①
CIRCLED DIGIT TWO:'82:9313:②
CIRCLED DIGIT THREE:'83:9314:③
CIRCLED DIGIT FOUR:'84:9315:④
CIRCLED DIGIT FIVE:'85:9316:⑤
CIRCLED DIGIT SIX:'86:9317:⑥
CIRCLED DIGIT SEVEN:'87:9318:⑦
CIRCLED DIGIT EIGHT:'88:9319:⑧
CIRCLED DIGIT NINE:'89:9320:⑨
CIRCLED NUMBER TEN:'8A:9321:⑩
PROPOSE "DINGBAT NEGATIVE CIRCLED DIGIT ZERO":'8B:none:<udef_symbol num="0x8B" description="dingbat_negative_circled_digit_zero"/>
DINGBAT NEGATIVE CIRCLED DIGIT ONE:'8C:10102:❶
DINGBAT NEGATIVE CIRCLED DIGIT TWO:'8D:10103:❷
DINGBAT NEGATIVE CIRCLED DIGIT THREE:'8E:10104:❸
DINGBAT NEGATIVE CIRCLED DIGIT FOUR:'8F:10105:❹
DINGBAT NEGATIVE CIRCLED DIGIT FIVE:'90:10106:❺
DINGBAT NEGATIVE CIRCLED DIGIT SIX:'91:10107:❻
DINGBAT NEGATIVE CIRCLED DIGIT SEVEN:'92:10108:❼
DINGBAT NEGATIVE CIRCLED DIGIT EIGHT:'93:10109:❽
DINGBAT NEGATIVE CIRCLED DIGIT NINE:'94:10110:❾
DINGBAT NEGATIVE CIRCLED NUMBER TEN:'95:10111:❿
ROTATED FLORAL HEART BULLET:'96:10087:❧
REVERSED ROTATED FLORAL HEART BULLET:'97:9753:☙
REVERSED ROTATED FLORAL HEART BULLET:'98:9753:☙
ROTATED FLORAL HEART BULLET:'99:10087:❧
ROTATED FLORAL HEART BULLET:'9A:10087:❧
REVERSED ROTATED FLORAL HEART BULLET:'9B:9753:☙
REVERSED ROTATED FLORAL HEART BULLET:'9C:9753:☙
ROTATED FLORAL HEART BULLET:'9D:10087:❧
BULLET:'9E:8226:•
BLACK CIRCLE:'9F:9679:●
DON'T KNOW:'A0:160: 
WHITE CIRCLE:'A1:9675:○
WHITE CIRCLE:'A2:9675:○
WHITE CIRCLE:'A3:9675:○
SUN:'A4:9737:☉
SUN:'A5:9737:☉
SHADOWED WHITE CIRCLE:'A6:10061:❍
BLACK SMALL SQUARE:'A7:9642:▪
WHITE SQUARE:'A8:9633:□
PROPOSE "THEE MIGHT BE IN THERE SOMEWHERE":'A9:none:<udef_symbol num="0xA9" description="thee_might_be_in_there_somewhere"/>
BLACK FOUR POINTED STAR MAYBE:'AA:10022:✦
BLACK STAR:'AB:9733:★
SIX POINTED BLACK STAR:'AC:10038:✶
EIGHT POINTED RECTILINEAR BLACK STAR:'AD:10039:✷
TWELVE POINTED BLACK STAR:'AE:10040:✸
EIGHT POINTED PINWHEEL STAR:'AF:10037:✵
PROPOSE "CROSSHAIR SQUARE":'B0:none:<udef_symbol num="0xB0" description="crosshair_square"/>
PROPOSE "CROSSHAIR CIRCLE":'B1:none:<udef_symbol num="0xB1" description="crosshair_circle"/>
WHITE FOUR POINTED STAR:'B2:10023:✧
PROPOSE "THIS HAS TO BE A KNOWN SYMBOL":'B3:none:<udef_symbol num="0xB3" description="this_has_to_be_a_known_symbol"/>
REPLACEMENT CHARACTER:'B4:65533:�
CIRCLED WHITE STAR:'B5:10026:✪
SHADOWED WHITE STAR:'B6:10032:✰
PROPOSE "1 OCLOCK":'B7:none:<udef_symbol num="0xB7" description="one_oclock"/>
PROPOSE "2 OCLOCK":'B8:none:<udef_symbol num="0xB8" description="two_oclock"/>
PROPOSE "3 OCLOCK":'B9:none:<udef_symbol num="0xB9" description="three_oclock"/>
PROPOSE "4 OCLOCK":'BA:none:<udef_symbol num="0xBA" description="four_oclock"/>
PROPOSE "5 OCLOCK":'BB:none:<udef_symbol num="0xBB" description="five_oclock"/>
PROPOSE "6 OCLOCK":'BC:none:<udef_symbol num="0xBC" description="six_oclock"/>
PROPOSE "7 OCLOCK":'BD:none:<udef_symbol num="0xBD" description="seven_oclock"/>
PROPOSE "8 OCLOCK":'BE:none:<udef_symbol num="0xBE" description="eight_oclock"/>
PROPOSE "9 OCLOCK":'BF:none:<udef_symbol num="0xBF" description="nine_oclock"/>
PROPOSE "10 OCLOCK":'C0:none:<udef_symbol num="0xC0" description="ten_oclock"/>
PROPOSE "11 OCLOCK":'C1:none:<udef_symbol num="0xC1" description="eleven_oclock"/>
PROPOSE "12 OCLOCK":'C2:none:<udef_symbol num="0xC2" description="twelve_oclock"/>
PROPOSE "NOTCHED DOWNWARDS DOUBLE ARROW WITH TIP LEFTWARDS":'C3:none:<udef_symbol num="0xC3" description="notched_downwards_double_arrow_with_tip_leftwards"/>
PROPOSE "NOTCHED DOWNWARDS DOUBLE ARROW WITH TIP RIGHTWARDS":'C4:none:<udef_symbol num="0xC4" description="notched_downwards_double_arrow_with_tip_rightwards"/>
PROPOSE "NOTCHED UPWARDS DOUBLE ARROW WITH TIP LEFTWARDS":'C5:none:<udef_symbol num="0xC5" description="notched_upwards_double_arrow_with_tip_leftwards"/>
PROPOSE "NOTCHED UPWARDS DOUBLE ARROW WITH TIP RIGHTWARDS":'C6:none:<udef_symbol num="0xC6" description="notched_upwards_double_arrow_with_tip_rightwards"/>
PROPOSE "NOTCHED LEFTWARDS DOUBLE ARROW WITH TIP UPWARDS":'C7:none:<udef_symbol num="0xC7" description="notched_leftwards_double_arrow_with_tip_upwards"/>
PROPOSE "NOTCHED RIGHTWARDS DOUBLE ARROW WITH TIP UPWARDS":'C8:none:<udef_symbol num="0xC8" description="notched_rightwards_double_arrow_with_tip_upwards"/>
PROPOSE "NOTCHED LEFTWARDS DOUBLE ARROW WITH TIP DOWNWARDS":'C9:none:<udef_symbol num="0xC9" description="notched_leftwards_double_arrow_with_tip_downwards"/>
PROPOSE "NOTCHED RIGHTWARDS DOUBLE ARROW WITH TIP DOWNWARDS":'CA:none:<udef_symbol num="0xCA" description="notched_rightwards_double_arrow_with_tip_downwards"/>
PROPOSE "NO IDEA":'CB:none:<udef_symbol num="0xCB" description="no_idea"/>
PROPOSE "REVERSE OF ABOVE":'CC:none:<udef_symbol num="0xCC" description="reverse_of_above"/>
PROPOSE "HEDERA LOWER LEFT":'CD:none:<udef_symbol num="0xCD" description="hedera_lower_left"/>
PROPOSE "HEDERA UPPER LEFT REVERSED":'CE:none:<udef_symbol num="0xCE" description="hedera_upper_left_reversed"/>
PROPOSE "HEDERA LOWER RIGHT REVERSED":'CF:none:<udef_symbol num="0xCF" description="hedera_lower_right_reversed"/>
PROPOSE "HEDERA UPPER RIGHT":'D0:none:<udef_symbol num="0xD0" description="hedera_upper_right"/>
PROPOSE "HEDERA UPPER LEFT":'D1:none:<udef_symbol num="0xD1" description="hedera_upper_left"/>
PROPOSE "HEDERA LOWER LEFT REVERSED":'D2:none:<udef_symbol num="0xD2" description="hedera_lower_left_reversed"/>
PROPOSE "HEDERA UPPER RIGHT REVERSED":'D3:none:<udef_symbol num="0xD3" description="hedera_upper_right_reversed"/>
PROPOSE "HEDERA LOWER RIGHT":'D4:none:<udef_symbol num="0xD4" description="hedera_lower_right"/>
ERASE TO THE LEFT:'D5:9003:⌫
ERASE TO THE RIGHT:'D6:8998:⌦
PROPOSE "THREE-D TOP-LIGHTED LEFTWARDS ARROWHEAD":'D7:none:<udef_symbol num="0xD7" description="three-d_top-lighted_leftwards_arrowhead"/>
THREE-D TOP-LIGHTED RIGHTWARDS ARROWHEAD:'D8:10146:➢
PROPOSE "THREE-D RIGHT-LIGHTED UPWARDS ARROWHEAD":'D9:none:<udef_symbol num="0xD9" description="three-d_right-lighted_upwards_arrowhead"/>
PROPOSE "THREE-D LEFT-LIGHTED DOWNWARDS ARROWHEAD":'DA:none:<udef_symbol num="0xDA" description="three-d_left-lighted_downwards_arrowhead"/>
PROPOSE "CIRCLED HEAVY WHITE LEFTWARDS ARROW":'DB:none:<udef_symbol num="0xDB" description="circled_heavy_white_leftwards_arrow"/>
CIRCLED HEAVY WHITE RIGHTWARDS ARROW:'DC:10162:➲
PROPOSE "CIRCLED HEAVY WHITE UPWARDS ARROW":'DD:none:<udef_symbol num="0xDD" description="circled_heavy_white_upwards_arrow"/>
PROPOSE "CIRCLED HEAVY WHITE DOWNWARDS ARROW":'DE:none:<udef_symbol num="0xDE" description="circled_heavy_white_downwards_arrow"/>
PROPOSE "WIDE-HEADED LEFTWARDS ARROW":'DF:none:<udef_symbol num="0xDF" description="wide-headed_leftwards_arrow"/>
PROPOSE "WIDE-HEADED RIGHTWARDS ARROW":'E0:none:<udef_symbol num="0xE0" description="wide-headed_rightwards_arrow"/>
PROPOSE "WIDE-HEADED UPWARDS ARROW":'E1:none:<udef_symbol num="0xE1" description="wide-headed_upwards_arrow"/>
PROPOSE "WIDE-HEADED DOWNWARDS ARROW":'E2:none:<udef_symbol num="0xE2" description="wide-headed_downwards_arrow"/>
PROPOSE "WIDE-HEADED NORTHWEST-WARDS ARROW":'E3:none:<udef_symbol num="0xE3" description="wide-headed_northwest-wards_arrow"/>
PROPOSE "WIDE-HEADED NORTHEAST-WARDS ARROW":'E4:none:<udef_symbol num="0xE4" description="wide-headed_northeast-wards_arrow"/>
PROPOSE "WIDE-HEADED SOUTHWEST-WARDS ARROW":'E5:none:<udef_symbol num="0xE5" description="wide-headed_southwest-wards_arrow"/>
PROPOSE "WIDE-HEADED SOUTHEAST-WARDS ARROW":'E6:none:<udef_symbol num="0xE6" description="wide-headed_southeast-wards_arrow"/>
PROPOSE "HEAVY WIDE-HEADED LEFTWARDS ARROW":'E7:none:<udef_symbol num="0xE7" description="heavy_wide-headed_leftwards_arrow"/>
HEAVY WIDE-HEADED RIGHTWARDS ARROW:'E8:10132:➔
PROPOSE "HEAVY WIDE-HEADED UPWARDS ARROW":'E9:none:<udef_symbol num="0xE9" description="heavy_wide-headed_upwards_arrow"/>
PROPOSE "HEAVY WIDE-HEADED DOWNWARDS ARROW":'EA:none:<udef_symbol num="0xEA" description="heavy_wide-headed_downwards_arrow"/>
PROPOSE "HEAVY WIDE-HEADED NORTHWEST-WARDS ARROW":'EB:none:<udef_symbol num="0xEB" description="heavy_wide-headed_northwest-wards_arrow"/>
PROPOSE "HEAVY WIDE-HEADED NORTHEAST-WARDS ARROW":'EC:none:<udef_symbol num="0xEC" description="heavy_wide-headed_northeast-wards_arrow"/>
PROPOSE "HEAVY WIDE-HEADED SOUTHWEST-WARDS ARROW":'ED:none:<udef_symbol num="0xED" description="heavy_wide-headed_southwest-wards_arrow"/>
PROPOSE "HEAVY WIDE-HEADED SOUTHEAST-WARDS ARROW":'EE:none:<udef_symbol num="0xEE" description="heavy_wide-headed_southeast-wards_arrow"/>
LEFTWARDS WHITE ARROW:'EF:8678:⇦
RIGHTWARDS WHITE ARROW:'F0:8680:⇨
UPWARDS WHITE ARROW:'F1:8679:⇧
DOWNWARDS WHITE ARROW:'F2:8681:⇩
LEFT RIGHT DOUBLE ARROW:'F3:8660:⇔
UP DOWN DOUBLE ARROW:'F4:8661:⇕
NORTH WEST DOUBLE ARROW:'F5:8662:⇖
NORTH EAST DOUBLE ARROW:'F6:8663:⇗
SOUTH WEST DOUBLE ARROW:'F7:8665:⇙
SOUTH EAST DOUBLE ARROW:'F8:8664:⇘
"NO IDEA":'F9:none:<udef_symbol num="0xF9" description="no_idea"/>
"NO IDEA":'FA:none:<udef_symbol num="0xFA" description="no_idea"/>
BALLOT X:'FB:10007:✗
CHECK MARK:'FC:10003:✓
BALLOT BOX WITH X:'FD:9746:☒
BALLOT BOX WITH CHECK:'FE:9745:☑
PROPOSE "MICROSOFT WINDOWS LOGO":'FF:none:<udef_symbol num="0xFF" description="microsoft_windows_logo"/>
</wingdings>
<dingbats>
SPACE:'20:32:
UPPER BLADE SCISSORS:'21:9985:✁
BLACK SCISSORS:'22:9986:✂
LOWER BLADE SCISSORS:'23:9987:✃
WHITE SCISSORS:'24:9988:✄
BLACK TELEPHONE:'25:9742:☎
TELEPHONE LOCATION SIGN:'26:9990:✆
TAPE DRIVE:'27:9991:✇
AIRPLANE:'28:9992:✈
ENVELOPE:'29:9993:✉
BLACK RIGHT POINTING INDEX:'2A:9755:☛
WHITE RIGHT POINTING INDEX:'2B:9758:☞
VICTORY HAND:'2C:9996:✌
WRITING HAND:'2D:9997:✍
LOWER RIGHT PENCIL:'2E:9998:✎
PENCIL:'2F:9999:✏
UPPER RIGHT PENCIL:'30:10000:✐
WHITE NIB:'31:10001:✑
BLACK NIB:'32:10002:✒
CHECKMARK:'33:10003:✓
HEAVY CHECKMARK:'34:10004:✔
MULTIPLICATION X:'35:10005:✕
HEAVY MULTIPLICATION X:'36:10006:✖
BALLOT X:'37:10007:✗
HEAVY BALLOT X:'38:10008:✘
OUTLINED GREEK CROSS:'39:10009:✙
HEAVY GREEK CROSS:'3A:10010:✚
OPEN CENTRE CROSS:'3B:10011:✛
HEAVY OPEN CENTRE CROSS:'3C:10012:✜
LATIN CROSS:'3D:10013:✝
SHADOWED WHITE LATIN CROSS:'3E:10014:✞
OUTLINED LATIN CROSS:'3F:10015:✟
MALTESE CROSS:'40:10016:✠
STAR OF DAVID:'41:10017:✡
FOUR TEARDROP-SPOKED ASTERISK:'42:10018:✢
FOUR BALLOON-SPOKED ASTERISK:'43:10019:✣
FOUR BALLOON-SPOKED ASTERISK:'43:10019:✣
HEAVY FOUR BALLOON-SPOKED ASTERISK:'44:10020:✤
FOUR CLUB-SPOKED ASTERISK:'45:10021:✥
BLACK FOUR POINTED STAR:'46:10022:✦
WHITE FOUR POINTED STAR:'47:10023:✧
BLACK STAR:'48:9733:★
STRESS OUTLINED WHITE STAR:'49:10025:✩
CIRCLED WHITE STAR:'4A:10026:✪
OPEN CENTRE BLACK STAR:'4B:10027:✫
BLACK CENTRE WHITE STAR:'4C:10028:✬
OUTLINED BLACK STAR:'4D:10029:✭
HEAVY OUTLINED BLACK STAR:'4E:10030:✮
PINWHEEL STAR:'4F:10031:✯
SHADOWED WHITE STAR:'50:10032:✰
HEAVY ASTERISK:'51:10033:✱
OPEN CENTRE ASTERISK:'52:10034:✲
EIGHT SPOKED ASTERISK:'53:10035:✳
EIGHT POINTED BLACK STAR:'54:10036:✴
EIGHT POINTED PINWHEEL STAR:'55:10037:✵
SIX POINTED BLACK STAR:'56:10038:✶
EIGHT POINTED RECTILINEAR BLACK STAR:'57:10039:✷
HEAVY EIGHT POINTED RECTILINEAR BLACK STAR:'58:10040:✸
TWELVE POINTED BLACK STAR:'59:10041:✹
SIXTEEN POINTED ASTERISK:'5A:10042:✺
TEARDROP-SPOKED ASTERISK:'5B:10043:✻
OPEN CENTRE TEARDROP-SPOKED ASTERISK:'5C:10044:✼
HEAVY TEARDROP-SPOKED ASTERISK:'5D:10045:✽
SIX PETALLED BLACK AND WHITE FLORETTE:'5E:10046:✾
BLACK FLORETTE:'5F:10047:✿
WHITE FLORETTE:'60:10048:❀
EIGHT PETALLED OUTLINED BLACK FLORETTE:'61:10049:❁
CIRCLED OPEN CENTRE EIGHT POINTED STAR:'62:10050:❂
HEAVY TEARDROP-SPOKED PINWHEEL ASTERISK:'63:10051:❃
SNOWFLAKE:'64:10052:❄
TIGHT TRIFOLIATE SNOWFLAKE:'65:10053:❅
HEAVY CHEVRON SNOWFLAKE:'66:10054:❆
SPARKLE:'67:10055:❇
HEAVY SPARKLE:'68:10056:❈
BALLOON-SPOKED ASTERISK:'69:10057:❉
TEARDROP-SPOKED ASTERISK:'6A:10043:✻
HEAVY TEARDROP-SPOKED ASTERISK:'6B:10045:✽
BLACK CIRCLE:'6C:9679:●
SHADOWED WHITE CIRCLE:'6D:10061:❍
BLACK SQUARE:'6E:9632:■
LOWER RIGHT DROP-SHADOWED SQUARE:'6F:10063:❏
UPPER RIGHT DROP-SHADOWED WHITE SQUARE:'70:10064:❐
LOWER RIGHT SHADOWED SQUARE:'71:10065:❑
UPPER RIGHT SHADOWED WHITE SQUARE:'72:10066:❒
BLACK UP-POINTING TRIANGLE:'73:9650:▲
BLACK DOWN-POINTING TRIANGLE:'74:9660:▼
BLACK DIAMOND:'75:9670:◆
BLACK DIAMOND MINUS WHITE X:'76:10070:❖
RIGHT HALF BLACK CIRCLE:'77:9687:◗
LIGHT VERTICAL BAR:'78:10072:❘
MEDIUM VERTICAL BAR:'79:10073:❙
HEAVY VERTICAL BAR:'7A:10074:❚
HEAVY SINGLE TURNED COMMA QUOTATION MARK ORNAMENT:'7B:10075:❛
HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT:'7C:10076:❜
HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT:'7D:10077:❝
HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT:'7E:10078:❞
UNUSED:'7F:none:<udef_symbol num="7F"/>
MEDIUM LEFT PARENTHESIS ORNAMENT:'80:10088:❨
MEDIUM RIGHT PARENTHESIS ORNAMENT:'81:10089:❩
MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT:'82:10090:❪
MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT:'83:10091:❫
MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT:'84:10092:❬
MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT:'85:10093:❭
HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT:'86:10094:❮
HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT:'87:10095:❯
HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT:'88:10096:❰
HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT:'89:10097:❱
LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT:'8A:10098:❲
LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT:'8B:10099:❳
MEDIUM LEFT CURLY BRACKET ORNAMENT:'8C:10100:❴
MEDIUM RIGHT CURLY BRACKET ORNAMENT:'8D:10101:❵
UNUSED:'8E:none:<udef_symbol num="8E"/>
UNUSED:'8F:none:<udef_symbol num="8F"/>
UNUSED:'90:none:<udef_symbol num="90"/>
UNUSED:'91:none:<udef_symbol num="91"/>
UNUSED:'92:none:<udef_symbol num="92"/>
UNUSED:'93:none:<udef_symbol num="93"/>
UNUSED:'94:none:<udef_symbol num="94"/>
UNUSED:'95:none:<udef_symbol num="95"/>
UNUSED:'96:none:<udef_symbol num="96"/>
UNUSED:'97:none:<udef_symbol num="97"/>
UNUSED:'98:none:<udef_symbol num="98"/>
UNUSED:'99:none:<udef_symbol num="99"/>
UNUSED:'9A:none:<udef_symbol num="9A"/>
UNUSED:'9B:none:<udef_symbol num="9B"/>
UNUSED:'9C:none:<udef_symbol num="9C"/>
UNUSED:'9D:none:<udef_symbol num="9D"/>
UNUSED:'9E:none:<udef_symbol num="9E"/>
UNUSED:'9F:none:<udef_symbol num="9F"/>
UNUSED:'A0:none:<udef_symbol num="A0"/>
CURVED STEM PARAGRAPH SIGN ORNAMENT:'A1:10081:❡
HEAVY EXCLAMATION MARK ORNAMENT:'A2:10082:❢
HEAVY HEART EXCLAMATION MARK ORNAMENT:'A3:10083:❣
HEAVY BLACK HEART:'A4:10084:❤
ROTATED HEAVY BLACK HEART BULLET:'A5:10085:❥
FLORAL HEART:'A6:10086:❦
ROTATED FLORAL HEART BULLET:'A7:10087:❧
BLACK CLUB SUIT:'A8:9827:♣
BLACK DIAMOND SUIT:'A9:9830:♦
BLACK HEART SUIT:'AA:9829:♥
BLACK SPADE SUIT:'AB:9824:♠
DINGBAT CIRCLED SANS SERIF DIGIT ONE:'AC:10112:➀
DINGBAT CIRCLED SANS SERIF DIGIT TWO:'AD:10113:➁
DINGBAT CIRCLED SANS SERIF DIGIT THREE:'AE:10114:➂
DINGBAT CIRCLED SANS SERIF DIGIT FOUR:'AF:10115:➃
DINGBAT CIRCLED SANS SERIF DIGIT FIVE:'B0:10116:➄
DINGBAT CIRCLED SANS SERIF DIGIT SIX:'B1:10117:➅
DINGBAT CIRCLED SANS SERIF DIGIT SEVEN:'B2:10118:➆
DINGBAT CIRCLED SANS SERIF DIGIT EIGHT:'B3:10119:➇
DINGBAT CIRCLED SANS SERIF DIGIT NINE:'B4:10120:➈
DINGBAT CIRCLED SANS SERIF DIGIT TEN:'B5:10121:➉
DINGBAT NEGATIVE CIRCLED DIGIT ONE:'B6:10102:❶
DINGBAT NEGATIVE CIRCLED DIGIT TWO:'B7:10103:❷
DINGBAT NEGATIVE CIRCLED DIGIT THREE:'B8:10104:❸
DINGBAT NEGATIVE CIRCLED DIGIT FOUR:'B9:10105:❹
DINGBAT NEGATIVE CIRCLED DIGIT FIVE:'BA:10106:❺
DINGBAT NEGATIVE CIRCLED DIGIT SIX:'BB:10107:❻
DINGBAT NEGATIVE CIRCLED DIGIT SEVEN:'BC:10108:❼
DINGBAT NEGATIVE CIRCLED DIGIT EIGHT:'BD:10109:❽
DINGBAT NEGATIVE CIRCLED DIGIT NINE:'BE:10110:❾
DINGBAT NEGATIVE CIRCLED NUMBER TEN:'BF:10111:❿
DINGBAT CIRCLED SANS-SERIF DIGIT ONE:'C0:10112:➀
DINGBAT CIRCLED SANS-SERIF DIGIT TWO:'C1:10113:➁
DINGBAT CIRCLED SANS-SERIF DIGIT THREE:'C2:10114:➂
DINGBAT CIRCLED SANS-SERIF DIGIT FOUR:'C3:10115:➃
DINGBAT CIRCLED SANS-SERIF DIGIT FIVE:'C4:10116:➄
DINGBAT CIRCLED SANS-SERIF DIGIT SIX:'C5:10117:➅
DINGBAT CIRCLED SANS-SERIF DIGIT SEVEN:'C6:10118:➆
DINGBAT CIRCLED SANS-SERIF DIGIT EIGHT:'C7:10119:➇
DINGBAT CIRCLED SANS-SERIF DIGIT NINE:'C8:10120:➈
DINGBAT CIRCLED SANS-SERIF DIGIT TEN:'C9:10121:➉
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE:'CA:10122:➊
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TWO:'CB:10123:➋
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT THREE:'CC:10124:➌
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FOUR:'CD:10125:➍
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FIVE:'CE:10126:➎
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SIX:'CF:10127:➏
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SEVEN:'D0:10128:➐
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT EIGHT:'D1:10129:➑
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT NINE:'D2:10130:➒
DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TEN:'D3:10131:➓
HEAVY WIDE-HEADED RIGHTWARDS ARROW:'D4:10132:➔
RIGHTWARDS ARROW:'D5:8594:→
LEFT RIGHT ARROW:'D6:8596:↔
UP DOWN ARROW:'D7:8597:↕
HEAVY SOUTH EAST ARROW:'D8:10136:➘
HEAVY RIGHTWARDS ARROW:'D9:10137:➙
HEAVY NORTHEAST ARROW:'DA:10138:➚
DRAFTING POINT RIGHTWARDS ARROW:'DB:10139:➛
HEAVY ROUND-TIPPED RIGHTWARDS ARROW:'DC:10140:➜
TRIANGLE-HEADED RIGHTWARDS ARROW:'DD:10141:➝
HEAVY TRIANGLE-HEADED RIGHTWARDS ARROW:'DE:10142:➞
DASHED TRIANGLE-HEADED RIGHTWARDS ARROW:'DF:10143:➟
HEAVY DASHED TRIANGLE-HEADED RIGHTWARDS ARROW:'E0:10144:➠
BLACK RIGHTWARDS ARROW:'E1:10145:➡
THREE-D TOP-LIGHTED RIGHTWARDS ARROWHEAD:'E2:10146:➢
THREE-D BOTTOM-LIGHTED RIGHTWARDS ARROWHEAD:'E3:10147:➣
BLACK RIGHTWARDS ARROWHEAD:'E4:10148:➤
HEAVY BLACK CURVED DOWNWARDS AND RIGHTWARDS ARROW:'E5:10149:➥
HEAVY BLACK CURVED UPWARDS AND RIGHTWARDS ARROW:'E6:10150:➦
SQUAT BLACK RIGHTWARDS ARROW:'E7:10151:➧
HEAVY CONCAVE-POINTED BLACK RIGHTWARDS ARROW:'E8:10152:➨
RIGHT-SHADED WHITE RIGHTWARDS ARROW:'E9:10153:➩
LEFT-SHADED WHITE RIGHTWARDS ARROW:'EA:10154:➪
BACK-TILTED SHADOWED WHITE RIGHTWARDS ARROW:'EB:10155:➫
FRONT-TILTED SHADOWED WHITE RIGHTWARDS ARROW:'EC:10156:➬
HEAVY LOWER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW:'ED:10157:➭
HEAVY UPPER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW:'EE:10158:➮
NOTCHED LOWER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW:'EF:10159:➯
UNUSED:'F0:none:<udef_symbol num="F0"/>
NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARDS ARROW:'F1:10161:➱
CIRCLED HEAVY WHITE RIGHTWARDS ARROW:'F2:10162:➲
WHITE-FEATHERED RIGHTWARDS ARROW:'F3:10163:➳
BLACK-FEATHERED SOUTH EAST ARROW:'F4:10164:➴
BLACK-FEATHERED RIGHTWARDS ARROW:'F5:10165:➵
BLACK-FEATHERED NORTH EAST ARROW:'F6:10166:➶
HEAVY BLACK-FEATHERED SOUTH EAST ARROW:'F7:10167:➷
HEAVY BLACK-FEATHERED RIGHTWARDS ARROW:'F8:10168:➸
HEAVY BLACK-FEATHERED NORTH EAST ARROW:'F9:10169:➹
TEARDROP-BARBED RIGHTWARDS ARROW:'FA:10170:➺
HEAVY TEARDROP-SHANKED RIGHTWARDS ARROW:'FB:10171:➻
WEDGE-TAILED RIGHTWARDS ARROW:'FC:10172:➼
HEAVY WEDGED-TAILED RIGHTWARDS ARROW:'FD:10173:➽
OPEN-OUTLINED RIGHTWARDS ARROW:'FE:10174:➾
UNUSED:'FF:none:<udef_symbol num="FF"/>
</dingbats>
<caps_uni>
LATIN CAPITAL LETTER S WITH CARON:š:352:Š
LATIN CAPITAL LETTER S WITH ACUTE:ś:346:Ś
LATIN CAPITAL LETTER T WITH CARON:ť:356:Ť
LATIN CAPITAL LETTER Z WITH CARON:ž:381:Ž
LATIN CAPITAL LETTER Z WITH ACUTE:ź:377:Ź
LATIN CAPITAL LETTER L WITH STROKE:ł:321:Ł
LATIN CAPITAL LETTER A WITH OGONEK:ą:260:Ą
LATIN CAPITAL LETTER S WITH CEDILLA:ş:350:Ş
LATIN CAPITAL LETTER Z WITH DOT ABOVE:ż:379:Ż
LATIN CAPITAL LETTER L WITH CARON:ľ:317:Ľ
LATIN CAPITAL LETTER R WITH ACUTE:ŕ:340:Ŕ
LATIN CAPITAL LETTER A WITH ACUTE:á:193:Á
LATIN CAPITAL LETTER A WITH CIRCUMFLEX:â:194:Â
LATIN CAPITAL LETTER A WITH BREVE:ă:258:Ă
LATIN CAPITAL LETTER A WITH DIAERESIS:ä:196:Ä
LATIN CAPITAL LETTER L WITH ACUTE:ĺ:313:Ĺ
LATIN CAPITAL LETTER C WITH ACUTE:ć:262:Ć
LATIN CAPITAL LETTER C WITH CEDILLA:ç:199:Ç
LATIN CAPITAL LETTER C WITH CARON:č:268:Č
LATIN CAPITAL LETTER E WITH ACUTE:é:201:É
LATIN CAPITAL LETTER E WITH OGONEK:ę:280:Ę
LATIN CAPITAL LETTER E WITH DIAERESIS:ë:203:Ë
LATIN CAPITAL LETTER E WITH CARON:ě:282:Ě
LATIN CAPITAL LETTER I WITH ACUTE:í:205:Í
LATIN CAPITAL LETTER I WITH CIRCUMFLEX:î:206:Î
LATIN CAPITAL LETTER D WITH CARON:ď:270:Ď
LATIN CAPITAL LETTER D WITH STROKE:đ:272:Đ
LATIN CAPITAL LETTER N WITH ACUTE:ń:323:Ń
LATIN CAPITAL LETTER N WITH CARON:ň:327:Ň
LATIN CAPITAL LETTER O WITH ACUTE:ó:211:Ó
LATIN CAPITAL LETTER O WITH CIRCUMFLEX:ô:212:Ô
LATIN CAPITAL LETTER O WITH DOUBLE ACUTE:ő:336:Ő
LATIN CAPITAL LETTER O WITH DIAERESIS:ö:214:Ö
LATIN CAPITAL LETTER R WITH CARON:ř:344:Ř
LATIN CAPITAL LETTER U WITH RING ABOVE:ů:366:Ů
LATIN CAPITAL LETTER U WITH ACUTE:ú:218:Ú
LATIN CAPITAL LETTER U WITH DOUBLE ACUTE:ű:368:Ű
LATIN CAPITAL LETTER U WITH DIAERESIS:ü:220:Ü
LATIN CAPITAL LETTER Y WITH ACUTE:ý:221:Ý
LATIN CAPITAL LETTER T WITH CEDILLA:ţ:354:Ţ
CYRILLIC CAPITAL LETTER DJE (SERBOCROATIAN):ђ:1026:Ђ
CYRILLIC CAPITAL LETTER GJE:ѓ:1027:Ѓ
CYRILLIC CAPITAL LETTER LJE:љ:1033:Љ
CYRILLIC CAPITAL LETTER NJE:њ:1034:Њ
CYRILLIC CAPITAL LETTER KJE:ќ:1036:Ќ
CYRILLIC CAPITAL LETTER TSHE (SERBOCROATIAN):ћ:1035:Ћ
CYRILLIC CAPITAL LETTER DZHE:џ:1039:Џ
CYRILLIC CAPITAL LETTER SHORT U (BYELORUSSIAN):ў:1038:Ў
CYRILLIC CAPITAL LETTER JE:ј:1032:Ј
CYRILLIC CAPITAL LETTER GHE WITH UPTURN:ґ:1168:Ґ
CYRILLIC CAPITAL LETTER IO:ё:1025:Ё
CYRILLIC CAPITAL LETTER UKRAINIAN IE:є:1028:Є
CYRILLIC CAPITAL LETTER YI (UKRAINIAN):ї:1031:Ї
CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I:і:1030:І
CYRILLIC CAPITAL LETTER DZE:ѕ:1029:Ѕ
CYRILLIC CAPITAL LETTER A:а:1040:А
CYRILLIC CAPITAL LETTER BE:б:1041:Б
CYRILLIC CAPITAL LETTER VE:в:1042:В
CYRILLIC CAPITAL LETTER GHE:г:1043:Г
CYRILLIC CAPITAL LETTER DE:д:1044:Д
CYRILLIC CAPITAL LETTER IE:е:1045:Е
CYRILLIC CAPITAL LETTER ZHE:ж:1046:Ж
CYRILLIC CAPITAL LETTER ZE:з:1047:З
CYRILLIC CAPITAL LETTER I:и:1048:И
CYRILLIC CAPITAL LETTER SHORT I:й:1049:Й
CYRILLIC CAPITAL LETTER KA:к:1050:К
CYRILLIC CAPITAL LETTER EL:л:1051:Л
CYRILLIC CAPITAL LETTER EM:м:1052:М
CYRILLIC CAPITAL LETTER EN:н:1053:Н
CYRILLIC CAPITAL LETTER O:о:1054:О
CYRILLIC CAPITAL LETTER PE:п:1055:П
CYRILLIC CAPITAL LETTER ER:р:1056:Р
CYRILLIC CAPITAL LETTER ES:с:1057:С
CYRILLIC CAPITAL LETTER TE:т:1058:Т
CYRILLIC CAPITAL LETTER U:у:1059:У
CYRILLIC CAPITAL LETTER EF:ф:1060:Ф
CYRILLIC CAPITAL LETTER HA:х:1061:Х
CYRILLIC CAPITAL LETTER TSE:ц:1062:Ц
CYRILLIC CAPITAL LETTER CHE:ч:1063:Ч
CYRILLIC CAPITAL LETTER SHA:ш:1064:Ш
CYRILLIC CAPITAL LETTER SHCHA:щ:1065:Щ
CYRILLIC CAPITAL LETTER YERU:ы:1067:Ы
CYRILLIC CAPITAL LETTER SOFT SIGN:ь:1068:Ь
CYRILLIC CAPITAL LETTER E:э:1069:Э
CYRILLIC CAPITAL LETTER YU:ю:1070:Ю
CYRILLIC CAPITAL LETTER YA:я:1071:Я
CYRILLIC CAPITAL LETTER HARD SIGN:ъ:1066:Ъ
LATIN CAPITAL LIGATURE OE:œ:338:Œ
LATIN CAPITAL LETTER Y WITH DIAERESIS:ÿ:376:Ÿ
LATIN CAPITAL LETTER A WITH GRAVE:à:192:À
LATIN CAPITAL LETTER A WITH TILDE:ã:195:Ã
LATIN CAPITAL LETTER A WITH RING ABOVE:å:197:Å
LATIN CAPITAL LETTER AE:æ:198:Æ
LATIN CAPITAL LETTER E WITH GRAVE:è:200:È
LATIN CAPITAL LETTER E WITH CIRCUMFLEX:ê:202:Ê
LATIN CAPITAL LETTER I WITH GRAVE:ì:204:Ì
LATIN CAPITAL LETTER I WITH DIAERESIS:ï:207:Ï
LATIN CAPITAL LETTER ETH (ICELANDIC):ð:208:Ð
LATIN CAPITAL LETTER N WITH TILDE:ñ:209:Ñ
LATIN CAPITAL LETTER O WITH GRAVE:ò:210:Ò
LATIN CAPITAL LETTER O WITH TILDE:õ:213:Õ
LATIN CAPITAL LETTER O WITH STROKE:ø:216:Ø
LATIN CAPITAL LETTER U WITH GRAVE:ù:217:Ù
LATIN CAPITAL LETTER U WITH CIRCUMFLEX:û:219:Û
LATIN CAPITAL LETTER THORN (ICELANDIC):þ:222:Þ
GREEK CAPITAL LETTER EPSILON WITH TONOS:έ:904:Έ
GREEK CAPITAL LETTER ETA WITH TONOS:ή:905:Ή
GREEK CAPITAL LETTER IOTA WITH TONOS:ί:906:Ί
GREEK CAPITAL LETTER OMICRON WITH TONOS:ό:908:Ό
GREEK CAPITAL LETTER UPSILON WITH TONOS:ύ:910:Ύ
GREEK CAPITAL LETTER OMEGA WITH TONOS:ώ:911:Ώ
GREEK CAPITAL LETTER IOTA WITH DIALYTIKA:Ϊ:938:Ϊ
GREEK CAPITAL LETTER ALPHA:α:913:Α
GREEK CAPITAL LETTER BETA:β:914:Β
GREEK CAPITAL LETTER GAMMA:γ:915:Γ
GREEK CAPITAL LETTER DELTA:δ:916:Δ
GREEK CAPITAL LETTER EPSILON:ε:917:Ε
GREEK CAPITAL LETTER ZETA:ζ:918:Ζ
GREEK CAPITAL LETTER ETA:η:919:Η
GREEK CAPITAL LETTER THETA:θ:920:Θ
GREEK CAPITAL LETTER IOTA:ι:921:Ι
GREEK CAPITAL LETTER KAPPA:κ:922:Κ
GREEK CAPITAL LETTER LAMDA:λ:923:Λ
GREEK CAPITAL LETTER MU:μ:924:Μ
GREEK CAPITAL LETTER NU:ν:925:Ν
GREEK CAPITAL LETTER XI:ξ:926:Ξ
GREEK CAPITAL LETTER OMICRON:ο:927:Ο
GREEK CAPITAL LETTER PI:π:928:Π
GREEK CAPITAL LETTER RHO:ρ:929:Ρ
GREEK CAPITAL LETTER SIGMA:σ:931:Σ
GREEK CAPITAL LETTER TAU:τ:932:Τ
GREEK CAPITAL LETTER UPSILON:υ:933:Υ
GREEK CAPITAL LETTER PHI:φ:934:Φ
GREEK CAPITAL LETTER CHI:χ:935:Χ
GREEK CAPITAL LETTER OMEGA:ω:937:Ω
GREEK CAPITAL LETTER ALPHA WITH TONOS:ά:902:Ά
GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA:ϋ:939:Ϋ
GREEK CAPITAL LETTER PSI:ψ:936:Ψ
GREEK CAPITAL LETTER IOTA WITH DIALYTIKA:ϊ:938:Ϊ
LATIN CAPITAL LETTER G WITH BREVE:ğ:286:Ğ
LATIN CAPITAL LETTER E WITH DOT ABOVE:ė:278:Ė
LATIN CAPITAL LETTER R WITH CEDILLA:ŗ:342:Ŗ
LATIN CAPITAL LETTER I WITH OGONEK:į:302:Į
LATIN CAPITAL LETTER A WITH MACRON:ā:256:Ā
LATIN CAPITAL LETTER E WITH MACRON:ē:274:Ē
LATIN CAPITAL LETTER G WITH CEDILLA:ģ:290:Ģ
LATIN CAPITAL LETTER K WITH CEDILLA:ķ:310:Ķ
LATIN CAPITAL LETTER I WITH MACRON:ī:298:Ī
LATIN CAPITAL LETTER L WITH CEDILLA:ļ:315:Ļ
LATIN CAPITAL LETTER N WITH CEDILLA:ņ:325:Ņ
LATIN CAPITAL LETTER O WITH MACRON:ō:332:Ō
LATIN CAPITAL LETTER U WITH OGONEK:ų:370:Ų
LATIN CAPITAL LETTER U WITH MACRON:ū:362:Ū
</caps_uni>
#unused character maps
<unused>
<caps_to_lower>
LATIN CAPITAL LETTER A:'41:65:'61
LATIN CAPITAL LETTER B:'42:66:'62
LATIN CAPITAL LETTER C:'43:67:'63
LATIN CAPITAL LETTER D:'44:68:'64
LATIN CAPITAL LETTER E:'45:69:'65
LATIN CAPITAL LETTER F:'46:70:'66
LATIN CAPITAL LETTER G:'47:71:'67
LATIN CAPITAL LETTER H:'48:72:'68
LATIN CAPITAL LETTER I:'49:73:'69
LATIN CAPITAL LETTER J:'4A:74:'6a
LATIN CAPITAL LETTER K:'4B:75:'6b
LATIN CAPITAL LETTER L:'4C:76:'6c
LATIN CAPITAL LETTER M:'4D:77:'6d
LATIN CAPITAL LETTER N:'4E:78:'6e
LATIN CAPITAL LETTER O:'4F:79:'6f
LATIN CAPITAL LETTER P:'50:80:'70
LATIN CAPITAL LETTER Q:'51:81:'71
LATIN CAPITAL LETTER R:'52:82:'72
LATIN CAPITAL LETTER S:'53:83:'73
LATIN CAPITAL LETTER T:'54:84:'74
LATIN CAPITAL LETTER U:'55:85:'75
LATIN CAPITAL LETTER V:'56:86:'76
LATIN CAPITAL LETTER W:'57:87:'77
LATIN CAPITAL LETTER X:'58:88:'78
LATIN CAPITAL LETTER Y:'59:89:'79
LATIN CAPITAL LETTER Z:'5A:90:'7a
LATIN CAPITAL LETTER A:A:65:a
LATIN CAPITAL LETTER B:B:66:b
LATIN CAPITAL LETTER C:C:67:c
LATIN CAPITAL LETTER D:D:68:d
LATIN CAPITAL LETTER E:E:69:e
LATIN CAPITAL LETTER F:F:70:f
LATIN CAPITAL LETTER G:G:71:g
LATIN CAPITAL LETTER H:H:72:h
LATIN CAPITAL LETTER I:I:73:i
LATIN CAPITAL LETTER J:J:74:j
LATIN CAPITAL LETTER K:K:75:K
LATIN CAPITAL LETTER L:L:76:l
LATIN CAPITAL LETTER M:M:77:m
LATIN CAPITAL LETTER N:N:78:n
LATIN CAPITAL LETTER O:O:79:o
LATIN CAPITAL LETTER P:P:80:p
LATIN CAPITAL LETTER Q:Q:81:q
LATIN CAPITAL LETTER R:R:82:r
LATIN CAPITAL LETTER S:S:83:s
LATIN CAPITAL LETTER T:T:84:t
LATIN CAPITAL LETTER U:U:85:u
LATIN CAPITAL LETTER V:V:86:v
LATIN CAPITAL LETTER W:W:87:x
LATIN CAPITAL LETTER X:X:88:x
LATIN CAPITAL LETTER Y:Y:89:y
LATIN CAPITAL LETTER Z:Z:90:z
NO UNICODE VALUE:'E7:231:\'87
NO UNICODE VALUE:'83:131:\'8E
NO UNICODE VALUE:'92:146:\'EA
NO UNICODE VALUE:'EE:238:\'97
NO UNICODE VALUE:'F2:242:\'9C
NO UNICODE VALUE:'CB:203:\'88
NO UNICODE VALUE:'E9:233:\'8F
NO UNICODE VALUE:'ED:237:\'93
NO UNICODE VALUE:'F1:241:\'98
NO UNICODE VALUE:'F4:244:\'9D
NO UNICODE VALUE:'E5:229:\'89
NO UNICODE VALUE:'E6:230:\'90
NO UNICODE VALUE:'EB:235:\'94
NO UNICODE VALUE:'EF:239:\'99
NO UNICODE VALUE:'F3:243:\'9E
NO UNICODE VALUE:'AF:175:\'BF
NO UNICODE VALUE:'84:132:\'96
NO UNICODE VALUE:'CD:205:\'9B
NO UNICODE VALUE:'CC:204:\'8B
NO UNICODE VALUE:'80:128:\'8A
NO UNICODE VALUE:'E8:232:\'91
NO UNICODE VALUE:'EC:236:\'95
NO UNICODE VALUE:'85:133:\'9A
NO UNICODE VALUE:'86:134:\'9F
NO UNICODE VALUE:'82:130:\'8D
NO UNICODE VALUE:'81:129:\'8C
NO UNICODE VALUE:'E1:129:\'C1
</caps_to_lower>
<wingdings_old>
CANCER:a:9803:♋
LEO:b:9804:♌
VIRGO:c:9805:♍
LIBRA:d:9806:♎
SCORPIOUS:e:9807:♏
SAGITARRIUS:f:9808:♐
CAPRICON:g:9809:♑
AQUARIUS:h:9810:♒
PISCES:i:9811:♓
MY LOOPY ET:j:0:<fancy_et/>
AMPERSAND:k:38:&
BLACK CIRCLE:l:9679:●
SHADOWED WHITE CIRCLE:m:10061:❍
BLACK SQUARE:n:9632:■
WHITE SQUARE:o:9633:□
WHITE SQUARE:p:9633:□
LOWER RIGHT SHADOWED SQUARE:q:10065:❑
UPPER RIGHT SHADOWED WHITE SQUARE:r:10066:❒
BLACK DIAMOND:s:9670:◆
BLACK DIAMOND:t:9670:◆
BLACK DIAMOND:u:9670:◆
BLACK DIAMOND MINUS WHITE X:v:10070:❖
BLACK DIAMOND MINUS WHITE X:w:10070:❖
BALLOT BOX WITH X:x:9746:☒
MY COMPUTER KEY:y:0:<on_key/>
MY APPLE KEY:z:0:<apple_key/>
VICTORY HAND:A:9996:✌
OKAY HAND:B:0:<okay_hand/>
MY THUMBS UP:C:0:<thumbs_up/>
MY THUMBS DOWN:D:0:<thumbs_down/>
WHITE LEFT POINTING INDEX:E:9756:☜
WHITE RIGHT POINTING INDEX:F:9758:☞
WHITE POINTING UP INDEX:G:9757:☝
WHITE POINTING DOWN INDEX:H:9759:☟
MY OPEN HAND:I:0:<open_hand/>
WHITE SMILING FACE:J:9786:☺
MY STRAIGHT FACE:K:0:<straight_face/>
WHITE FROWNING FACE:L:9785:☹
MY BOMB:M:0:<bomb/>
SKULL AND CROSSBONES:N:9760:☠
MY FLAG:O:0:<flag/>
MY PENNANT:P:0:<pennant/>
AIRPLANE:Q:9992:✈
CIRCLED OPEN CENTRE EIGHT POINTED STAR:R:9794:♂
MY TEARDROP:S:0:<teardrop/>
SNOWFLAKE:T:10052:❄
SHADOWED WHITE LATIN CROSS:U:10014:✞
SHADOWED WHITE LATIN CROSS:V:10014:✞
MY CELTIC CROSS:W:0:<celtic_cross/>
MALTESE CROSS:X:10016:✠
STAR OF DAVID:Y:10017:✡
STAR AND CRESCENT:Z:9770:☪
MY FOLDER:0:0:<folder/>
MY OPEN FOLDER:1:0:<open_folder/>
MY DOG-EARED DOCUMENT:2:0:<dog_eared_doc/>
MY DOCUMENT:3:0:<document/>
MY PAGES:4:0:<pages/>
MY FILE CABINETS:5:0:<file_cabinets/>
MY HOUR GLASS:6:0:<hour_glass/>
MY KEYBOARD:7:0:<keyboard/>
MY MOUSE:8:0:<mouse/>
MY BOTTOM OF MOUSE:9:0:<bottom_mouse/>
LOWER RIGHT PENCIL:!:9998:✎
WRITING HAND:@:9996:✌
UPPER BLADE SCISSORS:#:9985:✁
MY GLASSES:$:0:<glasses/>
MY BELL:%:0:<bell/>
ARIES:^:9800:♈
MY BOOK:&:0:<book/>
ENVELOPE:*:9993:✉
BLACK TELEPHONE:(:9742:☎
TELEPHONE LOCATION SIGN:):9990:✆
MY MAILBOX:-:0:<mailbox/>
TAURUS:_:9801:♉
MY BLACK FLOPPY DISK:=:0:<black_floppy_disk/>
ENVELOPE:+:9993:✉
HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT:~:10078:❞
GEMINI:`:9802:♊
MY WHITE FLOPPY DISK:<:0:<white_floppy_disk/>
MY TAPE REEL:>:0:<tape_reel/>
MY OPEN MAILBOX:.:0:<open_mailbox/>
WRITING HAND:?:9996:✌
EIGHT PETALLED OUTLINED BLACK FLORETTE:|:10049:❁
MY OPEN MAILBOX:/:0:<open_mailbox/>
MY COMPUETR:\\colon:0:<computer/>
MY DOWNWARD LEAF:" :0:<downward_leaf/>
MY UPWARD LEAF:" :0:<upward_leaf/>
</wingdings_old>
<SYMBOL_decimal>
EXCLAMATION MARK:33:33:!
FOR ALL:34:8704:∀
NUMBER SIGN:35:35:#
THERE EXISTS:36:8707:∃
PERCENTAGE SIGN:37:37:%
AMPERSAND:38:38:&
CONTAINS AS A MEMBER:39:8715:∋
LEFT PARENTHESIS:40:40:(
RIGHT PERENTHESIS:41:41:)
ASTERISK OPERATOR:42:8727:∗
PLUS SIGN:43:43:+
COMMA:44:44:,
MINUS SIGN:45:8722:−
FULL STOP:46:46:.
DIVISION SLASH:47:8725:∕
DIGIT ZERO:48:48:0
DIGIT ONE:49:49:1
DIGIT TWO:50:50:2
DIGIT THREE:51:51:3
DIGIT FOUR:52:52:4
DIGIT FIVE:53:53:5
DIGIT SIX:54:54:6
DIGIT SEVEN:55:55:7
DIGIT EIGHT:56:56:8
DIGIT NINE:57:57:9
RATIO:58:8758:∶
SEMICOLON:59:59:;
LESS-THAN SIGN:60:60:<
EQUALS SIGN TO:61:61:=
GREATER-THAN SIGN:62:62:>
QUESTION MARK:63:63:?
APPROXTIMATELY EQUAL TO:64:8773:≅
GREEK CAPITOL LETTER ALPHA:65:913:Α
GREEK CAPAITOL LETTER BETA:66:914:Β
GREEK CAPITOL LETTER CHI:67:935:Χ
GREEK CAPITOL LETTER DELTA:68:916:Δ
GREEK CAPITOL LETTER EPSILON:69:917:Ε
GREEK CAPITOL LETTER PHI:70:934:Φ
GREEK CAPITOL LETTER GAMMA:71:915:Γ
GREEK CAPITOL LETTER ETA:72:919:Η
GREEK CAPITOL LETTER ITOA:73:913:Α
GREEK THETA SYMBOL:74:977:ϑ
GREEK CAPITOL LETTER KAPPA:75:922:Κ
GREEK CAPITOL LETTER LAMBDA:76:923:Λ
GREEK CAPITOL LETTER MU:77:924:Μ
GREEK CAPITOL LETTER NU:78:925:Ν
GREEK CAPITOL LETTER OMICRON:79:927:Ο
GREEK CAPITAL LETTER PI:80:928:Π
GREEK CAPITOL LETTER THETA:81:920:Θ
GREEK CAPITOL LETTER RHO:82:929:Ρ
GREEK CAPITOL LETTER SIGMA:83:931:Σ
GREEK CAPITOL LETTER TAU:84:932:Τ
GREEK CAPITOL LETTER UPSILON:85:933:Υ
GREEK LETTER STIGMA:86:986:Ϛ
GREEK CAPITOL LETTER OMEGA:87:937:Ω
GREEK CAPITOL LETTER XI:88:926:Ξ
GREEK CAPITOL LETTER PSI:89:936:Ψ
GREEK CAPITOL LETTER ZETA:90:918:Ζ
LEFT SQUARE BRACKET:91:91:[
THEREFORE:92:8756:∴
RIGHT SQUARE BRACKET:93:93:]
UP TACK:94:8869:⊥
MODIFIER LETTER LOW MACRON:95:717:ˍ
MODIFIER LETTER MACRON:96:713:ˉ
GREEK SMALL LETTER ALPHA:97:945:α
GREEK SMALL LETTER BETA:98:946:β
GREEK SMALL LETTER CHI:99:967:χ
GREEK SMALL LETTER DELTA:100:948:δ
GREEK SMALL LETTER EPSILON:101:949:ε
GREEK PHI SYMBOL:102:981:ϕ
GREEK MSALL LETTER DELTA:103:947:γ
GREEK SMALL LETTER ETA:104:951:η
GREEK SMALL LETTER IOTA:105:953:ι
GREEK SMALL LETTER PHI:106:966:φ
GREEK SMALL LETTER KAPPA:107:954:κ
GREEK SMALL LETTER LAMDA:108:955:λ
GREEK SMALL LETTER MU:109:956:μ
GREEK SMALL LETTER NU:110:957:ν
GREEK SMALL LETTER OMICRON:111:959:ο
GREEK SMALL LETTER PI:112:960:π
GREEK SMALL LETTER THETA:113:952:θ
GREEK SMALL LETTER RHO:114:961:ρ
GREEK SMALL LETTER SIGMA:115:963:σ
GREEK SMALL LETTER TAU:116:964:τ
GREEK SMALL LETTER UPSILON:117:965:υ
GREEK PI SYMBOL:118:982:ϖ
GREEK SMALL LETTER OMEGA:119:969:ω
GREEK SMALL LETTER XI:120:958:ξ
GREEK SMALL LETTER PHI:121:966:φ
GREEK SMALL LETTER ZETA:122:950:ζ
LEFT CURLY BRACKET:123:123:{
DIVIDES:124:8739:∣
RIGHT CURLY BRACKET:125:125:}
TILDE OPERATOR:126:8764:∼
GREEK UPSILON WITH HOOK SYMBOL:161:978:ϒ
COMBINING ACUTE TONE MARK:162:833:́
LESS THAN OR EQUAL TO:163:8804:≤
DIVISION SLASH:164:8725:∕
INFINITY:165:8734:∞
SMALL LETTER F:166:15:f
BLACK CLUB SUIT:167:9827:♣
BLACK DIAMOND SUIT:168:9830:♦
BLACK HEART SUIT:169:9829:♥
BLACK SPADE SUIT:170:9824:♠
LEFT RIGHT ARROW:171:8596:↔
LEFTWARDS ARROW:172:8592:←
UPWARDS ARROW:173:8593:↑
RIGHTWARDS ARROW:174:8594:→
DOWNWARDS ARROW:175:8595:↓
DEGREE SIGN:176:176:°
PLUS OR MINUS SIGN:177:177:±
DOUBLE ACUTE ACCENT:178:733:˝
GREATER THAN OR EQUAL TO:179:8805:≥
MULTIPLICATION SIGN:180:215:×
DON'T KNOW:181:8733:∝
PARTIAL DIFFERENTIAL:182:8706:∂
BULLET:183:183:·
DIVISION:184:247:÷
NOT EQUAL TO:185:8800:≠
IDENTICAL TO:186:8801:≡
ALMOST EQUAL TO:187:8776:≈
MIDLINE HORIZONTAL ELLIPSES:188:8943:⋯
DIVIDES:189:8739:∣
BOX DRAWINGS LIGHT HORIZONTAL:190:9472:─
DOWNWARDS ARROW WITH TIP LEFTWARDS:191:8626:↲
CIRCLED TIMES:196:8855:⊗
CIRCLED PLUS:197:8853:⊕
EMPTY SET:198:8709:∅
INTERSECTION:199:8745:∩
UNION:200:8746:∪
SUPERSET OF:201:8835:⊃
SUPERSET OF OR EQUAL TO:202:8839:⊇
NIETHER A SUBSET OR EQUAL TO:203:8836:⊄
SUBSET OF:204:8834:⊂
SUBSET OR EQUAL TO:205:8838:⊆
ELEMENT OF:206:8712:∈
NOT AN ELEMENT OF:207:8713:∉
ANGLE:208:8736:∠
WHITE DOWN POINTING TRIANBLE:209:9661:▽
REGISTERED SIGN:210:174:®
COPYRIGHT:211:169:©
TRADEMARK SYMBOL:212:8482:™
NARY OPERATOR:213:8719:∏
SQUARE ROOT:214:8730:√
BULLET OPERATOR:215:8729:∙
NOT SIGN:216:172:¬
LOGICAL AND:217:8743:∧
LOGICAL OR:218:8744:∨
LEFT RIGHT DOUBLE ARROW:219:8660:⇔
LEFTWARDS DOUBLE ARROW:220:8656:⇐
UPWARDS DOUBLE ARROW:221:8657:⇑
RIGHTWARDS DOUBLE ARROW:222:8658:⇒
DOWNWARDS DOUBLE ARROW:223:8659:⇓
BETWEEN:224:8812:≬
MATHEMATICAL LEFT ANGELBRACKET:225:10216:⟨
REGISTERED SIGN:226:174:®
COPYRIGHT:227:169:©
TRADEMARK SYMBOL:228:8482:™
N-ARY SUMMATION:229:8721:∑
LARGE LEFT PARENTHESIS PART1:230:0:<udef_symbol num="0x230" description="left_paraenthesis part 1"/>
LARGE LEFT PARENTHESIS PART2:231:0:<udef_symbol num="0x231" description="left_parenthesis part 2"/>
LARGE LEFT PARENTHESIS PART3:232:0:<udef_symbol num="0x232" description="left_paranethesis part 3"/>
LARGE LEFT SQUARE BRACKET PART1:233:0:<udef_symbol num="0x233" description="left_square_bracket part 1"/>
LARGE LEFT SQUARE BRACKET PART2:234:0:<udef_symbol num="0x234" description="left_square_bracket part 2"/>
LARGE LEFT SQUARE BRACKET PART3:235:0:<udef_symbol num="0x235" description="left_square_bracket part 3"/>
LARGE LEFT BRACKET PART1:236:0:<udef_symbol num="0x236" description="right_bracket part 1"/>
LARGE LEFT BRACKET PART2:237:0:<udef_symbol num="0x237" description="right_bracket part 2"/>
LARGE LEFT BRACKET PART3:238:0:<udef_symbol num="0x238" description="right_bracket part 3"/>
DIVIDES:239:8739:∣
MATHEMATICAL RIGHT ANGLE BRACKET:241:10217:⟩
INTEGRAL:242:8747:∫
LARGE INTEGRAL PART 1:243:0:<udef_symbol num="0x243" description="integral part 1"/>
LARGE INTEGRAL PART 2:244:0:<udef_symbol num="0x244" description="integral part 2"/>
LARGE INTEGRAL PART 3:245:0:<udef_symbol num="0x245" description="integral part 3"/>
LARGE RIGHT PARENTHESIS PART1:246:0:<udef_symbol num="0x246" description="right_parenthesis part 1"/>
LARGE RIGHT PARENTHESIS PART2:247:0:<udef_symbol num="0x247" description="right_parenthesis part 2"/>
LARGE RIGHT PARENTHESIS PART3:248:0:<udef_symbol num="0x248" description="right_parenthesis part 3"/>
LARGE RIGHT SQUARE BRACKET PART1:249:0:<udef_symbol num="0x249" description="right_square_bracket part 1"/>
LARGE RIGHT SQUARE BRACKET PART2:250:0:<udef_symbol num="0x250" description="right_square_bracket part 2"/>
LARGE RIGHT SQUARE BRACKETPART3:251:0:<udef_symbol num="0x251" description="right_square_bracket part 3"/>
LARGE RIGHT BRACKET PART1:252:0:<udef_symbol num="0x252" description="right_bracket part 1"/>
LARGE RIGHT BRACKETPART2:253:0:<udef_symbol num="0x253" description="right_bracket part 2"/>
LARGE RIGHT BRACKETPART3:254:0:<udef_symbol num="0x254" description="right_bracket part 3"/>
DOUBLE ACUTE ACCENT:178:733:˝
</SYMBOL_decimal>
<SYMBOL_old>
EXCLMATION POINT:33:unknown:!
FOR ALL:34:8704:∀
POUND SIGN:35:unknown:#
THERE EXISTS:36:8707:∃
PERCENTAGE SIGN:37:unknown:%
AMPERSAND:38:38:&
CONTAINS AS A MEMBER:39:unknown:∋
LEFT PARENTHESIS:40:unknown:(
RIGHT PERENTHESIS:41:unknown:)
ASTERISK OPERATOR:42:8727:∗
PLUS:43:unknown:+
COMMA:44:unknown:,
MINUS SIGN:45:8722:−
PERIOD:46:unknown:.
DIVISION SLASH:47:8725:∕
ZERO:48:0:0
ONE:49:1:1
TWO:50:2:2
THREE:51:3:3
FOUR:52:4:4
FIVE:53:5:5
SIX:54:6:6
SEVEN:55:7:7
EIGHT:56:8:8
NINE:57:9:9
RATIO:58:8758:∶
SEMICOLON:59:unknown:;
LESS THAN:60:unknown:<
EQAULS TO:61:unknown:=
GREATER THAN:62:unknown:>
QUESTION MARK:63:unknown:?
APPROXTIMATELY EQUAL TO:64:8773:≅
GREEK CAPITOL LETTER ALPHA:65:913:Α
GREEK CAPAITOL LETTER BETA:66:914:Β
GREEK CAPITOL LETTER CHI:67:unknown:Χ
GREEK CAPITOL LETTER DELTA:68:916:Δ
GREEK CAPITOL LETTER EPSILON:69:917:Ε
GREEK CAPITOL LETTER PHI:70:unknown:Φ
GREEK CAPITOL LETTER GAMMA:71:915:Γ
GREEK CAPITOL LETTER ETA:72:919:Η
GREEK CAPITOL LETTER ITOA:73:913:Α
GREEK THETA SYMBOL:74:unknown:ϑ
GREEK CAPITOL LETTER KAPPA:75:unknown:Κ
GREEK CAPITOL LETTER LAMBDA:76:unknown:Λ
GREEK CAPITOL LETTER MU:77:unknown:Μ
GREEK CAPITOL LETTER NU:78:unknown:Ν
GREEK CAPITOL LETTER OMICRON:79:unknown:Ο
GREEK CAPITAL LETTER PI:80:unknown:Π
GREEK CAPITOL LETTER THETA:81:920:Θ
GREEK CAPITOL LETTER RHO:82:unknown:Ρ
GREEK CAPITOL LETTER SIGMA:83:unknown:Σ
GREEK CAPITOL LETTER TAU:84:unknown:Τ
GREEK CAPITOL LETTER UPSILON:85:unknown:Υ
GREEK LETTER STIGMA:86:unknown:Ϛ
GREEK CAPITOL LETTEROMEGA:87:unknown:Ω
GREEK CAPITOL LETTER XI:88:unknown:Ξ
GREEK CAPITOL LETTER PSI:89:unknown:Ψ
GREEK CAPITOL LETTER ZETA:90:918:Ζ
LEFT BRACKET:91:unknown:[
THEREFORE:92:8756:∴
LEFT BRACKET:93:unknown:[
UP TACK:94:unknown:⊥
MODIFIER LETTER LOW MACRON:95:unknown:ˍ
MODIFIER LETTER MACRON:96:unknown:ˉ
GREEK SMALL LETTER ALPHA:97:unknown:α
GREEK SMALL LETTER BETA:98:unknown:β
GREEK SMALL LETTER CHI:99:unknown:χ
GREEK SMALL LETTER DELTA:100:unknown:δ
GREEK SMALL LETTER EPSILON:101:unknown:ε
GREEK PHI SYMBOL:102:unknown:ϕ
GREEK MSALL LETTER DELTA:103:unknown:γ
GREEK SMALL LETTER ETA:104:unknown:η
GREEK SMALL LETTER IOTA:105:unknown:ι
GREEK SMALL LETTER PHI:106:unknown:φ
GREEK SMALL LETTER KAPPA:107:unknown:κ
GREEK SMALL LETTER LAMDA:108:unknown:λ
GREEK SMALL LETTER MU:109:unknown:μ
GREEK SMALL LETTER NU:110:unknown:ν
GREEK SMALL LETTER OMICRON:111:unknown:ο
GREEK SMALL LETTER PI:112:unknown:π
GREEK SMALL LETTER THETA:113:unknown:θ
GREEK SMALL LETTER RHO:114:unknown:ρ
GREEK SMALL LETTER SIGMA:115:unknown:σ
GREEK SMALL LETTER TAU:116:unknown:τ
GREEK SMALL LETTER UPSILON:117:unknown:υ
GREEK PI SYMBOL:118:unknown:ϖ
GREEK SMALL LETTER OMEGA:119:unknown:ω
GREEK SMALL LETTER XI:120:unknown:ξ
GREEK SMALL LETTER PHI:121:unknown:φ
GREEK SMALL LETTER ZETA:122:unknown:ζ
RIGHT BRACKET:123:unknown:{
DIVIDES:124:8739:∣
LEFT BRACKET:125:unknown:}
TILDE OPERATOR:126:unknown:∼
GREEK UPSILON WITH HOOK SYMBOL:161:unknown:ϒ
COMBINING ACUTE TONE MARK:162:833:́
LESS THAN OR EQUAL TO:163:8804:≤
DIVISION SLASH:164:8725:∕
INFINITY:165:unknown:∞
SMALL LETTER F:166:unknown:f
BLACK CLUB SUIT:167:9827:♣
BLACK DIAMOND SUIT:168:9830:♦
BLACK HEART SUIT:169:9829:♥
BLACK SPADE SUIT:170:9824:♠
LEFT RIGHT ARROW:171:8596:↔
LEFTWARDS ARROW:172:8592:←
UPWARDS ARROW:173:8593:↑
RIGHTWARDS ARROW:174:8594:→
DOWNWARDS ARROW:175:8595:↓
DEGREE SIGN:176:unknown:°
PLUS OR MINUS SIGN:177:unknown:±
DOUBLE ACUTE ACCENT:178:unknown:˝
GREATER THAN OR EQUAL TO:179:8805:≥
MULTIPLICATION SIGN:180:unknown:×
DON' T KNOW:181:unknown:∝
PARTIAL DIFFERENTIAL:182:8706:∂
BULLET?:183:unknown:·
DIVISION:184:unknown:÷
NOT EQUAL TO:185:8800:≠
IDENTICAL TO:186:8801:≡
ALMOST EQUAL TO:187:8776:≈
MIDLINE HORIZONTAL ELLIPSES:188:unknown:⋯
DIVIDES:189:8739:∣
BOX DRAWINGS LIGHT HORIZONTAL:190:9472:─
DOWNWARDS ARROW WITH TIP LEFTWARDS:191:unknown:↲
CIRCLED TIMES:196:8855:⊗
CIRCLED PLUS:197:8853:⊕
EMPTY SET:198:8709:∅
INTERSECTION:199:8745:∩
UNION:200:unknown:∪
SUPERSET OF:201:8835:⊃
SUPERSET OF OR EQUAL TO:202:8839:⊇
NIETHER A SUBSET OR EQUAL TO:203:8836:⊄
SUBSET OF:204:8834:⊂
SUBSET OR EQUAL TO:205:8838:⊆
ELEMENT OF:206:8712:∈
NOT AN ELEMENT OF:207:8713:∉
ANGLE:208:8736:∠
WHITE DOWN POINTING TRIANBLE:209:unknown:▽
REGISTERED SIGN:210:unknown:®
COPYRIGHT:211:unknown:©
NARY OPERATOR:213:unknown:∏
SQUARE ROOT:214:unknown:√
BULLET OPERATOR:215:8729:∙
NOT SIGN:216:unknown:¬
LOGICAL AND:217:8743:∧
LOGICAL OR:218:8744:∨
LEFT RIGHT DOUBLE ARROW:219:unknown:⇔
LEFTWARDS DOUBLE ARROW:220:unknown:⇐
UPWARDS DOUBLE ARROW:221:unknown:⇑
RIGHTWARDS DOUBLE ARROW:222:unknown:⇒
DOWNWARDS DOUBLE ARROW:223:unknown:⇓
BETWEEN:224:unknown:≬
MATHEMATICAL LEFT ANGELBRACKET:225:unknown:⟨
REGISTERED SIGN:226:unknown:®
COPYRIGHT:227:unknown:©
N-ARY SUMMATION:229:8721:∑
LARGE LEFT PARENTHESIS PART1:230:unknown:<udef_symbol num="0x230" description="left_paraenthesis part 1"/>
LARGE LEFT PARENTHESIS PART2:231:unknown:<udef_symbol num="0x231" description="left_parenthesis part 2"/>
LARGE LEFT PARENTHESIS PART3:232:unknown:<udef_symbol num="0x232" description="left_paranethesis part 3"/>
LARGE LEFT SQUARE BRACKET PART1:233:unknown:<udef_symbol num="0x233" description="right_square_bracket part 1"/>
LARGE LEFT SQUARE BRACKET PART2:234:unknown:<udef_symbol num="0x234" description="right_square_bracket part 2"/>
LARGE LEFT SQUARE BRACKET PART3:235:unknown:<udef_symbol num="0x235" description="right_square_bracket part 3"/>
LARGE LEFT BRACKET PART1:236:unknown:<udef_symbol num="0x236" description="right_bracket part 1"/>
LARGE LEFT BRACKET PART2:237:unknown:<udef_symbol num="0x237" description="right_bracket part 2"/>
LARGE LEFT BRACKET PART3:238:unknown:<udef_symbol num="0x238" description="right_bracket part 3"/>
DIVIDES:239:8739:∣
MATHEMATICAL RIGHT ANGLE BRACKET:241:unknown:27E9
INTEGRAL:242:unknown:∫
LARGE INTEGRAL PART 1:243:unknown:<integral part="1"/>
LARGE INTEGRAL PART 2:244:unknown:<integral part="2"/>
LARGE INTEGRAL PART 3:245:unknown:<integral part="3"/>
LARGE RIGHT PARENTHESIS PART1:246:unknown:<right_parenthesis part="1"/>
LARGE RIGHT PARENTHESIS PART2:247:unknown:<right_parenthesis part="2"/>
LARGE RIGHT PARENTHESIS PART3:248:unknown:<right_parenthesis part="3"/>
LARGE RIGHT SQUARE BRACKET PART1:249:unknown:<right_square_bracket part="1"/>
LARGE RIGHT SQUARE BRACKET PART2:250:unknown:<right_square_bracket part="2"/>
LARGE RIGHT SQUARE BRACKETPART3:251:unknown:<right_square_bracket part="3"/>
LARGE RIGHT BRACKET PART1:252:unknown:<right_bracket part="1"/>
LARGE RIGHT BRACKETPART2:253:unknown:<right_bracket part="2"/>
LARGE RIGHT BRACKETPART3:254:unknown:<right_bracket part="3"/>
DOUBLE ACUTE ACCENT:178:unknown:02DD
TRADEMARK SYMBOL:212:unknown:™
TRADEMARK SYMBOL:228:unknown:™
</SYMBOL_old>
<greek>
GREEK CAPITAL LETTER ALPHA:A:65:Α
GREEK CAPITAL LETTER BETA:B:66:Β
GREEK CAPITAL LETTER CHI:C:67:Χ
GREEK CAPITAL LETTER DELTA:D:68:Δ
GREEK CAPITAL LETTER EPSILON:E:69:Ε
GREEK CAPITAL LETTER PHI:F:70:Φ
GREEK CAPITAL LETTER GAMMA:G:71:Γ
GREEK CAPITAL LETTER ETA:H:72:Η
GREEK CAPITAL LETTER IOTA:I:73:Ι
GREEK THETA SYMBOL:J:74:ϑ
GREEK CAPITAL LETTER KAPPA:K:75:Κ
GREEK CAPITAL LETTER LAMDA:L:76:Λ
GREEK CAPITAL LETTER MU:M:77:Μ
GREEK CAPITAL LETTER NU:N:78:Ν
GREEK CAPITAL LETTER OMICRON:O:79:Ο
GREEK CAPITAL LETTER PI:P:80:Π
GREEK CAPITAL LETTER THETA:T:81:Θ
GREEK CAPITAL LETTER RHO:R:82:Ρ
GREEK CAPITAL LETTER SIGMA:S:83:Σ
GREEK CAPITAL LETTER TAU:T:84:Τ
GREEK CAPITAL LETTER UPSILON:U:85:Υ
GREEK SMALL LETTER FINAL SIGMA:V:86:Ϛ
GREEK CAPITAL LETTER OMEGA:W:87:Ω
GREEK CAPITAL LETTER XI:X:88:Ξ
GREEK CAPITAL LETTER PSI:Y:89:Ψ
GREEK CAPITAL LETTER ZETA:Z:90:Ζ
GREEK SMALL LETTER ALPHA:a:97:α
GREEK SMALL LETTER BETA:b:98:β
GREEK SMALL LETTER CHI:c:99:χ
GREEK SMALL LETTER DELTA:d:100:δ
GREEK SMALL LETTER EPSILON:e:101:ε
GREEK SMALL LETTER PHI:f:102:φ
GREEK SMALL LETTER GAMMA:g:103:γ
GREEK SMALL LETTER ETA:h:104:η
GREEK SMALL LETTER IOTA:i:105:ι
GREEK PHI SYMBOL:j:106:φ
GREEK SMALL LETTER KAPPA:k:107:κ
GREEK SMALL LETTER LAMDA:l:108:λ
GREEK SMALL LETTER MU:m:109:μ
GREEK SMALL LETTER NU:n:110:ν
GREEK SMALL LETTER OMICRON:o:111:ο
GREEK SMALL LETTER PI:p:112:π
GREEK SMALL LETTER THETA:q:113:θ
GREEK SMALL LETTER RHO:r:114:ρ
GREEK SMALL LETTER SIGMA:s:115:σ
GREEK SMALL LETTER TAU:t:116:τ
GREEK SMALL LETTER UPSILON:u:117:υ
GREEK PI SYMBOL:v:118:ϖ
GREEK SMALL LETTER OMEGA:w:119:ω
GREEK SMALL LETTER XI:x:120:ξ
GREEK SMALL LETTER PSI:y:121:ψ
GREEK SMALL LETTER ZETA:z:122:ζ
APROXTIMATELY EQUAL TO:@:unknown:≅
THERE EXISTS:$:unknown:2203
UP TACK:^:unknown:⊥
</greek>
<dingbats_old>
STAR OF DAVID:A:10017:✡
FOUR TEARDROP-SPOKED ASTERISK:B:10018:✢
FOUR BALLOON-SPOKED ASTERISK:C:10019:✣
HEAVY FOUR BALLOON-SPOKED ASTERISK:D:10020:✤
FOUR CLUB-SPOKED ASTERISK:E:10021:✥
BLACK FOUR POINTED STAR:F:10022:✦
WHITE FOUR POINTED STAR:G:10023:✧
BLACK STAR:H:9989:✅
STRESS OUTLINED WHITE STAR:I:10025:✩
CIRCLED WHITE STAR:J:10026:✪
OPEN CENTRE BLACK STAR:K:10027:✫
BLACK CENTRE WHITE STAR:L:10028:✬
OUTLINED BLACK STAR:M:10029:✭
HEAVY OUTLINED BLACK STAR:N:10030:✮
PINWHEEL STAR:O:10031:✯
SHADOWED WHITE STAR:P:10032:✰
HEAVY ASTERISK:Q:10033:✱
OPEN CENTRE ASTERISK:R:10034:✲
EIGHT SPOKED ASTERISK:S:10035:✳
EIGHT POINTED BLACK STAR:T:10036:✴
EIGHT POINTED PINWHEEL STAR:U:10037:✵
SIX POINTED BLACK STAR:V:10038:✶
EIGHT POINTED RECTILINEAR BLACK STAR:W:10039:✷
HEAVY EIGHT POINTED RECTILINEAR BLACK STAR:X:10040:✸
TWELVE POINTED BLACK STAR:Y:10041:✹
SIXTEEN POINTED ASTERISK:Z:10042:✺
EIGHT PETALLED OUTLINED BLACK FLORETTE:a:10049:❁
CIRCLED OPEN CENTRE EIGHT POINTED STAR:b:10050:❂
HEAVY TEARDROP-SPOKED PINWHEEL ASTERISK:c:10051:❃
SNOWFLAKE:d:10052:❄
TIGHT TRIFOLIATE SNOWFLAKE:e:10053:❅
HEAVY CHEVRON SNOWFLAKE:f:10054:❆
SPARKLE:g:10055:❇
HEAVY SPARKLE:h:10056:❈
BALLOON-SPOKED ASTERISK:i:10057:❉
TEARDROP-SPOKED ASTERISK:j:10043:✻
HEAVY TEARDROP-SPOKED ASTERISK:k:10045:✽
BLACK CIRCLE:l:9679:●
SHADOWED WHITE CIRCLE:m:10061:❍
BLACK SQUARE:n:9632:■
LOWER RIGHT DROP-SHADOWED SQUARE:o:10063:❏
UPPER RIGHT DROP-SHADOWED WHITE SQUARE:p:10064:❐
LOWER RIGHT SHADOWED SQUARE:q:10065:❑
UPPER RIGHT SHADOWED WHITE SQUARE:r:10066:❒
BLACK UP-POINTING TRIANGLE:s:9650:▲
BLACK DOWN-POINTING TRIANGLE:t:9660:▼
BLACK DIAMOND:u:9670:◆
BLACK DIAMOND MINUS WHITE X:v:10070:❖
RIGHT HALF BLACK CIRCLE:w:9479:┇
LIGHT VERTICAL BAR:x:10072:❘
MEDIUM VERTICAL BAR:y:10073:❙
HEAVY VERTICAL BAR:z:10074:❚
WHITE NIB:1:10001:✑
BLACK NIB:2:10002:✒
CHECKMARK:3:10003:✓
HEAVY CHECKMARK:4:10004:✔
MULTIPLICATION X:5:10005:✕
HEAVY MULTIPLICATION X:6:10006:✖
BALLOT X:7:10007:✗
HEAVY BALLOT X:8:10008:✘
OUTLINED GREEK CROSS:9:10009:✙
UPPER RIGHT PENCIL:0:10000:✐
UPPER BLADE SCISSORS:!:9985:✁
MALTESE CROSS:@:10016:✠
LOWER BLADE SCISSORS:#:9987:✃
WHITE SCISSORS:$:9988:✄
BLACK TELEPHONE:%:9742:☎
SIX PETALLED BLACK AND WHITE FLORETTE:^:10046:✾
TELEPHONE LOCATION SIGN:&:9990:✆
BLACK RIGHT POINTING INDEX:*:9755:☛
AIRPLANE:(:9992:✈
ENVELOPE:):9993:✉
HEAVY GREK CROSS:\\colon:10010:✚
OUTLINED LATIN CROSS:?:10015:✟
PENCIL:/:9999:✏
OPEN CENTRE TEARDROP-SPOKED ASTERISK:\\:10044:✼
WHITE RIGHT POINTING INDEX:+:9758:☞
WRITING HAND:-:9997:✍
LATIN CROSS:=:10013:✝
HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT:~:10078:❞
</dingbats_old>
<ms_set_old>
LEFT DOUBLE QUOTATION MARK:LDBLQUOTE :8220:“
RIGHT DOUBLE QUOTATION MARK:RDBLQUOTE :8221:”
RIGHT SINGLE QUOTATION MARK:RQUOTE :8217:’
LEFT SINGLE QUOTATION MARK:LQUOTE :8216:‘
EM DASH:EMDASH :8212:—
EN DASH:ENDASH :8211:–
MIDDLE DOT:BULLET :183:·
NO-BREAK SPACE:~ :167:§
HORIZONTAL TABULATION:TAB :9:	
</ms_set_old>
<single_set>
NULL:\xef:0:&#xnull;
</single_set>
</unused>
"""
| 705,807 | Python | .py | 16,708 | 41.243656 | 160 | 0.795456 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,313 | fields_large.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/fields_large.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy, field_strings
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class FieldsLarge:
r"""
=========================
Logic
=========================
Make tags for fields.
-Fields reflect text that Microsoft Word automatically generates.
-Each file contains (or should contain) an inner group called field instructions.
-Fields can be nested.
--------------
Logic
--------------
1. As soon as a field is found, make a new text string by appending an empty
text string to the field list. Collect all the lines in this string until the
field instructions are found.
2. Collect all the tokens and text in the field instructions. When the end of
the field instructions is found, process the string of text with the
field_strings module. Append the processed string to the field instructions
list.
3. Continue collecting tokens. Check for paragraphs or sections. If either is found, add to the paragraph or section list.
4. Continue collecting tokens and text until either the beginning of a new field is found, or the end of this field is found.
5. If a new field is found, repeat steps 1-3.
6. If the end of the field is found, process the last text string of the field list.
7. If the field list is empty (after removing the last text string), there are
no more fields. Print out the final string. If the list contains other strings,
add the processed string to the last string in the field list.
============================
Examples
============================
This line of RTF:
{\field{\*\fldinst { CREATEDATE \\* MERGEFORMAT }}{\fldrslt {
\lang1024 1/11/03 10:34 PM}}}
Becomes:
<field type = "insert-time">
10:34 PM
</field>
The simple field in the above example contains no paragraph or sections breaks.
This line of RTF:
{{\field{\*\fldinst SYMBOL 97 \\f "Symbol" \\s 12}{\fldrslt\f3\fs24}}}
Becomes:
<para><inline font-size="18"><inline font-style="Symbol">Χ</inline></inline></para>
The RTF in the example above should be represented as UTF-8 rather than a field.
This RTF:
{\field\fldedit{\*\fldinst { TOC \\o "1-3" }}{\fldrslt {\lang1024
Heading one\tab }{\field{\*\fldinst {\lang1024 PAGEREF _Toc440880424
\\h }{\lang1024 {\*\datafield
{\lang1024 1}}}{\lang1024 \par }\pard\plain
\s18\li240\widctlpar\tqr\tldot\tx8630\aspalpha\aspnum\faauto\adjustright\rin0\lin240\itap0
\f4\lang1033\cgrid {\lang1024 Heading 2\tab }{\field{\*\fldinst
{\lang1024 PAGEREF _Toc440880425 \\h }{\lang1024 {\*\datafield
{\lang1024 1}}}{\lang1024 \par }\pard\plain
\widctlpar\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0
\f4\lang1033\cgrid }}\pard\plain
\widctlpar\aspalpha\aspnum\faauto\adjustright\rin0\lin0\itap0
\f4\lang1033\cgrid {\fs28 \\u214\'85 \par }{\fs36 {\field{\*\fldinst
SYMBOL 67 \\f "Symbol" \\s 18}{\fldrslt\f3\fs36}}}
Becomes:
<field-block type="table-of-contents">
<paragraph-definition language="1033" nest-level="0"
font-style="Times" name="toc 1" adjust-right="true"
widow-control="true">
<para><inline language="1024">Heading one	</inline><field
type="reference-to-page" ref="_Toc440880424"><inline
language="1024">1</inline></field></para>
</paragraph-definition>
<paragraph-definition language="1033" nest-level="0" left-indent="12"
font-style="Times" name="toc 2" adjust-right="true"
widow-control="true">
<para><inline language="1024">Heading 2	</inline><field
type="reference-to-page" ref="_Toc440880425"><inline
language="1024">1</inline></field></para>
</paragraph-definition>
</field-block>
"""
def __init__(self,
in_file,
bug_handler,
copy=None,
run_level=1,
):
"""
Required:
'file'--file to parse
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
def __initiate_values(self):
"""
Initiate all values.
"""
self.__text_string = ''
self.__field_instruction_string = ''
self.__marker = 'mi<mk<inline-fld\n'
self.__state = 'before_body'
self.__string_obj = field_strings.FieldStrings(run_level=self.__run_level,
bug_handler=self.__bug_handler,)
self.__state_dict = {
'before_body' : self.__before_body_func,
'in_body' : self.__in_body_func,
'field' : self.__in_field_func,
'field_instruction' : self.__field_instruction_func,
}
self.__in_body_dict = {
'cw<fd<field_____' : self.__found_field_func,
}
self.__field_dict = {
'cw<fd<field-inst' : self.__found_field_instruction_func,
'cw<fd<field_____' : self.__found_field_func,
'cw<pf<par-end___' : self.__par_in_field_func,
'cw<sc<section___' : self.__sec_in_field_func,
}
self.__field_count = [] # keep track of the brackets
self.__field_instruction = [] # field instruction strings
self.__symbol = 0 # whether or not the field is really UTF-8
# (these fields cannot be nested.)
self.__field_instruction_string = '' # string that collects field instruction
self.__par_in_field = [] # paragraphs in field?
self.__sec_in_field = [] # sections in field?
self.__field_string = [] # list of field strings
def __before_body_func(self, line):
"""
Required:
line --line ro parse
Returns:
nothing (changes an instant and writes a line)
Logic:
Check for the beginninf of the body. If found, changed the state.
Always write out the line.
"""
if self.__token_info == 'mi<mk<body-open_':
self.__state = 'in_body'
self.__write_obj.write(line)
def __in_body_func(self, line):
"""
Required:
line --line to parse
Returns:
nothing. (Writes a line to the output file, or performs other actions.)
Logic:
Check of the beginning of a field. Always output the line.
"""
action = self.__in_body_dict.get(self.__token_info)
if action:
action(line)
self.__write_obj.write(line)
def __found_field_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
Set the values for parsing the field. Four lists have to have
items appended to them.
"""
self.__state = 'field'
self.__cb_count = 0
ob_count = self.__ob_count
self.__field_string.append('')
self.__field_count.append(ob_count)
self.__sec_in_field.append(0)
self.__par_in_field.append(0)
def __in_field_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing.
Logic:
Check for the end of the field; a paragraph break; a section break;
the beginning of another field; or the beginning of the field
instruction.
"""
if self.__cb_count == self.__field_count[-1]:
self.__field_string[-1] += line
self.__end_field_func()
else:
action = self.__field_dict.get(self.__token_info)
if action:
action(line)
else:
self.__field_string[-1] += line
def __par_in_field_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
Write the line to the output file and set the last item in the
paragraph in field list to true.
"""
self.__field_string[-1] += line
self.__par_in_field[-1] = 1
def __sec_in_field_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
Write the line to the output file and set the last item in the
section in field list to true.
"""
self.__field_string[-1] += line
self.__sec_in_field[-1] = 1
def __found_field_instruction_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing
Change the state to field instruction. Set the open bracket count of
the beginning of this field so you know when it ends. Set the closed
bracket count to 0 so you don't prematureley exit this state.
"""
self.__state = 'field_instruction'
self.__field_instruction_count = self.__ob_count
self.__cb_count = 0
def __field_instruction_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
Collect all the lines until the end of the field is reached.
Process these lines with the module rtr.field_strings.
Check if the field instruction is 'Symbol' (really UTF-8).
"""
if self.__cb_count == self.__field_instruction_count:
# The closing bracket should be written, since the opening bracket
# was written
self.__field_string[-1] += line
my_list = self.__string_obj.process_string(
self.__field_instruction_string, 'field_instruction')
instruction = my_list[2]
self.__field_instruction.append(instruction)
if my_list[0] == 'Symbol':
self.__symbol = 1
self.__state = 'field'
self.__field_instruction_string = ''
else:
self.__field_instruction_string += line
    def __end_field_func(self):
        """
        Requires:
            nothing
        Returns:
            Nothing
        Logic:
            Pop the last values in the instructions list, the fields list, the
            paragraph list, and the section list.
            If the field is a symbol, do not write the tags <field></field>,
            since this field is really just UTF-8.
            If the field contains paragraph or section breaks, it is a
            field-block rather than just a field.
            Write the paragraph or section markers for later parsing of the
            file.
            If the field list contains more strings, add the latest
            (processed) string to the last string in the list. Otherwise,
            write the string to the output file.
        """
        last_bracket = self.__field_count.pop()
        instruction = self.__field_instruction.pop()
        inner_field_string = self.__field_string.pop()
        sec_in_field = self.__sec_in_field.pop()
        par_in_field = self.__par_in_field.pop()
        # add a closing bracket, since the closing bracket is not included in
        # the field string
        if self.__symbol:
            # Symbol fields are plain UTF-8 text: emit the instruction and
            # the close bracket, no <field> wrapper
            inner_field_string = '%scb<nu<clos-brack<%s\n' % \
                (instruction, last_bracket)
        elif sec_in_field or par_in_field:
            # a field containing par/sec breaks becomes a field-block
            inner_field_string = \
            'mi<mk<fldbkstart\n'\
            'mi<tg<open-att__<field-block<type>%s\n%s'\
            'mi<mk<fldbk-end_\n' \
            'mi<tg<close_____<field-block\n'\
            'mi<mk<fld-bk-end\n' \
            % (instruction, inner_field_string)
        # write a marker to show an inline field for later parsing
        else:
            inner_field_string = \
            '%s' \
            'mi<tg<open-att__<field<type>%s\n%s'\
            'mi<tg<close_____<field\n'\
            % (self.__marker, instruction, inner_field_string)
        if sec_in_field:
            inner_field_string = 'mi<mk<sec-fd-beg\n' + inner_field_string + \
                'mi<mk<sec-fd-end\n'
        if par_in_field:
            inner_field_string = 'mi<mk<par-in-fld\n' + inner_field_string
        if len(self.__field_string) == 0:
            # outermost field closed: flush everything to the output file
            self.__write_field_string(inner_field_string)
        else:
            # nested field: fold the processed text into the enclosing field
            self.__field_string[-1] += inner_field_string
        self.__symbol = 0
    def __write_field_string(self, the_string):
        # Called only when the outermost field closes: flush the assembled
        # field text to the output file and return to the in-body state.
        self.__state = 'in_body'
        self.__write_obj.write(the_string)
def fix_fields(self):
"""
Requires:
nothing
Returns:
nothing (changes the original file)
Logic:
Read one line in at a time. Determine what action to take based on
the state. If the state is before the body, look for the
beginning of the body.
If the state is body, send the line to the body method.
"""
self.__initiate_values()
read_obj = open_for_read(self.__file)
self.__write_obj = open_for_write(self.__write_to)
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
if self.__token_info == 'ob<nu<open-brack':
self.__ob_count = line[-5:-1]
if self.__token_info == 'cb<nu<clos-brack':
self.__cb_count = line[-5:-1]
action = self.__state_dict.get(self.__state)
if action is None:
sys.stderr.write('no no matching state in module styles.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler=self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "fields_large.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
| 15,246 | Python | .py | 363 | 32.809917 | 122 | 0.556505 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,314 | override_table.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/override_table.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
class OverrideTable:
    """
    Parse the RTF list-override table.  Build a string (later converted to
    XML) and assign each override's list-id to the matching list (matched
    by list-table-id) in the list-of-lists structure that list_table.py
    produced.  The passed-in list_of_lists is modified in place.
    """
    def __init__(
        self,
        list_of_lists,
        run_level=1,
        bug_handler=None,
    ):
        """
        Requires:
            list_of_lists -- list information built by list_table.py
            run_level -- strictness level; levels above 3 raise on
                malformed overrides
            bug_handler -- exception class to raise on malformed input.
                Defaults to Exception.  (The original code raised
                self.__bug_handler without ever assigning it, which
                produced an AttributeError instead of the intended
                message; it also tested the non-existent self.__level
                instead of self.__run_level.)
        """
        self.__list_of_lists = list_of_lists
        self.__bug_handler = Exception if bug_handler is None else bug_handler
        self.__initiate_values()
        self.__run_level = run_level
    def __initiate_values(self):
        # working state for one parse pass
        self.__override_table_final = ''
        self.__state = 'default'
        self.__override_list = []
        # state name -> handler
        self.__state_dict = {
            'default'   : self.__default_func,
            'override'  : self.__override_func,
            'unsure_ob' : self.__after_bracket_func,
        }
        # token -> attribute name recorded for the current override
        self.__override_dict = {
            'cw<ls<lis-tbl-id' : 'list-table-id',
            'cw<ls<list-id___' : 'list-id',
        }
    def __override_func(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing
        Logic:
            The group {\\override has been found.
            Check for the end of the group.
            Otherwise, add appropriate tokens to the override dictionary.
        """
        if self.__token_info == 'cb<nu<clos-brack' and\
            self.__cb_count == self.__override_ob_count:
            self.__state = 'default'
            self.__parse_override_dict()
        else:
            att = self.__override_dict.get(self.__token_info)
            if att:
                # value follows the 16-char token and its 4-char separator
                value = line[20:]
                self.__override_list[-1][att] = value
    def __parse_override_dict(self):
        """
        Requires:
            nothing
        Returns:
            nothing
        Logic:
            Match this override's list-table-id against the entries in
            self.__list_of_lists (built by list_table.py, which has no
            list-id yet) and append the override's list-id to the matching
            entry:
                [[{list-id:[HERE!]},[{}]]
            This is a list because one list in the preamble table can apply
            to multiple lists in the body.
        """
        override_dict = self.__override_list[-1]
        list_id = override_dict.get('list-id')
        # bug fix: this used to test self.__level, which was never defined
        if list_id is None and self.__run_level > 3:
            msg = 'This override does not appear to have a list-id\n'
            raise self.__bug_handler(msg)
        current_table_id = override_dict.get('list-table-id')
        if current_table_id is None and self.__run_level > 3:
            msg = 'This override does not appear to have a list-table-id\n'
            raise self.__bug_handler(msg)
        # enumerate instead of a manual counter; 'lst' avoids shadowing the
        # builtin 'list'
        for counter, lst in enumerate(self.__list_of_lists):
            info_dict = lst[0]
            old_table_id = info_dict.get('list-table-id')
            if old_table_id == current_table_id:
                self.__list_of_lists[counter][0]['list-id'].append(list_id)
                break
    def __parse_lines(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing
        Logic:
            Break the line into tokens by splitting it on the newline.
            Track open/close bracket counts and call the method for the
            current state.
        """
        lines = line.split('\n')
        self.__ob_count = 0
        for line in lines:
            self.__token_info = line[:16]
            if self.__token_info == 'ob<nu<open-brack':
                self.__ob_count = line[-4:]
            if self.__token_info == 'cb<nu<clos-brack':
                self.__cb_count = line[-4:]
            action = self.__state_dict.get(self.__state)
            if action is None:
                # unknown state: report it and skip instead of crashing on
                # a None call
                print(self.__state)
                continue
            action(line)
        self.__write_final_string()
    def __default_func(self, line):
        """
        Requires:
            line -- line to parse
        Return:
            nothing
        Logic:
            Look for an open bracket and change states when found.
        """
        if self.__token_info == 'ob<nu<open-brack':
            self.__state = 'unsure_ob'
    def __after_bracket_func(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing
        Logic:
            The last token was an open bracket. Determine the group from the
            token that follows.
            WARNING: if no group is found, the state remains unsure_ob and no
            further text is parsed; a state stack would be a cleaner fix.
        """
        if self.__token_info == 'cw<ls<lis-overid':
            self.__state = 'override'
            # remember which bracket opened this override group
            self.__override_ob_count = self.__ob_count
            self.__override_list.append({})
        elif self.__run_level > 3:
            msg = 'No matching token after open bracket\n'
            msg += 'token is "%s\n"' % (line)
            raise self.__bug_handler(msg)
    def __write_final_string(self):
        """
        Requires:
            nothing
        Returns:
            nothing
        Logic:
            Write the override-table open tag, then one empty "override-list"
            tag per parsed override with its attributes, then the close tag.
        """
        self.__override_table_final = 'mi<mk<over_beg_\n'
        self.__override_table_final += 'mi<tg<open______<override-table\n' + \
            'mi<mk<overbeg__\n' + self.__override_table_final
        for the_dict in self.__override_list:
            self.__override_table_final += 'mi<tg<empty-att_<override-list'
            for the_key in the_dict.keys():
                self.__override_table_final += \
                    f'<{the_key}>{the_dict[the_key]}'
            self.__override_table_final += '\n'
            self.__override_table_final += '\n'
        self.__override_table_final += \
            'mi<mk<overri-end\n' + 'mi<tg<close_____<override-table\n'
        self.__override_table_final += 'mi<mk<overribend_\n'
    def parse_override_table(self, line):
        """
        Requires:
            line -- the token stream containing the override table
        Returns:
            A string that will be converted to XML, and the (mutated) list
            of all the properties of the RTF lists.
        """
        self.__parse_lines(line)
        return self.__override_table_final, self.__list_of_lists
| 8,339 | Python | .py | 196 | 32.081633 | 92 | 0.514019 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,315 | input.py | kovidgoyal_calibre/src/calibre/ebooks/rtf/input.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
from lxml import etree
class InlineClass(etree.XSLTExtension):
    """XSLT extension that maps RTF character formatting attributes on a
    node to a space-separated CSS class list, interning font sizes and
    colours so equal values share one generated class name."""

    FMTS = ('italics', 'bold', 'strike-through', 'small-caps')

    def __init__(self, log):
        etree.XSLTExtension.__init__(self)
        self.log = log
        self.font_sizes = []
        self.colors = []

    def execute(self, context, self_node, input_node, output_parent):
        classes = ['none']
        # simple boolean formats: present when the attribute is 'true'
        classes.extend(fmt for fmt in self.FMTS
                if input_node.get(fmt, None) == 'true')
        # underlined is special: any value other than 'false' counts
        if input_node.get('underlined', 'false') != 'false':
            classes.append('underlined')
        size = input_node.get('font-size', False)
        if size:
            if size not in self.font_sizes:
                self.font_sizes.append(size)
            classes.append('fs%d' % self.font_sizes.index(size))
        colour = input_node.get('font-color', False)
        if colour:
            if colour not in self.colors:
                self.colors.append(colour)
            classes.append('col%d' % self.colors.index(colour))
        output_parent.text = ' '.join(classes)
| 1,168 | Python | .py | 29 | 30.862069 | 69 | 0.571176 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,316 | preprocess.py | kovidgoyal_calibre/src/calibre/ebooks/rtf/preprocess.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Gerendi Sandor Attila'
__docformat__ = 'restructuredtext en'
"""
RTF tokenizer and token parser. v.1.0 (1/17/2010)
Author: Gerendi Sandor Attila
At this point this will tokenize an RTF file and then rebuild it from the tokens.
In the process, the Unicode (\u) tokens are altered so that they are supported by rtf2xml while remaining compliant with the RTF specification.
"""
class tokenDelimitatorStart():
    """Marks the opening brace of an RTF group."""

    def toRTF(self):
        return '{'

    __repr__ = toRTF
class tokenDelimitatorEnd():
    """Marks the closing brace of an RTF group."""

    def toRTF(self):
        return '}'

    __repr__ = toRTF
class tokenControlWord():
    """An RTF control word (e.g. ``\\par``) plus its trailing delimiter."""

    def __init__(self, name, separator=''):
        self.name = name
        self.separator = separator

    def toRTF(self):
        return ''.join((self.name, self.separator))

    __repr__ = toRTF
class tokenControlWordWithNumericArgument():
    """An RTF control word with a numeric argument, e.g. ``\\fs24``."""

    def __init__(self, name, argument, separator=''):
        self.name = name
        self.argument = argument
        self.separator = separator

    def toRTF(self):
        return ''.join((self.name, repr(self.argument), self.separator))

    __repr__ = toRTF
class tokenControlSymbol():
    """A one-character RTF control symbol such as ``\\~`` or ``\\-``."""

    def __init__(self, name):
        self.name = name

    def toRTF(self):
        return self.name

    __repr__ = toRTF
class tokenData():
    """A run of plain document text between control structures."""

    def __init__(self, data):
        self.data = data

    def toRTF(self):
        return self.data

    __repr__ = toRTF
class tokenBinN():
    """A ``\\binN`` control word followed by N bytes of raw data."""

    def __init__(self, data, separator=''):
        self.data = data
        self.separator = separator

    def toRTF(self):
        return ''.join(("\\bin", repr(len(self.data)), self.separator, self.data))

    __repr__ = toRTF
class token8bitChar():
    """A ``\\'hh`` escape: one character given as two hex digits."""

    def __init__(self, data):
        self.data = data

    def toRTF(self):
        return ''.join(("\\'", self.data))

    __repr__ = toRTF
class tokenUnicode():
    """A ``\\uN`` Unicode escape with its fallback ("equivalent") tokens.

    ``current_ucn`` is the active ``\\ucN`` value: how many fallback tokens
    follow the escape for non-Unicode readers.  On output the escape is
    always prefixed with an explicit ``\\ucN`` and followed by at most that
    many fallback tokens.

    Fixes two defects in the original:
      * ``eqList=[]`` was a shared mutable default argument;
      * the output loop never incremented its counter, so *all* fallback
        tokens were emitted instead of at most ``ucn``.
    """

    def __init__(self, data, separator='', current_ucn=1, eqList=None):
        self.data = data
        self.separator = separator
        self.current_ucn = current_ucn
        # each token gets its own list; never share a default instance
        self.eqList = [] if eqList is None else eqList

    def toRTF(self):
        result = '\\u' + repr(self.data) + ' '
        # never announce more fallback tokens than we actually have
        ucn = min(self.current_ucn, len(self.eqList))
        # '\uc<n>' prefix (inlined tokenControlWordWithNumericArgument with
        # an empty separator)
        result = '\\uc' + repr(ucn) + result
        for eq in self.eqList[:ucn]:
            result += eq.toRTF()
        return result

    def __repr__(self):
        return '\\u' + repr(self.data)
def isAsciiLetter(value):
    """True for a single character in [A-Za-z]."""
    return 'a' <= value <= 'z' or 'A' <= value <= 'Z'
def isDigit(value):
    """True for a single character in [0-9]."""
    return '0' <= value <= '9'
def isChar(value, char):
    """True when *value* equals the single character *char*."""
    return value == char
def isString(buffer, string):
    """True when *buffer* equals *string*."""
    return buffer == string
class RtfTokenParser():
    """Post-process a raw token stream: fold ``\\'hh`` escapes into
    token8bitChar objects and normalise ``\\uN`` Unicode escapes so that
    rtf2xml can handle them."""
    def __init__(self, tokens):
        self.tokens = tokens
        self.process()
        self.processUnicode()
    def process(self):
        # Merge each "\'" control symbol with the two hex digits at the
        # start of the following data token into a single token8bitChar;
        # any remaining text of that data token is kept as tokenData.
        i = 0
        newTokens = []
        while i < len(self.tokens):
            if isinstance(self.tokens[i], tokenControlSymbol):
                if isString(self.tokens[i].name, "\\'"):
                    i = i + 1
                    if not isinstance(self.tokens[i], tokenData):
                        raise Exception('Error: token8bitChar without data.')
                    if len(self.tokens[i].data) < 2:
                        raise Exception('Error: token8bitChar without data.')
                    newTokens.append(token8bitChar(self.tokens[i].data[0:2]))
                    if len(self.tokens[i].data) > 2:
                        newTokens.append(tokenData(self.tokens[i].data[2:]))
                    i = i + 1
                    continue
            newTokens.append(self.tokens[i])
            i = i + 1
        self.tokens = list(newTokens)
    def processUnicode(self):
        # Rewrite every \uN escape into a tokenUnicode that knows the \ucN
        # value in force at that point.  \ucN values are scoped to RTF
        # groups, so a stack mirrors group open/close delimiters.
        i = 0
        newTokens = []
        ucNbStack = [1]
        while i < len(self.tokens):
            if isinstance(self.tokens[i], tokenDelimitatorStart):
                # entering a group: inherit the current \ucN value
                ucNbStack.append(ucNbStack[len(ucNbStack) - 1])
                newTokens.append(self.tokens[i])
                i = i + 1
                continue
            if isinstance(self.tokens[i], tokenDelimitatorEnd):
                # leaving a group: restore the outer \ucN value
                ucNbStack.pop()
                newTokens.append(self.tokens[i])
                i = i + 1
                continue
            if isinstance(self.tokens[i], tokenControlWordWithNumericArgument):
                if isString(self.tokens[i].name, '\\uc'):
                    ucNbStack[len(ucNbStack) - 1] = self.tokens[i].argument
                    newTokens.append(self.tokens[i])
                    i = i + 1
                    continue
                if isString(self.tokens[i].name, '\\u'):
                    # collect up to ucn fallback ("equivalent") tokens that
                    # follow the escape; a data token may be split if it
                    # carries more characters than the fallback needs
                    x = i
                    j = 0
                    i = i + 1
                    replace = []
                    partialData = None
                    ucn = ucNbStack[len(ucNbStack) - 1]
                    while (i < len(self.tokens)) and (j < ucn):
                        if isinstance(self.tokens[i], tokenDelimitatorStart):
                            break
                        if isinstance(self.tokens[i], tokenDelimitatorEnd):
                            break
                        if isinstance(self.tokens[i], tokenData):
                            if len(self.tokens[i].data) >= ucn - j:
                                replace.append(tokenData(self.tokens[i].data[0 : ucn - j]))
                                if len(self.tokens[i].data) > ucn - j:
                                    partialData = tokenData(self.tokens[i].data[ucn - j:])
                                i = i + 1
                                break
                            else:
                                replace.append(self.tokens[i])
                                j = j + len(self.tokens[i].data)
                                i = i + 1
                                continue
                        if isinstance(self.tokens[i], token8bitChar) or isinstance(self.tokens[i], tokenBinN):
                            replace.append(self.tokens[i])
                            i = i + 1
                            j = j + 1
                            continue
                        raise Exception('Error: incorrect utf replacement.')
                    # calibre rtf2xml does not support utfreplace
                    replace = []
                    newTokens.append(tokenUnicode(self.tokens[x].argument, self.tokens[x].separator, ucNbStack[len(ucNbStack) - 1], replace))
                    if partialData is not None:
                        newTokens.append(partialData)
                    continue
            newTokens.append(self.tokens[i])
            i = i + 1
        self.tokens = list(newTokens)
    def toRTF(self):
        # Serialise the processed token stream back to RTF text.
        result = []
        for token in self.tokens:
            result.append(token.toRTF())
        return "".join(result)
class RtfTokenizer():
    """Split raw RTF text into a flat list of token objects.

    Recognises group delimiters (``{``/``}``), control words (optionally
    with a numeric argument, including ``\\binN`` raw data runs), control
    symbols, and plain document text.

    Fixes: two error messages formatted ``'...%d...' % [tokenStart]`` with
    a *list*, which raises TypeError instead of the intended Exception.
    """

    def __init__(self, rtfData):
        # rtfData is expected to be a str; tokenize() indexes it char by char
        self.tokens = []
        self.rtfData = rtfData
        self.tokenize()

    def tokenize(self):
        """Scan self.rtfData once, appending tokens to self.tokens."""
        i = 0
        lastDataStart = -1  # start of the current plain-text run, or -1
        while i < len(self.rtfData):
            if isChar(self.rtfData[i], '{'):
                if lastDataStart > -1:
                    self.tokens.append(tokenData(self.rtfData[lastDataStart : i]))
                    lastDataStart = -1
                self.tokens.append(tokenDelimitatorStart())
                i = i + 1
                continue
            if isChar(self.rtfData[i], '}'):
                if lastDataStart > -1:
                    self.tokens.append(tokenData(self.rtfData[lastDataStart : i]))
                    lastDataStart = -1
                self.tokens.append(tokenDelimitatorEnd())
                i = i + 1
                continue
            if isChar(self.rtfData[i], '\\'):
                if i + 1 >= len(self.rtfData):
                    raise Exception('Error: Control character found at the end of the document.')
                if lastDataStart > -1:
                    self.tokens.append(tokenData(self.rtfData[lastDataStart : i]))
                    lastDataStart = -1
                tokenStart = i
                i = i + 1
                # Control Words
                if isAsciiLetter(self.rtfData[i]):
                    # consume <ASCII Letter Sequence>
                    consumed = False
                    while i < len(self.rtfData):
                        if not isAsciiLetter(self.rtfData[i]):
                            tokenEnd = i
                            consumed = True
                            break
                        i = i + 1
                    if not consumed:
                        raise Exception('Error (at:%d): Control Word without end.'%(tokenStart))
                    # we have numeric argument before delimiter
                    if isChar(self.rtfData[i], '-') or isDigit(self.rtfData[i]):
                        # consume the numeric argument
                        consumed = False
                        digitCount = 0
                        while i < len(self.rtfData):
                            if not isDigit(self.rtfData[i]):
                                consumed = True
                                break
                            digitCount = digitCount + 1
                            i = i + 1
                        if digitCount > 10:
                            # bug fix: '%d' % [x] raised TypeError
                            raise Exception('Error (at:%d): Too many digits in control word numeric argument.' % tokenStart)
                        if not consumed:
                            raise Exception('Error (at:%d): Control Word without numeric argument end.' % tokenStart)
                    separator = ''
                    if isChar(self.rtfData[i], ' '):
                        separator = ' '
                    controlWord = self.rtfData[tokenStart: tokenEnd]
                    if tokenEnd < i:
                        value = int(self.rtfData[tokenEnd: i])
                        if isString(controlWord, "\\bin"):
                            # \binN is followed by N bytes of raw data that
                            # must not be tokenized
                            i = i + value
                            self.tokens.append(tokenBinN(self.rtfData[tokenStart:i], separator))
                        else:
                            self.tokens.append(tokenControlWordWithNumericArgument(controlWord, value, separator))
                    else:
                        self.tokens.append(tokenControlWord(controlWord, separator))
                    # space delimiter, we should discard it
                    if self.rtfData[i] == ' ':
                        i = i + 1
                # Control Symbol
                else:
                    self.tokens.append(tokenControlSymbol(self.rtfData[tokenStart : i + 1]))
                    i = i + 1
                continue
            if lastDataStart < 0:
                lastDataStart = i
            i = i + 1

    def toRTF(self):
        """Serialise the token list back to RTF text."""
        return "".join(token.toRTF() for token in self.tokens)
if __name__ == "__main__":
    import sys
    if len(sys.argv) < 2:
        print("Usage %prog rtfFileToConvert")
        sys.exit()
    # The tokenizer works on str, not bytes (indexing bytes yields ints, so
    # the character comparisons would never match).  Decode via latin-1 so
    # every byte round-trips unchanged, and write back the same way.
    with open(sys.argv[1], 'rb') as f:
        data = f.read().decode('latin-1')
    tokenizer = RtfTokenizer(data)
    parsedTokens = RtfTokenParser(tokenizer.tokens)
    data = parsedTokens.toRTF()
    with open(sys.argv[1], 'w', encoding='latin-1', newline='') as f:
        f.write(data)
27,317 | rtfml.py | kovidgoyal_calibre/src/calibre/ebooks/rtf/rtfml.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Transform OEB content into RTF markup
'''
import io
import os
import re
from binascii import hexlify
from lxml import etree
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.img import save_cover_data_to
from calibre.utils.imghdr import identify
from polyglot.builtins import string_or_bytes
# XHTML tag -> RTF control word(s) emitted inside the group opened for the tag
TAGS = {
    'b': '\\b',
    'del': '\\deleted',
    'h1': '\\s1 \\afs32',
    'h2': '\\s2 \\afs28',
    'h3': '\\s3 \\afs28',
    'h4': '\\s4 \\afs23',
    'h5': '\\s5 \\afs23',
    'h6': '\\s6 \\afs21',
    'i': '\\i',
    'li': '\t',
    'p': '\t',
    'sub': '\\sub',
    'sup': '\\super',
    'u': '\\ul',
}
# self-closing tags that map directly to literal RTF output
SINGLE_TAGS = {
    'br': '\n{\\line }\n',
}
# (CSS property, {value -> RTF control word}) pairs applied from the stylizer
STYLES = [
    ('font-weight', {'bold': '\\b', 'bolder': '\\b'}),
    ('font-style', {'italic': '\\i'}),
    ('text-align', {'center': '\\qc', 'left': '\\ql', 'right': '\\qr'}),
    ('text-decoration', {'line-through': '\\strike', 'underline': '\\ul'}),
]
# tags that start a paragraph-level block in the RTF output
BLOCK_TAGS = [
    'div',
    'p',
    'h1',
    'h2',
    'h3',
    'h4',
    'h5',
    'h6',
    'li',
]
# CSS display values treated the same way as BLOCK_TAGS
BLOCK_STYLES = [
    'block'
]
'''
TODO:
* Tables
* Fonts
'''
def txt2rtf(text):
    r"""Escape a unicode string for embedding in RTF output.

    Backslash, ``{`` and ``}`` become ``\'5c``/``\'7b``/``\'7d`` hex
    escapes; non-breaking space becomes ``\~``; other non-ASCII characters
    become ``\uN?`` escapes.  Non-str input is returned unchanged.

    Bug fix: the original escaped the braces *before* the backslash, so the
    backslashes it had just inserted were themselves re-escaped, turning
    e.g. ``{`` into the corrupt sequence ``\'5c'7b``.  The backslash must
    be escaped first.  The isinstance guard also has to come before any
    str-only replace() calls.
    """
    if not isinstance(text, str):
        return text
    # escape the escape character itself before introducing new backslashes
    text = text.replace('\\', r"\'5c")
    text = text.replace('{', r"\'7b")
    text = text.replace('}', r"\'7d")
    buf = io.StringIO()
    for ch in text:
        code = ord(ch)
        if code == 160:
            buf.write(r'\~')  # non-breaking space
        elif code <= 127:
            buf.write(ch)
        else:
            buf.write(f'\\u{code:d}?')
    return buf.getvalue()
class RTFMLizer:
    """Serialise an OEB book into a single RTF document string.

    Bug fix: ``dump_text`` used a mutable default argument
    (``tag_stack=[]``), sharing one list across every top-level call.
    """

    def __init__(self, log):
        self.log = log

    def extract_content(self, oeb_book, opts):
        """Entry point: convert *oeb_book* to RTF markup and return it."""
        self.log.info('Converting XHTML to RTF markup...')
        self.oeb_book = oeb_book
        self.opts = opts
        return self.mlize_spine()

    def mlize_spine(self):
        """Dump the title page (if any) and every spine item to RTF, with a
        page break after each, then post-process images and whitespace."""
        from calibre.ebooks.oeb.base import XHTML
        from calibre.ebooks.oeb.stylizer import Stylizer
        from calibre.utils.xml_parse import safe_xml_fromstring
        output = self.header()
        if 'titlepage' in self.oeb_book.guide:
            href = self.oeb_book.guide['titlepage'].href
            item = self.oeb_book.manifest.hrefs[href]
            if item.spine_position is None:
                stylizer = Stylizer(item.data, item.href, self.oeb_book,
                        self.opts, self.opts.output_profile)
                self.currently_dumping_item = item
                output += self.dump_text(item.data.find(XHTML('body')), stylizer)
                output += r'{\page }'
        for item in self.oeb_book.spine:
            self.log.debug('Converting %s to RTF markup...' % item.href)
            # Removing comments is needed as comments with -- inside them can
            # cause fromstring() to fail
            content = re.sub('<!--.*?-->', '', etree.tostring(item.data, encoding='unicode'), flags=re.DOTALL)
            content = self.remove_newlines(content)
            content = self.remove_tabs(content)
            content = safe_xml_fromstring(content)
            stylizer = Stylizer(content, item.href, self.oeb_book, self.opts, self.opts.output_profile)
            self.currently_dumping_item = item
            output += self.dump_text(content.find(XHTML('body')), stylizer)
            output += r'{\page }'
        output += self.footer()
        output = self.insert_images(output)
        output = self.clean_text(output)
        return output

    def remove_newlines(self, text):
        """Collapse all newline flavours to single spaces."""
        self.log.debug('\tRemove newlines for processing...')
        text = text.replace('\r\n', ' ')
        text = text.replace('\n', ' ')
        text = text.replace('\r', ' ')
        return text

    def remove_tabs(self, text):
        """Replace tabs with spaces so only emitted RTF tabs remain."""
        self.log.debug('Replace tabs with space for processing...')
        text = text.replace('\t', ' ')
        return text

    def header(self):
        """Return the RTF prologue: info group, font table and stylesheet."""
        header = '{{\\rtf1{{\\info{{\\title {}}}{{\\author {}}}}}\\ansi\\ansicpg1252\\deff0\\deflang1033\n'.format(
            self.oeb_book.metadata.title[0].value, authors_to_string([x.value for x in self.oeb_book.metadata.creator]))
        return header + (
            '{\\fonttbl{\\f0\\froman\\fprq2\\fcharset128 Times New Roman;}{\\f1\\froman\\fprq2\\fcharset128 Times New Roman;}{\\f2\\fswiss\\fprq2\\fcharset128 Arial;}{\\f3\\fnil\\fprq2\\fcharset128 Arial;}{\\f4\\fnil\\fprq2\\fcharset128 MS Mincho;}{\\f5\\fnil\\fprq2\\fcharset128 Tahoma;}{\\f6\\fnil\\fprq0\\fcharset128 Tahoma;}}\n' # noqa
            '{\\stylesheet{\\ql \\li0\\ri0\\nowidctlpar\\wrapdefault\\faauto\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\af25\\afs24\\alang1033 \\ltrch\\fcs0 \\fs24\\lang1033\\langfe255\\cgrid\\langnp1033\\langfenp255 \\snext0 Normal;}\n' # noqa
            '{\\s1\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel0\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs32\\alang1033 \\ltrch\\fcs0 \\b\\fs32\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink21 heading 1;}\n' # noqa
            '{\\s2\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel1\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\ai\\af0\\afs28\\alang1033 \\ltrch\\fcs0 \\b\\i\\fs28\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink22 heading 2;}\n' # noqa
            '{\\s3\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel2\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs28\\alang1033 \\ltrch\\fcs0 \\b\\fs28\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink23 heading 3;}\n' # noqa
            '{\\s4\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel3\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\ai\\af0\\afs23\\alang1033 \\ltrch\\fcs0\\b\\i\\fs23\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink24 heading 4;}\n' # noqa
            '{\\s5\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel4\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs23\\alang1033 \\ltrch\\fcs0 \\b\\fs23\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink25 heading 5;}\n' # noqa
            '{\\s6\\ql \\li0\\ri0\\sb240\\sa120\\keepn\\nowidctlpar\\wrapdefault\\faauto\\outlinelevel5\\rin0\\lin0\\itap0 \\rtlch\\fcs1 \\ab\\af0\\afs21\\alang1033 \\ltrch\\fcs0 \\b\\fs21\\lang1033\\langfe255\\loch\\f1\\hich\\af1\\dbch\\af26\\cgrid\\langnp1033\\langfenp255 \\sbasedon15 \\snext16 \\slink26 heading 6;}}\n' # noqa
            )

    def footer(self):
        """Return the closing brace of the RTF document."""
        return ' }'

    def insert_images(self, text):
        """Replace every SPECIAL_IMAGE placeholder left by dump_text with
        the hex-encoded JPEG data of the corresponding manifest image."""
        from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
        for item in self.oeb_book.manifest:
            if item.media_type in OEB_RASTER_IMAGES:
                src = item.href
                try:
                    data, width, height = self.image_to_hexstring(item.data)
                except Exception:
                    self.log.exception('Image %s is corrupted, ignoring'%item.href)
                    repl = '\n\n'
                else:
                    repl = '\n\n{\\*\\shppict{\\pict\\jpegblip\\picw%i\\pich%i \n%s\n}}\n\n' % (width, height, data)
                text = text.replace('SPECIAL_IMAGE-%s-REPLACE_ME' % src, repl)
        return text

    def image_to_hexstring(self, data):
        """Return (hex_string, width, height) for image *data* re-encoded
        as JPEG; 64 bytes per line gives 128 hex characters per line."""
        data = save_cover_data_to(data)
        width, height = identify(data)[1:]
        lines = []
        v = memoryview(data)
        for i in range(0, len(data), 64):
            lines.append(hexlify(v[i:i+64]))
        hex_string = b'\n'.join(lines).decode('ascii')
        return hex_string, width, height

    def clean_text(self, text):
        """Collapse runs of blank lines, spaces, tabs and line breaks, and
        drop non-breaking spaces."""
        # Remove excessive newlines
        text = re.sub('%s{3,}' % os.linesep, f'{os.linesep}{os.linesep}', text)
        # Remove excessive spaces
        text = re.sub('[ ]{2,}', ' ', text)
        text = re.sub('\t{2,}', '\t', text)
        text = re.sub('\t ', '\t', text)
        # Remove excessive line breaks
        text = re.sub(r'(\{\\line \}\s*){3,}', r'{\\line }{\\line }', text)
        # Remove non-breaking spaces
        text = text.replace('\xa0', ' ')
        text = text.replace('\n\r', '\n')
        return text

    def dump_text(self, elem, stylizer, tag_stack=None):
        """Recursively serialise *elem* and its children to RTF.

        ``tag_stack`` tracks the currently open RTF groups across the
        recursion.  It defaults to a fresh list per top-level call; the
        original used a mutable default argument shared across calls.
        """
        from calibre.ebooks.oeb.base import XHTML_NS, barename, namespace, urlnormalize
        if tag_stack is None:
            tag_stack = []
        if not isinstance(elem.tag, string_or_bytes) \
           or namespace(elem.tag) != XHTML_NS:
            p = elem.getparent()
            if p is not None and isinstance(p.tag, string_or_bytes) and namespace(p.tag) == XHTML_NS \
                    and elem.tail:
                return elem.tail
            return ''
        text = ''
        style = stylizer.style(elem)
        if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \
           or style['visibility'] == 'hidden':
            if hasattr(elem, 'tail') and elem.tail:
                return elem.tail
            return ''
        tag = barename(elem.tag)
        tag_count = 0
        # Are we in a paragraph block?
        if tag in BLOCK_TAGS or style['display'] in BLOCK_STYLES:
            if 'block' not in tag_stack:
                tag_count += 1
                tag_stack.append('block')
        # Process tags that need special processing and that do not have inner
        # text. Usually these require an argument
        if tag == 'img':
            src = elem.get('src')
            if src:
                src = urlnormalize(self.currently_dumping_item.abshref(src))
                block_start = ''
                block_end = ''
                if 'block' not in tag_stack:
                    block_start = r'{\par\pard\hyphpar '
                    block_end = '}'
                text += f'{block_start} SPECIAL_IMAGE-{src}-REPLACE_ME {block_end}'
        single_tag = SINGLE_TAGS.get(tag, None)
        if single_tag:
            text += single_tag
        rtf_tag = TAGS.get(tag, None)
        if rtf_tag and rtf_tag not in tag_stack:
            tag_count += 1
            text += '{%s\n' % rtf_tag
            tag_stack.append(rtf_tag)
        # Processes style information
        for s in STYLES:
            style_tag = s[1].get(style[s[0]], None)
            if style_tag and style_tag not in tag_stack:
                tag_count += 1
                text += '{%s\n' % style_tag
                tag_stack.append(style_tag)
        # Process tags that contain text.
        if hasattr(elem, 'text') and elem.text:
            text += txt2rtf(elem.text)
        for item in elem:
            text += self.dump_text(item, stylizer, tag_stack)
        for _ in range(tag_count):
            end_tag = tag_stack.pop()
            if end_tag != 'block':
                if tag in BLOCK_TAGS:
                    text += r'\par\pard\plain\hyphpar}'
                else:
                    text += '}'
        if hasattr(elem, 'tail') and elem.tail:
            if 'block' in tag_stack:
                text += '%s' % txt2rtf(elem.tail)
            else:
                text += r'{\par\pard\hyphpar %s}' % txt2rtf(elem.tail)
        return text
| 11,622 | Python | .py | 246 | 37.678862 | 340 | 0.569701 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,318 | metadata.py | kovidgoyal_calibre/src/calibre/ebooks/chm/metadata.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import codecs
import re
from calibre import force_unicode
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.metadata import MetaInformation, string_to_authors
from calibre.ptempfile import TemporaryFile
from calibre.utils.logging import default_log
from polyglot.builtins import iterkeys
def _clean(s):
    """Replace non-breaking spaces with plain spaces."""
    return s.replace('\u00a0', ' ')
def _detag(tag):
    """Recursively flatten a parsed HTML node into cleaned plain text.

    ``None`` yields an empty string; children with a ``contents``
    attribute are descended into, everything else is treated as text.
    """
    if tag is None:
        return ""
    pieces = []
    for child in tag:
        if hasattr(child, "contents"):
            pieces.append(_detag(child))
        else:
            pieces.append(_clean(child))
    return "".join(pieces)
def _metadata_from_table(soup, searchfor):
    # Find a <td> whose text matches `searchfor` (case-insensitive) and pull
    # the metadata value either from the following cell or from the same one.
    td = soup.find('td', text=re.compile(searchfor, flags=re.I))
    if td is None:
        return None
    td = td.parent
    # there appears to be multiple ways of structuring the metadata
    # on the home page. cue some nasty special-case hacks...
    if re.match(r'^\s*'+searchfor+r'\s*$', td.decode_contents(), flags=re.I):
        # the cell holds only the label: the value lives in the next <td>
        meta = _detag(td.findNextSibling('td'))
        return re.sub('^:', '', meta).strip()
    else:
        # label and value share one cell: strip the leading 'label:' prefix
        meta = _detag(td)
        return re.sub(r'^[^:]+:', '', meta).strip()
def _metadata_from_span(soup, searchfor):
    """Return the text of the first <span> whose class matches *searchfor*
    (case-insensitive), or None when no such span exists."""
    pattern = re.compile(searchfor, flags=re.I)
    span = soup.find('span', {'class': pattern})
    # this metadata might need some cleaning up still :/
    return None if span is None else _detag(span.decode_contents().strip())
def _get_authors(soup):
    """Best-effort author extraction from the home page; falls back to a
    single 'Unknown' entry when nothing is found."""
    raw = (_metadata_from_span(soup, r'author') or
           _metadata_from_table(soup, r'^\s*by\s*:?\s+'))
    if raw is None:
        return [_('Unknown')]
    return string_to_authors(raw)
def _get_publisher(soup):
    """Publisher from the 'imprint' span, else the 'publisher' table row."""
    from_span = _metadata_from_span(soup, 'imprint')
    return from_span or _metadata_from_table(soup, 'publisher')
def _get_isbn(soup):
    """ISBN from the 'isbn' span, else the 'isbn' table row."""
    from_span = _metadata_from_span(soup, 'isbn')
    return from_span or _metadata_from_table(soup, 'isbn')
def _get_comments(soup):
    """Build a 'Published <date>, <pages> pages.' comment string, or return
    None when either field is missing or malformed.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to ``Exception`` while keeping the best-effort behaviour.
    """
    date = (_metadata_from_span(soup, 'cwdate') or
            _metadata_from_table(soup, 'pub date'))
    pages = (_metadata_from_span(soup, 'pages') or
             _metadata_from_table(soup, 'pages'))
    try:
        # date span can have copyright symbols in it...
        date = date.replace('\u00a9', '').strip()
        # and pages often comes as '(\d+ pages)'
        pages = re.search(r'\d+', pages).group(0)
        return f'Published {date}, {pages} pages.'
    except Exception:
        # either field was None or did not match; no comment then
        pass
    return None
def _get_cover(soup, rdr):
    # Locate the cover image on the home page and return it re-encoded as
    # JPEG bytes, or None when nothing suitable can be found/decoded.
    ans = None
    try:
        ans = soup.find('img', alt=re.compile('cover', flags=re.I))['src']
    except TypeError:
        # find() returned None (no alt-tagged cover); subscripting None
        # raises TypeError, so fall back to the aspect-ratio heuristic.
        # meeehh, no handy alt-tag goodness, try some hackery
        # the basic idea behind this is that in general, the cover image
        # has a height:width ratio of ~1.25, whereas most of the nav
        # buttons are decidedly less than that.
        # what we do in this is work out that ratio, take 1.25 off it and
        # save the absolute value when we sort by this value, the smallest
        # one is most likely to be the cover image, hopefully.
        r = {}
        for img in soup('img'):
            try:
                r[abs(float(re.search(r'[0-9.]+',
                    img['height']).group())/float(re.search(r'[0-9.]+',
                    img['width']).group())-1.25)] = img['src']
            except KeyError:
                # interestingly, occasionally the only image without height
                # or width attrs is the cover...
                r[0] = img['src']
            except:
                # Probably invalid width, height attributes, ignore
                continue
        if r:
            l = sorted(iterkeys(r))
            ans = r[l[0]]
    # this link comes from the internal html, which is in a subdir
    if ans is not None:
        try:
            ans = rdr.GetFile(ans)
        except:
            # not found at that path; retry relative to the CHM root
            ans = rdr.root + "/" + ans
            try:
                ans = rdr.GetFile(ans)
            except:
                ans = None
        if ans is not None:
            import io
            from PIL import Image
            buf = io.BytesIO()
            try:
                # normalise whatever image format the CHM used to RGB JPEG
                Image.open(io.BytesIO(ans)).convert('RGB').save(buf, 'JPEG')
                ans = buf.getvalue()
            except:
                ans = None
    return ans
def get_metadata_from_reader(rdr):
    '''
    Build a MetaInformation object from an already-open CHMReader *rdr*,
    scraping title, authors, publisher, isbn, comments and the cover from
    the container's home page.
    '''
    raw = rdr.get_home()
    home = BeautifulSoup(xml_to_unicode(raw, strip_encoding_pats=True,
        resolve_entities=True)[0])
    title = rdr.title
    try:
        # only trust the declared encoding if Python actually knows it
        x = rdr.GetEncoding()
        codecs.lookup(x)
        enc = x
    except:
        enc = 'cp1252'
    title = force_unicode(title, enc)
    authors = _get_authors(home)
    mi = MetaInformation(title, authors)
    publisher = _get_publisher(home)
    if publisher:
        mi.publisher = publisher
    isbn = _get_isbn(home)
    if isbn:
        mi.isbn = isbn
    comments = _get_comments(home)
    if comments:
        mi.comments = comments
    cdata = _get_cover(home, rdr)
    if cdata is not None:
        mi.cover_data = ('jpg', cdata)
    return mi
def get_metadata(stream):
    '''Extract metadata from the CHM file open (binary) as *stream*.'''
    # The CHM library needs a real file on disk, so spool the stream out
    # to a temporary file before handing it to CHMReader.
    with TemporaryFile('_chm_metadata.chm') as chm_path:
        with open(chm_path, 'wb') as dest:
            dest.write(stream.read())
        from calibre.ebooks.chm.reader import CHMReader
        reader = CHMReader(chm_path, default_log)
        return get_metadata_from_reader(reader)
| 5,494 | Python | .py | 148 | 29.351351 | 97 | 0.600828 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,319 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/chm/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Used for chm input
'''
| 172 | Python | .py | 7 | 23.142857 | 56 | 0.679012 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,320 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/chm/reader.py | ''' CHM File decoding support '''
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>,' \
' and Alex Bramley <a.bramley at gmail.com>.'
import codecs
import os
import re
import struct
from chm.chm import CHMFile, chmlib
from calibre import guess_type as guess_mimetype
from calibre.constants import filesystem_encoding, iswindows
from calibre.ebooks.BeautifulSoup import BeautifulSoup, NavigableString
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.metadata.toc import TOC
from polyglot.builtins import as_unicode
def match_string(s1, s2_already_lowered):
    # Case-insensitive comparison. The second operand must already have
    # been lower-cased by the caller; None on either side never matches.
    if s1 is None or s2_already_lowered is None:
        return False
    return s1.lower() == s2_already_lowered
def check_all_prev_empty(tag):
    # Walk backwards through the preceding siblings: True only if every
    # text node encountered is empty (whitespace-only). Iterative form of
    # the sibling walk; None terminates the chain.
    while tag is not None:
        if tag.__class__ == NavigableString and not check_empty(tag):
            return False
        tag = tag.previousSibling
    return True
def check_empty(s, rex=re.compile(r'\S')):
    '''Return True when *s* contains no non-whitespace character.'''
    return not rex.search(s)
class CHMError(Exception):
    '''Raised when a CHM container cannot be opened or an internal file
    cannot be located or read.'''
    pass
class CHMReader(CHMFile):
    def __init__(self, input, log, input_encoding=None):
        '''
        Open the CHM container at *input* (a filesystem path).

        :param input: path to the .chm file (str or bytes). A str path is
            encoded to the filesystem encoding; when that fails the file
            is copied to a temporary file with an ASCII-safe name.
        :param log: logger used for warnings/errors during extraction.
        :param input_encoding: optional override for the encoding of the
            HTML inside the container.
        :raises CHMError: if chmlib cannot open the file.
        '''
        CHMFile.__init__(self)
        if isinstance(input, str):
            enc = 'mbcs' if iswindows else filesystem_encoding
            try:
                input = input.encode(enc)
            except UnicodeEncodeError:
                from calibre.ptempfile import PersistentTemporaryFile
                with PersistentTemporaryFile(suffix='.chm') as t:
                    t.write(open(input, 'rb').read())
                input = t.name
        if not self.LoadCHM(input):
            raise CHMError("Unable to open CHM file '%s'"%(input,))
        self.log = log
        self.input_encoding = input_encoding
        self._sourcechm = input
        self._contents = None
        self._playorder = 0
        self._metadata = False
        self._extracted = False
        # paths of files re-encoded to utf-8 by _reformat()
        self.re_encoded_files = set()
        self.get_encodings()
        if self.home:
            self.home = self.decode_hhp_filename(self.home)
        if self.topics:
            self.topics = self.decode_hhp_filename(self.topics)
        # location of '.hhc' file, which is the CHM TOC.
        base = self.topics or self.home
        self.root = os.path.splitext(base.lstrip('/'))[0]
        self.hhc_path = self.root + ".hhc"
    def relpath_to_first_html_file(self):
        '''
        Return the container-relative path of the default topic, parsed
        from the binary #SYSTEM record. Used when the container has no
        HHC table of contents.
        :raises CHMError: if the #SYSTEM record has no default topic.
        '''
        # See https://www.nongnu.org/chmspec/latest/Internal.html#SYSTEM
        data = self.GetFile('/#SYSTEM')
        pos = 4
        while pos < len(data):
            # each entry is <code><length><data...>, little-endian
            code, length_of_data = struct.unpack_from('<HH', data, pos)
            pos += 4
            if code == 2:
                # code 2 == default topic
                default_topic = data[pos:pos+length_of_data].rstrip(b'\0')
                break
            pos += length_of_data
        else:
            raise CHMError('No default topic found in CHM file that has no HHC ToC either')
        default_topic = self.decode_hhp_filename(b'/' + default_topic)
        return default_topic[1:]
    def decode_hhp_filename(self, path):
        '''
        Decode a raw (bytes) internal CHM path to str by trying a series
        of candidate encodings. A candidate is accepted only if the
        decoded path actually resolves inside the container. str input is
        returned unchanged; returns None if no candidate resolves.
        '''
        if isinstance(path, str):
            return path
        for enc in (self.encoding_from_system_file, self.encoding_from_lcid, 'cp1252', 'cp1251', 'latin1', 'utf-8'):
            if enc:
                try:
                    q = path.decode(enc)
                except UnicodeDecodeError:
                    continue
                res, ui = self.ResolveObject(q)
                if res == chmlib.CHM_RESOLVE_SUCCESS:
                    return q
def get_encodings(self):
self.encoding_from_system_file = self.encoding_from_lcid = None
q = self.GetEncoding()
if q:
try:
if isinstance(q, bytes):
q = q.decode('ascii')
codecs.lookup(q)
self.encoding_from_system_file = q
except Exception:
pass
lcid = self.GetLCID()
if lcid is not None:
q = lcid[0]
if q:
try:
if isinstance(q, bytes):
q = q.decode('ascii')
codecs.lookup(q)
self.encoding_from_lcid = q
except Exception:
pass
def get_encoding(self):
return self.encoding_from_system_file or self.encoding_from_lcid or 'cp1252'
    def _parse_toc(self, ul, basedir=os.getcwd()):
        '''
        Recursively convert an hhc <ul> element into a TOC tree. Play
        order is assigned in document order via self._playorder.
        NOTE(review): the basedir default is evaluated once, at class
        creation time, not per call.
        '''
        toc = TOC(play_order=self._playorder, base_path=basedir, text='')
        self._playorder += 1
        for li in ul('li', recursive=False):
            # the 'Local' param holds the href, optionally with a fragment
            href = li.object('param', {'name': 'Local'})[0]['value']
            if href.count('#'):
                href, frag = href.split('#')
            else:
                frag = None
            name = self._deentity(li.object('param', {'name': 'Name'})[0]['value'])
            toc.add_item(href, frag, name, play_order=self._playorder)
            self._playorder += 1
            if li.ul:
                # nested <ul> == nested TOC level
                child = self._parse_toc(li.ul)
                child.parent = toc
                toc.append(child)
        return toc
def ResolveObject(self, path):
# filenames are utf-8 encoded in the chm index as far as I can
# determine, see https://tika.apache.org/1.11/api/org/apache/tika/parser/chm/accessor/ChmPmgiHeader.html
if not isinstance(path, bytes):
path = path.encode('utf-8')
return CHMFile.ResolveObject(self, path)
def file_exists(self, path):
res, ui = self.ResolveObject(path)
return res == chmlib.CHM_RESOLVE_SUCCESS
    def GetFile(self, path):
        '''
        Return the raw bytes of the internal file at *path*.
        :raises CHMError: if the path cannot be resolved or is empty.
        '''
        # have to have abs paths for ResolveObject, but Contents() deliberately
        # makes them relative. So we don't have to worry, re-add the leading /.
        # note this path refers to the internal CHM structure
        if path[0] != '/':
            path = '/' + path
        res, ui = self.ResolveObject(path)
        if res != chmlib.CHM_RESOLVE_SUCCESS:
            raise CHMError(f"Unable to locate {path!r} within CHM file {self.filename!r}")
        size, data = self.RetrieveObject(ui)
        if size == 0:
            raise CHMError(f"{path!r} is zero bytes in length!")
        return data
    def get_home(self):
        '''Return the raw bytes of the container's home page.'''
        return self.GetFile(self.home)
    def ExtractFiles(self, output_dir=os.getcwd(), debug_dump=False):
        '''
        Extract every internal file into *output_dir*, re-encoding HTML
        files to utf-8 (via _reformat) and fixing up self.hhc_path to
        point at a usable table-of-contents/start file.
        NOTE(review): the output_dir default is evaluated once, at class
        creation time, not per call.
        '''
        html_files = set()
        for path in self.Contents():
            fpath = path
            lpath = os.path.join(output_dir, fpath)
            self._ensure_dir(lpath)
            try:
                data = self.GetFile(path)
            except:
                self.log.exception('Failed to extract %s from CHM, ignoring'%path)
                continue
            if lpath.find(';') != -1:
                # fix file names with ";<junk>" at the end, see _reformat()
                lpath = lpath.split(';')[0]
            try:
                with open(lpath, 'wb') as f:
                    f.write(data)
                try:
                    if 'html' in guess_mimetype(path)[0]:
                        html_files.add(lpath)
                except:
                    pass
            except:
                if iswindows and len(lpath) > 250:
                    # Windows path length limit; skip rather than abort
                    self.log.warn('%r filename too long, skipping'%path)
                    continue
                raise
        if debug_dump:
            import shutil
            shutil.copytree(output_dir, os.path.join(debug_dump, 'debug_dump'))
        # Clean and re-encode the extracted HTML in place.
        for lpath in html_files:
            with open(lpath, 'r+b') as f:
                data = f.read()
                data = self._reformat(data, lpath)
                if isinstance(data, str):
                    data = data.encode('utf-8')
                f.seek(0)
                f.truncate()
                f.write(data)
        self._extracted = True
        # Fall-back chain for locating the ToC/start file when the
        # expected .hhc is not among the extracted top-level files.
        files = [y for y in os.listdir(output_dir) if
                os.path.isfile(os.path.join(output_dir, y))]
        if self.hhc_path not in files:
            # case-insensitive match first
            for f in files:
                if f.lower() == self.hhc_path.lower():
                    self.hhc_path = f
                    break
        if self.hhc_path not in files and files:
            # otherwise any html-ish file at the top level
            for f in files:
                if f.partition('.')[-1].lower() in {'html', 'htm', 'xhtm',
                        'xhtml'}:
                    self.hhc_path = f
                    break
        if self.hhc_path == '.hhc' and self.hhc_path not in files:
            # otherwise search the whole tree for a conventional index page
            from calibre import walk
            for x in walk(output_dir):
                if os.path.basename(x).lower() in ('index.htm', 'index.html',
                        'contents.htm', 'contents.html'):
                    self.hhc_path = os.path.relpath(x, output_dir)
                    break
        if self.hhc_path not in files and files:
            # last resort: the first extracted file
            self.hhc_path = files[0]
    def _reformat(self, data, htmlpath):
        '''
        Clean one extracted HTML file: strip javascript, remove nav-bar
        tables and a leading <br>, repair broken image src attributes and
        unwrap a lone page-level wrapper table. Returns the cleaned
        markup as str (and records htmlpath in re_encoded_files), or the
        original *data* unchanged when parsing/serialising fails.
        '''
        if self.input_encoding:
            data = data.decode(self.input_encoding)
        try:
            data = xml_to_unicode(data, strip_encoding_pats=True)[0]
            soup = BeautifulSoup(data)
        except ValueError:
            # hit some strange encoding problems...
            self.log.exception("Unable to parse html for cleaning, leaving it")
            return data
        # nuke javascript...
        [s.extract() for s in soup('script')]
        # See if everything is inside a <head> tag
        # https://bugs.launchpad.net/bugs/1273512
        body = soup.find('body')
        if body is not None and body.parent.name == 'head':
            html = soup.find('html')
            html.insert(len(html), body)
        # remove forward and back nav bars from the top/bottom of each page
        # cos they really fuck with the flow of things and generally waste space
        # since we can't use [a,b] syntax to select arbitrary items from a list
        # we'll have to do this manually...
        # only remove the tables, if they have an image with an alt attribute
        # containing prev, next or team
        t = soup('table')
        if t:
            if (t[0].previousSibling is None or t[0].previousSibling.previousSibling is None):
                try:
                    alt = t[0].img['alt'].lower()
                    if alt.find('prev') != -1 or alt.find('next') != -1 or alt.find('team') != -1:
                        t[0].extract()
                except:
                    pass
            if (t[-1].nextSibling is None or t[-1].nextSibling.nextSibling is None):
                try:
                    alt = t[-1].img['alt'].lower()
                    if alt.find('prev') != -1 or alt.find('next') != -1 or alt.find('team') != -1:
                        t[-1].extract()
                except:
                    pass
        # for some very odd reason each page's content appears to be in a table
        # too. and this table has sub-tables for random asides... grr.
        # remove br at top of page if present after nav bars removed
        br = soup('br')
        if br:
            if check_all_prev_empty(br[0].previousSibling):
                br[0].extract()
        # some images seem to be broken in some chm's :/
        base = os.path.dirname(htmlpath)
        for img in soup('img', src=True):
            src = img['src']
            ipath = os.path.join(base, *src.split('/'))
            if os.path.exists(ipath):
                continue
            # drop any ';<junk>' suffix and try again
            src = src.split(';')[0]
            if not src:
                continue
            ipath = os.path.join(base, *src.split('/'))
            if not os.path.exists(ipath):
                # strip leading ../ components that point outside the tree
                while src.startswith('../'):
                    src = src[3:]
            img['src'] = src
        try:
            # if there is only a single table with a single element
            # in the body, replace it by the contents of this single element
            tables = soup.body.findAll('table', recursive=False)
            if tables and len(tables) == 1:
                trs = tables[0].findAll('tr', recursive=False)
                if trs and len(trs) == 1:
                    tds = trs[0].findAll('td', recursive=False)
                    if tds and len(tds) == 1:
                        tdContents = tds[0].contents
                        tableIdx = soup.body.contents.index(tables[0])
                        tables[0].extract()
                        while tdContents:
                            soup.body.insert(tableIdx, tdContents.pop())
        except:
            pass
        # do not prettify, it would reformat the <pre> tags!
        try:
            ans = soup.decode_contents()
            self.re_encoded_files.add(os.path.abspath(htmlpath))
            return ans
        except RuntimeError:
            return data
    def Contents(self):
        '''
        Return (and cache) the list of container-relative paths of all
        normal files inside the CHM.
        '''
        if self._contents is not None:
            return self._contents
        paths = []
        def get_paths(chm, ui, ctx):
            # these are supposed to be UTF-8 in CHM as best as I can determine
            # see https://tika.apache.org/1.11/api/org/apache/tika/parser/chm/accessor/ChmPmgiHeader.html
            path = as_unicode(ui.path, 'utf-8')
            # skip directories
            # note this path refers to the internal CHM structure
            if path[-1] != '/':
                # and make paths relative
                paths.append(path.lstrip('/'))
        chmlib.chm_enumerate(self.file, chmlib.CHM_ENUMERATE_NORMAL, get_paths, None)
        self._contents = paths
        return self._contents
def _ensure_dir(self, path):
dir = os.path.dirname(path)
if not os.path.isdir(dir):
os.makedirs(dir)
    def extract_content(self, output_dir=os.getcwd(), debug_dump=False):
        # Thin wrapper kept for API compatibility; see ExtractFiles().
        # NOTE(review): the output_dir default binds os.getcwd() at class
        # creation time, not at call time.
        self.ExtractFiles(output_dir=output_dir, debug_dump=debug_dump)
| 13,908 | Python | .py | 324 | 30.484568 | 116 | 0.542669 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,321 | formatwriter.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/formatwriter.py | '''
Interface defining the necessary public functions for a pdb format writer.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
class FormatWriter:
    '''
    Interface that every PDB format writer must implement.
    '''
    def __init__(self, opts, log):
        # opts: conversion options; log: conversion logger
        raise NotImplementedError()
    def write_content(self, oeb_book, output_stream, metadata=None):
        # Serialize *oeb_book* into *output_stream* in the target format.
        raise NotImplementedError()
| 408 | Python | .py | 11 | 33.454545 | 74 | 0.709184 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,322 | header.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/header.py | '''
Read the header data from a pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
import struct
import time
from polyglot.builtins import long_type
class PdbHeaderReader:
    '''
    Read the header of a Palm Database (PDB) file from a seekable binary
    stream: identity, record (section) count, title and per-section
    offsets/data.
    '''

    def __init__(self, stream):
        self.stream = stream
        self.ident = self.identity()
        self.num_sections = self.section_count()
        self.title = self.name()

    def identity(self):
        '''Return the 8 character type/creator identifier (offset 60).'''
        self.stream.seek(60)
        ident = self.stream.read(8)
        return ident.decode('utf-8')

    def section_count(self):
        '''Return the number of records in the database (offset 76).'''
        self.stream.seek(76)
        return struct.unpack('>H', self.stream.read(2))[0]

    def name(self):
        '''Return the database name (first 32 bytes) as bytes, with NULs
        removed and disallowed characters replaced by underscores.'''
        self.stream.seek(0)
        return re.sub(b'[^-A-Za-z0-9 ]+', b'_', self.stream.read(32).replace(b'\x00', b''))

    def full_section_info(self, number):
        '''Return (offset, flags, value) for record *number*.
        :raises ValueError: if *number* is out of range.'''
        if not (0 <= number < self.num_sections):
            raise ValueError('Not a valid section number %i' % number)
        self.stream.seek(78 + number * 8)
        # Bug fix: unpack() returns a 5-tuple; the previous code indexed
        # it with [0], which made the 5-way unpacking below raise
        # TypeError on every call.
        offset, a1, a2, a3, a4 = struct.unpack('>LBBBB', self.stream.read(8))
        flags, val = a1, a2 << 16 | a3 << 8 | a4
        return (offset, flags, val)

    def section_offset(self, number):
        '''Return the absolute file offset of record *number*.
        :raises ValueError: if *number* is out of range.'''
        if not (0 <= number < self.num_sections):
            raise ValueError('Not a valid section number %i' % number)
        self.stream.seek(78 + number * 8)
        return struct.unpack('>LBBBB', self.stream.read(8))[0]

    def section_data(self, number):
        '''Return the raw bytes of record *number*.
        :raises ValueError: if *number* is out of range.'''
        if not (0 <= number < self.num_sections):
            raise ValueError('Not a valid section number %i' % number)
        start = self.section_offset(number)
        if number == self.num_sections - 1:
            # the last record runs to the end of the file
            self.stream.seek(0, 2)
            end = self.stream.tell()
        else:
            end = self.section_offset(number + 1)
        self.stream.seek(start)
        return self.stream.read(end - start)
class PdbHeaderBuilder:
    '''
    Build and write a PDB header for a given identity and title plus a
    list of section lengths.
    '''

    def __init__(self, identity, title):
        self.identity = identity.ljust(3, '\x00')[:8].encode('utf-8')
        if isinstance(title, str):
            title = title.encode('ascii', 'replace')
        # 31 sanitized bytes plus a terminating NUL
        self.title = b'%s\x00' % re.sub(b'[^-A-Za-z0-9 ]+', b'_', title).ljust(31, b'\x00')[:31]

    def build_header(self, section_lengths, out_stream):
        '''
        Write the 78-byte PDB header and record index to *out_stream*.
        section_lengths = Length of each section in file.
        '''
        now = int(time.time())
        nrecords = len(section_lengths)
        # name + attrs/version + create/modify/backup dates + modnum/appinfo/sortinfo
        out_stream.write(self.title + struct.pack('>HHIIIIII', 0, 0, now, now, 0, 0, 0, 0))
        out_stream.write(self.identity + struct.pack('>IIH', nrecords, 0, nrecords))
        # record index entries: 4-byte offset, attribute byte, 3-byte uid
        offset = 78 + (8 * nrecords) + 2
        for length in section_lengths:
            out_stream.write(struct.pack('>LBBBB', int(offset), 0, 0, 0, 0))
            offset += length
        out_stream.write(b'\x00\x00')
| 2,897 | Python | .py | 68 | 34.691176 | 96 | 0.597435 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,323 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
class PDBError(Exception):
    '''Base exception for all errors raised by the PDB readers/writers.'''
    pass
# Lazily populated by _import_readers(); maps identity -> reader class.
FORMAT_READERS = None
def _import_readers():
    # Lazily populate FORMAT_READERS, mapping the 8 character PDB
    # type/creator identity to the reader class that handles it. The
    # imports are deferred so that importing this module stays cheap.
    global FORMAT_READERS
    from calibre.ebooks.pdb.ereader.reader import Reader as ereader_reader
    from calibre.ebooks.pdb.haodoo.reader import Reader as haodoo_reader
    from calibre.ebooks.pdb.palmdoc.reader import Reader as palmdoc_reader
    from calibre.ebooks.pdb.pdf.reader import Reader as pdf_reader
    from calibre.ebooks.pdb.plucker.reader import Reader as plucker_reader
    from calibre.ebooks.pdb.ztxt.reader import Reader as ztxt_reader
    FORMAT_READERS = {
        'PNPdPPrs': ereader_reader,
        'PNRdPPrs': ereader_reader,
        'zTXTGPlm': ztxt_reader,
        'TEXtREAd': palmdoc_reader,
        '.pdfADBE': pdf_reader,
        'DataPlkr': plucker_reader,
        'BOOKMTIT': haodoo_reader,
        'BOOKMTIU': haodoo_reader,
    }
ALL_FORMAT_WRITERS = 'doc', 'ereader', 'ztxt' # keep sorted alphabetically
FORMAT_WRITERS = None
def _import_writers():
    # Lazily populate FORMAT_WRITERS, mapping the output extension to the
    # writer class; imports are deferred like in _import_readers().
    global FORMAT_WRITERS
    from calibre.ebooks.pdb.ereader.writer import Writer as ereader_writer
    from calibre.ebooks.pdb.palmdoc.writer import Writer as palmdoc_writer
    from calibre.ebooks.pdb.ztxt.writer import Writer as ztxt_writer
    FORMAT_WRITERS = {
        'doc': palmdoc_writer,
        'ztxt': ztxt_writer,
        'ereader': ereader_writer,
    }
# Human readable names for known PDB identities, including formats that
# calibre cannot read (useful for friendly "unsupported format" messages).
IDENTITY_TO_NAME = {
    'PNPdPPrs': 'eReader',
    'PNRdPPrs': 'eReader',
    'zTXTGPlm': 'zTXT',
    'TEXtREAd': 'PalmDOC',
    '.pdfADBE': 'Adobe Reader',
    'DataPlkr': 'Plucker',
    'BOOKMTIT': 'Haodoo.net',
    'BOOKMTIU': 'Haodoo.net',
    'BVokBDIC': 'BDicty',
    'DB99DBOS': 'DB (Database program)',
    'vIMGView': 'FireViewer (ImageViewer)',
    'PmDBPmDB': 'HanDBase',
    'InfoINDB': 'InfoView',
    'ToGoToGo': 'iSilo',
    'SDocSilX': 'iSilo 3',
    'JbDbJBas': 'JFile',
    'JfDbJFil': 'JFile Pro',
    'DATALSdb': 'LIST',
    'Mdb1Mdb1': 'MobileDB',
    'BOOKMOBI': 'MobiPocket',
    'DataSprd': 'QuickSheet',
    'SM01SMem': 'SuperMemo',
    'TEXtTlDc': 'TealDoc',
    'InfoTlIf': 'TealInfo',
    'DataTlMl': 'TealMeal',
    'DataTlPt': 'TealPaint',
    'dataTDBP': 'ThinkDB',
    'TdatTide': 'Tides',
    'ToRaTRPW': 'TomeRaider',
    'BDOCWrdS': 'WordSmith',
}
def get_reader(identity):
    '''
    Return the reader class registered for *identity*, or None when no
    reader handles that identity.
    '''
    global FORMAT_READERS
    if FORMAT_READERS is None:
        _import_readers()
    return FORMAT_READERS.get(identity)
def get_writer(extension):
    '''
    Return the writer class registered for *extension*, or None when no
    writer produces that extension.
    '''
    global FORMAT_WRITERS
    if FORMAT_WRITERS is None:
        _import_writers()
    return FORMAT_WRITERS.get(extension)
| 2,821 | Python | .py | 84 | 28.357143 | 75 | 0.668874 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,324 | formatreader.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/formatreader.py | '''
Interface defining the necessary public functions for a pdb format reader.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
class FormatReader:
    '''
    Interface that every PDB format reader must implement.
    '''
    def __init__(self, header, stream, log, options):
        # header: PdbHeaderReader; stream: open PDB file; log/options:
        # conversion logger and options
        raise NotImplementedError()
    def extract_content(self, output_dir):
        # Convert the PDB contents into *output_dir* and return an OEB book.
        raise NotImplementedError()
27,325 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/plucker/reader.py | __license__ = 'GPL v3'
__copyright__ = '20011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
import struct
import zlib
from collections import OrderedDict
from calibre import CurrentDir
from calibre.ebooks.compression.palmdoc import decompress_doc
from calibre.ebooks.pdb.formatreader import FormatReader
from calibre.utils.img import Canvas, image_from_data, save_cover_data_to
from calibre.utils.imghdr import identify
from polyglot.builtins import codepoint_to_chr
# Plucker record data types (the 'type' byte of each section header).
DATATYPE_PHTML = 0
DATATYPE_PHTML_COMPRESSED = 1
DATATYPE_TBMP = 2
DATATYPE_TBMP_COMPRESSED = 3
DATATYPE_MAILTO = 4
DATATYPE_LINK_INDEX = 5
DATATYPE_LINKS = 6
DATATYPE_LINKS_COMPRESSED = 7
DATATYPE_BOOKMARKS = 8
DATATYPE_CATEGORY = 9
DATATYPE_METADATA = 10
DATATYPE_STYLE_SHEET = 11
DATATYPE_FONT_PAGE = 12
DATATYPE_TABLE = 13
DATATYPE_TABLE_COMPRESSED = 14
DATATYPE_COMPOSITE_IMAGE = 15
DATATYPE_PAGELIST_METADATA = 16
DATATYPE_SORTED_URL_INDEX = 17
DATATYPE_SORTED_URL = 18
DATATYPE_SORTED_URL_COMPRESSED = 19
DATATYPE_EXT_ANCHOR_INDEX = 20
DATATYPE_EXT_ANCHOR = 21
DATATYPE_EXT_ANCHOR_COMPRESSED = 22
# IETF IANA MIBenum value for the character set.
# See the http://www.iana.org/assignments/character-sets for valid values.
# Not all character sets are handled by Python. This is a small subset that
# maps MIBenum values to Python standard encodings, see
# http://docs.python.org/library/codecs.html#standard-encodings
MIBNUM_TO_NAME = {
    3: 'ascii',
    4: 'latin_1',
    5: 'iso8859_2',
    6: 'iso8859_3',
    7: 'iso8859_4',
    8: 'iso8859_5',
    9: 'iso8859_6',
    10: 'iso8859_7',
    11: 'iso8859_8',
    12: 'iso8859_9',
    13: 'iso8859_10',
    17: 'shift_jis',
    18: 'euc_jp',
    27: 'utf_7',
    36: 'euc_kr',
    37: 'iso2022_kr',
    38: 'euc_kr',
    39: 'iso2022_jp',
    40: 'iso2022_jp_2',
    106: 'utf-8',
    109: 'iso8859_13',
    110: 'iso8859_14',
    111: 'iso8859_15',
    112: 'iso8859_16',
    1013: 'utf_16_be',
    1014: 'utf_16_le',
    1015: 'utf_16',
    2009: 'cp850',
    2010: 'cp852',
    2011: 'cp437',
    2013: 'cp862',
    2025: 'gb2312',
    2026: 'big5',
    2028: 'cp037',
    2043: 'cp424',
    2044: 'cp500',
    2046: 'cp855',
    2047: 'cp857',
    2048: 'cp860',
    2049: 'cp861',
    2050: 'cp863',
    2051: 'cp864',
    2052: 'cp865',
    2054: 'cp869',
    2063: 'cp1026',
    2085: 'hz',
    2086: 'cp866',
    2087: 'cp775',
    2089: 'cp858',
    2091: 'cp1140',
    2102: 'big5hkscs',
    2250: 'cp1250',
    2251: 'cp1251',
    2252: 'cp1252',
    2253: 'cp1253',
    2254: 'cp1254',
    2255: 'cp1255',
    2256: 'cp1256',
    2257: 'cp1257',
    2258: 'cp1258',
}
class HeaderRecord:
    '''
    Plucker header, stored in PDB record 0: the compression scheme, the
    table of reserved records, and the uid of the home HTML record.
    '''

    def __init__(self, raw):
        # Labeled "version" in the spec; 2 means zlib compressed,
        # 1 means PalmDOC compressed.
        self.uid, self.compression, self.records = struct.unpack('>HHH', raw[0:6])
        # uid of the first html file. This should link
        # to other files which in turn may link to others.
        self.home_html = None
        self.reserved = {}
        for idx in range(self.records):
            name, rec_id = struct.unpack_from('>HH', raw, 6 + 4 * idx)
            self.reserved[rec_id] = name
            if name == 0:
                self.home_html = rec_id
class SectionHeader:
    '''
    Header present at the start of every section (record). It carries the
    section's uid, paragraph count, uncompressed size, type and flags.
    '''

    def __init__(self, raw):
        (self.uid, self.paragraphs, self.size,
         self.type, self.flags) = struct.unpack('>HHHBB', raw[:8])
class SectionHeaderText:
    '''
    Sub-header for text (PHTML) records: per-paragraph sizes, attributes
    and cumulative end offsets into the uncompressed PHTML stream.
    '''

    def __init__(self, section_header, raw):
        # Each paragraph contributes a (size, attributes) pair of 16-bit
        # big-endian values at the start of the record.
        self.sizes = []
        self.attributes = []
        for idx in range(section_header.paragraphs):
            size, attr = struct.unpack_from('>HH', raw, 4 * idx)
            self.sizes.append(size)
            self.attributes.append(attr)
        # Cumulative offset of the end of each paragraph, measured from
        # the beginning of the PHTML.
        self.paragraph_offsets = []
        total = 0
        for size in self.sizes:
            total += size
            self.paragraph_offsets.append(total)
class SectionMetadata:
    '''
    Metadata.

    This does not store metadata such as title, or author.
    That metadata would be best retrieved with the PDB (plucker)
    metadata reader.

    This stores document specific information such as the
    text encoding.

    Note: There is a default encoding but each text section
    can be assigned a different encoding.
    '''

    def __init__(self, raw):
        self.default_encoding = 'latin-1'
        self.exceptional_uid_encodings = {}
        self.owner_id = None
        record_count, = struct.unpack('>H', raw[0:2])
        adv = 0
        for i in range(record_count):
            try:
                type, length = struct.unpack_from('>HH', raw, 2 + adv)
            except struct.error:
                break
            # CharSet
            if type == 1:
                val, = struct.unpack('>H', raw[6+adv:8+adv])
                self.default_encoding = MIBNUM_TO_NAME.get(val, 'latin-1')
            # ExceptionalCharSets: (uid, MIBenum) pairs
            elif type == 2:
                ii_adv = 0
                for ii in range(length // 2):
                    uid, = struct.unpack('>H', raw[6+adv+ii_adv:8+adv+ii_adv])
                    mib, = struct.unpack('>H', raw[8+adv+ii_adv:10+adv+ii_adv])
                    self.exceptional_uid_encodings[uid] = MIBNUM_TO_NAME.get(mib, 'latin-1')
                    ii_adv += 4
            # OwnerID
            elif type == 3:
                # Bug fix: struct.unpack returns a tuple; without the
                # trailing [0], owner_id was stored as a 1-tuple, which is
                # always truthy (even for an owner id of 0) in the
                # `if self.owner_id:` check done by the Reader.
                self.owner_id = struct.unpack('>I', raw[6+adv:10+adv])[0]
            # Author, Title, PubDate
            # Ignored here. The metadata reader plugin
            # will get this info because if it's missing
            # the metadata reader plugin will use fall
            # back data from elsewhere in the file.
            elif type in (4, 5, 6):
                pass
            # Linked Documents
            elif type == 7:
                pass
            adv += 2*length
class SectionText:
    '''
    A text (PHTML) record: the parsed text sub-header plus the raw PHTML
    payload that follows it.
    '''

    def __init__(self, section_header, raw):
        self.header = SectionHeaderText(section_header, raw)
        # The per-paragraph (size, attribute) table occupies 4 bytes per
        # paragraph; everything after it is the PHTML data itself.
        self.data = raw[4 * section_header.paragraphs:]
class SectionCompositeImage:
    '''
    A composite image: a rows x columns grid whose cells are the uids of
    the image records that make up the final picture.

    layout[r][c] is the uid of the image record occupying grid position
    (r, c) in the assembled composite.
    '''

    def __init__(self, raw):
        self.columns, self.rows = struct.unpack('>HH', raw[0:4])
        self.layout = []
        pos = 4
        for _row in range(self.rows):
            cells = []
            for _col in range(self.columns):
                cells.append(struct.unpack_from('>H', raw, pos)[0])
                pos += 2
            self.layout.append(cells)
class Reader(FormatReader):
'''
Convert a plucker archive into HTML.
TODO:
* UTF 16 and 32 characters.
* Margins.
* Alignment.
* Font color.
* DATATYPE_MAILTO
* DATATYPE_TABLE(_COMPRESSED)
* DATATYPE_EXT_ANCHOR_INDEX
* DATATYPE_EXT_ANCHOR(_COMPRESSED)
'''
    def __init__(self, header, stream, log, options):
        '''
        :param header: a PdbHeaderReader for the open PDB file.
        :param stream: the PDB file itself (used for the metadata reader).
        :param log: conversion logger.
        :param options: conversion options object.
        '''
        self.stream = stream
        self.log = log
        self.options = options
        # Mapping of section uid to our internal
        # list of sections.
        self.uid_section_number = OrderedDict()
        self.uid_text_secion_number = OrderedDict()
        # Per-record text encoding overrides (uid -> codec name).
        self.uid_text_secion_encoding = {}
        self.uid_image_section_number = {}
        self.uid_composite_image_section_number = {}
        self.metadata_section_number = None
        self.default_encoding = 'latin-1'
        self.owner_id = None
        self.sections = []
        # The Plucker record0 header
        self.header_record = HeaderRecord(header.section_data(0))
        for i in range(1, header.num_sections):
            section_number = len(self.sections)
            # The length of the section header.
            # Where the actual data in the section starts.
            start = 8
            section = None
            raw_data = header.section_data(i)
            # Every section has a section header.
            section_header = SectionHeader(raw_data)
            # Store only the section types we care about.
            if section_header.type in (DATATYPE_PHTML, DATATYPE_PHTML_COMPRESSED):
                self.uid_text_secion_number[section_header.uid] = section_number
                section = SectionText(section_header, raw_data[start:])
            elif section_header.type in (DATATYPE_TBMP, DATATYPE_TBMP_COMPRESSED):
                self.uid_image_section_number[section_header.uid] = section_number
                section = raw_data[start:]
            elif section_header.type == DATATYPE_METADATA:
                self.metadata_section_number = section_number
                section = SectionMetadata(raw_data[start:])
            elif section_header.type == DATATYPE_COMPOSITE_IMAGE:
                self.uid_composite_image_section_number[section_header.uid] = section_number
                section = SectionCompositeImage(raw_data[start:])
            # Store the section.
            if section:
                self.uid_section_number[section_header.uid] = section_number
                self.sections.append((section_header, section))
        # Store useful information from the metadata section locally
        # to make access easier.
        if self.metadata_section_number:
            mdata_section = self.sections[self.metadata_section_number][1]
            for k, v in mdata_section.exceptional_uid_encodings.items():
                self.uid_text_secion_encoding[k] = v
            self.default_encoding = mdata_section.default_encoding
            self.owner_id = mdata_section.owner_id
        # Get the metadata (title, author, ...) with the metadata reader.
        from calibre.ebooks.metadata.pdb import get_metadata
        self.mi = get_metadata(stream, False)
def extract_content(self, output_dir):
# Each text record is independent (unless the continuation
# value is set in the previous record). Put each converted
# text recorded into a separate file. We will reference the
# home.html file as the first file and let the HTML input
# plugin assemble the order based on hyperlinks.
with CurrentDir(output_dir):
for uid, num in self.uid_text_secion_number.items():
self.log.debug(f'Writing record with uid: {uid} as {uid}.html')
with open('%s.html' % uid, 'wb') as htmlf:
html = '<html><body>'
section_header, section_data = self.sections[num]
if section_header.type == DATATYPE_PHTML:
html += self.process_phtml(section_data.data, section_data.header.paragraph_offsets)
elif section_header.type == DATATYPE_PHTML_COMPRESSED:
d = self.decompress_phtml(section_data.data)
html += self.process_phtml(d, section_data.header.paragraph_offsets)
html += '</body></html>'
htmlf.write(html.encode('utf-8'))
# Images.
# Cache the image sizes in case they are used by a composite image.
images = set()
if not os.path.exists(os.path.join(output_dir, 'images/')):
os.makedirs(os.path.join(output_dir, 'images/'))
with CurrentDir(os.path.join(output_dir, 'images/')):
# Single images.
for uid, num in self.uid_image_section_number.items():
section_header, section_data = self.sections[num]
if section_data:
idata = None
if section_header.type == DATATYPE_TBMP:
idata = section_data
elif section_header.type == DATATYPE_TBMP_COMPRESSED:
if self.header_record.compression == 1:
idata = decompress_doc(section_data)
elif self.header_record.compression == 2:
idata = zlib.decompress(section_data)
try:
save_cover_data_to(idata, '%s.jpg' % uid, compression_quality=70)
images.add(uid)
self.log.debug(f'Wrote image with uid {uid} to images/{uid}.jpg')
except Exception as e:
self.log.error(f'Failed to write image with uid {uid}: {e}')
else:
self.log.error('Failed to write image with uid %s: No data.' % uid)
# Composite images.
# We're going to use the already compressed .jpg images here.
for uid, num in self.uid_composite_image_section_number.items():
try:
section_header, section_data = self.sections[num]
# Get the final width and height.
width = 0
height = 0
for row in section_data.layout:
row_width = 0
col_height = 0
for col in row:
if col not in images:
raise Exception('Image with uid: %s missing.' % col)
w, h = identify(open('%s.jpg' % col, 'rb'))[1:]
row_width += w
if col_height < h:
col_height = h
if width < row_width:
width = row_width
height += col_height
# Create a new image the total size of all image
# parts. Put the parts into the new image.
with Canvas(width, height) as canvas:
y_off = 0
for row in section_data.layout:
x_off = 0
largest_height = 0
for col in row:
im = image_from_data(open('%s.jpg' % col, 'rb').read())
canvas.compose(im, x_off, y_off)
w, h = im.width(), im.height()
x_off += w
if largest_height < h:
largest_height = h
y_off += largest_height
with open('%s.jpg' % uid) as out:
out.write(canvas.export(compression_quality=70))
self.log.debug(f'Wrote composite image with uid {uid} to images/{uid}.jpg')
except Exception as e:
self.log.error(f'Failed to write composite image with uid {uid}: {e}')
# Run the HTML through the html processing plugin.
from calibre.customize.ui import plugin_for_input_format
html_input = plugin_for_input_format('html')
for opt in html_input.options:
setattr(self.options, opt.option.name, opt.recommended_value)
self.options.input_encoding = 'utf-8'
odi = self.options.debug_pipeline
self.options.debug_pipeline = None
# Determine the home.html record uid. This should be set in the
# reserved values in the metadata recorded. home.html is the first
# text record (should have hyper link references to other records)
# in the document.
try:
home_html = self.header_record.home_html
if not home_html:
home_html = self.uid_text_secion_number.items()[0][0]
except:
raise Exception('Could not determine home.html')
# Generate oeb from html conversion.
oeb = html_input.convert(open('%s.html' % home_html, 'rb'), self.options, 'html', self.log, {})
self.options.debug_pipeline = odi
return oeb
def decompress_phtml(self, data):
if self.header_record.compression == 2:
if self.owner_id:
raise NotImplementedError
return zlib.decompress(data)
elif self.header_record.compression == 1:
from calibre.ebooks.compression.palmdoc import decompress_doc
return decompress_doc(data)
def process_phtml(self, d, paragraph_offsets=()):
html = '<p id="p0">'
offset = 0
paragraph_open = True
link_open = False
need_set_p_id = False
p_num = 1
font_specifier_close = ''
while offset < len(d):
if not paragraph_open:
if need_set_p_id:
html += '<p id="p%s">' % p_num
p_num += 1
need_set_p_id = False
else:
html += '<p>'
paragraph_open = True
c = ord(d[offset:offset+1])
# PHTML "functions"
if c == 0x0:
offset += 1
c = ord(d[offset:offset+1])
# Page link begins
# 2 Bytes
# record ID
if c == 0x0a:
offset += 1
id = struct.unpack('>H', d[offset:offset+2])[0]
if id in self.uid_text_secion_number:
html += '<a href="%s.html">' % id
link_open = True
offset += 1
# Targeted page link begins
# 3 Bytes
# record ID, target
elif c == 0x0b:
offset += 3
# Paragraph link begins
# 4 Bytes
# record ID, paragraph number
elif c == 0x0c:
offset += 1
id = struct.unpack('>H', d[offset:offset+2])[0]
offset += 2
pid = struct.unpack('>H', d[offset:offset+2])[0]
if id in self.uid_text_secion_number:
html += f'<a href="{id}.html#p{pid}">'
link_open = True
offset += 1
# Targeted paragraph link begins
# 5 Bytes
# record ID, paragraph number, target
elif c == 0x0d:
offset += 5
# Link ends
# 0 Bytes
elif c == 0x08:
if link_open:
html += '</a>'
link_open = False
# Set font
# 1 Bytes
# font specifier
elif c == 0x11:
offset += 1
specifier = d[offset]
html += font_specifier_close
# Regular text
if specifier == 0:
font_specifier_close = ''
# h1
elif specifier == 1:
html += '<h1>'
font_specifier_close = '</h1>'
# h2
elif specifier == 2:
html += '<h2>'
font_specifier_close = '</h2>'
# h3
elif specifier == 3:
html += '<h13>'
font_specifier_close = '</h3>'
# h4
elif specifier == 4:
html += '<h4>'
font_specifier_close = '</h4>'
# h5
elif specifier == 5:
html += '<h5>'
font_specifier_close = '</h5>'
# h6
elif specifier == 6:
html += '<h6>'
font_specifier_close = '</h6>'
# Bold
elif specifier == 7:
html += '<b>'
font_specifier_close = '</b>'
# Fixed-width
elif specifier == 8:
html += '<tt>'
font_specifier_close = '</tt>'
# Small
elif specifier == 9:
html += '<small>'
font_specifier_close = '</small>'
# Subscript
elif specifier == 10:
html += '<sub>'
font_specifier_close = '</sub>'
# Superscript
elif specifier == 11:
html += '<sup>'
font_specifier_close = '</sup>'
# Embedded image
# 2 Bytes
# image record ID
elif c == 0x1a:
offset += 1
uid = struct.unpack('>H', d[offset:offset+2])[0]
html += '<img src="images/%s.jpg" />' % uid
offset += 1
# Set margin
# 2 Bytes
# left margin, right margin
elif c == 0x22:
offset += 2
# Alignment of text
# 1 Bytes
# alignment
elif c == 0x29:
offset += 1
# Horizontal rule
# 3 Bytes
# 8-bit height, 8-bit width (pixels), 8-bit width (%, 1-100)
elif c == 0x33:
offset += 3
if paragraph_open:
html += '</p>'
paragraph_open = False
html += '<hr />'
# New line
# 0 Bytes
elif c == 0x38:
if paragraph_open:
html += '</p>\n'
paragraph_open = False
# Italic text begins
# 0 Bytes
elif c == 0x40:
html += '<i>'
# Italic text ends
# 0 Bytes
elif c == 0x48:
html += '</i>'
# Set text color
# 3 Bytes
# 8-bit red, 8-bit green, 8-bit blue
elif c == 0x53:
offset += 3
# Multiple embedded image
# 4 Bytes
# alternate image record ID, image record ID
elif c == 0x5c:
offset += 3
uid = struct.unpack('>H', d[offset:offset+2])[0]
html += '<img src="images/%s.jpg" />' % uid
offset += 1
# Underline text begins
# 0 Bytes
elif c == 0x60:
html += '<u>'
# Underline text ends
# 0 Bytes
elif c == 0x68:
html += '</u>'
# Strike-through text begins
# 0 Bytes
elif c == 0x70:
html += '<s>'
# Strike-through text ends
# 0 Bytes
elif c == 0x78:
html += '</s>'
# 16-bit Unicode character
# 3 Bytes
# alternate text length, 16-bit unicode character
elif c == 0x83:
offset += 3
# 32-bit Unicode character
# 5 Bytes
# alternate text length, 32-bit unicode character
elif c == 0x85:
offset += 5
# Begin custom font span
# 6 Bytes
# font page record ID, X page position, Y page position
elif c == 0x8e:
offset += 6
# Adjust custom font glyph position
# 4 Bytes
# X page position, Y page position
elif c == 0x8c:
offset += 4
# Change font page
# 2 Bytes
# font record ID
elif c == 0x8a:
offset += 2
# End custom font span
# 0 Bytes
elif c == 0x88:
pass
# Begin new table row
# 0 Bytes
elif c == 0x90:
pass
# Insert table (or table link)
# 2 Bytes
# table record ID
elif c == 0x92:
offset += 2
# Table cell data
# 7 Bytes
# 8-bit alignment, 16-bit image record ID, 8-bit columns, 8-bit rows, 16-bit text length
elif c == 0x97:
offset += 7
# Exact link modifier
# 2 Bytes
# Paragraph Offset (The Exact Link Modifier modifies a Paragraph Link or
# Targeted Paragraph Link function to specify an exact byte offset within
# the paragraph. This function must be followed immediately by the
# function it modifies).
elif c == 0x9a:
offset += 2
elif c == 0xa0:
html += ' '
else:
html += codepoint_to_chr(c)
offset += 1
if offset in paragraph_offsets:
need_set_p_id = True
if paragraph_open:
html += '</p>\n'
paragraph_open = False
if paragraph_open:
html += '</p>'
return html
| 26,725 | Python | .py | 671 | 26.007452 | 108 | 0.491134 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,326 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/pdf/reader.py | '''
Read content from palmdoc pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2010, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.pdb.formatreader import FormatReader
from calibre.ptempfile import PersistentTemporaryFile
class Reader(FormatReader):

    '''
    Extracts the PDF embedded in a PDB container and hands it to the PDF
    input plugin for conversion.
    '''

    def __init__(self, header, stream, log, options):
        self.header = header
        self.stream = stream
        self.log = log
        self.options = options

    def extract_content(self, output_dir):
        '''Write all PDB sections to a temporary .pdf file and convert it.'''
        self.log.info('Extracting PDF...')

        pdf = PersistentTemporaryFile('.pdf')
        pdf.close()
        # Open the temp file by name: the PersistentTemporaryFile object is a
        # (closed) file object, not a path, so it must not be passed to open().
        with open(pdf.name, 'wb') as out:
            for x in range(self.header.section_count()):
                out.write(self.header.section_data(x))

        from calibre.customize.ui import plugin_for_input_format
        pdf_plugin = plugin_for_input_format('pdf')
        # Fill in any conversion options the caller did not supply.
        for opt in pdf_plugin.options:
            if not hasattr(self.options, opt.option.name):
                setattr(self.options, opt.option.name, opt.recommended_value)

        return pdf_plugin.convert(open(pdf.name, 'rb'), self.options, 'pdf', self.log, {})
| 1,169 | Python | .py | 28 | 34.642857 | 85 | 0.656637 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,327 | writer.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/palmdoc/writer.py | '''
Writer content to palmdoc pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import struct
from calibre.ebooks.pdb.formatwriter import FormatWriter
from calibre.ebooks.pdb.header import PdbHeaderBuilder
from calibre.ebooks.txt.newlines import TxtNewlines, specified_newlines
from calibre.ebooks.txt.txtml import TXTMLizer
MAX_RECORD_SIZE = 4096
class Writer(FormatWriter):

    '''Writes a PalmDOC (TEXtREAd) PDB file from an OEB book.'''

    def __init__(self, opts, log):
        self.opts = opts
        self.log = log

    def write_content(self, oeb_book, out_stream, metadata=None):
        '''Serialize *oeb_book* as a compressed PalmDOC stream into *out_stream*.'''
        from calibre.ebooks.compression.palmdoc import compress_doc

        title = self.opts.title if self.opts.title else oeb_book.metadata.title[0].value if oeb_book.metadata.title != [] else _('Unknown')

        txt_records, txt_length = self._generate_text(oeb_book)
        header_record = self._header_record(txt_length, len(txt_records))

        section_lengths = [len(header_record)]
        self.log.info('Compressing data...')
        for i in range(len(txt_records)):
            self.log.debug('\tCompressing record %i' % i)
            txt_records[i] = compress_doc(txt_records[i])
            section_lengths.append(len(txt_records[i]))

        out_stream.seek(0)
        hb = PdbHeaderBuilder('TEXtREAd', title)
        hb.build_header(section_lengths, out_stream)

        for record in [header_record] + txt_records:
            out_stream.write(record)

    def _generate_text(self, oeb_book):
        '''Return (records, total_length): the book text encoded and chunked
        into records of at most MAX_RECORD_SIZE bytes.'''
        writer = TXTMLizer(self.log)
        txt = writer.extract_content(oeb_book, self.opts)

        self.log.debug('\tReplacing newlines with selected type...')
        txt = specified_newlines(TxtNewlines('windows').newline,
                txt).encode(self.opts.pdb_output_encoding, 'replace')

        txt_length = len(txt)

        # Chunk the text; stepping by MAX_RECORD_SIZE avoids the trailing
        # empty record that the old "// MAX_RECORD_SIZE + 1" loop produced
        # when the length was an exact multiple of the record size.
        txt_records = [txt[i:i + MAX_RECORD_SIZE]
                       for i in range(0, len(txt), MAX_RECORD_SIZE)] or [b'']

        return txt_records, txt_length

    def _header_record(self, txt_length, record_count):
        '''Build PalmDOC record 0 (the format header).'''
        record = b''

        record += struct.pack('>H', 2)                # [0:2], 2 = PalmDoc compression (1 would mean none).
        record += struct.pack('>H', 0)                # [2:4], Always 0.
        record += struct.pack('>L', txt_length)       # [4:8], Uncompressed length of the entire text of the book.
        record += struct.pack('>H', record_count)     # [8:10], Number of PDB records used for the text of the book.
        record += struct.pack('>H', MAX_RECORD_SIZE)  # [10-12], Maximum size of each record containing text, always 4096.
        record += struct.pack('>L', 0)                # [12-16], Current reading position, as an offset into the uncompressed text.

        return record
| 2,884 | Python | .py | 52 | 47.826923 | 139 | 0.639986 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,328 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/palmdoc/reader.py | '''
Read content from palmdoc pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import io
import struct
from calibre.ebooks.pdb.formatreader import FormatReader
class HeaderRecord:
    '''
    Record zero of a PalmDOC PDB file: the format header. It describes how
    the text is stored (compression) and how many text records follow; those
    records are then located via the sections listed in the file header.
    '''

    def __init__(self, raw):
        # Both fields are big-endian unsigned 16-bit integers.
        self.compression, = struct.unpack_from('>H', raw, 0)
        self.num_records, = struct.unpack_from('>H', raw, 8)
class Reader(FormatReader):

    def __init__(self, header, stream, log, options):
        self.stream = stream
        self.log = log
        self.options = options

        # Read every PDB section up front; record 0 is the format header.
        self.sections = [header.section_data(n) for n in range(header.num_sections)]

        self.header_record = HeaderRecord(self.section_data(0))

    def section_data(self, number):
        '''Return the raw bytes of PDB section *number*.'''
        return self.sections[number]

    def decompress_text(self, number):
        '''Return the decompressed text of section *number*, or b'' when the
        header declares an unrecognized compression type.'''
        compression = self.header_record.compression
        if compression == 1:
            # 1 = stored uncompressed.
            return self.section_data(number)
        if compression in (2, 258):
            from calibre.ebooks.compression.palmdoc import decompress_doc
            return decompress_doc(self.section_data(number))
        return b''

    def extract_content(self, output_dir):
        '''Decompress all text records and convert the result via the TXT
        input plugin.'''
        self.log.info('Decompressing text...')
        raw_txt = b''
        for number in range(1, self.header_record.num_records + 1):
            self.log.debug('\tDecompressing text section %i' % number)
            raw_txt += self.decompress_text(number)

        self.log.info('Converting text to OEB...')
        stream = io.BytesIO(raw_txt)

        from calibre.customize.ui import plugin_for_input_format
        txt_plugin = plugin_for_input_format('txt')
        # Fill in any conversion options the caller did not supply.
        for opt in txt_plugin.options:
            if not hasattr(self.options, opt.option.name):
                setattr(self.options, opt.option.name, opt.recommended_value)

        stream.seek(0)
        return txt_plugin.convert(stream, self.options, 'txt', self.log, {})
| 2,258 | Python | .py | 52 | 35.903846 | 88 | 0.655693 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,329 | writer.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ztxt/writer.py | '''
Writer content to ztxt pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import struct
import zlib
from calibre.ebooks.pdb.formatwriter import FormatWriter
from calibre.ebooks.pdb.header import PdbHeaderBuilder
from calibre.ebooks.txt.newlines import TxtNewlines, specified_newlines
from calibre.ebooks.txt.txtml import TXTMLizer
MAX_RECORD_SIZE = 8192
class Writer(FormatWriter):

    '''Writes a zTXT (zTXTGPlm) PDB file from an OEB book.'''

    def __init__(self, opts, log):
        self.opts = opts
        self.log = log

    def write_content(self, oeb_book, out_stream, metadata=None):
        '''Serialize *oeb_book* as zlib-compressed zTXT records into *out_stream*.'''
        title = self.opts.title if self.opts.title else oeb_book.metadata.title[0].value if oeb_book.metadata.title != [] else _('Unknown')

        txt_records, txt_length = self._generate_text(oeb_book)

        crc32 = 0
        section_lengths = []
        compressor = zlib.compressobj(9)
        self.log.info('Compressing data...')
        for i in range(len(txt_records)):
            self.log.debug('\tCompressing record %i' % i)
            txt_records[i] = compressor.compress(txt_records[i])
            # Z_FULL_FLUSH keeps every record independently decompressible,
            # matching the random-access flag written in the header.
            txt_records[i] = txt_records[i] + compressor.flush(zlib.Z_FULL_FLUSH)
            section_lengths.append(len(txt_records[i]))
            crc32 = zlib.crc32(txt_records[i], crc32) & 0xffffffff

        header_record = self._header_record(txt_length, len(txt_records), crc32)
        section_lengths.insert(0, len(header_record))

        out_stream.seek(0)
        hb = PdbHeaderBuilder('zTXTGPlm', title)
        hb.build_header(section_lengths, out_stream)

        for record in [header_record]+txt_records:
            out_stream.write(record)

    def _generate_text(self, oeb_book):
        '''Return (records, total_length): the book text encoded and chunked
        into records of at most MAX_RECORD_SIZE bytes.'''
        writer = TXTMLizer(self.log)
        txt = writer.extract_content(oeb_book, self.opts)

        self.log.debug('\tReplacing newlines with selected type...')
        txt = specified_newlines(TxtNewlines('windows').newline,
                txt).encode(self.opts.pdb_output_encoding, 'replace')

        txt_length = len(txt)

        # Chunk with a step of MAX_RECORD_SIZE. The old loop used float
        # division (len(txt) / MAX_RECORD_SIZE) which raises TypeError in
        # range() on Python 3, and produced a trailing empty record for
        # exact multiples of the record size.
        txt_records = [txt[i:i + MAX_RECORD_SIZE]
                       for i in range(0, len(txt), MAX_RECORD_SIZE)] or [b'']

        return txt_records, txt_length

    def _header_record(self, txt_length, record_count, crc32):
        '''Build zTXT record 0 (the format header).'''
        record = b''

        record += struct.pack('>H', 0x012c)           # [0:2], version. 0x012c = 1.44
        record += struct.pack('>H', record_count)     # [2:4], Number of PDB records used for the text of the book.
        record += struct.pack('>L', txt_length)       # [4:8], Uncompressed length of the entire text of the book.
        record += struct.pack('>H', MAX_RECORD_SIZE)  # [8:10], Maximum size of each record containing text
        record += struct.pack('>H', 0)                # [10:12], Number of bookmarks.
        record += struct.pack('>H', 0)                # [12:14], Bookmark record. 0 if there are no bookmarks.
        record += struct.pack('>H', 0)                # [14:16], Number of annotations.
        record += struct.pack('>H', 0)                # [16:18], Annotation record. 0 if there are no annotations.
        record += struct.pack('>B', 1)                # [18:19], Flags. Bitmask, 0x01 = Random Access. 0x02 = Non-Uniform text block size.
        record += struct.pack('>B', 0)                # [19:20], Reserved.
        record += struct.pack('>L', crc32)            # [20:24], crc32
        record += struct.pack('>LL', 0, 0)            # [24:32], padding

        return record
| 3,611 | Python | .py | 63 | 49.444444 | 140 | 0.611851 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,330 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ztxt/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
class zTXTError(Exception):
    '''Raised when a zTXT PDB file cannot be processed (e.g. an unsupported
    version or a compression mode other than random access).'''
    pass
| 163 | Python | .py | 5 | 30.4 | 60 | 0.698718 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,331 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ztxt/reader.py | '''
Read content from ztxt pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import io
import struct
import zlib
from calibre.ebooks.pdb.formatreader import FormatReader
from calibre.ebooks.pdb.ztxt import zTXTError
SUPPORTED_VERSION = (1, 40)
class HeaderRecord:
    '''
    Record zero of a zTXT PDB file: the format header. It carries the format
    version, the number and maximum size of the text records, the total
    uncompressed length, and the feature flags, all of which are needed to
    locate and decompress the text held in the remaining sections.
    '''

    def __init__(self, raw):
        # Version is encoded as 0xMMmm (major byte, minor byte).
        self.version, = struct.unpack_from('>H', raw, 0)
        self.num_records, = struct.unpack_from('>H', raw, 2)
        self.size, = struct.unpack_from('>L', raw, 4)
        self.record_size, = struct.unpack_from('>H', raw, 8)
        # Bit 0x01 = random access, bit 0x02 = non-uniform record size.
        self.flags, = struct.unpack_from('>B', raw, 18)
class Reader(FormatReader):

    def __init__(self, header, stream, log, options):
        self.stream = stream
        self.log = log
        self.options = options

        # Read every PDB section up front; record 0 is the format header.
        self.sections = []
        for i in range(header.num_sections):
            self.sections.append(header.section_data(i))

        self.header_record = HeaderRecord(self.section_data(0))

        # Version is encoded as 0xMMmm (major byte, minor byte).
        vmajor = (self.header_record.version & 0x0000FF00) >> 8
        vminor = self.header_record.version & 0x000000FF
        if vmajor < 1 or (vmajor == 1 and vminor < 40):
            # 1.40 itself is supported, so say "and newer" rather than "newer than".
            raise zTXTError('Unsupported ztxt version (%i.%i). Only versions %i.%i and newer are supported.' %
                (vmajor, vminor, SUPPORTED_VERSION[0], SUPPORTED_VERSION[1]))

        if (self.header_record.flags & 0x01) == 0:
            raise zTXTError('Only compression method 1 (random access) is supported')

        self.log.debug('Found ztxt version: %i.%i' % (vmajor, vminor))

        # Initialize the decompressor
        self.uncompressor = zlib.decompressobj()
        self.uncompressor.decompress(self.section_data(1))

    def section_data(self, number):
        '''Return the raw bytes of PDB section *number*.'''
        return self.sections[number]

    def decompress_text(self, number):
        '''Return the decompressed text of section *number*. Section 1 resets
        the decompressor so a sequential pass can be restarted.'''
        if number == 1:
            self.uncompressor = zlib.decompressobj()
        return self.uncompressor.decompress(self.section_data(number))

    def extract_content(self, output_dir):
        '''Decompress all text records and convert the result via the TXT
        input plugin.'''
        raw_txt = b''

        self.log.info('Decompressing text...')
        for i in range(1, self.header_record.num_records + 1):
            self.log.debug('\tDecompressing text section %i' % i)
            raw_txt += self.decompress_text(i)

        self.log.info('Converting text to OEB...')
        stream = io.BytesIO(raw_txt)

        from calibre.customize.ui import plugin_for_input_format
        txt_plugin = plugin_for_input_format('txt')
        # Fill in any conversion options the caller did not supply.
        for opt in txt_plugin.options:
            if not hasattr(self.options, opt.option.name):
                setattr(self.options, opt.option.name, opt.recommended_value)

        stream.seek(0)
        return txt_plugin.convert(stream, self.options, 'txt', self.log, {})
| 3,049 | Python | .py | 66 | 38.348485 | 111 | 0.646164 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,332 | inspector.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ereader/inspector.py | '''
Inspect the header of ereader files. This is primarily used for debugging.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import struct
import sys
from calibre.ebooks.pdb.ereader import EreaderError
from calibre.ebooks.pdb.header import PdbHeaderReader
def ereader_header_info(header):
    '''Print a dump of the eReader header record (record 0), dispatching on
    the two known header sizes (132 = Dropbook, 202 = Makebook).'''
    h0 = header.section_data(0)

    print('Header Size:     %s' % len(h0))

    if len(h0) == 132:
        print('Header Type:     Dropbook compatible')
        print('')
        ereader_header_info132(h0)
    elif len(h0) == 202:
        print('Header Type:     Makebook compatible')
        print('')
        ereader_header_info202(h0)
    else:
        # len(h0) is a byte count, not kilobytes; report it accurately.
        raise EreaderError('Size mismatch. eReader header record size %i bytes is not supported.' % len(h0))
def pdb_header_info(header):
    '''Print the identity, section count, and title from a PDB file header.'''
    print('PDB Header Info:')
    print('')
    for label, value in (('Identity', header.ident),
                         ('Total Sections', header.num_sections),
                         ('Title', header.title)):
        print('{}:        {}'.format(label, value))
    print('')
def ereader_header_info132(h0):
    '''Dump every 16-bit field of a 132-byte (Dropbook) eReader header.'''
    # Known field names, keyed by byte offset; unnamed offsets print bare.
    labels = {
        0: ' Version',
        6: ' Codepage',
        12: ' Non-Text offset',
        20: ' Image Count',
        24: ' Has Metadata?',
        28: ' Footnote Count',
        30: ' Sidebar Count',
        32: ' Bookmark Offset',
        34: ' MAGIC',
        40: ' Image Data Offset',
        44: ' Metadata Offset',
        48: ' Footnote Offset',
        50: ' Sidebar Offset',
        52: ' Last Data Offset',
    }

    print('Ereader Record 0 (Header) Info:')
    print('')
    for i in range(0, 131, 2):
        value, = struct.unpack('>H', h0[i:i + 2])
        print('%i-%i%s: %i' % (i, i + 2, labels.get(i, ''), value))
    print('')
def ereader_header_info202(h0):
    '''Dump every 16-bit field of a 202-byte (Makebook) eReader header.'''
    # Offsets whose contents are random filler in this header layout.
    garbage = (set(range(28, 98, 2)) | set(range(100, 110, 2)) |
               {2, 4, 6, 14, 16, 18, 20, 22, 114})
    # Known field names, keyed by byte offset; unnamed offsets print bare.
    labels = {0: ' Version', 8: ' Non-Text Offset'}

    print('Ereader Record 0 (Header) Info:')
    print('')
    for i in range(0, 202, 2):
        value, = struct.unpack('>H', h0[i:i + 2])
        tag = ' Garbage' if i in garbage else labels.get(i, '')
        print('%i-%i%s: %i' % (i, i + 2, tag, value))
    print('')
    print('* Garbage: Random values.')
    print('')
def section_lengths(header):
    '''Print the size of every PDB section, flagging records that exceed the
    65505-byte eReader record limit.'''
    print('Section Sizes')
    print('')

    for number in range(header.section_count()):
        size = len(header.section_data(number))
        flag = '<--- Over!' if size > 65505 else ''
        print('Section %i:   %i %s' % (number, size, flag))
def main(args=sys.argv):
    '''Inspect the PDB/eReader headers of the file named by ``args[1]``.

    Returns 0 on success, 1 when no input file was given.
    '''
    if len(args) < 2:
        print('Error: requires input file.')
        return 1

    # Use the args parameter (not sys.argv directly) so callers can pass
    # their own argument list; the with-block also closes the file, which
    # the old code leaked.
    with open(args[1], 'rb') as f:
        pheader = PdbHeaderReader(f)

        pdb_header_info(pheader)
        ereader_header_info(pheader)
        section_lengths(pheader)

    return 0
# Allow running this inspector directly: python inspector.py file.pdb
if __name__ == '__main__':
    sys.exit(main())
| 5,941 | Python | .py | 115 | 46.269565 | 105 | 0.511126 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,333 | writer.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ereader/writer.py | '''
Write content to ereader pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import io
import re
import struct
import zlib
from PIL import Image
from calibre.ebooks.pdb.formatwriter import FormatWriter
from calibre.ebooks.pdb.header import PdbHeaderBuilder
from calibre.ebooks.pml.pmlml import PMLMLizer
from polyglot.builtins import as_bytes
IDENTITY = 'PNRdPPrs'
# This is an arbitrary number that is small enough to work. The actual maximum
# record size is unknown.
MAX_RECORD_SIZE = 8192
class Writer(FormatWriter):

    '''Writes an eReader (Dropbook compatible) PDB file from an OEB book.'''

    def __init__(self, opts, log):
        self.opts = opts
        self.log = log

    def write_content(self, oeb_book, out_stream, metadata=None):
        '''Serialize *oeb_book* (text, indexes, images, metadata) into
        *out_stream* in the eReader PDB record layout.'''
        pmlmlizer = PMLMLizer(self.log)
        pml = str(pmlmlizer.extract_content(oeb_book, self.opts)).encode('cp1252', 'replace')

        text, text_sizes = self._text(pml)
        chapter_index = self._index_item(br'(?s)\\C(?P<val>[0-4])="(?P<text>.+?)"', pml)
        chapter_index += self._index_item(br'(?s)\\X(?P<val>[0-4])(?P<text>.+?)\\X[0-4]', pml)
        chapter_index += self._index_item(br'(?s)\\x(?P<text>.+?)\\x', pml)
        link_index = self._index_item(br'(?s)\\Q="(?P<text>.+?)"', pml)
        images = self._images(oeb_book.manifest, pmlmlizer.image_hrefs)
        meta = [self._metadata(metadata)]
        hr = [self._header_record(len(text), len(chapter_index), len(link_index), len(images))]

        # Record order as generated by Dropbook:
        #  1. eReader Header
        #  2. Compressed text
        #  3. Small font page index
        #  4. Large font page index
        #  5. Chapter index
        #  6. Links index
        #  7. Images
        #  8. (Extrapolation: there should be one more record type here though
        #     yet uncovered what it might be.)
        #  9. Metadata
        # 10. Sidebar records
        # 11. Footnote records
        # 12. Text block size record
        # 13. "MeTaInFo\x00" word record
        sections = hr + text + chapter_index + link_index + images + meta + [text_sizes] + [b'MeTaInFo\x00']

        # Image sections are (header, data) tuples; every other section is bytes.
        lengths = [len(i[0]) + len(i[1]) if isinstance(i, tuple) else len(i)
                   for i in sections]

        pdbHeaderBuilder = PdbHeaderBuilder(IDENTITY, meta[0].partition(b'\x00')[0])
        pdbHeaderBuilder.build_header(lengths, out_stream)

        for item in sections:
            if isinstance(item, tuple):
                out_stream.write(item[0])
                out_stream.write(item[1])
            else:
                out_stream.write(item)

    def _text(self, pml):
        '''Split *pml* into zlib-compressed records of at most MAX_RECORD_SIZE
        bytes each, breaking on the space closest to the limit when possible.

        Returns (compressed_records, size_table) where size_table packs the
        uncompressed length of each record as big-endian 16-bit values.
        '''
        pml_pages = []
        text_sizes = b''
        index = 0
        while index < len(pml):
            # Search for the last space inside [index, index + MAX_RECORD_SIZE).
            # The end bound must be relative to index: the old code passed the
            # absolute constant MAX_RECORD_SIZE, so word-boundary splitting
            # only worked for the first record and later records could exceed
            # the maximum size.
            split = pml.rfind(b' ', index, index + MAX_RECORD_SIZE)
            if split == -1:
                length = min(len(pml) - index, MAX_RECORD_SIZE)
            else:
                length = split - index
            if length == 0:
                # The only space found sits at index itself; take one byte so
                # the loop always advances.
                length = 1
            pml_pages.append(zlib.compress(pml[index:index + length]))
            text_sizes += struct.pack('>H', length)
            index += length

        return pml_pages, text_sizes

    def _index_item(self, regex, pml):
        '''Build index entries (4-byte offset + cleaned, NUL-terminated text)
        for every match of *regex* in *pml*.'''
        index = []
        for mo in re.finditer(regex, pml):
            item = b''
            if 'text' in mo.groupdict():
                item += struct.pack('>L', mo.start())
                text = mo.group('text')
                # Strip all PML tags from text
                text = re.sub(br'\\U[0-9a-z]{4}', b'', text)
                text = re.sub(br'\\a\d{3}', b'', text)
                text = re.sub(br'\\.', b'', text)
                # Add appropriate spacing to denote the various levels of headings
                if 'val' in mo.groupdict():
                    text = b'%s%s' % (b' ' * 4 * int(mo.group('val')), text)
                item += text
                item += b'\x00'
            if item:
                index.append(item)
        return index

    def _images(self, manifest, image_hrefs):
        '''
        Image format.

        0-4   : 'PNG '. There must be a space after PNG.
        4-36  : Image name. Must be exactly 32 bytes long. Pad with \x00 for names shorter than 32 bytes
        36-58 : Unknown.
        58-60 : Width.
        60-62 : Height.
        62-...: Raw image data in 8 bit PNG format.
        '''
        images = []
        from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES

        for item in manifest:
            if item.media_type in OEB_RASTER_IMAGES and item.href in image_hrefs.keys():
                try:
                    im = Image.open(io.BytesIO(item.data)).convert('P')
                    im.thumbnail((300, 300), Image.Resampling.LANCZOS)

                    data = io.BytesIO()
                    im.save(data, 'PNG')
                    data = data.getvalue()

                    href = as_bytes(image_hrefs[item.href])

                    header = b'PNG '
                    header += href.ljust(32, b'\x00')[:32]
                    header = header.ljust(58, b'\x00')
                    header += struct.pack('>HH', im.size[0], im.size[1])
                    header = header.ljust(62, b'\x00')

                    # Records larger than 65505 bytes cannot be stored; skip them.
                    if len(data) + len(header) < 65505:
                        images.append((header, data))
                except Exception as e:
                    self.log.error('Error: Could not include file %s because '
                        '%s.' % (item.href, e))

        return images

    def _metadata(self, metadata):
        '''
        Metadata takes the form:
        title\x00
        author\x00
        copyright\x00
        publisher\x00
        isbn\x00
        '''
        title = _('Unknown')
        author = _('Unknown')
        copyright = ''
        publisher = ''
        isbn = ''

        if metadata:
            if len(metadata.title) >= 1:
                title = metadata.title[0].value
            if len(metadata.creator) >= 1:
                from calibre.ebooks.metadata import authors_to_string
                author = authors_to_string([x.value for x in metadata.creator])
            if len(metadata.rights) >= 1:
                copyright = metadata.rights[0].value
            if len(metadata.publisher) >= 1:
                publisher = metadata.publisher[0].value

        return as_bytes(f'{title}\x00{author}\x00{copyright}\x00{publisher}\x00{isbn}\x00')

    def _header_record(self, text_count, chapter_count, link_count, image_count):
        '''
        Build record 0 (the eReader header).

        text_count  = the number of text pages
        image_count = the number of images
        '''
        compression = 10  # zlib compression.
        non_text_offset = text_count + 1

        chapter_offset = non_text_offset
        link_offset = chapter_offset + chapter_count

        # Offsets of absent section types point at the last data offset.
        if image_count > 0:
            image_data_offset = link_offset + link_count
            meta_data_offset = image_data_offset + image_count
            last_data_offset = meta_data_offset + 1
        else:
            meta_data_offset = link_offset + link_count
            last_data_offset = meta_data_offset + 1
            image_data_offset = last_data_offset

        if chapter_count == 0:
            chapter_offset = last_data_offset
        if link_count == 0:
            link_offset = last_data_offset

        record = b''

        record += struct.pack('>H', compression)        # [0:2]   # Compression. Specifies compression and drm. 2 = palmdoc, 10 = zlib. 260 and 272 = DRM
        record += struct.pack('>H', 0)                  # [2:4]   # Unknown.
        record += struct.pack('>H', 0)                  # [4:6]   # Unknown.
        record += struct.pack('>H', 25152)              # [6:8]   # 25152 is MAGIC. Somehow represents the cp1252 encoding of the text
        record += struct.pack('>H', 0)                  # [8:10]  # Number of small font pages. 0 if page index is not built.
        record += struct.pack('>H', 0)                  # [10:12] # Number of large font pages. 0 if page index is not built.
        record += struct.pack('>H', non_text_offset)    # [12:14] # Non-Text record start.
        record += struct.pack('>H', chapter_count)      # [14:16] # Number of chapter index records.
        record += struct.pack('>H', 0)                  # [16:18] # Number of small font page index records.
        record += struct.pack('>H', 0)                  # [18:20] # Number of large font page index records.
        record += struct.pack('>H', image_count)        # [20:22] # Number of images.
        record += struct.pack('>H', link_count)         # [22:24] # Number of links.
        record += struct.pack('>H', 1)                  # [24:26] # 1 if has metadata, 0 if not.
        record += struct.pack('>H', 0)                  # [26:28] # Unknown.
        record += struct.pack('>H', 0)                  # [28:30] # Number of Footnotes.
        record += struct.pack('>H', 0)                  # [30:32] # Number of Sidebars.
        record += struct.pack('>H', chapter_offset)     # [32:34] # Chapter index offset.
        record += struct.pack('>H', 2560)               # [34:36] # 2560 is MAGIC.
        record += struct.pack('>H', last_data_offset)   # [36:38] # Small font page offset. This will be the last data offset if there are none.
        record += struct.pack('>H', last_data_offset)   # [38:40] # Large font page offset. This will be the last data offset if there are none.
        record += struct.pack('>H', image_data_offset)  # [40:42] # Image offset. This will be the last data offset if there are none.
        record += struct.pack('>H', link_offset)        # [42:44] # Links offset. This will be the last data offset if there are none.
        record += struct.pack('>H', meta_data_offset)   # [44:46] # Metadata offset. This will be the last data offset if there are none.
        record += struct.pack('>H', 0)                  # [46:48] # Unknown.
        record += struct.pack('>H', last_data_offset)   # [48:50] # Footnote offset. This will be the last data offset if there are none.
        record += struct.pack('>H', last_data_offset)   # [50:52] # Sidebar offset. This will be the last data offset if there are none.
        record += struct.pack('>H', last_data_offset)   # [52:54] # Last data offset.

        for i in range(54, 132, 2):
            record += struct.pack('>H', 0)              # [54:132]

        return record
| 10,729 | Python | .py | 209 | 40.263158 | 158 | 0.539254 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,334 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ereader/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
class EreaderError(Exception):
    '''Raised when an eReader PDB file cannot be processed (e.g. an
    unrecognized header record size).'''
    pass
def image_name(name, taken_names=()):
    '''Return *name* reduced to its basename, shortened when over-long,
    disambiguated against *taken_names*, and NUL-padded/truncated to exactly
    32 characters (the eReader image-name field width).'''
    name = os.path.basename(name)

    # Shrink over-long names: keep the first 10 characters, drop enough of
    # the middle, and tack on a .png extension.
    if len(name) > 32:
        overflow = len(name) - 32
        name = name[:10] + name[10 + overflow:] + '.png'

    # Disambiguate against names already in use by appending a counter
    # before the extension.
    stem, ext = os.path.splitext(name)
    counter = 0
    while name in taken_names:
        counter += 1
        name = f'{stem}{counter}{ext}'

    return name.ljust(32, '\x00')[:32]
| 576 | Python | .py | 19 | 24.894737 | 60 | 0.595628 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,335 | reader132.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ereader/reader132.py | '''
Read content from ereader pdb file with a 132 byte header created by Dropbook.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
import re
import struct
import zlib
from calibre import CurrentDir
from calibre.ebooks import DRMError
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.pdb.ereader import EreaderError
from calibre.ebooks.pdb.formatreader import FormatReader
class HeaderRecord:
    '''
    Decoded form of the first PDB section of a 132-byte-header eReader
    file.  Every field is a big-endian 16-bit word holding either a count
    or a section index; the offsets locate the text, image, link, footnote
    and sidebar sections within the file.
    '''

    def __init__(self, raw):
        def word(offset):
            # One big-endian unsigned 16-bit value at the given byte offset.
            return struct.unpack_from('>H', raw, offset)[0]

        self.compression = word(0)
        self.non_text_offset = word(12)
        self.chapter_count = word(14)
        self.image_count = word(20)
        self.link_count = word(22)
        self.has_metadata = word(24)
        self.footnote_count = word(28)
        self.sidebar_count = word(30)
        self.chapter_offset = word(32)
        self.small_font_page_offset = word(36)
        self.large_font_page_offset = word(38)
        self.image_data_offset = word(40)
        self.link_offset = word(42)
        self.metadata_offset = word(44)
        self.footnote_offset = word(48)
        self.sidebar_offset = word(50)
        self.last_data_offset = word(52)

        # Derived conveniences used by Reader132: section 0 is this header,
        # so text pages occupy sections 1 .. non_text_offset - 1.
        self.num_text_pages = self.non_text_offset - 1
        self.num_image_pages = self.metadata_offset - self.image_data_offset
class Reader132(FormatReader):
    '''
    Reader for eReader PDB files whose first section is 132 bytes long
    (files produced by Dropbook).  Extracts the PML text, images,
    footnotes and sidebars and converts them to HTML plus an OPF file.
    '''

    def __init__(self, header, stream, log, options):
        self.log = log
        self.encoding = options.input_encoding

        self.log.debug('132 byte header version found.')

        # Cache every PDB section up front; all later access is by index.
        self.sections = []
        for i in range(header.num_sections):
            self.sections.append(header.section_data(i))

        self.header_record = HeaderRecord(self.section_data(0))

        # 2 = palmdoc, 10 = zlib (see decompress_text); 260/272 indicate
        # DRM-encrypted books, which cannot be read.
        if self.header_record.compression not in (2, 10):
            if self.header_record.compression in (260, 272):
                raise DRMError('eReader DRM is not supported.')
            else:
                raise EreaderError('Unknown book compression %i.' % self.header_record.compression)

        from calibre.ebooks.metadata.pdb import get_metadata
        self.mi = get_metadata(stream, False)

    def section_data(self, number):
        # Raw bytes of the given PDB section.
        return self.sections[number]

    def decompress_text(self, number):
        # Decompress a text section to str.  The file spec says text is
        # cp1252; a user-supplied input encoding takes precedence.
        if self.header_record.compression == 2:
            from calibre.ebooks.compression.palmdoc import decompress_doc
            return decompress_doc(self.section_data(number)).decode('cp1252' if self.encoding is None else self.encoding, 'replace')
        if self.header_record.compression == 10:
            return zlib.decompress(self.section_data(number)).decode('cp1252' if self.encoding is None else self.encoding, 'replace')

    def get_image(self, number):
        # Returns (name, data); ('empty', b'') when number falls outside the
        # image sections.
        if number < self.header_record.image_data_offset or number > self.header_record.image_data_offset + self.header_record.num_image_pages - 1:
            return 'empty', b''
        data = self.section_data(number)
        # Image record layout: 32-byte NUL-padded name at byte 4, image data
        # starting at byte 62.
        name = data[4:4 + 32].strip(b'\x00').decode(self.encoding or 'cp1252')
        img = data[62:]
        return name, img

    def get_text_page(self, number):
        '''
        Only palmdoc and zlib compressed are supported. The text is
        assumed to be encoded as Windows-1252. The encoding is part of
        the eReader file spec and should always be this encoding.
        '''
        if not (1 <= number <= self.header_record.num_text_pages):
            return ''

        return self.decompress_text(number)

    def extract_content(self, output_dir):
        '''Write index.html, images/ and metadata.opf into output_dir and
        return the path of the generated OPF file.'''
        from calibre.ebooks.pml.pmlconverter import PML_HTMLizer, footnote_to_html, sidebar_to_html

        output_dir = os.path.abspath(output_dir)

        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        title = self.mi.title
        if not isinstance(title, str):
            title = title.decode('utf-8', 'replace')

        html = '<html><head><title>%s</title></head><body>' % title

        # Concatenate all text pages into one PML string, then HTMLize once.
        pml = ''
        for i in range(1, self.header_record.num_text_pages + 1):
            self.log.debug('Extracting text page %i' % i)
            pml += self.get_text_page(i)

        hizer = PML_HTMLizer()
        html += hizer.parse_pml(pml, 'index.html')
        toc = hizer.get_toc()

        if self.header_record.footnote_count > 0:
            html += '<br /><h1>%s</h1>' % _('Footnotes')
            # The footnote_offset section holds the NUL-separated list of
            # footnote ids; the notes themselves follow in later sections.
            footnoteids = re.findall(
                '\\w+(?=\x00)', self.section_data(self.header_record.footnote_offset).decode('cp1252' if self.encoding is None else self.encoding))
            for fid, i in enumerate(range(self.header_record.footnote_offset + 1, self.header_record.footnote_offset + self.header_record.footnote_count)):
                self.log.debug('Extracting footnote page %i' % i)
                if fid < len(footnoteids):
                    fid = footnoteids[fid]
                else:
                    fid = ''

                html += footnote_to_html(fid, self.decompress_text(i))

        if self.header_record.sidebar_count > 0:
            html += '<br /><h1>%s</h1>' % _('Sidebar')
            # Same layout as footnotes: an id-list section followed by the
            # sidebar contents.
            sidebarids = re.findall(
                '\\w+(?=\x00)', self.section_data(self.header_record.sidebar_offset).decode('cp1252' if self.encoding is None else self.encoding))
            for sid, i in enumerate(range(self.header_record.sidebar_offset + 1, self.header_record.sidebar_offset + self.header_record.sidebar_count)):
                self.log.debug('Extracting sidebar page %i' % i)
                if sid < len(sidebarids):
                    sid = sidebarids[sid]
                else:
                    sid = ''

                html += sidebar_to_html(sid, self.decompress_text(i))

        html += '</body></html>'

        with CurrentDir(output_dir):
            with open('index.html', 'wb') as index:
                self.log.debug('Writing text to index.html')
                index.write(html.encode('utf-8'))

        if not os.path.exists(os.path.join(output_dir, 'images/')):
            os.makedirs(os.path.join(output_dir, 'images/'))
        images = []
        with CurrentDir(os.path.join(output_dir, 'images/')):
            for i in range(0, self.header_record.num_image_pages):
                name, img = self.get_image(self.header_record.image_data_offset + i)
                images.append(name)
                with open(name, 'wb') as imgf:
                    self.log.debug('Writing image %s to images/' % name)
                    imgf.write(img)

        opf_path = self.create_opf(output_dir, images, toc)

        return opf_path

    def create_opf(self, output_dir, images, toc):
        # Build metadata.opf and toc.ncx referencing index.html + the images.
        with CurrentDir(output_dir):
            if 'cover.png' in images:
                self.mi.cover = os.path.join('images', 'cover.png')

            opf = OPFCreator(output_dir, self.mi)

            manifest = [('index.html', None)]

            for i in images:
                manifest.append((os.path.join('images', i), None))

            opf.create_manifest(manifest)
            opf.create_spine(['index.html'])
            opf.set_toc(toc)
            with open('metadata.opf', 'wb') as opffile:
                with open('toc.ncx', 'wb') as tocfile:
                    opf.render(opffile, tocfile, 'toc.ncx')

        return os.path.join(output_dir, 'metadata.opf')

    def dump_pml(self):
        '''
        This is primarily used for debugging and 3rd party tools to
        get the plm markup that comprises the text in the file.
        '''
        pml = ''

        for i in range(1, self.header_record.num_text_pages + 1):
            pml += self.get_text_page(i)

        return pml

    def dump_images(self, output_dir):
        '''
        This is primarily used for debugging and 3rd party tools to
        get the images in the file.
        '''
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with CurrentDir(output_dir):
            for i in range(0, self.header_record.num_image_pages):
                name, img = self.get_image(self.header_record.image_data_offset + i)
                with open(name, 'wb') as imgf:
                    imgf.write(img)
| 8,809 | Python | .py | 173 | 40.514451 | 155 | 0.608169 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,336 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ereader/reader.py | '''
Read content from ereader pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.pdb.ereader import EreaderError
from calibre.ebooks.pdb.ereader.reader132 import Reader132
from calibre.ebooks.pdb.ereader.reader202 import Reader202
from calibre.ebooks.pdb.formatreader import FormatReader
class Reader(FormatReader):
    '''
    Dispatching reader for eReader PDB files.

    The size of the first PDB section (the header record) identifies the
    flavour of the file: 132 bytes for Dropbook files, 116 or 202 bytes
    for Makebook files.  All work is delegated to the matching
    flavour-specific reader.
    '''

    def __init__(self, header, stream, log, options):
        record0_size = len(header.section_data(0))

        if record0_size == 132:
            self.reader = Reader132(header, stream, log, options)
        elif record0_size in (116, 202):
            self.reader = Reader202(header, stream, log, options)
        else:
            # record0_size is a byte count, not kilobytes; report it as such.
            raise EreaderError('Size mismatch. eReader header record size %s bytes is not supported.' % record0_size)

    def extract_content(self, output_dir):
        # Write HTML + images + OPF into output_dir; returns the OPF path.
        return self.reader.extract_content(output_dir)

    def dump_pml(self):
        # Raw PML markup of the book, mainly for debugging/3rd-party tools.
        return self.reader.dump_pml()

    def dump_images(self, out_dir):
        # Write all embedded images into out_dir, mainly for debugging.
        return self.reader.dump_images(out_dir)
| 1,115 | Python | .py | 25 | 38.92 | 114 | 0.704903 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,337 | reader202.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/ereader/reader202.py | '''
Read content from ereader pdb file with a 116 and 202 byte header created by Makebook.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
import struct
from calibre import CurrentDir
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.pdb.ereader import EreaderError
from calibre.ebooks.pdb.formatreader import FormatReader
from polyglot.builtins import as_unicode
class HeaderRecord:
    '''
    Decoded form of the first PDB section of a 116/202-byte-header eReader
    file (Makebook).  Both stored fields are big-endian 16-bit words.
    '''

    def __init__(self, raw):
        self.version = struct.unpack_from('>H', raw, 0)[0]
        self.non_text_offset = struct.unpack_from('>H', raw, 8)[0]

        # Section 0 is this header, so text pages occupy sections
        # 1 .. non_text_offset - 1.
        self.num_text_pages = self.non_text_offset - 1
class Reader202(FormatReader):
    '''
    Reader for eReader PDB files whose first section is 116 or 202 bytes
    long (files produced by Makebook).  Text sections are palmdoc
    compressed and XORed with 0xA5; image sections follow the text.
    '''

    def __init__(self, header, stream, log, options):
        self.log = log
        self.encoding = options.input_encoding

        self.log.debug('202 byte header version found.')

        # Cache every PDB section up front; all later access is by index.
        self.sections = []
        for i in range(header.num_sections):
            self.sections.append(header.section_data(i))

        self.header_record = HeaderRecord(self.section_data(0))

        if self.header_record.version not in (2, 4):
            raise EreaderError('Unknown book version %i.' % self.header_record.version)

        from calibre.ebooks.metadata.pdb import get_metadata
        self.mi = get_metadata(stream, False)

    def section_data(self, number):
        # Raw bytes of the given PDB section.
        return self.sections[number]

    def decompress_text(self, number):
        from calibre.ebooks.compression.palmdoc import decompress_doc
        data = bytearray(self.section_data(number))
        # Text sections are obfuscated by XORing every byte with 0xA5.
        data = bytes(bytearray(x ^ 0xA5 for x in data))
        return decompress_doc(data).decode(self.encoding or 'cp1252', 'replace')

    def get_image(self, number):
        # Returns (name, data) for a PNG image section, or (None, None) when
        # the section does not hold an image record.
        name = None
        img = None

        data = self.section_data(number)
        if data.startswith(b'PNG'):
            # Image record layout: 32-byte NUL-padded name at byte 4, image
            # data starting at byte 62.
            name = data[4:4 + 32].strip(b'\x00')
            img = data[62:]

        return name, img

    def get_text_page(self, number):
        '''
        Only palmdoc compression is supported. The text is xored with 0xA5 and
        assumed to be encoded as Windows-1252. The encoding is part of
        the eReader file spec and should always be this encoding.
        '''
        if not (1 <= number <= self.header_record.num_text_pages):
            return ''

        return self.decompress_text(number)

    def extract_content(self, output_dir):
        '''Write index.html, images/ and metadata.opf into output_dir and
        return the path of the generated OPF file.'''
        from calibre.ebooks.pml.pmlconverter import pml_to_html

        output_dir = os.path.abspath(output_dir)

        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        pml = ''
        for i in range(1, self.header_record.num_text_pages + 1):
            self.log.debug('Extracting text page %i' % i)
            pml += self.get_text_page(i)

        title = self.mi.title
        if not isinstance(title, str):
            title = title.decode('utf-8', 'replace')

        html = '<html><head><title>%s</title></head><body>%s</body></html>' % \
            (title, pml_to_html(pml))

        with CurrentDir(output_dir):
            with open('index.html', 'wb') as index:
                self.log.debug('Writing text to index.html')
                index.write(html.encode('utf-8'))

        if not os.path.exists(os.path.join(output_dir, 'images/')):
            os.makedirs(os.path.join(output_dir, 'images/'))
        images = []
        with CurrentDir(os.path.join(output_dir, 'images/')):
            # All sections after the text may hold images; get_image()
            # filters out anything that is not a PNG record.
            for i in range(self.header_record.non_text_offset, len(self.sections)):
                name, img = self.get_image(i)
                if name:
                    name = as_unicode(name)
                    images.append(name)
                    with open(name, 'wb') as imgf:
                        self.log.debug('Writing image %s to images/' % name)
                        imgf.write(img)

        opf_path = self.create_opf(output_dir, images)

        return opf_path

    def create_opf(self, output_dir, images):
        # Build metadata.opf referencing index.html and the extracted images.
        with CurrentDir(output_dir):
            opf = OPFCreator(output_dir, self.mi)

            manifest = [('index.html', None)]

            for i in images:
                manifest.append((os.path.join('images/', i), None))

            opf.create_manifest(manifest)
            opf.create_spine(['index.html'])
            with open('metadata.opf', 'wb') as opffile:
                opf.render(opffile)

        return os.path.join(output_dir, 'metadata.opf')

    def dump_pml(self):
        '''
        This is primarily used for debugging and 3rd party tools to
        get the plm markup that comprises the text in the file.
        '''
        pml = ''

        for i in range(1, self.header_record.num_text_pages + 1):
            pml += self.get_text_page(i)

        return pml

    def dump_images(self, output_dir):
        '''
        This is primarily used for debugging and 3rd party tools to
        get the images in the file.
        '''
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with CurrentDir(output_dir):
            # BUG FIX: the 202-byte header record has no image_data_offset /
            # num_image_pages fields (those exist only in the 132-byte
            # variant), so the previous implementation always raised
            # AttributeError.  Image sections are simply all sections after
            # the text, exactly as in extract_content().
            for i in range(self.header_record.non_text_offset, len(self.sections)):
                name, img = self.get_image(i)
                if name:
                    with open(as_unicode(name), 'wb') as imgf:
                        imgf.write(img)
| 5,578 | Python | .py | 125 | 34.912 | 87 | 0.61105 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,338 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/haodoo/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 149 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,339 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/pdb/haodoo/reader.py | '''
Read content from Haodoo.net pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2012, Kan-Ru Chen <kanru@kanru.info>'
__docformat__ = 'restructuredtext en'
import os
import struct
from calibre import prepare_string_for_xml
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.pdb.formatreader import FormatReader
from calibre.ebooks.txt.processor import HTML_TEMPLATE, opf_writer
# PDB identifiers distinguishing the two Haodoo book flavours: BPDB books
# are Big5 (cp950) encoded, UPDB books are UTF-16-LE encoded (see Reader).
BPDB_IDENT = 'BOOKMTIT'
UPDB_IDENT = 'BOOKMTIU'
# Vertical/presentation-form CJK punctuation -> conventional horizontal
# equivalents.  All keys are single characters; no replacement value
# contains a table key, so the mapping can be applied in a single pass.
punct_table = {
    "︵": "(",
    "︶": ")",
    "︷": "{",
    "︸": "}",
    "︹": "〔",
    "︺": "〕",
    "︻": "【",
    "︼": "】",
    "︗": "〖",
    "︘": "〗",
    "﹇": "[]",
    "﹈": "[]",
    "︽": "《",
    "︾": "》",
    "︿": "〈",
    "﹀": "〉",
    "﹁": "「",
    "﹂": "」",
    "﹃": "『",
    "﹄": "』",
    "|": "—",
    "︙": "…",
    "ⸯ": "~",
    "│": "…",
    "¦": "…",
    " ": " ",
    }

# Precomputed single-pass translation table equivalent to applying every
# punct_table replacement in turn.
_PUNCT_TRANS = str.maketrans(punct_table)


def fix_punct(line):
    '''Replace vertical/presentation-form punctuation in *line* with the
    conventional characters listed in punct_table.'''
    return line.translate(_PUNCT_TRANS)
class LegacyHeaderRecord:
    '''
    Header record of a BPDB (Big5/cp950 encoded) Haodoo book: an
    ESC-separated sequence of title, record count and chapter titles.
    '''

    def __init__(self, raw):
        fields = raw.lstrip().replace(b'\x1b\x1b\x1b', b'\x1b').split(b'\x1b')
        self.title = fix_punct(fields[0].decode('cp950', 'replace'))
        self.num_records = int(fields[1])
        # Chapter titles are NUL padded; normalise punctuation on each.
        self.chapter_titles = [
            fix_punct(f.decode('cp950', 'replace').rstrip('\x00'))
            for f in fields[2:]]
class UnicodeHeaderRecord:
    '''
    Header record of a UPDB (UTF-16-LE encoded) Haodoo book: title, record
    count and a CRLF-separated chapter-title list, separated by ESC NUL.
    '''

    def __init__(self, raw):
        fields = raw.lstrip().replace(b'\x1b\x00\x1b\x00\x1b\x00',
                b'\x1b\x00').split(b'\x1b\x00')
        self.title = fix_punct(fields[0].decode('utf_16_le', 'ignore'))
        self.num_records = int(fields[1])
        # The third field packs every chapter title separated by UTF-16-LE
        # CRLF; titles are NUL padded.
        self.chapter_titles = [
            fix_punct(t.decode('utf_16_le', 'replace').rstrip('\x00'))
            for t in fields[2].split(b'\r\x00\n\x00')]
class Reader(FormatReader):
    '''
    Reader for Haodoo.net PDB books.  Decodes the header (legacy cp950 or
    Unicode UTF-16-LE, chosen by the PDB ident) and converts the text
    sections into a single HTML file plus an OPF.
    '''

    def __init__(self, header, stream, log, options):
        self.stream = stream
        self.log = log

        # Cache every PDB section up front; all later access is by index.
        self.sections = []
        for i in range(header.num_sections):
            self.sections.append(header.section_data(i))

        if header.ident == BPDB_IDENT:
            self.header_record = LegacyHeaderRecord(self.section_data(0))
            self.encoding = 'cp950'
        else:
            self.header_record = UnicodeHeaderRecord(self.section_data(0))
            self.encoding = 'utf_16_le'

    def author(self):
        # A signed byte at file offset 35 marks the format version; version 2
        # files carry a NUL-padded author name in the first 35 bytes.
        self.stream.seek(35)
        version = struct.unpack('>b', self.stream.read(1))[0]
        if version == 2:
            self.stream.seek(0)
            author = self.stream.read(35).rstrip(b'\x00').decode(self.encoding, 'replace')
            return author
        else:
            return 'Unknown'

    def get_metadata(self):
        # Minimal metadata: title from the header record, author from the
        # file prefix; all Haodoo books are Traditional Chinese.
        mi = MetaInformation(self.header_record.title,
            [self.author()])
        mi.language = 'zh-tw'

        return mi

    def section_data(self, number):
        # Raw bytes of the given PDB section.
        return self.sections[number]

    def decompress_text(self, number):
        # Sections are stored as plain (uncompressed) NUL-padded text.
        return self.section_data(number).decode(self.encoding,
            'replace').rstrip('\x00')

    def extract_content(self, output_dir):
        '''Write index.html and metadata.opf into output_dir and return the
        path of the generated OPF file.'''
        txt = ''

        self.log.info('Decompressing text...')
        for i in range(1, self.header_record.num_records + 1):
            self.log.debug('\tDecompressing text section %i' % i)
            title = self.header_record.chapter_titles[i-1]
            lines = []
            title_added = False
            for line in self.decompress_text(i).splitlines():
                line = fix_punct(line)
                line = line.strip()
                # The first line containing the chapter title becomes the
                # heading; every line (heading included) is appended below.
                if not title_added and title in line:
                    line = '<h1 class="chapter">' + line + '</h1>\n'
                    title_added = True
                else:
                    line = prepare_string_for_xml(line)
                lines.append('<p>%s</p>' % line)
            if not title_added:
                # No line matched the chapter title; synthesise a heading.
                lines.insert(0, '<h1 class="chapter">' + title + '</h1>\n')
            txt += '\n'.join(lines)

        self.log.info('Converting text to OEB...')
        html = HTML_TEMPLATE % (self.header_record.title, txt)

        with open(os.path.join(output_dir, 'index.html'), 'wb') as index:
            index.write(html.encode('utf-8'))

        mi = self.get_metadata()
        manifest = [('index.html', None)]
        spine = ['index.html']

        opf_writer(output_dir, 'metadata.opf', manifest, spine, mi)

        return os.path.join(output_dir, 'metadata.opf')
| 4,593 | Python | .py | 124 | 27.717742 | 90 | 0.541071 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,340 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/azw4/reader.py | '''
Read content from azw4 file.
azw4 is essentially a PDF stuffed into a MOBI container.
'''
__license__ = 'GPL v3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
import re
from calibre.ebooks.pdb.formatreader import FormatReader
def unwrap(stream, output_path):
    '''
    Extract the PDF embedded in an AZW4 container.

    Reads the whole of *stream*, locates the first '%PDF' ... '%%EOF' span
    and writes it to *output_path*.  Raises ValueError when no embedded
    PDF can be found.
    '''
    container = stream.read()
    match = re.search(br'%PDF.+%%EOF', container, flags=re.DOTALL)
    if match is None:
        raise ValueError('No embedded PDF found in AZW4 file')
    with open(output_path, 'wb') as out:
        out.write(match.group())
class Reader(FormatReader):
    '''
    Reader for AZW4 files.  An AZW4 is essentially a PDF stuffed into a
    MOBI container; this extracts the PDF and hands it to calibre's PDF
    input plugin for conversion.
    '''

    def __init__(self, header, stream, log, options):
        self.header = header
        self.stream = stream
        self.log = log
        self.options = options

    def extract_content(self, output_dir):
        self.log.info('Extracting PDF from AZW4 Container...')

        self.stream.seek(0)
        raw_data = self.stream.read()

        # Pull out the first %PDF ... %%EOF span; an empty file is written
        # if none is found (the PDF plugin will then fail downstream).
        data = b''
        mo = re.search(br'%PDF.+%%EOF', raw_data, flags=re.DOTALL)
        if mo:
            data = mo.group()

        pdf_n = os.path.join(os.getcwd(), 'tmp.pdf')
        with open(pdf_n, 'wb') as pdf:
            pdf.write(data)
        from calibre.customize.ui import plugin_for_input_format
        pdf_plugin = plugin_for_input_format('pdf')
        # Fill in any PDF-plugin options the caller did not supply with the
        # plugin's recommended defaults.
        for opt in pdf_plugin.options:
            if not hasattr(self.options, opt.option.name):
                setattr(self.options, opt.option.name, opt.recommended_value)

        return pdf_plugin.convert(open(pdf_n, 'rb'), self.options, 'pdf', self.log, {})
| 1,579 | Python | .py | 40 | 32.6 | 87 | 0.633858 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,341 | pmlml.py | kovidgoyal_calibre/src/calibre/ebooks/pml/pmlml.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Transform OEB content into PML markup
'''
import re
from lxml import etree
from calibre.ebooks.pdb.ereader import image_name
from calibre.ebooks.pml import unipmlcode
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import string_or_bytes
# XHTML tag -> PML code emitted for that tag (wrapped as \code ... \code).
TAG_MAP = {
    'b' : 'B',
    'strong' : 'B',
    'i' : 'i',
    'small' : 'k',
    'sub' : 'Sb',
    'sup' : 'Sp',
    'big' : 'l',
    'del' : 'o',
    'h1' : 'x',
    'h2' : 'X0',
    'h3' : 'X1',
    'h4' : 'X2',
    'h5' : 'X3',
    'h6' : 'X4',
    '!--' : 'v',
}

# (CSS property, {value -> PML code}) pairs checked against each element's
# computed style in PMLMLizer.dump_text.
STYLES = [
    ('font-weight', {'bold' : 'B', 'bolder' : 'B'}),
    ('font-style', {'italic' : 'i'}),
    ('text-decoration', {'underline' : 'u'}),
    ('text-align', {'right' : 'r', 'center' : 'c'}),
]

# Tags / CSS display values that start a new text block (blank line in PML).
BLOCK_TAGS = [
    'p',
    'div',
]

BLOCK_STYLES = [
    'block',
]

# Tags turned into PML links (\q codes).
LINK_TAGS = [
    'a',
]

# Tags turned into PML image references (\m codes).
IMAGE_TAGS = [
    'img',
]

# Tags once separated by blank lines; only referenced by commented-out code
# at the end of PMLMLizer.dump_text.
SEPARATE_TAGS = [
    'h1',
    'h2',
    'h3',
    'h4',
    'h5',
    'h6',
    'hr',
    'img',
    'li',
    'tr',
]
class PMLMLizer:
    '''
    Serialize an OEB book into PML (Palm Markup Language) markup.

    Image and link targets are collected in href maps while the spine is
    walked, so PML \\m, \\q and \\Q codes can refer to them via generated
    names; \\Cn chapter markers are emitted from a flattened TOC.
    '''

    def __init__(self, log):
        self.log = log
        # Maps image href in the OEB book -> generated PML image name.
        self.image_hrefs = {}
        # Maps '<href>#<fragment>' -> generated PML anchor name.
        self.link_hrefs = {}

    def extract_content(self, oeb_book, opts):
        '''Convert *oeb_book* to a single PML string using options *opts*.'''
        self.log.info('Converting XHTML to PML markup...')
        self.oeb_book = oeb_book
        self.opts = opts

        # This is used for adding \CX tags chapter markers. This is separate
        # from the optional inline toc.
        self.toc = {}
        self.create_flat_toc(self.oeb_book.toc)

        return self.pmlmlize_spine()

    def create_flat_toc(self, nodes, level=0):
        # Flatten the nested TOC into self.toc[href][fragment] = (title, depth)
        # and pre-register an anchor for every TOC target.
        for item in nodes:
            href, mid, id = item.href.partition('#')
            self.get_anchor_id(href, id)
            if not self.toc.get(href, None):
                self.toc[href] = {}
            self.toc[href][id] = (item.title, level)
            self.create_flat_toc(item.nodes, level + 1)

    def pmlmlize_spine(self):
        # Reset per-run state, render cover + spine, then post-process.
        self.image_hrefs = {}
        self.link_hrefs = {}
        output = ['']
        output.append(self.get_cover_page())
        output.append(self.get_text())
        output = ''.join(output)
        output = self.clean_text(output)
        return output

    def get_cover_page(self):
        # Emit the cover image reference and, if the title page is not part
        # of the spine, render it here so it is not lost.
        from calibre.ebooks.oeb.base import XHTML
        from calibre.ebooks.oeb.stylizer import Stylizer
        output = ''
        if 'cover' in self.oeb_book.guide:
            output += '\\m="cover.png"\n'
            self.image_hrefs[self.oeb_book.guide['cover'].href] = 'cover.png'
        if 'titlepage' in self.oeb_book.guide:
            self.log.debug('Generating title page...')
            href = self.oeb_book.guide['titlepage'].href
            item = self.oeb_book.manifest.hrefs[href]
            if item.spine_position is None:
                stylizer = Stylizer(item.data, item.href, self.oeb_book,
                        self.opts, self.opts.output_profile)
                output += ''.join(self.dump_text(item.data.find(XHTML('body')), stylizer, item))
        return output

    def get_text(self):
        # Render every spine item's <body> into PML, preceded by an anchor
        # for the page itself.
        from calibre.ebooks.oeb.base import XHTML
        from calibre.ebooks.oeb.stylizer import Stylizer
        text = ['']
        for item in self.oeb_book.spine:
            self.log.debug('Converting %s to PML markup...' % item.href)
            content = etree.tostring(item.data, encoding='unicode')
            content = self.prepare_text(content)
            content = safe_xml_fromstring(content)
            stylizer = Stylizer(content, item.href, self.oeb_book, self.opts, self.opts.output_profile)
            text.append(self.add_page_anchor(item))
            text += self.dump_text(content.find(XHTML('body')), stylizer, item)
        return ''.join(text)

    def add_page_anchor(self, page):
        # A page anchor is simply an anchor with an empty fragment id.
        return self.get_anchor(page, '')

    def get_anchor_id(self, href, aid):
        # Return (creating on first use) the generated anchor name for the
        # '<href>#<fragment>' target.
        aid = f'{href}#{aid}'
        if aid not in self.link_hrefs.keys():
            self.link_hrefs[aid] = 'calibre_link-%s' % len(self.link_hrefs.keys())
        aid = self.link_hrefs[aid]
        return aid

    def get_anchor(self, page, aid):
        # PML anchor (\Q) code for a fragment on the given page.
        aid = self.get_anchor_id(page.href, aid)
        return r'\Q="%s"' % aid

    def remove_newlines(self, text):
        # Collapse all newline styles to spaces; PML line structure is
        # produced explicitly by the serializer, not by source whitespace.
        text = text.replace('\r\n', ' ')
        text = text.replace('\n', ' ')
        text = text.replace('\r', ' ')
        return text

    def prepare_string_for_pml(self, text):
        text = self.remove_newlines(text)
        # Replace \ with \\ so \ in the text is not interpreted as
        # a pml code.
        text = text.replace('\\', '\\\\')
        # Replace sequences of \\c \\c with pml sequences denoting
        # empty lines.
        text = text.replace('\\\\c \\\\c', '\\c \n\\c\n')
        return text

    def prepare_text(self, text):
        # Replace empty paragraphs with \c pml codes used to denote empty lines.
        text = re.sub(r'(?<=</p>)\s*<p[^>]*>[\xc2\xa0\s]*</p>', r'\\c\n\\c', text)
        return text

    def clean_text(self, text):
        # Remove excessive \p tags
        text = re.sub(r'\\p\s*\\p', '', text)

        # Remove anchors that do not have links
        anchors = set(re.findall(r'(?<=\\Q=").+?(?=")', text))
        links = set(re.findall(r'(?<=\\q="#).+?(?=")', text))
        for unused in anchors.difference(links):
            text = text.replace(r'\Q="%s"' % unused, '')

        # Remove \Cn tags that are within \x and \Xn tags
        text = re.sub(r'(?msu)(?P<t>\\(x|X[0-4]))(?P<a>.*?)(?P<c>\\C[0-4]\s*=\s*"[^"]*")(?P<b>.*?)(?P=t)', r'\g<t>\g<a>\g<b>\g<t>', text)

        # Replace bad characters.
        text = text.replace('\xc2', '')
        text = text.replace('\xa0', ' ')

        # Turn all characters that cannot be represented by themself into their
        # PML code equivalent
        text = re.sub('[^\x00-\x7f]', lambda x: unipmlcode(x.group()), text)

        # Remove excess spaces at beginning and end of lines
        text = re.sub('(?m)^[ ]+', '', text)
        text = re.sub('(?m)[ ]+$', '', text)

        # Remove excessive spaces
        text = re.sub('[ ]{2,}', ' ', text)

        # Condense excessive \c empty line sequences.
        text = re.sub(r'(\\c\s*\\c\s*){2,}', r'\\c \n\\c\n', text)

        # Remove excessive newlines.
        text = re.sub('\n[ ]+\n', '\n\n', text)
        if self.opts.remove_paragraph_spacing:
            text = re.sub('\n{2,}', '\n', text)
            # Only indent lines that don't have special formatting
            text = re.sub('(?imu)^(?P<text>.+)$', lambda mo: mo.group('text')
                if re.search(r'\\[XxCmrctTp]', mo.group('text')) else ' %s' % mo.group('text'), text)
        else:
            text = re.sub('\n{3,}', '\n\n', text)

        return text

    def dump_text(self, elem, stylizer, page, tag_stack=[]):
        '''Recursively serialize *elem* (and its tail text) to a list of PML
        fragments.  *tag_stack* carries the PML codes already open in
        ancestors so they are not re-opened.'''
        from calibre.ebooks.oeb.base import XHTML_NS, barename, namespace

        # Non-XHTML nodes (comments, foreign namespaces) contribute only
        # their tail text, and only when their parent is XHTML.
        if not isinstance(elem.tag, string_or_bytes) or namespace(elem.tag) != XHTML_NS:
            p = elem.getparent()
            if p is not None and isinstance(p.tag, string_or_bytes) and namespace(p.tag) == XHTML_NS \
                    and elem.tail:
                return [elem.tail]
            return []

        text = []
        tags = []
        style = stylizer.style(elem)

        # Hidden or non-content elements are skipped entirely (tail kept).
        if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \
           or style['visibility'] == 'hidden':
            if hasattr(elem, 'tail') and elem.tail:
                return [elem.tail]
            return []

        tag = barename(elem.tag)

        # Are we in a paragraph block?
        if tag in BLOCK_TAGS or style['display'] in BLOCK_STYLES:
            tags.append('block')

        # Process tags that need special processing and that do not have inner
        # text. Usually these require an argument.
        if tag in IMAGE_TAGS:
            if elem.attrib.get('src', None):
                if page.abshref(elem.attrib['src']) not in self.image_hrefs.keys():
                    # The first image encountered is treated as the cover.
                    if len(self.image_hrefs.keys()) == 0:
                        self.image_hrefs[page.abshref(elem.attrib['src'])] = 'cover.png'
                    else:
                        self.image_hrefs[page.abshref(elem.attrib['src'])] = image_name(
                            '%s.png' % len(self.image_hrefs.keys()), self.image_hrefs.keys()).strip('\x00')
                text.append('\\m="%s"' % self.image_hrefs[page.abshref(elem.attrib['src'])])
        elif tag == 'hr':
            # Horizontal rule -> \w code with a percentage width.
            w = r'\w'
            width = elem.get('width')
            if width:
                if not width.endswith('%'):
                    width += '%'
                w += '="%s"' % width
            else:
                w += '="50%"'
            text.append(w)
        elif tag == 'br':
            text.append('\n\\c \n\\c\n')

        # TOC markers.
        toc_name = elem.attrib.get('name', None)
        toc_id = elem.attrib.get('id', None)
        # Only write the TOC marker if the tag isn't a heading and we aren't in one.
        if (toc_id or toc_name) and tag not in ('h1', 'h2','h3','h4','h5','h6') and \
            'x' not in tag_stack+tags and 'X0' not in tag_stack+tags and \
            'X1' not in tag_stack+tags and 'X2' not in tag_stack+tags and \
            'X3' not in tag_stack+tags and 'X4' not in tag_stack+tags:
            toc_page = page.href
            if self.toc.get(toc_page, None):
                for toc_x in (toc_name, toc_id):
                    toc_title, toc_depth = self.toc[toc_page].get(toc_x, (None, 0))
                    if toc_title:
                        # PML chapter levels are limited to \C0 .. \C4.
                        toc_depth = max(min(toc_depth, 4), 0)
                        text.append(fr'\C{toc_depth}="{toc_title}"')

        # Emit a PML page break before elements styled to start a new page.
        if style['page-break-before'] == 'always':
            text.append(r'\p')

        # Process basic PML tags.
        pml_tag = TAG_MAP.get(tag, None)
        if pml_tag and pml_tag not in tag_stack+tags:
            text.append(r'\%s' % pml_tag)
            tags.append(pml_tag)

        # Special processing of tags that require an argument.
        # Anchors links
        if tag in LINK_TAGS and 'q' not in tag_stack+tags:
            href = elem.get('href')
            if href:
                href = page.abshref(href)
                # Only internal links are converted; external URLs are dropped.
                if '://' not in href:
                    if '#' not in href:
                        href += '#'
                    if href not in self.link_hrefs.keys():
                        self.link_hrefs[href] = 'calibre_link-%s' % len(self.link_hrefs.keys())
                    href = '#%s' % self.link_hrefs[href]
                    text.append(r'\q="%s"' % href)
                    tags.append('q')

        # Anchor ids
        id_name = elem.get('id')
        name_name = elem.get('name')
        for name_x in (id_name, name_name):
            if name_x:
                text.append(self.get_anchor(page, name_x))

        # Processes style information
        for s in STYLES:
            style_tag = s[1].get(style[s[0]], None)
            if style_tag and style_tag not in tag_stack+tags:
                text.append(r'\%s' % style_tag)
                tags.append(style_tag)

        # margin left -> \T indent, as a percentage of the page height.
        try:
            mms = int(float(style['margin-left']) * 100 / style.height)
            if mms:
                text.append(r'\T="%s%%"' % mms)
        except:
            pass

        # Soft scene breaks: a top margin of one em or more becomes an
        # explicit empty line.
        try:
            ems = int(round((float(style.marginTop) / style.fontSize) - 1))
            if ems >= 1:
                text.append('\n\\c \n\\c\n')
        except:
            pass

        # Process text within this tag.
        if hasattr(elem, 'text') and elem.text:
            text.append(self.prepare_string_for_pml(elem.text))

        # Process inner tags
        for item in elem:
            text += self.dump_text(item, stylizer, page, tag_stack+tags)

        # Close opened tags.
        tags.reverse()
        text += self.close_tags(tags)

        # if tag in SEPARATE_TAGS:
        #    text.append('\n\n')

        if style['page-break-after'] == 'always':
            text.append(r'\p')

        # Process text after this tag but not within another.
        if hasattr(elem, 'tail') and elem.tail:
            text.append(self.prepare_string_for_pml(elem.tail))

        return text

    def close_tags(self, tags):
        text = []
        for tag in tags:
            # block isn't a real tag we just use
            # it to determine when we need to start
            # a new text block.
            if tag == 'block':
                text.append('\n\n')
            else:
                # closing \c and \r need to be placed
                # on the next line per PML spec.
                if tag in ('c', 'r'):
                    text.append('\n\\%s' % tag)
                else:
                    text.append(r'\%s' % tag)
        return text
| 13,145 | Python | .py | 322 | 30.568323 | 137 | 0.522806 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,342 | pmlconverter.py | kovidgoyal_calibre/src/calibre/ebooks/pml/pmlconverter.py | '''
Convert pml markup to and from html
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import io
import os
import re
from copy import deepcopy
from calibre import my_unichr, prepare_string_for_xml
from calibre.ebooks.metadata.toc import TOC
class PML_HTMLizer:
STATES = [
'i',
'u',
'd',
'b',
'sp',
'sb',
'h1',
'h1c',
'h2',
'h3',
'h4',
'h5',
'h6',
'a',
'ra',
'c',
'r',
's',
'l',
'k',
'FN',
'SB',
]
STATES_VALUE_REQ = [
'a',
'FN',
'SB',
]
STATES_VALUE_REQ_2 = [
'ra',
]
STATES_CLOSE_VALUE_REQ = [
'FN',
'SB',
]
STATES_TAGS = {
'h1': ('<h1 style="page-break-before: always;">', '</h1>'),
'h1c': ('<h1>', '</h1>'),
'h2': ('<h2>', '</h2>'),
'h3': ('<h3>', '</h3>'),
'h4': ('<h4>', '</h4>'),
'h5': ('<h5>', '</h5>'),
'h6': ('<h6>', '</h6>'),
'sp': ('<sup>', '</sup>'),
'sb': ('<sub>', '</sub>'),
'a': ('<a href="#%s">', '</a>'),
'ra': ('<span id="r%s"></span><a href="#%s">', '</a>'),
'c': ('<div style="text-align: center; margin: auto;">', '</div>'),
'r': ('<div style="text-align: right;">', '</div>'),
't': ('<div style="margin-left: 5%;">', '</div>'),
'T': ('<div style="text-indent: %s;">', '</div>'),
'i': ('<span style="font-style: italic;">', '</span>'),
'u': ('<span style="text-decoration: underline;">', '</span>'),
'd': ('<span style="text-decoration: line-through;">', '</span>'),
'b': ('<span style="font-weight: bold;">', '</span>'),
'l': ('<span style="font-size: 150%;">', '</span>'),
'k': ('<span style="font-size: 75%; font-variant: small-caps;">', '</span>'),
'FN': ('<br /><br style="page-break-after: always;" /><div id="fn-%s"><p>', '</p><small><a href="#rfn-%s">return</a></small></div>'),
'SB': ('<br /><br style="page-break-after: always;" /><div id="sb-%s"><p>', '</p><small><a href="#rsb-%s">return</a></small></div>'),
}
CODE_STATES = {
'q': 'a',
'x': 'h1',
'X0': 'h2',
'X1': 'h3',
'X2': 'h4',
'X3': 'h5',
'X4': 'h6',
'Sp': 'sp',
'Sb': 'sb',
'c': 'c',
'r': 'r',
'i': 'i',
'I': 'i',
'u': 'u',
'o': 'd',
'b': 'b',
'B': 'b',
'l': 'l',
'k': 'k',
'Fn': 'ra',
'Sd': 'ra',
'FN': 'FN',
'SB': 'SB',
}
LINK_STATES = [
'a',
'ra',
]
BLOCK_STATES = [
'a',
'ra',
'h1',
'h2',
'h3',
'h4',
'h5',
'h6',
'sb',
'sp',
]
DIV_STATES = [
'c',
'r',
'FN',
'SB',
]
SPAN_STATES = [
'l',
'k',
'i',
'u',
'd',
'b',
]
NEW_LINE_EXCHANGE_STATES = {
'h1': 'h1c',
}
    def __init__(self):
        '''Initialise empty parser state; filled in during parsing.'''
        # Per-state bookkeeping used while converting PML codes to HTML;
        # exact value structure is managed by the parsing code (not shown
        # in this excerpt).
        self.state = {}
        # toc consists of a tuple
        # (level, (href, id, text))
        self.toc = []
        # Name of the HTML file being generated; presumably used when
        # recording TOC entries — set by callers, confirm in parse_pml.
        self.file_name = ''
    def prepare_pml(self, pml):
        """Normalize raw PML text before parsing: inline TOC titles into
        \\x / \\X codes, strip comments and excess whitespace, rewrite
        footnotes/sidebars into \\FN / \\SB codes and decode \\a / \\U
        character escapes."""
        # Give Chapters the form \\x="text"text\\x. This is used for
        # generating the TOC later.
        pml = re.sub(r'(?msu)(?P<c>\\x)(?P<text>.*?)(?P=c)', lambda match: '%s="%s"%s%s' %
                     (match.group('c'), self.strip_pml(match.group('text')), match.group('text'), match.group('c')), pml)
        pml = re.sub(r'(?msu)(?P<c>\\X[0-4])(?P<text>.*?)(?P=c)', lambda match: '%s="%s"%s%s' %
                     (match.group('c'), self.strip_pml(match.group('text')), match.group('text'), match.group('c')), pml)
        # Remove comments
        pml = re.sub(r'(?mus)\\v(?P<text>.*?)\\v', '', pml)
        # Remove extra white spaces.
        pml = re.sub(r'(?mus)[ ]{2,}', ' ', pml)
        pml = re.sub(r'(?mus)^[ ]*(?=.)', '', pml)
        pml = re.sub(r'(?mus)(?<=.)[ ]*$', '', pml)
        pml = re.sub(r'(?mus)^[ ]*$', '', pml)
        # Footnotes and Sidebars are rewritten as \FN="id"...\FN and
        # \SB="id"...\SB codes; empty bodies are dropped entirely.
        pml = re.sub(r'(?mus)<footnote\s+id="(?P<target>.+?)">\s*(?P<text>.*?)\s*</footnote>', lambda match: '\\FN="%s"%s\\FN' %
                     (match.group('target'), match.group('text')) if match.group('text') else '', pml)
        pml = re.sub(r'(?mus)<sidebar\s+id="(?P<target>.+?)">\s*(?P<text>.*?)\s*</sidebar>', lambda match: '\\SB="%s"%s\\SB' %
                     (match.group('target'), match.group('text')) if match.group('text') else '', pml)
        # NOTE(review): the next replace is a no-op as written. Upstream this
        # replaces '&' with its XML entity; the entity text appears to have
        # been HTML-unescaped in this copy of the file -- confirm against
        # upstream calibre before relying on this line.
        pml = pml.replace('&', '&')
        # Replace \\a and \\U with either the unicode character or the entity.
        pml = re.sub(r'\\a(?P<num>\d{3})', lambda match: '&#%s;' % match.group('num'), pml)
        pml = re.sub(r'\\U(?P<num>[0-9a-f]{4})', lambda match: '%s' % my_unichr(int(match.group('num'), 16)), pml)
        pml = prepare_string_for_xml(pml)
        return pml
def strip_pml(self, pml):
pml = re.sub(r'\\C\d=".*"', '', pml)
pml = re.sub(r'\\Fn=".*"', '', pml)
pml = re.sub(r'\\Sd=".*"', '', pml)
pml = re.sub(r'\\.=".*"', '', pml)
pml = re.sub(r'\\X\d', '', pml)
pml = re.sub(r'\\S[pbd]', '', pml)
pml = re.sub(r'\\Fn', '', pml)
pml = re.sub(r'\\a\d\d\d', '', pml)
pml = re.sub(r'\\U\d\d\d\d', '', pml)
pml = re.sub(r'\\.', '', pml)
pml = pml.replace('\r\n', ' ')
pml = pml.replace('\n', ' ')
pml = pml.replace('\r', ' ')
pml = pml.strip()
return pml
def cleanup_html(self, html):
old = html
html = self.cleanup_html_remove_redundant(html)
while html != old:
old = html
html = self.cleanup_html_remove_redundant(html)
html = re.sub(r'(?imu)^\s*', '', html)
return html
def cleanup_html_remove_redundant(self, html):
for key in self.STATES_TAGS:
open, close = self.STATES_TAGS[key]
if key in self.STATES_VALUE_REQ:
html = re.sub(r'(?u){}\s*{}'.format(open % '.*?', close), '', html)
else:
html = re.sub(fr'(?u){open}\s*{close}', '', html)
html = re.sub(r'(?imu)<p>\s*</p>', '', html)
return html
    def start_line(self):
        """Open a new output paragraph, re-opening every state that is
        still active so formatting carries across source line breaks."""
        start = ''
        state = deepcopy(self.state)
        div = []
        span = []
        other = []
        for key, val in state.items():
            if key in self.NEW_LINE_EXCHANGE_STATES and val[0]:
                # NOTE(review): this writes to the dict being iterated; it is
                # only safe if the exchanged name (e.g. 'h1c') already exists
                # as a key, i.e. is listed in STATES -- confirm.
                state[self.NEW_LINE_EXCHANGE_STATES[key]] = val
                state[key] = [False, '']
        for key, val in state.items():
            if val[0]:
                if key in self.DIV_STATES:
                    div.append((key, val[1]))
                elif key in self.SPAN_STATES:
                    span.append((key, val[1]))
                else:
                    other.append((key, val[1]))
        # Re-open outermost first: misc states, then divs, then spans
        # (mirror image of the close order used by end_line).
        for key, val in other+div+span:
            if key in self.STATES_VALUE_REQ:
                start += self.STATES_TAGS[key][0] % val
            elif key in self.STATES_VALUE_REQ_2:
                start += self.STATES_TAGS[key][0] % (val, val)
            else:
                start += self.STATES_TAGS[key][0]
        return '<p>%s' % start
def end_line(self):
end = ''
div = []
span = []
other = []
for key, val in self.state.items():
if val[0]:
if key in self.DIV_STATES:
div.append(key)
elif key in self.SPAN_STATES:
span.append(key)
else:
other.append(key)
for key in span+div+other:
if key in self.STATES_CLOSE_VALUE_REQ:
end += self.STATES_TAGS[key][1] % self.state[key][1]
else:
end += self.STATES_TAGS[key][1]
return '%s</p>' % end
    def process_code(self, code, stream, pre=''):
        """Translate a PML code into HTML and toggle its open/closed state.

        *stream* supplies any ="value" argument following the code; *pre*
        prefixes generated ids ('fn'/'sb' for footnote/sidebar links).
        Returns the generated HTML ('' for unknown codes).
        """
        text = ''
        code = self.CODE_STATES.get(code, None)
        if not code:
            return text
        if code in self.DIV_STATES:
            # Ignore multiple T's on the same line. They do not have a closing
            # code. They get closed at the end of the line.
            # NOTE(review): 'T' is not in DIV_STATES and no CODE_STATES entry
            # maps to 'T', so this branch appears unreachable -- confirm.
            if code == 'T' and self.state['T'][0]:
                self.code_value(stream)
                return text
            text = self.process_code_div(code, stream)
        elif code in self.SPAN_STATES:
            text = self.process_code_span(code, stream)
        elif code in self.BLOCK_STATES:
            text = self.process_code_block(code, stream, pre)
        else:
            text = self.process_code_simple(code, stream)
        # Flip the open/closed flag; the helpers above only emit HTML.
        self.state[code][0] = not self.state[code][0]
        return text
def process_code_simple(self, code, stream):
text = ''
if self.state[code][0]:
if code in self.STATES_CLOSE_VALUE_REQ:
text = self.STATES_TAGS[code][1] % self.state[code][1]
else:
text = self.STATES_TAGS[code][1]
else:
if code in self.STATES_VALUE_REQ or code in self.STATES_VALUE_REQ_2:
val = self.code_value(stream)
if code in self.STATES_VALUE_REQ:
text = self.STATES_TAGS[code][0] % val
else:
text = self.STATES_TAGS[code][0] % (val, val)
self.state[code][1] = val
else:
text = self.STATES_TAGS[code][0]
return text
    def process_code_div(self, code, stream):
        """Open or close the <div>-level state *code*.

        Because divs cannot nest inside spans in the output, closing a div
        closes every open span and div, then re-opens everything except
        *code*; opening a div closes the open spans, emits the div, then
        re-opens the spans inside it.
        """
        text = ''
        # Close code.
        if self.state[code][0]:
            # Close all open spans and divs, innermost (spans) first.
            for c in self.SPAN_STATES+self.DIV_STATES:
                if self.state[c][0]:
                    if c in self.STATES_CLOSE_VALUE_REQ:
                        text += self.STATES_TAGS[c][1] % self.state[c][1]
                    else:
                        text += self.STATES_TAGS[c][1]
            # Re-open everything else based on state; *code* stays closed.
            for c in self.DIV_STATES+self.SPAN_STATES:
                if code == c:
                    continue
                if self.state[c][0]:
                    if c in self.STATES_VALUE_REQ:
                        text += self.STATES_TAGS[self.CODE_STATES[c]][0] % self.state[c][1]
                    elif c in self.STATES_VALUE_REQ_2:
                        text += self.STATES_TAGS[self.CODE_STATES[c]][0] % (self.state[c][1], self.state[c][1])
                    else:
                        text += self.STATES_TAGS[c][0]
        # Open code.
        else:
            # Close all spans.
            for c in self.SPAN_STATES:
                if self.state[c][0]:
                    if c in self.STATES_CLOSE_VALUE_REQ:
                        text += self.STATES_TAGS[c][1] % self.state[c][1]
                    else:
                        text += self.STATES_TAGS[c][1]
            # Emit the opening tag, consuming a ="value" when required.
            if code in self.STATES_VALUE_REQ or code in self.STATES_VALUE_REQ_2:
                val = self.code_value(stream)
                if code in self.STATES_VALUE_REQ:
                    text += self.STATES_TAGS[code][0] % val
                else:
                    text += self.STATES_TAGS[code][0] % (val, val)
                self.state[code][1] = val
            else:
                text += self.STATES_TAGS[code][0]
            # Re-open all spans based on state
            for c in self.SPAN_STATES:
                if self.state[c][0]:
                    if c in self.STATES_VALUE_REQ:
                        text += self.STATES_TAGS[self.CODE_STATES[c]][0] % self.state[c][1]
                    elif c in self.STATES_VALUE_REQ_2:
                        text += self.STATES_TAGS[self.CODE_STATES[c]][0] % (self.state[c][1], self.state[c][1])
                    else:
                        text += self.STATES_TAGS[c][0]
        return text
    def process_code_span(self, code, stream):
        """Open or close the <span>-level state *code*. Closing a span
        closes every open span and re-opens all of them except *code* so
        nesting stays well-formed."""
        text = ''
        # Close code.
        if self.state[code][0]:
            # Close all spans
            for c in self.SPAN_STATES:
                if self.state[c][0]:
                    if c in self.STATES_CLOSE_VALUE_REQ:
                        text += self.STATES_TAGS[c][1] % self.state[c][1]
                    else:
                        text += self.STATES_TAGS[c][1]
            # Re-open the spans based on state except for code which will be
            # left closed.
            for c in self.SPAN_STATES:
                if code == c:
                    continue
                if self.state[c][0]:
                    # NOTE(review): these two branches format with
                    # STATES_TAGS[code] rather than STATES_TAGS[c] (compare
                    # process_code_div). No SPAN_STATES entry is currently in
                    # STATES_VALUE_REQ / STATES_VALUE_REQ_2, so they appear
                    # unreachable -- confirm before relying on them.
                    if c in self.STATES_VALUE_REQ:
                        text += self.STATES_TAGS[code][0] % self.state[c][1]
                    elif c in self.STATES_VALUE_REQ_2:
                        text += self.STATES_TAGS[code][0] % (self.state[c][1], self.state[c][1])
                    else:
                        text += self.STATES_TAGS[c][0]
        # Open code.
        else:
            if code in self.STATES_VALUE_REQ or code in self.STATES_VALUE_REQ_2:
                val = self.code_value(stream)
                if code in self.STATES_VALUE_REQ:
                    text += self.STATES_TAGS[code][0] % val
                else:
                    text += self.STATES_TAGS[code][0] % (val, val)
                self.state[code][1] = val
            else:
                text += self.STATES_TAGS[code][0]
        return text
    def process_code_block(self, code, stream, pre=''):
        """Open or close a block-level state (headings, links, sup/sub).

        Open spans are closed first and re-opened afterwards so the block
        tag is not nested inside a span. For LINK_STATES the ="value" is
        treated as an anchor target: a leading '#' is stripped and *pre*
        (e.g. 'fn'/'sb') is prefixed when given.
        """
        text = ''
        # Close all spans
        for c in self.SPAN_STATES:
            if self.state[c][0]:
                if c in self.STATES_CLOSE_VALUE_REQ:
                    text += self.STATES_TAGS[c][1] % self.state[c][1]
                else:
                    text += self.STATES_TAGS[c][1]
        # Process the code
        if self.state[code][0]:
            # Close tag
            if code in self.STATES_CLOSE_VALUE_REQ:
                text += self.STATES_TAGS[code][1] % self.state[code][1]
            else:
                text += self.STATES_TAGS[code][1]
        else:
            # Open tag
            if code in self.STATES_VALUE_REQ or code in self.STATES_VALUE_REQ_2:
                val = self.code_value(stream)
                if code in self.LINK_STATES:
                    val = val.lstrip('#')
                if pre:
                    val = f'{pre}-{val}'
                if code in self.STATES_VALUE_REQ:
                    text += self.STATES_TAGS[code][0] % val
                else:
                    text += self.STATES_TAGS[code][0] % (val, val)
                self.state[code][1] = val
            else:
                text += self.STATES_TAGS[code][0]
        # Re-open all spans based on state.
        for c in self.SPAN_STATES:
            if self.state[c][0]:
                # NOTE(review): these two branches format with
                # STATES_TAGS[code] rather than STATES_TAGS[c]; no
                # SPAN_STATES entry is in STATES_VALUE_REQ(_2) today, so
                # they appear unreachable -- confirm.
                if c in self.STATES_VALUE_REQ:
                    text += self.STATES_TAGS[code][0] % self.state[c][1]
                elif c in self.STATES_VALUE_REQ_2:
                    text += self.STATES_TAGS[code][0] % (self.state[c][1], self.state[c][1])
                else:
                    text += self.STATES_TAGS[c][0]
        return text
def code_value(self, stream):
value = ''
# state 0 is before =
# state 1 is before the first "
# state 2 is before the second "
# state 3 is after the second "
state = 0
loc = stream.tell()
c = stream.read(1)
while c != '':
if state == 0:
if c == '=':
state = 1
elif c != ' ':
# A code that requires an argument should have = after the
# code but sometimes has spaces. If it has anything other
# than a space or = after the code then we can assume the
# markup is invalid. We will stop looking for the value
# and continue to hopefully not lose any data.
break
elif state == 1:
if c == '"':
state = 2
elif c != ' ':
# " should always follow = but we will allow for blank
# space after the =.
break
elif state == 2:
if c == '"':
state = 3
break
else:
value += c
c = stream.read(1)
if state != 3:
# Unable to complete the sequence to reterieve the value. Reset
# the stream to the location it started.
stream.seek(loc)
value = ''
return value.strip()
    def parse_pml(self, pml, file_name=''):
        """Convert a PML document into HTML, one source line per <p>.

        *file_name* is recorded and used as the href in TOC entries
        collected into self.toc. Returns the cleaned-up HTML string.
        """
        pml = self.prepare_pml(pml)
        output = []
        self.state = {}
        self.toc = []
        self.file_name = file_name
        # t: Are we in an open \t tag set?
        # T: Are we in an open \T?
        # st: Did the \t start the line?
        # sT: Did the \T start the line?
        # et: Did the \t end the line?
        indent_state = {'t': False, 'T': False, 'st': False, 'sT': False, 'et': False}
        basic_indent = False
        adv_indent_val = ''
        # Keep track of the number of empty lines
        # between paragraphs. When we reach a set number
        # we assume it's a soft scene break.
        empty_count = 0
        for s in self.STATES:
            self.state[s] = [False, '']
        for line in pml.splitlines():
            parsed = []
            empty = True
            basic_indent = indent_state['t']
            indent_state['T'] = False
            # Determine if the \t starts the line or if we are
            # in an open \t block.
            if line.lstrip().startswith('\\t') or basic_indent:
                basic_indent = True
                indent_state['st'] = True
            else:
                indent_state['st'] = False
            # Determine if the \T starts the line.
            if line.lstrip().startswith('\\T'):
                indent_state['sT'] = True
            else:
                indent_state['sT'] = False
            # Determine if the \t ends the line.
            if line.rstrip().endswith('\\t'):
                indent_state['et'] = True
            else:
                indent_state['et'] = False
            if isinstance(line, bytes):
                line = line.decode('utf-8')
            # Process the line one character at a time via a StringIO so
            # code_value() can consume ="..." arguments from the stream.
            line = io.StringIO(line)
            parsed.append(self.start_line())
            c = line.read(1)
            while c != '':
                text = ''
                if c == '\\':
                    c = line.read(1)
                    if c in 'qcriIuobBlk':
                        text = self.process_code(c, line)
                    elif c in 'FS':
                        # Two-character codes: Fn, FN, SB, Sd.
                        l = line.read(1)
                        if f'{c}{l}' == 'Fn':
                            text = self.process_code('Fn', line, 'fn')
                        elif f'{c}{l}' == 'FN':
                            text = self.process_code('FN', line)
                        elif f'{c}{l}' == 'SB':
                            text = self.process_code('SB', line)
                        elif f'{c}{l}' == 'Sd':
                            text = self.process_code('Sd', line, 'sb')
                    elif c in 'xXC':
                        empty = False
                        # The PML was modified earlier so x and X put the text
                        # inside of ="" so we don't have do special processing
                        # for C.
                        t = ''
                        level = 0
                        if c in 'XC':
                            level = line.read(1)
                        id = 'pml_toc-%s' % len(self.toc)
                        value = self.code_value(line)
                        if c == 'x':
                            t = self.process_code(c, line)
                        elif c == 'X':
                            t = self.process_code(f'{c}{level}', line)
                        if not value or value == '':
                            text = t
                        else:
                            # NOTE(review): for \x the level stays the int 0
                            # while get_toc() compares against the string '0'
                            # -- confirm intended TOC depth for \x chapters.
                            self.toc.append((level, (os.path.basename(self.file_name), id, value)))
                            text = f'{t}<span id="{id}"></span>'
                    elif c == 'm':
                        empty = False
                        src = self.code_value(line)
                        text = '<img src="images/%s" />' % src
                    elif c == 'Q':
                        empty = False
                        id = self.code_value(line)
                        text = '<span id="%s"></span>' % id
                    elif c == 'p':
                        empty = False
                        text = '<br /><br style="page-break-after: always;" />'
                    elif c == 'n':
                        pass
                    elif c == 'w':
                        empty = False
                        text = '<hr style="width: %s" />' % self.code_value(line)
                    elif c == 't':
                        indent_state['t'] = not indent_state['t']
                    elif c == 'T':
                        # Ensure we only store the value on the first T set for the line.
                        if not indent_state['T']:
                            adv_indent_val = self.code_value(line)
                        else:
                            # We detected a T previously on this line.
                            # Don't replace the first detected value.
                            self.code_value(line)
                        indent_state['T'] = True
                    elif c == '-':
                        empty = False
                        # NOTE(review): upstream emits a soft-hyphen entity
                        # here; this copy appears to hold the decoded
                        # character -- confirm against upstream.
                        text = '­'
                    elif c == '\\':
                        empty = False
                        text = '\\'
                else:
                    if c != ' ':
                        empty = False
                    text = c
                parsed.append(text)
                c = line.read(1)
            if empty:
                empty_count += 1
                if empty_count == 2:
                    # Two consecutive empty lines -> soft scene break.
                    # NOTE(review): upstream uses a non-breaking-space entity
                    # inside this paragraph -- confirm against upstream.
                    output.append('<p> </p>')
            else:
                empty_count = 0
            text = self.end_line()
            parsed.append(text)
            # Basic indent will be set if the \t starts the line or
            # if we are in a continuing \t block.
            if basic_indent:
                # if the \t started the line and either it ended the line or the \t
                # block is still open use a left margin.
                if indent_state['st'] and (indent_state['et'] or indent_state['t']):
                    parsed.insert(0, self.STATES_TAGS['t'][0])
                    parsed.append(self.STATES_TAGS['t'][1])
                # Use a text indent instead of a margin.
                # This handles cases such as:
                # \tO\tne upon a time...
                else:
                    parsed.insert(0, self.STATES_TAGS['T'][0] % '5%')
                    parsed.append(self.STATES_TAGS['T'][1])
            # \t will override \T's on the line.
            # We only handle \T's that started the line.
            elif indent_state['T'] and indent_state['sT']:
                parsed.insert(0, self.STATES_TAGS['T'][0] % adv_indent_val)
                parsed.append(self.STATES_TAGS['T'][1])
                indent_state['T'] = False
                adv_indent_val = ''
            output.append(''.join(parsed))
            line.close()
        output = self.cleanup_html('\n'.join(output))
        return output
    def get_toc(self):
        '''
        Toc can have up to 5 levels, 0 - 4 inclusive.
        This function will add items to their appropriate
        depth in the TOC tree. If the specified depth is
        invalid (item would not have a valid parent) add
        it to the next valid level above the specified
        level.

        NOTE(review): levels recorded by parse_pml are one-character
        strings (from line.read(1)), except \\x entries which keep the
        int 0 and therefore fall through to the level-4 branch here --
        confirm this is the intended placement.
        '''
        # Base toc object all items will be added to.
        n_toc = TOC()
        # Used to track nodes in the toc so we can add
        # sub items to the appropriate place in tree.
        t_l0 = None
        t_l1 = None
        t_l2 = None
        t_l3 = None
        for level, (href, id, text) in self.toc:
            if level == '0':
                t_l0 = n_toc.add_item(href, id, text)
                t_l1 = None
                t_l2 = None
                t_l3 = None
            elif level == '1':
                if t_l0 is None:
                    t_l0 = n_toc
                t_l1 = t_l0.add_item(href, id, text)
                t_l2 = None
                t_l3 = None
            elif level == '2':
                if t_l1 is None:
                    if t_l0 is None:
                        t_l1 = n_toc
                    else:
                        t_l1 = t_l0
                t_l2 = t_l1.add_item(href, id, text)
                t_l3 = None
            elif level == '3':
                if t_l2 is None:
                    if t_l1 is None:
                        if t_l0 is None:
                            t_l2 = n_toc
                        else:
                            t_l2 = t_l0
                    else:
                        t_l2 = t_l1
                t_l3 = t_l2.add_item(href, id, text)
            # Level 4.
            # Anything above 4 is invalid but we will count
            # it as level 4.
            else:
                if t_l3 is None:
                    if t_l2 is None:
                        if t_l1 is None:
                            if t_l0 is None:
                                t_l3 = n_toc
                            else:
                                t_l3 = t_l0
                        else:
                            t_l3 = t_l1
                    else:
                        t_l3 = t_l2
                t_l3.add_item(href, id, text)
        return n_toc
def pml_to_html(pml):
    """Convenience wrapper: convert *pml* with a fresh PML_HTMLizer."""
    return PML_HTMLizer().parse_pml(pml)
def footnote_sidebar_to_html(pre_id, id, pml):
    """Render a footnote/sidebar body as a page-broken <div>.

    When a usable id remains after stripping the \\x01 markers, the div is
    given the id '{pre_id}-{id}' and a return link back to 'r{pre_id}-{id}';
    otherwise a plain anonymous div is produced.
    """
    id = id.strip('\x01')
    body = pml_to_html(pml)
    page_break = '<br /><br style="page-break-after: always;" />'
    if not id.strip():
        return f'{page_break}<div>{body}</div>'
    return (f'{page_break}<div id="{pre_id}-{id}">{body}'
            f'<small><a href="#r{pre_id}-{id}">return</a></small></div>')
def footnote_to_html(id, pml):
    # Footnote variant: element ids are prefixed with 'fn'.
    return footnote_sidebar_to_html('fn', id, pml)
def sidebar_to_html(id, pml):
    # Sidebar variant: element ids are prefixed with 'sb'.
    return footnote_sidebar_to_html('sb', id, pml)
| 27,092 | Python | .py | 682 | 25.539589 | 141 | 0.430856 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,343 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/pml/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
def r(*a):
    """Shorthand for ``list(range(*a))`` used to build the code tables below."""
    return [*range(*a)]
# Uncommon Characters supported by PML. \\a tag codes
A_CHARS = r(160, 256) + r(130, 136) + r(138, 141) + \
    r(145, 152) + r(153, 157) + [159]
# Extended Unicode characters supported by PML, grouped by Unicode block.
Latin_ExtendedA = r(0x0100, 0x0104) + [0x0105, 0x0107, 0x010C, 0x010D,
    0x0112, 0x0113, 0x0115, 0x0117, 0x0119, 0x011B, 0x011D, 0x011F, 0x012A,
    0x012B, 0x012D, 0x012F, 0x0131, 0x0141, 0x0142, 0x0144, 0x0148] + \
    r(0x014B, 0x014E) + [0x014F, 0x0151, 0x0155] + r(0x0159, 0x015C) + \
    [0x015F, 0x0163, 0x0169, 0x016B, 0x016D, 0x0177, 0x017A, 0x017D, 0x017E]
Latin_ExtendedB = [0x01BF, 0x01CE, 0x01D0, 0x01D2, 0x01D4, 0x01E1, 0x01E3,
    0x01E7, 0x01EB, 0x01F0, 0x0207, 0x021D, 0x0227, 0x022F, 0x0233]
# NOTE(review): 0x0251 is listed twice below (harmless for membership
# tests, but presumably unintended).
IPA_Extensions = [0x0251, 0x0251, 0x0254, 0x0259, 0x025C, 0x0265, 0x026A,
    0x0272, 0x0283, 0x0289, 0x028A, 0x028C, 0x028F, 0x0292, 0x0294, 0x029C]
Spacing_Modifier_Letters = [0x02BE, 0x02BF, 0x02C7, 0x02C8, 0x02CC, 0x02D0,
    0x02D8, 0x02D9]
Greek_and_Coptic = r(0x0391, 0x03A2) + r(0x03A3, 0x03AA) + \
    r(0x03B1, 0x03CA) + [0x03D1, 0x03DD]
Hebrew = r(0x05D0, 0x05EB)
Latin_Extended_Additional = [0x1E0B, 0x1E0D, 0x1E17, 0x1E22, 0x1E24, 0x1E25,
    0x1E2B, 0x1E33, 0x1E37, 0x1E41, 0x1E43, 0x1E45, 0x1E47, 0x1E53] + \
    r(0x1E59, 0x1E5C) + [0x1E61, 0x1E63, 0x1E6B, 0x1E6D, 0x1E6F, 0x1E91,
    0x1E93, 0x1E96, 0x1EA1, 0x1ECD, 0x1EF9]
General_Punctuation = [0x2011, 0x2038, 0x203D, 0x2042]
Arrows = [0x2190, 0x2192]
Mathematical_Operators = [0x2202, 0x221A, 0x221E, 0x2225, 0x222B, 0x2260,
    0x2294, 0x2295, 0x22EE]
Enclosed_Alphanumerics = [0x24CA]
Miscellaneous_Symbols = r(0x261C, 0x2641) + r(0x2642, 0x2648) + \
    r(0x2660, 0x2664) + r(0x266D, 0x2670)
Dingbats = [0x2713, 0x2720]
Private_Use_Area = r(0xE000, 0xE01D) + r(0xE01E, 0xE029) + \
    r(0xE02A, 0xE052)
Alphabetic_Presentation_Forms = [0xFB02, 0xFB2A, 0xFB2B]
# \\U tag codes.
U_CHARS = Latin_ExtendedA + Latin_ExtendedB + IPA_Extensions + \
    Spacing_Modifier_Letters + Greek_and_Coptic + Hebrew + \
    Latin_Extended_Additional + General_Punctuation + Arrows + \
    Mathematical_Operators + Enclosed_Alphanumerics + Miscellaneous_Symbols + \
    Dingbats + Private_Use_Area + Alphabetic_Presentation_Forms
def unipmlcode(char):
    """Return the PML escape for *char*: ``\\annn`` (decimal) for
    cp1252-encodable characters listed in A_CHARS, ``\\Uxxxx``
    (uppercase hex) for characters listed in U_CHARS, else '?'."""
    try:
        cp1252_value = ord(char.encode('cp1252'))
    except Exception:
        pass
    else:
        if cp1252_value in A_CHARS:
            return '\\a%i' % cp1252_value
    codepoint = ord(char)
    if codepoint not in U_CHARS:
        return '?'
    # Equivalent to the '\\U%04x'.upper() trick: format in uppercase hex.
    return '\\U%04X' % codepoint
| 2,639 | Python | .py | 56 | 43.267857 | 79 | 0.68932 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,344 | input.py | kovidgoyal_calibre/src/calibre/ebooks/comic/input.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Based on ideas from comiclrf created by FangornUK.
'''
import os
import time
import traceback
from calibre import extract, prints, walk
from calibre.constants import filesystem_encoding
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.cleantext import clean_ascii_chars
from calibre.utils.icu import numeric_sort_key
from calibre.utils.ipc.job import ParallelJob
from calibre.utils.ipc.server import Server
from polyglot.queue import Empty
# If the specified screen has either dimension larger than this value, no image
# rescaling is done (we assume that it is a tablet output profile)
MAX_SCREEN_SIZE = 3000
def extract_comic(path_to_comic_file):
    '''
    Un-archive the comic file into a persistent temporary directory and
    return that directory. Extracted file names are sanitized: ASCII
    cleaned, with '#' replaced by '_'.
    '''
    tdir = PersistentTemporaryDirectory(suffix='_comic_extract')
    if not isinstance(tdir, str):
        # Guard against wrongly encoded unicode file/dir names in the archive
        tdir = tdir.decode(filesystem_encoding)
    extract(path_to_comic_file, tdir)
    for entry in walk(tdir):
        name = os.path.basename(entry)
        cleaned = clean_ascii_chars(name.replace('#', '_'))
        if cleaned and cleaned != name:
            os.rename(entry, os.path.join(os.path.dirname(entry), cleaned))
    return tdir
def generate_entries_from_dir(path):
    """Map every file under *path* (as an absolute path) to a zero-argument
    callable returning its current mtime."""
    from functools import partial

    from calibre import walk
    entries = {}
    for name in walk(path):
        abspath = os.path.abspath(name)
        entries[abspath] = partial(os.path.getmtime, abspath)
    return entries
def find_pages(dir_or_items, sort_on_mtime=False, verbose=False):
    '''
    Find valid comic pages in a previously un-archived comic.

    :param dir_or_items: Directory in which extracted comic lives or a dict of paths to function getting mtime
    :param sort_on_mtime: If True sort pages based on their last modified time.
                          Otherwise, sort alphabetically.
    :param verbose: If True print the list of found pages.
    :return: Sorted list of page image paths.
    '''
    from calibre.libunzip import comic_exts
    items = generate_entries_from_dir(dir_or_items) if isinstance(dir_or_items, str) else dir_or_items
    # Track how deep each candidate lives so we can tell whether pages are
    # spread across folders at different levels.
    sep_counts = set()
    pages = []
    for path in items:
        if '__MACOSX' in path:
            continue
        ext = path.rpartition('.')[2].lower()
        if ext in comic_exts:
            sep_counts.add(path.replace('\\', '/').count('/'))
            pages.append(path)
    # Use the full path to sort unless the files are in folders of different
    # levels, in which case simply use the filenames.
    basename = os.path.basename if len(sep_counts) > 1 else lambda x: x
    if sort_on_mtime:
        def key(x):
            return items[x]()
    else:
        def key(x):
            return numeric_sort_key(basename(x))
    pages.sort(key=key)
    if verbose:
        prints('Found comic pages...')
        try:
            base = os.path.commonpath(pages)
        except ValueError:
            # No common path (e.g. no pages, or mixed drives on Windows).
            pass
        else:
            prints('\t'+'\n\t'.join([os.path.relpath(p, base) for p in pages]))
    return pages
class PageProcessor(list):  # {{{
    '''
    Contains the actual image rendering logic. See :method:`render` and
    :method:`process_pages`.

    The instance is a list: rendered page file paths are appended to it.
    '''
    def __init__(self, path_to_page, dest, opts, num):
        # path_to_page: source image file; dest: output directory;
        # opts: conversion options; num: page index (0 = first page).
        list.__init__(self)
        self.path_to_page = path_to_page
        self.opts = opts
        self.num = num
        self.dest = dest
        self.rotate = False
        self.src_img_was_grayscale = False
        self.src_img_format = None
        self.render()
    def render(self):
        """Load the source image, optionally split/rotate landscape pages,
        then hand off to process_pages()."""
        from qt.core import QImage
        from calibre.utils.filenames import make_long_path_useable
        from calibre.utils.img import crop_image, image_from_data, scale_image
        with open(make_long_path_useable(self.path_to_page), 'rb') as f:
            img = image_from_data(f.read())
        width, height = img.width(), img.height()
        if self.num == 0:  # First image so create a thumbnail from it
            with open(os.path.join(self.dest, 'thumbnail.png'), 'wb') as f:
                f.write(scale_image(img, as_png=True)[-1])
        self.src_img_format = img.format()
        # Indexed8 images that contain only gray values count as grayscale.
        self.src_img_was_grayscale = self.src_img_format in (QImage.Format.Format_Grayscale8, QImage.Format.Format_Grayscale16) or (
            img.format() == QImage.Format.Format_Indexed8 and img.allGray())
        self.pages = [img]
        if width > height:
            if self.opts.landscape:
                self.rotate = True
            else:
                # Wide page: split vertically into two pages, ordered by
                # reading direction.
                half = width // 2
                split1 = crop_image(img, 0, 0, half, height)
                split2 = crop_image(img, half, 0, width - half, height)
                self.pages = [split2, split1] if self.opts.right2left else [split1, split2]
        self.process_pages()
    def process_pages(self):
        """Apply the configured image pipeline (trim, normalize, resize,
        sharpen, despeckle, grayscale, quantize) to each page and write the
        results into self.dest, appending each output path to self."""
        from qt.core import QImage
        from calibre.utils.img import (
            add_borders_to_image,
            despeckle_image,
            gaussian_sharpen_image,
            image_to_data,
            normalize_image,
            quantize_image,
            remove_borders_from_image,
            resize_image,
            rotate_image,
        )
        for i, img in enumerate(self.pages):
            if self.rotate:
                img = rotate_image(img, -90)
            if not self.opts.disable_trim:
                img = remove_borders_from_image(img)
            # Do the Photoshop "Auto Levels" equivalent
            if not self.opts.dont_normalize:
                img = normalize_image(img)
            sizex, sizey = img.width(), img.height()
            SCRWIDTH, SCRHEIGHT = self.opts.output_profile.comic_screen_size
            try:
                if self.opts.comic_image_size:
                    # User override of the screen size, 'WxH'.
                    SCRWIDTH, SCRHEIGHT = map(int, [x.strip() for x in
                        self.opts.comic_image_size.split('x')])
            except:
                # NOTE(review): deliberate best-effort parse of the user
                # supplied size; a bare except is overly broad though.
                pass  # Ignore
            if self.opts.keep_aspect_ratio:
                # Preserve the aspect ratio by adding border
                aspect = float(sizex) / float(sizey)
                if aspect <= (float(SCRWIDTH) / float(SCRHEIGHT)):
                    newsizey = SCRHEIGHT
                    newsizex = int(newsizey * aspect)
                    deltax = (SCRWIDTH - newsizex) // 2
                    deltay = 0
                else:
                    newsizex = SCRWIDTH
                    newsizey = int(newsizex // aspect)
                    deltax = 0
                    deltay = (SCRHEIGHT - newsizey) // 2
                if newsizex < MAX_SCREEN_SIZE and newsizey < MAX_SCREEN_SIZE:
                    # Too large and resizing fails, so better
                    # to leave it as original size
                    img = resize_image(img, newsizex, newsizey)
                    img = add_borders_to_image(img, left=deltax, right=deltax, top=deltay, bottom=deltay)
            elif self.opts.wide:
                # Keep aspect and Use device height as scaled image width so landscape mode is clean
                aspect = float(sizex) / float(sizey)
                screen_aspect = float(SCRWIDTH) / float(SCRHEIGHT)
                # Get dimensions of the landscape mode screen
                # Add 25px back to height for the battery bar.
                wscreenx = SCRHEIGHT + 25
                wscreeny = int(wscreenx // screen_aspect)
                if aspect <= screen_aspect:
                    newsizey = wscreeny
                    newsizex = int(newsizey * aspect)
                    deltax = (wscreenx - newsizex) // 2
                    deltay = 0
                else:
                    newsizex = wscreenx
                    newsizey = int(newsizex // aspect)
                    deltax = 0
                    deltay = (wscreeny - newsizey) // 2
                if newsizex < MAX_SCREEN_SIZE and newsizey < MAX_SCREEN_SIZE:
                    # Too large and resizing fails, so better
                    # to leave it as original size
                    img = resize_image(img, newsizex, newsizey)
                    img = add_borders_to_image(img, left=deltax, right=deltax, top=deltay, bottom=deltay)
            else:
                if SCRWIDTH < MAX_SCREEN_SIZE and SCRHEIGHT < MAX_SCREEN_SIZE:
                    img = resize_image(img, SCRWIDTH, SCRHEIGHT)
            if not self.opts.dont_sharpen:
                img = gaussian_sharpen_image(img, 0.0, 1.0)
            if self.opts.despeckle:
                img = despeckle_image(img)
            img_is_grayscale = self.src_img_was_grayscale
            if not self.opts.dont_grayscale:
                img = img.convertToFormat(QImage.Format.Format_Grayscale16)
                img_is_grayscale = True
            if self.opts.output_format.lower() == 'png':
                if self.opts.colors:
                    img = quantize_image(img, max_colors=min(256, self.opts.colors))
                elif img_is_grayscale:
                    # Keep 8-bit output when the source was already limited
                    # to 256 colors, otherwise use 16-bit grayscale.
                    uses_256_colors = self.src_img_format in (QImage.Format.Format_Indexed8, QImage.Format.Format_Grayscale8)
                    final_fmt = QImage.Format.Format_Indexed8 if uses_256_colors else QImage.Format.Format_Grayscale16
                    if img.format() != final_fmt:
                        img = img.convertToFormat(final_fmt)
            dest = '%d_%d.%s'%(self.num, i, self.opts.output_format)
            dest = os.path.join(self.dest, dest)
            with open(dest, 'wb') as f:
                f.write(image_to_data(img, fmt=self.opts.output_format))
            self.append(dest)
# }}}
def render_pages(tasks, dest, opts, notification=lambda x, y: x):
    '''
    Entry point for the job server: render each (index, path) task into
    *dest* using PageProcessor.

    :param tasks: iterable of (page_number, path_to_image) pairs
    :param dest: output directory for rendered pages
    :param opts: conversion options passed through to PageProcessor
    :param notification: progress callback(fraction, message)
    :return: (rendered_page_paths, failed_source_paths)
    '''
    failures, pages = [], []
    for num, path in tasks:
        try:
            pages.extend(PageProcessor(path, dest, opts, num))
            msg = _('Rendered %s')%path
        except Exception:
            # Narrowed from a bare except: we want to record rendering
            # failures but must not swallow KeyboardInterrupt/SystemExit
            # in the worker process.
            failures.append(path)
            msg = _('Failed %s')%path
            if opts.verbose:
                msg += '\n' + traceback.format_exc()
        prints(msg)
        notification(0.5, msg)
    return pages, failures
class Progress:
    '''Callable progress tracker. Each call advances an internal counter by
    one and forwards the overall completed fraction (and the message) to
    the *update* callback.'''

    def __init__(self, total, update):
        self.total = total    # total number of expected notifications
        self.update = update  # callback(fraction_done, message)
        self.done = 0

    def __call__(self, percent, msg=''):
        # The incoming *percent* is ignored; overall completion is derived
        # from the number of calls made so far.
        self.done += 1
        self.update(self.done / self.total, msg)
def process_pages(pages, opts, update, tdir):
    '''
    Render all identified comic pages in parallel via the IPC job server.

    :param pages: list of source image paths
    :param opts: conversion options passed through to the workers
    :param update: progress callback(fraction, message)
    :param tdir: output directory for rendered pages
    :return: (rendered_page_paths, failed_source_paths)
    :raises Exception: if any worker job failed outright
    '''
    progress = Progress(len(pages), update)
    server = Server()
    jobs = []
    # The previous code first built a list of (page, dest) tuples here and
    # then immediately overwrote it; the server decides the task split.
    tasks = server.split(pages)
    for task in tasks:
        jobs.append(ParallelJob('render_pages', '', progress,
            args=[task, tdir, opts]))
        server.add_job(jobs[-1])
    # Poll the jobs, draining their notification queues into the progress
    # callback, until every job has finished.
    while True:
        time.sleep(1)
        running = False
        for job in jobs:
            while True:
                try:
                    x = job.notifications.get_nowait()
                    progress(*x)
                except Empty:
                    break
            job.update()
            if not job.is_finished:
                running = True
        if not running:
            break
    server.close()
    # Collect results; a job with no result at all is a hard failure.
    ans, failures = [], []
    for job in jobs:
        if job.failed or job.result is None:
            raise Exception(_('Failed to process comic: \n\n%s')%
                    job.log_file.read())
        pages, failures_ = job.result
        ans += pages
        failures += failures_
    return ans, failures
| 11,674 | Python | .py | 278 | 30.928058 | 132 | 0.57168 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,345 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/comic/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Convert CBR/CBZ files to LRF.
'''
import sys
def main(args=sys.argv):
    # NOTE(review): stub entry point -- always reports success; the actual
    # CBR/CBZ conversion logic lives elsewhere in calibre.
    return 0
if __name__ == '__main__':
    sys.exit(main())
| 285 | Python | .py | 12 | 21.416667 | 56 | 0.65283 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,346 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/tcr/__init__.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
| 121 | Python | .py | 3 | 39.333333 | 60 | 0.686441 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,347 | djvubzzdec.py | kovidgoyal_calibre/src/calibre/ebooks/djvu/djvubzzdec.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Anthon van der Neut <A.van.der.Neut@ruamel.eu>'
# Copyright (C) 2011 Anthon van der Neut, Ruamel bvba
# Adapted from Leon Bottou's djvulibre C++ code,
# ( ZPCodec.{cpp,h} and BSByteStream.{cpp,h} )
# that code was first converted to C removing any dependencies on the DJVU libre
# framework for ByteStream, making it into a ctypes callable shared object
# then to python, and remade into a class
original_copyright_notice = '''
//C- -------------------------------------------------------------------
//C- DjVuLibre-3.5
//C- Copyright (c) 2002 Leon Bottou and Yann Le Cun.
//C- Copyright (c) 2001 AT&T
//C-
//C- This software is subject to, and may be distributed under, the
//C- GNU General Public License, either Version 2 of the license,
//C- or (at your option) any later version. The license should have
//C- accompanied the software or you may obtain a copy of the license
//C- from the Free Software Foundation at http://www.fsf.org .
//C-
//C- This program is distributed in the hope that it will be useful,
//C- but WITHOUT ANY WARRANTY; without even the implied warranty of
//C- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//C- GNU General Public License for more details.
//C-
//C- DjVuLibre-3.5 is derived from the DjVu(r) Reference Library from
//C- Lizardtech Software. Lizardtech Software has authorized us to
//C- replace the original DjVu(r) Reference Library notice by the following
//C- text (see doc/lizard2002.djvu and doc/lizardtech2007.djvu):
//C-
//C- ------------------------------------------------------------------
//C- | DjVu (r) Reference Library (v. 3.5)
//C- | Copyright (c) 1999-2001 LizardTech, Inc. All Rights Reserved.
//C- | The DjVu Reference Library is protected by U.S. Pat. No.
//C- | 6,058,214 and patents pending.
//C- |
//C- | This software is subject to, and may be distributed under, the
//C- | GNU General Public License, either Version 2 of the license,
//C- | or (at your option) any later version. The license should have
//C- | accompanied the software or you may obtain a copy of the license
//C- | from the Free Software Foundation at http://www.fsf.org .
//C- |
//C- | The computer code originally released by LizardTech under this
//C- | license and unmodified by other parties is deemed "the LIZARDTECH
//C- | ORIGINAL CODE." Subject to any third party intellectual property
//C- | claims, LizardTech grants recipient a worldwide, royalty-free,
//C- | non-exclusive license to make, use, sell, or otherwise dispose of
//C- | the LIZARDTECH ORIGINAL CODE or of programs derived from the
//C- | LIZARDTECH ORIGINAL CODE in compliance with the terms of the GNU
//C- | General Public License. This grant only confers the right to
//C- | infringe patent claims underlying the LIZARDTECH ORIGINAL CODE to
//C- | the extent such infringement is reasonably necessary to enable
//C- | recipient to make, have made, practice, sell, or otherwise dispose
//C- | of the LIZARDTECH ORIGINAL CODE (or portions thereof) and not to
//C- | any greater extent that may be necessary to utilize further
//C- | modifications or combinations.
//C- |
//C- | The LIZARDTECH ORIGINAL CODE is provided "AS IS" WITHOUT WARRANTY
//C- | OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
//C- | TO ANY WARRANTY OF NON-INFRINGEMENT, OR ANY IMPLIED WARRANTY OF
//C- | MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
//C- +------------------------------------------------------------------
//
// $Id: BSByteStream.cpp,v 1.9 2007/03/25 20:48:29 leonb Exp $
// $Name: release_3_5_23 $
'''
# Decoder tuning constants (mirroring BSByteStream.h in the DjVu reference library).
MAXBLOCK = 4096  # maximum block size in KB; decode() rejects blocks larger than MAXBLOCK * 1024 bytes
FREQMAX = 4  # number of frequency slots used by the quasi move-to-front scheme in decode()
CTXIDS = 3  # number of ZP-coder context ids per bucket when decoding MTF ranks in decode()
MAXLEN = 1024 ** 2  # NOTE(review): not referenced anywhere in this chunk -- confirm before removing
# Exception classes used by this module.
class BZZDecoderError(Exception):
    """Raised when BZZDecoder runs into corrupt or truncated input."""

    def __init__(self, msg):
        # Initialize the base class so that self.args is populated, which
        # makes the exception picklable and gives a meaningful repr().
        # (The original skipped this, leaving args as an empty tuple.)
        Exception.__init__(self, msg)
        self.msg = msg

    def __str__(self):
        return "BZZDecoderError: %s" % (self.msg)
# This table has been designed for the ZPCoder
# * by running the following command in file 'zptable.sn':
# * (fast-crude (steady-mat 0.0035 0.0002) 260)))
# Each row is a 4-tuple (p, m, up, dn), unpacked column-wise by
# BZZDecoder.newtable(): p and m are 16-bit interval increments/thresholds
# used by zpcodec_decode()/decode_sub(), while up and dn are the successor
# state indices applied on an MPS or LPS adaptation step respectively.
default_ztable = [ # {{{
    (0x8000, 0x0000, 84, 145),    # 000: p=0.500000 (    0,    0)
    (0x8000, 0x0000, 3, 4),       # 001: p=0.500000 (    0,    0)
    (0x8000, 0x0000, 4, 3),       # 002: p=0.500000 (    0,    0)
    (0x6bbd, 0x10a5, 5, 1),       # 003: p=0.465226 (    0,    0)
    (0x6bbd, 0x10a5, 6, 2),       # 004: p=0.465226 (    0,    0)
    (0x5d45, 0x1f28, 7, 3),       # 005: p=0.430708 (    0,    0)
    (0x5d45, 0x1f28, 8, 4),       # 006: p=0.430708 (    0,    0)
    (0x51b9, 0x2bd3, 9, 5),       # 007: p=0.396718 (    0,    0)
    (0x51b9, 0x2bd3, 10, 6),      # 008: p=0.396718 (    0,    0)
    (0x4813, 0x36e3, 11, 7),      # 009: p=0.363535 (    0,    0)
    (0x4813, 0x36e3, 12, 8),      # 010: p=0.363535 (    0,    0)
    (0x3fd5, 0x408c, 13, 9),      # 011: p=0.331418 (    0,    0)
    (0x3fd5, 0x408c, 14, 10),     # 012: p=0.331418 (    0,    0)
    (0x38b1, 0x48fd, 15, 11),     # 013: p=0.300585 (    0,    0)
    (0x38b1, 0x48fd, 16, 12),     # 014: p=0.300585 (    0,    0)
    (0x3275, 0x505d, 17, 13),     # 015: p=0.271213 (    0,    0)
    (0x3275, 0x505d, 18, 14),     # 016: p=0.271213 (    0,    0)
    (0x2cfd, 0x56d0, 19, 15),     # 017: p=0.243438 (    0,    0)
    (0x2cfd, 0x56d0, 20, 16),     # 018: p=0.243438 (    0,    0)
    (0x2825, 0x5c71, 21, 17),     # 019: p=0.217391 (    0,    0)
    (0x2825, 0x5c71, 22, 18),     # 020: p=0.217391 (    0,    0)
    (0x23ab, 0x615b, 23, 19),     # 021: p=0.193150 (    0,    0)
    (0x23ab, 0x615b, 24, 20),     # 022: p=0.193150 (    0,    0)
    (0x1f87, 0x65a5, 25, 21),     # 023: p=0.170728 (    0,    0)
    (0x1f87, 0x65a5, 26, 22),     # 024: p=0.170728 (    0,    0)
    (0x1bbb, 0x6962, 27, 23),     # 025: p=0.150158 (    0,    0)
    (0x1bbb, 0x6962, 28, 24),     # 026: p=0.150158 (    0,    0)
    (0x1845, 0x6ca2, 29, 25),     # 027: p=0.131418 (    0,    0)
    (0x1845, 0x6ca2, 30, 26),     # 028: p=0.131418 (    0,    0)
    (0x1523, 0x6f74, 31, 27),     # 029: p=0.114460 (    0,    0)
    (0x1523, 0x6f74, 32, 28),     # 030: p=0.114460 (    0,    0)
    (0x1253, 0x71e6, 33, 29),     # 031: p=0.099230 (    0,    0)
    (0x1253, 0x71e6, 34, 30),     # 032: p=0.099230 (    0,    0)
    (0x0fcf, 0x7404, 35, 31),     # 033: p=0.085611 (    0,    0)
    (0x0fcf, 0x7404, 36, 32),     # 034: p=0.085611 (    0,    0)
    (0x0d95, 0x75d6, 37, 33),     # 035: p=0.073550 (    0,    0)
    (0x0d95, 0x75d6, 38, 34),     # 036: p=0.073550 (    0,    0)
    (0x0b9d, 0x7768, 39, 35),     # 037: p=0.062888 (    0,    0)
    (0x0b9d, 0x7768, 40, 36),     # 038: p=0.062888 (    0,    0)
    (0x09e3, 0x78c2, 41, 37),     # 039: p=0.053539 (    0,    0)
    (0x09e3, 0x78c2, 42, 38),     # 040: p=0.053539 (    0,    0)
    (0x0861, 0x79ea, 43, 39),     # 041: p=0.045365 (    0,    0)
    (0x0861, 0x79ea, 44, 40),     # 042: p=0.045365 (    0,    0)
    (0x0711, 0x7ae7, 45, 41),     # 043: p=0.038272 (    0,    0)
    (0x0711, 0x7ae7, 46, 42),     # 044: p=0.038272 (    0,    0)
    (0x05f1, 0x7bbe, 47, 43),     # 045: p=0.032174 (    0,    0)
    (0x05f1, 0x7bbe, 48, 44),     # 046: p=0.032174 (    0,    0)
    (0x04f9, 0x7c75, 49, 45),     # 047: p=0.026928 (    0,    0)
    (0x04f9, 0x7c75, 50, 46),     # 048: p=0.026928 (    0,    0)
    (0x0425, 0x7d0f, 51, 47),     # 049: p=0.022444 (    0,    0)
    (0x0425, 0x7d0f, 52, 48),     # 050: p=0.022444 (    0,    0)
    (0x0371, 0x7d91, 53, 49),     # 051: p=0.018636 (    0,    0)
    (0x0371, 0x7d91, 54, 50),     # 052: p=0.018636 (    0,    0)
    (0x02d9, 0x7dfe, 55, 51),     # 053: p=0.015421 (    0,    0)
    (0x02d9, 0x7dfe, 56, 52),     # 054: p=0.015421 (    0,    0)
    (0x0259, 0x7e5a, 57, 53),     # 055: p=0.012713 (    0,    0)
    (0x0259, 0x7e5a, 58, 54),     # 056: p=0.012713 (    0,    0)
    (0x01ed, 0x7ea6, 59, 55),     # 057: p=0.010419 (    0,    0)
    (0x01ed, 0x7ea6, 60, 56),     # 058: p=0.010419 (    0,    0)
    (0x0193, 0x7ee6, 61, 57),     # 059: p=0.008525 (    0,    0)
    (0x0193, 0x7ee6, 62, 58),     # 060: p=0.008525 (    0,    0)
    (0x0149, 0x7f1a, 63, 59),     # 061: p=0.006959 (    0,    0)
    (0x0149, 0x7f1a, 64, 60),     # 062: p=0.006959 (    0,    0)
    (0x010b, 0x7f45, 65, 61),     # 063: p=0.005648 (    0,    0)
    (0x010b, 0x7f45, 66, 62),     # 064: p=0.005648 (    0,    0)
    (0x00d5, 0x7f6b, 67, 63),     # 065: p=0.004506 (    0,    0)
    (0x00d5, 0x7f6b, 68, 64),     # 066: p=0.004506 (    0,    0)
    (0x00a5, 0x7f8d, 69, 65),     # 067: p=0.003480 (    0,    0)
    (0x00a5, 0x7f8d, 70, 66),     # 068: p=0.003480 (    0,    0)
    (0x007b, 0x7faa, 71, 67),     # 069: p=0.002602 (    0,    0)
    (0x007b, 0x7faa, 72, 68),     # 070: p=0.002602 (    0,    0)
    (0x0057, 0x7fc3, 73, 69),     # 071: p=0.001843 (    0,    0)
    (0x0057, 0x7fc3, 74, 70),     # 072: p=0.001843 (    0,    0)
    (0x003b, 0x7fd7, 75, 71),     # 073: p=0.001248 (    0,    0)
    (0x003b, 0x7fd7, 76, 72),     # 074: p=0.001248 (    0,    0)
    (0x0023, 0x7fe7, 77, 73),     # 075: p=0.000749 (    0,    0)
    (0x0023, 0x7fe7, 78, 74),     # 076: p=0.000749 (    0,    0)
    (0x0013, 0x7ff2, 79, 75),     # 077: p=0.000402 (    0,    0)
    (0x0013, 0x7ff2, 80, 76),     # 078: p=0.000402 (    0,    0)
    (0x0007, 0x7ffa, 81, 77),     # 079: p=0.000153 (    0,    0)
    (0x0007, 0x7ffa, 82, 78),     # 080: p=0.000153 (    0,    0)
    (0x0001, 0x7fff, 81, 79),     # 081: p=0.000027 (    0,    0)
    (0x0001, 0x7fff, 82, 80),     # 082: p=0.000027 (    0,    0)
    (0x5695, 0x0000, 9, 85),      # 083: p=0.411764 (    2,    3)
    (0x24ee, 0x0000, 86, 226),    # 084: p=0.199988 (    1,    0)
    (0x8000, 0x0000, 5, 6),       # 085: p=0.500000 (    3,    3)
    (0x0d30, 0x0000, 88, 176),    # 086: p=0.071422 (    4,    0)
    (0x481a, 0x0000, 89, 143),    # 087: p=0.363634 (    1,    2)
    (0x0481, 0x0000, 90, 138),    # 088: p=0.024388 (   13,    0)
    (0x3579, 0x0000, 91, 141),    # 089: p=0.285711 (    1,    3)
    (0x017a, 0x0000, 92, 112),    # 090: p=0.007999 (   41,    0)
    (0x24ef, 0x0000, 93, 135),    # 091: p=0.199997 (    1,    5)
    (0x007b, 0x0000, 94, 104),    # 092: p=0.002611 (  127,    0)
    (0x1978, 0x0000, 95, 133),    # 093: p=0.137929 (    1,    8)
    (0x0028, 0x0000, 96, 100),    # 094: p=0.000849 (  392,    0)
    (0x10ca, 0x0000, 97, 129),    # 095: p=0.090907 (    1,   13)
    (0x000d, 0x0000, 82, 98),     # 096: p=0.000276 ( 1208,    0)
    (0x0b5d, 0x0000, 99, 127),    # 097: p=0.061537 (    1,   20)
    (0x0034, 0x0000, 76, 72),     # 098: p=0.001102 ( 1208,    1)
    (0x078a, 0x0000, 101, 125),   # 099: p=0.040815 (    1,   31)
    (0x00a0, 0x0000, 70, 102),    # 100: p=0.003387 (  392,    1)
    (0x050f, 0x0000, 103, 123),   # 101: p=0.027397 (    1,   47)
    (0x0117, 0x0000, 66, 60),     # 102: p=0.005912 (  392,    2)
    (0x0358, 0x0000, 105, 121),   # 103: p=0.018099 (    1,   72)
    (0x01ea, 0x0000, 106, 110),   # 104: p=0.010362 (  127,    1)
    (0x0234, 0x0000, 107, 119),   # 105: p=0.011940 (    1,  110)
    (0x0144, 0x0000, 66, 108),    # 106: p=0.006849 (  193,    1)
    (0x0173, 0x0000, 109, 117),   # 107: p=0.007858 (    1,  168)
    (0x0234, 0x0000, 60, 54),     # 108: p=0.011925 (  193,    2)
    (0x00f5, 0x0000, 111, 115),   # 109: p=0.005175 (    1,  256)
    (0x0353, 0x0000, 56, 48),     # 110: p=0.017995 (  127,    2)
    (0x00a1, 0x0000, 69, 113),    # 111: p=0.003413 (    1,  389)
    (0x05c5, 0x0000, 114, 134),   # 112: p=0.031249 (   41,    1)
    (0x011a, 0x0000, 65, 59),     # 113: p=0.005957 (    2,  389)
    (0x03cf, 0x0000, 116, 132),   # 114: p=0.020618 (   63,    1)
    (0x01aa, 0x0000, 61, 55),     # 115: p=0.009020 (    2,  256)
    (0x0285, 0x0000, 118, 130),   # 116: p=0.013652 (   96,    1)
    (0x0286, 0x0000, 57, 51),     # 117: p=0.013672 (    2,  168)
    (0x01ab, 0x0000, 120, 128),   # 118: p=0.009029 (  146,    1)
    (0x03d3, 0x0000, 53, 47),     # 119: p=0.020710 (    2,  110)
    (0x011a, 0x0000, 122, 126),   # 120: p=0.005961 (  222,    1)
    (0x05c5, 0x0000, 49, 41),     # 121: p=0.031250 (    2,   72)
    (0x00ba, 0x0000, 124, 62),    # 122: p=0.003925 (  338,    1)
    (0x08ad, 0x0000, 43, 37),     # 123: p=0.046979 (    2,   47)
    (0x007a, 0x0000, 72, 66),     # 124: p=0.002586 (  514,    1)
    (0x0ccc, 0x0000, 39, 31),     # 125: p=0.069306 (    2,   31)
    (0x01eb, 0x0000, 60, 54),     # 126: p=0.010386 (  222,    2)
    (0x1302, 0x0000, 33, 25),     # 127: p=0.102940 (    2,   20)
    (0x02e6, 0x0000, 56, 50),     # 128: p=0.015695 (  146,    2)
    (0x1b81, 0x0000, 29, 131),    # 129: p=0.148935 (    2,   13)
    (0x045e, 0x0000, 52, 46),     # 130: p=0.023648 (   96,    2)
    (0x24ef, 0x0000, 23, 17),     # 131: p=0.199999 (    3,   13)
    (0x0690, 0x0000, 48, 40),     # 132: p=0.035533 (   63,    2)
    (0x2865, 0x0000, 23, 15),     # 133: p=0.218748 (    2,    8)
    (0x09de, 0x0000, 42, 136),    # 134: p=0.053434 (   41,    2)
    (0x3987, 0x0000, 137, 7),     # 135: p=0.304346 (    2,    5)
    (0x0dc8, 0x0000, 38, 32),     # 136: p=0.074626 (   41,    3)
    (0x2c99, 0x0000, 21, 139),    # 137: p=0.241378 (    2,    7)
    (0x10ca, 0x0000, 140, 172),   # 138: p=0.090907 (   13,    1)
    (0x3b5f, 0x0000, 15, 9),      # 139: p=0.312499 (    3,    7)
    (0x0b5d, 0x0000, 142, 170),   # 140: p=0.061537 (   20,    1)
    (0x5695, 0x0000, 9, 85),      # 141: p=0.411764 (    2,    3)
    (0x078a, 0x0000, 144, 168),   # 142: p=0.040815 (   31,    1)
    (0x8000, 0x0000, 141, 248),   # 143: p=0.500000 (    2,    2)
    (0x050f, 0x0000, 146, 166),   # 144: p=0.027397 (   47,    1)
    (0x24ee, 0x0000, 147, 247),   # 145: p=0.199988 (    0,    1)
    (0x0358, 0x0000, 148, 164),   # 146: p=0.018099 (   72,    1)
    (0x0d30, 0x0000, 149, 197),   # 147: p=0.071422 (    0,    4)
    (0x0234, 0x0000, 150, 162),   # 148: p=0.011940 (  110,    1)
    (0x0481, 0x0000, 151, 95),    # 149: p=0.024388 (    0,   13)
    (0x0173, 0x0000, 152, 160),   # 150: p=0.007858 (  168,    1)
    (0x017a, 0x0000, 153, 173),   # 151: p=0.007999 (    0,   41)
    (0x00f5, 0x0000, 154, 158),   # 152: p=0.005175 (  256,    1)
    (0x007b, 0x0000, 155, 165),   # 153: p=0.002611 (    0,  127)
    (0x00a1, 0x0000, 70, 156),    # 154: p=0.003413 (  389,    1)
    (0x0028, 0x0000, 157, 161),   # 155: p=0.000849 (    0,  392)
    (0x011a, 0x0000, 66, 60),     # 156: p=0.005957 (  389,    2)
    (0x000d, 0x0000, 81, 159),    # 157: p=0.000276 (    0, 1208)
    (0x01aa, 0x0000, 62, 56),     # 158: p=0.009020 (  256,    2)
    (0x0034, 0x0000, 75, 71),     # 159: p=0.001102 (    1, 1208)
    (0x0286, 0x0000, 58, 52),     # 160: p=0.013672 (  168,    2)
    (0x00a0, 0x0000, 69, 163),    # 161: p=0.003387 (    1,  392)
    (0x03d3, 0x0000, 54, 48),     # 162: p=0.020710 (  110,    2)
    (0x0117, 0x0000, 65, 59),     # 163: p=0.005912 (    2,  392)
    (0x05c5, 0x0000, 50, 42),     # 164: p=0.031250 (   72,    2)
    (0x01ea, 0x0000, 167, 171),   # 165: p=0.010362 (    1,  127)
    (0x08ad, 0x0000, 44, 38),     # 166: p=0.046979 (   47,    2)
    (0x0144, 0x0000, 65, 169),    # 167: p=0.006849 (    1,  193)
    (0x0ccc, 0x0000, 40, 32),     # 168: p=0.069306 (   31,    2)
    (0x0234, 0x0000, 59, 53),     # 169: p=0.011925 (    2,  193)
    (0x1302, 0x0000, 34, 26),     # 170: p=0.102940 (   20,    2)
    (0x0353, 0x0000, 55, 47),     # 171: p=0.017995 (    2,  127)
    (0x1b81, 0x0000, 30, 174),    # 172: p=0.148935 (   13,    2)
    (0x05c5, 0x0000, 175, 193),   # 173: p=0.031249 (    1,   41)
    (0x24ef, 0x0000, 24, 18),     # 174: p=0.199999 (   13,    3)
    (0x03cf, 0x0000, 177, 191),   # 175: p=0.020618 (    1,   63)
    (0x2b74, 0x0000, 178, 222),   # 176: p=0.235291 (    4,    1)
    (0x0285, 0x0000, 179, 189),   # 177: p=0.013652 (    1,   96)
    (0x201d, 0x0000, 180, 218),   # 178: p=0.173910 (    6,    1)
    (0x01ab, 0x0000, 181, 187),   # 179: p=0.009029 (    1,  146)
    (0x1715, 0x0000, 182, 216),   # 180: p=0.124998 (    9,    1)
    (0x011a, 0x0000, 183, 185),   # 181: p=0.005961 (    1,  222)
    (0x0fb7, 0x0000, 184, 214),   # 182: p=0.085105 (   14,    1)
    (0x00ba, 0x0000, 69, 61),     # 183: p=0.003925 (    1,  338)
    (0x0a67, 0x0000, 186, 212),   # 184: p=0.056337 (   22,    1)
    (0x01eb, 0x0000, 59, 53),     # 185: p=0.010386 (    2,  222)
    (0x06e7, 0x0000, 188, 210),   # 186: p=0.037382 (   34,    1)
    (0x02e6, 0x0000, 55, 49),     # 187: p=0.015695 (    2,  146)
    (0x0496, 0x0000, 190, 208),   # 188: p=0.024844 (   52,    1)
    (0x045e, 0x0000, 51, 45),     # 189: p=0.023648 (    2,   96)
    (0x030d, 0x0000, 192, 206),   # 190: p=0.016529 (   79,    1)
    (0x0690, 0x0000, 47, 39),     # 191: p=0.035533 (    2,   63)
    (0x0206, 0x0000, 194, 204),   # 192: p=0.010959 (  120,    1)
    (0x09de, 0x0000, 41, 195),    # 193: p=0.053434 (    2,   41)
    (0x0155, 0x0000, 196, 202),   # 194: p=0.007220 (  183,    1)
    (0x0dc8, 0x0000, 37, 31),     # 195: p=0.074626 (    3,   41)
    (0x00e1, 0x0000, 198, 200),   # 196: p=0.004750 (  279,    1)
    (0x2b74, 0x0000, 199, 243),   # 197: p=0.235291 (    1,    4)
    (0x0094, 0x0000, 72, 64),     # 198: p=0.003132 (  424,    1)
    (0x201d, 0x0000, 201, 239),   # 199: p=0.173910 (    1,    6)
    (0x0188, 0x0000, 62, 56),     # 200: p=0.008284 (  279,    2)
    (0x1715, 0x0000, 203, 237),   # 201: p=0.124998 (    1,    9)
    (0x0252, 0x0000, 58, 52),     # 202: p=0.012567 (  183,    2)
    (0x0fb7, 0x0000, 205, 235),   # 203: p=0.085105 (    1,   14)
    (0x0383, 0x0000, 54, 48),     # 204: p=0.019021 (  120,    2)
    (0x0a67, 0x0000, 207, 233),   # 205: p=0.056337 (    1,   22)
    (0x0547, 0x0000, 50, 44),     # 206: p=0.028571 (   79,    2)
    (0x06e7, 0x0000, 209, 231),   # 207: p=0.037382 (    1,   34)
    (0x07e2, 0x0000, 46, 38),     # 208: p=0.042682 (   52,    2)
    (0x0496, 0x0000, 211, 229),   # 209: p=0.024844 (    1,   52)
    (0x0bc0, 0x0000, 40, 34),     # 210: p=0.063636 (   34,    2)
    (0x030d, 0x0000, 213, 227),   # 211: p=0.016529 (    1,   79)
    (0x1178, 0x0000, 36, 28),     # 212: p=0.094593 (   22,    2)
    (0x0206, 0x0000, 215, 225),   # 213: p=0.010959 (    1,  120)
    (0x19da, 0x0000, 30, 22),     # 214: p=0.139999 (   14,    2)
    (0x0155, 0x0000, 217, 223),   # 215: p=0.007220 (    1,  183)
    (0x24ef, 0x0000, 26, 16),     # 216: p=0.199998 (    9,    2)
    (0x00e1, 0x0000, 219, 221),   # 217: p=0.004750 (    1,  279)
    (0x320e, 0x0000, 20, 220),    # 218: p=0.269229 (    6,    2)
    (0x0094, 0x0000, 71, 63),     # 219: p=0.003132 (    1,  424)
    (0x432a, 0x0000, 14, 8),      # 220: p=0.344827 (    6,    3)
    (0x0188, 0x0000, 61, 55),     # 221: p=0.008284 (    2,  279)
    (0x447d, 0x0000, 14, 224),    # 222: p=0.349998 (    4,    2)
    (0x0252, 0x0000, 57, 51),     # 223: p=0.012567 (    2,  183)
    (0x5ece, 0x0000, 8, 2),       # 224: p=0.434782 (    4,    3)
    (0x0383, 0x0000, 53, 47),     # 225: p=0.019021 (    2,  120)
    (0x8000, 0x0000, 228, 87),    # 226: p=0.500000 (    1,    1)
    (0x0547, 0x0000, 49, 43),     # 227: p=0.028571 (    2,   79)
    (0x481a, 0x0000, 230, 246),   # 228: p=0.363634 (    2,    1)
    (0x07e2, 0x0000, 45, 37),     # 229: p=0.042682 (    2,   52)
    (0x3579, 0x0000, 232, 244),   # 230: p=0.285711 (    3,    1)
    (0x0bc0, 0x0000, 39, 33),     # 231: p=0.063636 (    2,   34)
    (0x24ef, 0x0000, 234, 238),   # 232: p=0.199997 (    5,    1)
    (0x1178, 0x0000, 35, 27),     # 233: p=0.094593 (    2,   22)
    (0x1978, 0x0000, 138, 236),   # 234: p=0.137929 (    8,    1)
    (0x19da, 0x0000, 29, 21),     # 235: p=0.139999 (    2,   14)
    (0x2865, 0x0000, 24, 16),     # 236: p=0.218748 (    8,    2)
    (0x24ef, 0x0000, 25, 15),     # 237: p=0.199998 (    2,    9)
    (0x3987, 0x0000, 240, 8),     # 238: p=0.304346 (    5,    2)
    (0x320e, 0x0000, 19, 241),    # 239: p=0.269229 (    2,    6)
    (0x2c99, 0x0000, 22, 242),    # 240: p=0.241378 (    7,    2)
    (0x432a, 0x0000, 13, 7),      # 241: p=0.344827 (    3,    6)
    (0x3b5f, 0x0000, 16, 10),     # 242: p=0.312499 (    7,    3)
    (0x447d, 0x0000, 13, 245),    # 243: p=0.349998 (    2,    4)
    (0x5695, 0x0000, 10, 2),      # 244: p=0.411764 (    3,    2)
    (0x5ece, 0x0000, 7, 1),       # 245: p=0.434782 (    3,    4)
    (0x8000, 0x0000, 244, 83),    # 246: p=0.500000 (    2,    2)
    (0x8000, 0x0000, 249, 250),   # 247: p=0.500000 (    1,    1)
    (0x5695, 0x0000, 10, 2),      # 248: p=0.411764 (    3,    2)
    (0x481a, 0x0000, 89, 143),    # 249: p=0.363634 (    1,    2)
    (0x481a, 0x0000, 230, 246),   # 250: p=0.363634 (    2,    1)
    # Five all-zero entries pad the table to the 256 rows read by
    # BZZDecoder.newtable().
    (0, 0, 0, 0),
    (0, 0, 0, 0),
    (0, 0, 0, 0),
    (0, 0, 0, 0),
    (0, 0, 0, 0),
]
# The initial move-to-front table is simply the identity mapping: entry i
# holds byte value i, for all 256 byte values.  decode() copies it into a
# mutable list before rotating it.
xmtf = tuple(range(256))
# }}}
class BZZDecoder():
    """Pure-Python decoder for the BZZ compression format used inside DjVu
    files: a move-to-front scheme over a sort transform, driven by the ZP
    adaptive arithmetic coder.  Port of BSByteStream.cpp from the DjVu
    reference library (the C extension ``calibre_extensions.bzzdec`` is the
    fast path used elsewhere in this module).

    ``infile`` must be an indexable byte source (e.g. a bytearray);
    ``outfile`` must support ``extend()`` and receives decoded bytes from
    :meth:`convert`.
    """

    def __init__(self, infile, outfile):
        self.instream = infile
        self.inptr = 0  # read position inside instream
        self.outf = outfile
        self.ieof = False  # set once the terminating empty block is seen
        self.bptr = None  # read position inside outbuf
        self.xsize = None  # bytes remaining in the currently decoded block
        self.outbuf = [0] * (MAXBLOCK * 1024)  # per-block scratch buffer (4MB)
        self.byte = None  # last byte fetched by read_byte()
        self.scount = 0  # number of valid bits buffered in bufint
        self.delay = 25  # grace reads allowed past physical end of input
        self.a = 0  # ZP coder interval register (16 bit)
        self.code = 0  # ZP coder code register (16 bit)
        self.bufint = 0  # bit buffer feeding the code register
        self.ctx = [0] * 300  # adaptive context states for the ZP coder
        # table
        self.p = [0] * 256
        self.m = [0] * 256
        self.up = [0] * 256
        self.dn = [0] * 256
        # machine independent ffz
        self.ffzt = [0] * 256
        # Create machine independent ffz table
        # (ffzt[i] = number of consecutive 1-bits at the top of byte i)
        for i in range(256):
            j = i
            while(j & 0x80):
                self.ffzt[i] += 1
                j <<= 1
        # Initialize table
        self.newtable(default_ztable)
        # Codebit counter
        # Read first 16 bits of code
        if not self.read_byte():
            self.byte = 0xff
        self.code = (self.byte << 8)
        if not self.read_byte():
            self.byte = 0xff
        self.code = self.code | self.byte
        # Preload buffer
        self.preload()
        # Compute initial fence
        self.fence = self.code
        if self.code >= 0x8000:
            self.fence = 0x7fff

    def convert(self, sz):
        """Decode up to *sz* bytes into ``self.outf``.

        Returns the number of bytes actually copied; 0 once the end of the
        compressed stream has been reached.
        """
        if self.ieof:
            return 0
        copied = 0
        while sz > 0 and not self.ieof:
            # Decode if needed
            if not self.xsize:
                self.bptr = 0
                if not self.decode(): # input block size set in decode
                    # A zero-sized block marks the end of the stream.
                    self.xsize = 1
                    self.ieof = True
                self.xsize -= 1
            # Compute remaining
            remaining = min(sz, self.xsize)
            # Transfer
            if remaining > 0:
                self.outf.extend(self.outbuf[self.bptr:self.bptr + remaining])
            self.xsize -= remaining
            self.bptr += remaining
            sz -= remaining
            copied += remaining
            # offset += bytes; // for tell()
        return copied

    def preload(self):
        """Refill ``bufint`` until at least 25 bits are buffered.

        Past physical EOF, 0xff filler bytes are supplied for up to
        ``self.delay`` reads before BZZDecoderError is raised.
        """
        while self.scount <= 24:
            if not self.read_byte():
                self.byte = 0xff
                self.delay -= 1
                if self.delay < 1:
                    raise BZZDecoderError("BiteStream EOF")
            self.bufint = (self.bufint << 8) | self.byte
            self.scount += 8

    def newtable(self, table):
        """Load a ZP coder state table, splitting its (p, m, up, dn) rows
        into the four column arrays used during decoding."""
        for i in range(256):
            self.p[i] = table[i][0]
            self.m[i] = table[i][1]
            self.up[i] = table[i][2]
            self.dn[i] = table[i][3]

    def decode(self):
        """Decode one BZZ block into ``self.outbuf``.

        Returns the block size (including the marker byte) or 0 at the end
        of the stream; raises BZZDecoderError on corrupt data.
        """
        outbuf = self.outbuf
        # Decode block size
        self.xsize = self.decode_raw(24)
        if not self.xsize:
            return 0
        if self.xsize > MAXBLOCK * 1024: # 4MB (4096 * 1024) is max block
            raise BZZDecoderError("BiteStream.corrupt")
        # Decode estimation speed (0, 1 or 2 extra bits slow the MTF
        # frequency adaptation below).
        fshift = 0
        if self.zpcodec_decoder():
            fshift += 1
            if self.zpcodec_decoder():
                fshift += 1
        # Prepare Quasi MTF
        mtf = list(xmtf) # unsigned chars
        freq = [0] * FREQMAX
        fadd = 4
        # Decode
        mtfno = 3
        markerpos = -1
        def zc(i):
            return self.zpcodec_decode(self.ctx, i)
        def dc(i, bits):
            return self.decode_binary(self.ctx, i, bits)
        for i in range(self.xsize):
            # Decode the MTF rank of the next byte: a unary-ish cascade of
            # context-coded bits selects a bucket (0, 1, 2-3, 4-7, ... up
            # to 128-255), then dc() reads the offset within the bucket.
            ctxid = CTXIDS - 1
            if ctxid > mtfno:
                ctxid = mtfno
            if zc(ctxid):
                mtfno = 0
                outbuf[i] = mtf[mtfno]
            elif zc(ctxid + CTXIDS):
                mtfno = 1
                outbuf[i] = mtf[mtfno]
            elif zc(2*CTXIDS):
                mtfno = 2 + dc(2*CTXIDS + 1, 1)
                outbuf[i] = mtf[mtfno]
            elif zc(2*CTXIDS+2):
                mtfno = 4 + dc(2*CTXIDS+2 + 1, 2)
                outbuf[i] = mtf[mtfno]
            elif zc(2*CTXIDS + 6):
                mtfno = 8 + dc(2*CTXIDS + 6 + 1, 3)
                outbuf[i] = mtf[mtfno]
            elif zc(2*CTXIDS + 14):
                mtfno = 16 + dc(2*CTXIDS + 14 + 1, 4)
                outbuf[i] = mtf[mtfno]
            elif zc(2*CTXIDS + 30):
                mtfno = 32 + dc(2*CTXIDS + 30 + 1, 5)
                outbuf[i] = mtf[mtfno]
            elif zc(2*CTXIDS + 62):
                mtfno = 64 + dc(2*CTXIDS + 62 + 1, 6)
                outbuf[i] = mtf[mtfno]
            elif zc(2*CTXIDS + 126):
                mtfno = 128 + dc(2*CTXIDS + 126 + 1, 7)
                outbuf[i] = mtf[mtfno]
            else:
                mtfno = 256 # EOB
                outbuf[i] = 0
                markerpos = i
                continue
            # Rotate mtf according to empirical frequencies (new!)
            # :rotate label
            # Adjust frequencies for overflow
            fadd = fadd + (fadd >> fshift)
            if fadd > 0x10000000:
                fadd >>= 24
                freq[0] >>= 24
                freq[1] >>= 24
                freq[2] >>= 24
                freq[3] >>= 24
                for k in range(4, FREQMAX):
                    freq[k] = freq[k] >> 24
            # Relocate new char according to new freq
            fc = fadd
            if mtfno < FREQMAX:
                fc += freq[mtfno]
            k = mtfno
            while (k >= FREQMAX):
                mtf[k] = mtf[k - 1]
                k -= 1
            while (k > 0 and fc >= freq[k - 1]):
                mtf[k] = mtf[k - 1]
                freq[k] = freq[k - 1]
                k -= 1
            mtf[k] = outbuf[i]
            freq[k] = fc
        # ///////////////////////////////
        # //////// Reconstruct the string
        if markerpos < 1 or markerpos >= self.xsize:
            raise BZZDecoderError("BiteStream.corrupt")
        # Allocate pointers
        posn = [0] * self.xsize
        # Prepare count buffer
        count = [0] * 256
        # Fill count buffer
        # (each posn entry packs the byte value in the top 8 bits and its
        # occurrence number in the low 24 bits; the marker is skipped)
        for i in range(markerpos):
            c = outbuf[i]
            posn[i] = (c << 24) | (count[c] & 0xffffff)
            count[c] += 1
        for i in range(markerpos + 1, self.xsize):
            c = outbuf[i]
            posn[i] = (c << 24) | (count[c] & 0xffffff)
            count[c] += 1
        # Compute sorted char positions
        last = 1
        for i in range(256):
            tmp = count[i]
            count[i] = last
            last += tmp
        # Undo the sort transform
        i = 0
        last = self.xsize - 1
        while last > 0:
            n = posn[i]
            c = (posn[i] >> 24)
            last -= 1
            outbuf[last] = c
            i = count[c] + (n & 0xffffff)
        # Free and check
        # (a correct traversal must terminate exactly at the marker)
        if i != markerpos:
            raise BZZDecoderError("BiteStream.corrupt")
        return self.xsize

    def decode_raw(self, bits):
        """Decode *bits* equiprobable (pass-through) bits, MSB first, and
        return them as an unsigned integer."""
        n = 1
        m = (1 << bits)
        while n < m:
            b = self.zpcodec_decoder()
            n = (n << 1) | b
        return n - m

    def decode_binary(self, ctx, index, bits):
        """Decode *bits* bits using the adaptive contexts rooted at
        ``ctx[index]`` and return them as an unsigned integer."""
        n = 1
        m = (1 << bits)
        while n < m:
            b = self.zpcodec_decode(ctx, index + n - 1)
            n = (n << 1) | b
        return n - m

    def zpcodec_decoder(self):
        """Decode one bit with fixed probability 1/2 (no context)."""
        return self.decode_sub_simple(0, 0x8000 + (self.a >> 1))

    def decode_sub_simple(self, mps, z):
        """Non-adaptive slow path of the ZP coder: resolve one bit given
        the tentative interval split *z*, then renormalize."""
        # Test MPS/LPS
        if z > self.code:
            # LPS branch
            z = 0x10000 - z
            self.a += +z
            self.code = self.code + z
            # LPS renormalization
            shift = self.ffz()
            self.scount -= shift
            self.a = self.a << shift
            self.a &= 0xffff
            self.code = (self.code << shift) | ((self.bufint >> self.scount) & ((1 << shift) - 1))
            self.code &= 0xffff
            if self.scount < 16:
                self.preload()
            # Adjust fence
            self.fence = self.code
            if self.code >= 0x8000:
                self.fence = 0x7fff
            result = mps ^ 1
        else:
            # MPS renormalization
            self.scount -= 1
            self.a = (z << 1) & 0xffff
            self.code = ((self.code << 1) | ((self.bufint >> self.scount) & 1))
            self.code &= 0xffff
            if self.scount < 16:
                self.preload()
            # Adjust fence
            self.fence = self.code
            if self.code >= 0x8000:
                self.fence = 0x7fff
            result = mps
        return result

    def decode_sub(self, ctx, index, z):
        """Adaptive slow path of the ZP coder: resolve one bit for context
        ``ctx[index]``, update the context state (up/dn transition), and
        renormalize."""
        # Save bit
        bit = (ctx[index] & 1)
        # Avoid interval reversion
        d = 0x6000 + ((z + self.a) >> 2)
        if z > d:
            z = d
        # Test MPS/LPS
        if z > self.code:
            # LPS branch
            z = 0x10000 - z
            self.a += +z
            self.code = self.code + z
            # LPS adaptation
            ctx[index] = self.dn[ctx[index]]
            # LPS renormalization
            shift = self.ffz()
            self.scount -= shift
            self.a = (self.a << shift) & 0xffff
            self.code = ((self.code << shift) | ((self.bufint >> self.scount) & ((1 << shift) - 1))) & 0xffff
            if self.scount < 16:
                self.preload()
            # Adjust fence
            self.fence = self.code
            if self.code >= 0x8000:
                self.fence = 0x7fff
            return bit ^ 1
        else:
            # MPS adaptation
            if self.a >= self.m[ctx[index]]:
                ctx[index] = self.up[ctx[index]]
            # MPS renormalization
            self.scount -= 1
            self.a = z << 1 & 0xffff
            self.code = ((self.code << 1) | ((self.bufint >> self.scount) & 1)) & 0xffff
            if self.scount < 16:
                self.preload()
            # Adjust fence
            self.fence = self.code
            if self.code >= 0x8000:
                self.fence = 0x7fff
            return bit

    def zpcodec_decode(self, ctx, index):
        """Decode one bit using adaptive context ``ctx[index]``; takes the
        fast path (no renormalization) while z stays below the fence."""
        z = self.a + self.p[ctx[index]]
        if z <= self.fence:
            self.a = z
            res = (ctx[index] & 1)
        else:
            res = self.decode_sub(ctx, index, z)
        return res

    def read_byte(self):
        """Fetch the next input byte into ``self.byte``; return False once
        the input is exhausted."""
        try:
            self.byte = self.instream[self.inptr]
            self.inptr += 1
            return True
        except IndexError:
            return False

    def ffz(self):
        """Return the number of leading 1-bits of the 16-bit register
        ``self.a`` (find-first-zero), via the precomputed byte table."""
        x = self.a
        if (x >= 0xff00):
            return (self.ffzt[x & 0xff] + 8)
        else:
            return (self.ffzt[(x >> 8) & 0xff])
# for testing
def main():
    """Decompress the BZZ file named on the command line and print the
    result, using the C extension decoder."""
    import sys
    from calibre_extensions import bzzdec
    with open(sys.argv[1], "rb") as stream:
        compressed = stream.read()
    print(bzzdec.decompress(compressed))


if __name__ == "__main__":
    main()
| 32,859 | Python | .py | 704 | 39.694602 | 109 | 0.505558 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,348 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/djvu/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Anthon van der Neut <anthon@mnt.org>'
__docformat__ = 'restructuredtext en'
'''
Used for DJVU input
'''
| 178 | Python | .py | 7 | 23.857143 | 60 | 0.664671 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,349 | djvu.py | kovidgoyal_calibre/src/calibre/ebooks/djvu/djvu.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Anthon van der Neut <A.van.der.Neut@ruamel.eu>'
# this code is based on:
# Lizardtech DjVu Reference
# DjVu v3
# November 2005
import struct
import sys
from calibre.ebooks.djvu.djvubzzdec import BZZDecoder
class DjvuChunk:
    """One IFF-style chunk of a DjVu document.

    Parses the chunk header at ``start`` in ``buf`` (a 4-byte type tag
    followed by a 4-byte length and, for container FORM chunks, a 4-byte
    subtype), then recursively parses the children of FORM chunks.
    ``dump()`` walks the resulting tree.
    """

    def __init__(self, buf, start, end, align=True, bigendian=True,
            inclheader=False, verbose=0):
        from calibre_extensions import bzzdec
        self.speedup = bzzdec  # C implementation of the BZZ decoder
        self.subtype = None
        self._subchunks = []
        self.buf = buf
        pos = start + 4
        self.type = buf[start:pos]  # 4-byte chunk type tag, e.g. b'FORM', b'TXTz'
        self.align = align # whether to align to word (2-byte) boundaries
        self.headersize = 0 if inclheader else 8
        if bigendian:
            self.strflag = b'>'
        else:
            self.strflag = b'<'
        oldpos, pos = pos, pos+4
        # 4-byte chunk payload length follows the type tag.
        self.size = struct.unpack(self.strflag+b'L', buf[oldpos:pos])[0]
        self.dataend = pos + self.size - (8 if inclheader else 0)
        if self.type == b'FORM':
            oldpos, pos = pos, pos+4
            # print oldpos, pos
            self.subtype = buf[oldpos:pos]
            # self.headersize += 4
        self.datastart = pos
        if verbose > 0:
            print('found', self.type, self.subtype, pos, self.size)
        if self.type in b'FORM'.split():
            if verbose > 0:
                print('processing substuff %d %d (%x)' % (pos, self.dataend,
                    self.dataend))
            numchunks = 0
            while pos < self.dataend:
                x = DjvuChunk(buf, pos, start+self.size, verbose=verbose)
                numchunks += 1
                self._subchunks.append(x)
                # Children are word-aligned: odd-sized chunks carry one
                # padding byte that must be skipped.
                newpos = pos + x.size + x.headersize + (1 if (x.size % 2) else 0)
                if verbose > 0:
                    print('newpos %d %d (%x, %x) %d' % (newpos, self.dataend,
                        newpos, self.dataend, x.headersize))
                pos = newpos
            if verbose > 0:
                print(' end of chunk %d (%x)' % (pos, pos))

    def dump(self, verbose=0, indent=1, out=None, txtout=None, maxlevel=100):
        """Walk the chunk tree.

        ``out``: optional binary stream receiving an indented outline of
            the chunk structure.
        ``txtout``: optional binary stream receiving the text layer: TXTz
            chunks are BZZ-decompressed, TXTa chunks are taken verbatim.
            In both cases the first three bytes of the payload are a
            big-endian text length, and each text block written is
            terminated with a 0x1f separator byte.
        ``maxlevel``: recursion depth limit for the outline.
        """
        if out:
            out.write(b' ' * indent)
            out.write(b'%s%s [%d]\n' % (self.type,
                b':' + self.subtype if self.subtype else b'', self.size))
        if txtout and self.type == b'TXTz':
            if True:
                # Use the C BZZ decode implementation
                txtout.write(self.speedup.decompress(self.buf[self.datastart:self.dataend]))
            else:
                # Unreachable reference path: the pure-Python decoder.
                inbuf = bytearray(self.buf[self.datastart: self.dataend])
                outbuf = bytearray()
                decoder = BZZDecoder(inbuf, outbuf)
                while True:
                    xxres = decoder.convert(1024 * 1024)
                    if not xxres:
                        break
                res = bytes(outbuf)
                if not res.strip(b'\0'):
                    raise ValueError('TXTz block is completely null')
                # First three bytes: big-endian length of the text.
                l = 0
                for x in bytearray(res[:3]):
                    l <<= 8
                    l += x
                if verbose > 0 and out:
                    print(l, file=out)
                txtout.write(res[3:3+l])
                txtout.write(b'\037')
        if txtout and self.type == b'TXTa':
            res = self.buf[self.datastart: self.dataend]
            # First three bytes: big-endian length of the text.
            l = 0
            for x in bytearray(res[:3]):
                l <<= 8
                l += x
            if verbose > 0 and out:
                print(l, file=out)
            txtout.write(res[3:3+l])
            txtout.write(b'\037')
        if indent >= maxlevel:
            return
        for schunk in self._subchunks:
            schunk.dump(verbose=verbose, indent=indent+1, out=out, txtout=txtout)
class DJVUFile:
    """Thin wrapper around a whole DjVu document: checks the AT&T magic
    bytes, then parses the rest of the stream into a DjvuChunk tree."""

    def __init__(self, instream, verbose=0):
        self.instream = instream
        magic = self.instream.read(4)
        assert(magic == b'AT&T')
        data = self.instream.read()
        self.dc = DjvuChunk(data, 0, len(data), verbose=verbose)

    def get_text(self, outfile=None):
        """Write the document's text layer (TXT* chunks) to *outfile*."""
        self.dc.dump(txtout=outfile)

    def dump(self, outfile=None, maxlevel=0):
        """Write an outline of the chunk structure to *outfile*."""
        self.dc.dump(out=outfile, maxlevel=maxlevel)
def main():
    """CLI entry point: dump the text layer of the DjVu file named last on
    the command line to stdout."""
    document = DJVUFile(open(sys.argv[-1], 'rb'))
    print(document.get_text(sys.stdout))


if __name__ == '__main__':
    main()
| 4,418 | Python | .py | 111 | 28.153153 | 92 | 0.521567 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,350 | writer.py | kovidgoyal_calibre/src/calibre/ebooks/rb/writer.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import io
import struct
import zlib
from PIL import Image
from calibre.constants import __appname__, __version__
from calibre.ebooks.rb import HEADER, unique_name
from calibre.ebooks.rb.rbml import RBMLizer
TEXT_RECORD_SIZE = 4096
class TocItem:
    """One entry of the RocketBook archive's table of contents."""

    def __init__(self, name, size, flags):
        # Record the TOC fields exactly as given by the caller.
        self.name, self.size, self.flags = name, size, flags
class RBWriter:
    """Serializes an OEB book into the RocketBook (.rb) container format."""

    def __init__(self, opts, log):
        self.opts = opts
        self.log = log
        self.name_map = {}  # original image href -> name inside the archive

    def write_content(self, oeb_book, out_stream, metadata=None):
        """Write the complete .rb archive for *oeb_book* to *out_stream*.

        Layout: fixed 0x128-byte header, TOC (44 bytes per entry), then the
        section payloads in TOC order.
        """
        info = [('info.info', self._info_section(metadata))]
        images = self._images(oeb_book.manifest)
        text_size, chunks = self._text(oeb_book)
        chunck_sizes = [len(x) for x in chunks]
        text = [('index.html', chunks)]
        hidx = [('index.hidx', ' ')]
        toc_items = []
        page_count = 0
        # Build the TOC: flag 8 marks the compressed text section (whose
        # stored size also covers the chunk-count/size header written
        # below), flag 2 the info section, flag 0 everything else.
        for name, data in info+text+hidx+images:
            page_count += 1
            size = len(data)
            if (name, data) in text:
                flags = 8
                size = 0
                for c in chunck_sizes:
                    size += c
                size += 8 + (len(chunck_sizes) * 4)
            elif (name, data) in info:
                flags = 2
            else:
                flags = 0
            # TOC names are fixed-width: NUL-padded/truncated to 32 bytes.
            toc_items.append(TocItem(name.ljust(32, '\x00')[:32], size, flags))
        self.log.debug('Writing file header...')
        out_stream.write(HEADER)
        out_stream.write(struct.pack('<I', 0))
        out_stream.write(struct.pack('<IH', 0, 0))
        # Offset 24: position of the TOC (right after the fixed header).
        out_stream.write(struct.pack('<I', 0x128))
        # Offset 28 (0x1c): total file size, patched at the end below.
        out_stream.write(struct.pack('<I', 0))
        # Zero-fill the remainder of the fixed 0x128-byte header.
        for i in range(0x20, 0x128, 4):
            out_stream.write(struct.pack('<I', 0))
        out_stream.write(struct.pack('<I', page_count))
        # Each TOC record is 44 bytes: 32-byte name + size + offset + flags.
        offset = out_stream.tell() + (len(toc_items) * 44)
        for item in toc_items:
            out_stream.write(item.name.encode('utf-8'))
            out_stream.write(struct.pack('<I', item.size))
            out_stream.write(struct.pack('<I', offset))
            out_stream.write(struct.pack('<I', item.flags))
            offset += item.size
        out_stream.write(info[0][1].encode('utf-8'))
        self.log.debug('Writing compressed RB HTHML...')
        # Compressed text with proper heading
        out_stream.write(struct.pack('<I', len(text[0][1])))
        out_stream.write(struct.pack('<I', text_size))
        for size in chunck_sizes:
            out_stream.write(struct.pack('<I', size))
        for chunk in text[0][1]:
            out_stream.write(chunk)
        self.log.debug('Writing images...')
        for item in hidx+images:
            w = item[1]
            if not isinstance(w, bytes):
                w = w.encode('utf-8')
            out_stream.write(w)
        # Patch the total file size into the header slot reserved above.
        total_size = out_stream.tell()
        out_stream.seek(0x1c)
        out_stream.write(struct.pack('<I', total_size))

    def _text(self, oeb_book):
        """Return (uncompressed_size, chunks): the book rendered as cp1252
        HTML, split into TEXT_RECORD_SIZE pieces, each zlib-compressed
        independently."""
        rbmlizer = RBMLizer(self.log, name_map=self.name_map)
        text = rbmlizer.extract_content(oeb_book, self.opts).encode('cp1252', 'xmlcharrefreplace')
        size = len(text)
        pages = []
        for i in range(0, (len(text) + TEXT_RECORD_SIZE-1) // TEXT_RECORD_SIZE):
            zobj = zlib.compressobj(9, zlib.DEFLATED, 13, 8, 0)
            pages.append(zobj.compress(text[i * TEXT_RECORD_SIZE : (i * TEXT_RECORD_SIZE) + TEXT_RECORD_SIZE]) + zobj.flush())
        return (size, pages)

    def _images(self, manifest):
        """Convert every raster image in *manifest* to grayscale PNG,
        assigning each a unique short name recorded in self.name_map.
        Returns a list of (name, png_bytes) pairs; failures are logged and
        skipped."""
        from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
        images = []
        used_names = []
        for item in manifest:
            if item.media_type in OEB_RASTER_IMAGES:
                try:
                    data = b''
                    im = Image.open(io.BytesIO(item.data)).convert('L')
                    data = io.BytesIO()
                    im.save(data, 'PNG')
                    data = data.getvalue()
                    name = '%s.png' % len(used_names)
                    name = unique_name(name, used_names)
                    used_names.append(name)
                    self.name_map[item.href] = name
                    images.append((name, data))
                except Exception as e:
                    self.log.error('Error: Could not include file %s because '
                        '%s.' % (item.href, e))
        return images

    def _info_section(self, metadata):
        """Build the text of the info.info section (key=value lines) from
        the book metadata."""
        text = 'TYPE=2\n'
        if metadata:
            if len(metadata.title) >= 1:
                text += 'TITLE=%s\n' % metadata.title[0].value
            if len(metadata.creator) >= 1:
                from calibre.ebooks.metadata import authors_to_string
                text += 'AUTHOR=%s\n' % authors_to_string([x.value for x in metadata.creator])
        text += f'GENERATOR={__appname__} - {__version__}\n'
        text += 'PARSE=1\n'
        text += 'OUTPUT=1\n'
        text += 'BODY=index.html\n'
        return text
| 5,107 | Python | .py | 121 | 31.190083 | 126 | 0.545381 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,351 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/rb/__init__.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
HEADER = b'\xb0\x0c\xb0\x0c\x02\x00NUVO\x00\x00\x00\x00'
class RocketBookError(Exception):
pass
def unique_name(name, used_names):
name = os.path.basename(name)
if len(name) < 32 and name not in used_names:
return name
else:
ext = os.path.splitext(name)[1][:3]
base_name = name[:22]
for i in range(0, 9999):
name = '{}-{}.{}'.format(str(i).rjust('0', 4)[:4], base_name, ext)
if name not in used_names:
break
return name
| 653 | Python | .py | 19 | 28 | 78 | 0.601911 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,352 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/rb/reader.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import os
import struct
import zlib
from calibre import CurrentDir
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.metadata.rb import get_metadata
from calibre.ebooks.rb import HEADER, RocketBookError
from polyglot.builtins import as_unicode
from polyglot.urllib import unquote
class RBToc(list):
    '''
    In-memory table of contents of a RocketBook file: a plain list of
    RBToc.Item records, one per file stored in the container.
    '''
    class Item:
        # One TOC record: the stored file's name, its size in bytes, the
        # absolute byte offset of its data within the container, and its
        # flags word (see Reader.get_text/get_image for flag handling).
        def __init__(self, name='', size=0, offset=0, flags=0):
            self.name = name
            self.size = size
            self.offset = offset
            self.flags = flags
class Reader:
    '''
    Reader for RocketBook (.rb) container files.

    On construction the container header is verified, the embedded metadata
    is read and the table of contents is parsed. extract_content() then
    writes the HTML pages and PNG images to a directory and generates a
    metadata.opf for them.
    '''
    def __init__(self, stream, log, encoding=None):
        '''
        :param stream: Seekable binary file-like object opened on the .rb file.
        :param log: Logger used for progress messages.
        :param encoding: Encoding of the stored HTML text; cp1252 is assumed
            when None.
        :raises RocketBookError: If the file is not a valid RocketBook file.
        '''
        self.stream = stream
        self.log = log
        self.encoding = encoding
        self.verify_file()
        self.mi = get_metadata(self.stream)
        self.toc = self.get_toc()
    def read_i32(self):
        # Read a little-endian unsigned 32 bit integer at the current offset.
        return struct.unpack('<I', self.stream.read(4))[0]
    def verify_file(self):
        '''Raise RocketBookError unless the magic header and the recorded
        file size (stored at offset 28) both check out.'''
        self.stream.seek(0)
        if self.stream.read(14) != HEADER:
            raise RocketBookError('Could not read file: %s. Does not contain a valid RocketBook Header.' % self.stream.name)
        self.stream.seek(28)
        size = self.read_i32()
        self.stream.seek(0, os.SEEK_END)
        real_size = self.stream.tell()
        if size != real_size:
            raise RocketBookError('File is corrupt. The file size recorded in the header does not match the actual file size.')
    def get_toc(self):
        '''Parse the table of contents: its offset is stored at byte 24,
        followed by a record count and fixed-size records of
        (32 byte NUL-padded name, size, offset, flags).'''
        self.stream.seek(24)
        toc_offset = self.read_i32()
        self.stream.seek(toc_offset)
        pages = self.read_i32()
        toc = RBToc()
        for i in range(pages):
            # Names are NUL padded; presumably %-quoted, hence the unquote
            # (polyglot's unquote accepts the bytes) -- TODO confirm.
            name = unquote(self.stream.read(32).strip(b'\x00'))
            size, offset, flags = self.read_i32(), self.read_i32(), self.read_i32()
            toc.append(RBToc.Item(name=name, size=size, offset=offset, flags=flags))
        return toc
    def get_text(self, toc_item, output_dir):
        '''Extract one HTML page described by toc_item into output_dir,
        decompressing zlib chunked data when flags == 8. Items with flags
        1 or 2 are skipped.'''
        if toc_item.flags in (1, 2):
            return
        output = ''
        self.stream.seek(toc_item.offset)
        if toc_item.flags == 8:
            # Compressed page: chunk count, uncompressed size, then the
            # compressed size of every chunk, followed by the chunks.
            count = self.read_i32()
            self.read_i32() # Uncompressed size.
            chunck_sizes = []
            for i in range(count):
                chunck_sizes.append(self.read_i32())
            for size in chunck_sizes:
                cm_chunck = self.stream.read(size)
                output += zlib.decompress(cm_chunck).decode('cp1252' if self.encoding is None else self.encoding, 'replace')
        else:
            output += self.stream.read(toc_item.size).decode('cp1252' if self.encoding is None else self.encoding, 'replace')
        with open(os.path.join(output_dir, toc_item.name.decode('utf-8')), 'wb') as html:
            # The inserted space guards downstream parsing of empty
            # <TITLE></TITLE> tags; output is re-encoded as UTF-8.
            html.write(output.replace('<TITLE>', '<TITLE> ').encode('utf-8'))
    def get_image(self, toc_item, output_dir):
        '''Extract one raw image (only items with flags == 0) to output_dir.'''
        if toc_item.flags != 0:
            return
        self.stream.seek(toc_item.offset)
        data = self.stream.read(toc_item.size)
        with open(os.path.join(output_dir, toc_item.name.decode('utf-8')), 'wb') as img:
            img.write(data)
    def extract_content(self, output_dir):
        '''Extract every HTML page and PNG image listed in the TOC and
        write a metadata.opf referencing them.

        :return: Absolute path to the generated OPF file.
        '''
        self.log.debug('Extracting content from file...')
        html = []
        images = []
        for item in self.toc:
            iname = as_unicode(item.name)
            if iname.lower().endswith('html'):
                self.log.debug('HTML item %s found...' % iname)
                html.append(iname)
                self.get_text(item, output_dir)
            if iname.lower().endswith('png'):
                self.log.debug('PNG item %s found...' % iname)
                images.append(iname)
                self.get_image(item, output_dir)
        opf_path = self.create_opf(output_dir, html, images)
        return opf_path
    def create_opf(self, output_dir, pages, images):
        '''Write metadata.opf in output_dir with a manifest of all pages and
        images and a spine of the pages; return its path.'''
        with CurrentDir(output_dir):
            opf = OPFCreator(output_dir, self.mi)
            manifest = []
            for page in pages+images:
                manifest.append((page, None))
            opf.create_manifest(manifest)
            opf.create_spine(pages)
            with open('metadata.opf', 'wb') as opffile:
                opf.render(opffile)
        return os.path.join(output_dir, 'metadata.opf')
| 4,388 | Python | .py | 102 | 33.176471 | 127 | 0.598071 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,353 | rbml.py | kovidgoyal_calibre/src/calibre/ebooks/rb/rbml.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Transform OEB content into RB compatible markup.
'''
import re
from calibre import prepare_string_for_xml
from calibre.ebooks.rb import unique_name
from polyglot.builtins import string_or_bytes
# XHTML tags that map directly onto their uppercase RocketBook equivalent.
TAGS = [
    'b',
    'big',
    'blockquote',
    'br',
    'center',
    'code',
    'div',
    'h1',
    'h2',
    'h3',
    'h4',
    'h5',
    'h6',
    'hr',
    'i',
    'li',
    'ol',
    'p',
    'pre',
    'small',
    'sub',
    'sup',
    'ul',
]
# Tags converted into RocketBook <A> hyperlinks/anchors.
LINK_TAGS = [
    'a',
]
# Tags converted into RocketBook <IMG> references.
IMAGE_TAGS = [
    'img',
]
# (CSS property, {value: RocketBook tag}) pairs used to emit presentation
# tags for styled elements during conversion.
STYLES = [
    ('font-weight', {'bold' : 'b', 'bolder' : 'b'}),
    ('font-style', {'italic' : 'i'}),
    ('text-align', {'center' : 'center'}),
]
class RBMLizer:
    '''
    Convert OEB content into RocketBook (.rb) HTML markup.

    Only the small tag set RocketBook understands (TAGS, LINK_TAGS,
    IMAGE_TAGS, STYLES) is emitted, always uppercase. Image hrefs are
    mapped to flat names via ``name_map``; link targets are assigned
    sequential ``calibre_link-N`` anchors.
    '''
    def __init__(self, log, name_map=None):
        '''
        :param log: Logger used for progress and warning messages.
        :param name_map: Optional mapping of OEB image hrefs to flat
            RocketBook image names. A fresh dict is created when omitted.
            Fixed: the previous mutable default argument (``name_map={}``)
            was stored and mutated, so image names leaked between all
            instances constructed without an explicit map.
        '''
        self.log = log
        self.name_map = {} if name_map is None else name_map
        self.link_hrefs = {}
    def extract_content(self, oeb_book, opts):
        '''Entry point: convert ``oeb_book`` and return the RB markup string.'''
        self.log.info('Converting XHTML to RB markup...')
        self.oeb_book = oeb_book
        self.opts = opts
        return self.mlize_spine()
    def mlize_spine(self):
        '''Assemble cover page, TOC and body text into one HTML document.
        The TOC placeholder is substituted after the text pass, because the
        TOC can only be built once all link anchors have been assigned.'''
        self.link_hrefs = {}
        output = ['<HTML><HEAD><TITLE></TITLE></HEAD><BODY>']
        output.append(self.get_cover_page())
        output.append('ghji87yhjko0Caliblre-toc-placeholder-for-insertion-later8ujko0987yjk')
        output.append(self.get_text())
        output.append('</BODY></HTML>')
        output = ''.join(output).replace('ghji87yhjko0Caliblre-toc-placeholder-for-insertion-later8ujko0987yjk', self.get_toc())
        output = self.clean_text(output)
        return output
    def get_cover_page(self):
        '''Return markup for the cover image and/or an out-of-spine title
        page, when the OEB guide declares them.'''
        from calibre.ebooks.oeb.base import XHTML
        from calibre.ebooks.oeb.stylizer import Stylizer
        output = ''
        if 'cover' in self.oeb_book.guide:
            if self.name_map.get(self.oeb_book.guide['cover'].href, None):
                output += '<IMG SRC="%s">' % self.name_map[self.oeb_book.guide['cover'].href]
        if 'titlepage' in self.oeb_book.guide:
            self.log.debug('Generating cover page...')
            href = self.oeb_book.guide['titlepage'].href
            item = self.oeb_book.manifest.hrefs[href]
            # Only dump the title page here if it is not already part of
            # the spine (otherwise get_text() will emit it).
            if item.spine_position is None:
                stylizer = Stylizer(item.data, item.href, self.oeb_book,
                        self.opts, self.opts.output_profile)
                output += ''.join(self.dump_text(item.data.find(XHTML('body')), stylizer, item))
        return output
    def get_toc(self):
        '''Return an inline table of contents when opts.inline_toc is set,
        otherwise an empty string.'''
        toc = ['']
        if self.opts.inline_toc:
            self.log.debug('Generating table of contents...')
            toc.append('<H1>%s</H1><UL>\n' % _('Table of Contents:'))
            for item in self.oeb_book.toc:
                if item.href in self.link_hrefs.keys():
                    toc.append(f'<LI><A HREF="#{self.link_hrefs[item.href]}">{item.title}</A></LI>\n')
                else:
                    # Fixed: this called self.oeb.warn(), but RBMLizer has no
                    # 'oeb' attribute, so a missing TOC target raised
                    # AttributeError instead of logging a warning.
                    self.log.warn('Ignoring toc item: %s not found in document.' % item)
            toc.append('</UL>')
        return ''.join(toc)
    def get_text(self):
        '''Serialise every spine item to RocketBook markup.'''
        from calibre.ebooks.oeb.base import XHTML
        from calibre.ebooks.oeb.stylizer import Stylizer
        output = ['']
        for item in self.oeb_book.spine:
            self.log.debug('Converting %s to RocketBook HTML...' % item.href)
            stylizer = Stylizer(item.data, item.href, self.oeb_book, self.opts, self.opts.output_profile)
            output.append(self.add_page_anchor(item))
            output += self.dump_text(item.data.find(XHTML('body')), stylizer, item)
        return ''.join(output)
    def add_page_anchor(self, page):
        '''Emit the anchor that marks the start of a page.'''
        return self.get_anchor(page, '')
    def get_anchor(self, page, aid):
        '''Return an <A NAME> anchor for ``page.href#aid``, assigning a
        stable sequential calibre_link-N name on first use.'''
        aid = f'{page.href}#{aid}'
        if aid not in self.link_hrefs.keys():
            self.link_hrefs[aid] = 'calibre_link-%s' % len(self.link_hrefs.keys())
        aid = self.link_hrefs[aid]
        return '<A NAME="%s"></A>' % aid
    def clean_text(self, text):
        '''Remove anchors that are never targeted by any link.'''
        # Remove anchors that do not have links
        anchors = set(re.findall(r'(?<=<A NAME=").+?(?="></A>)', text))
        links = set(re.findall(r'(?<=<A HREF="#).+?(?=">)', text))
        for unused in anchors.difference(links):
            text = text.replace('<A NAME="%s"></A>' % unused, '')
        return text
    def dump_text(self, elem, stylizer, page, tag_stack=None):
        '''Recursively convert ``elem`` (and its tail text) into a list of
        RocketBook markup fragments.

        :param elem: lxml element in the XHTML namespace.
        :param stylizer: Stylizer used to resolve computed CSS styles.
        :param page: OEB item, used to resolve relative hrefs.
        :param tag_stack: Stack of currently open RB tags; a fresh list is
            created at the top level. Fixed: the previous mutable default
            argument shared one list across every top-level call.
        '''
        from calibre.ebooks.oeb.base import XHTML_NS, barename, namespace
        if tag_stack is None:
            tag_stack = []
        if not isinstance(elem.tag, string_or_bytes) or namespace(elem.tag) != XHTML_NS:
            # Non-XHTML content (comments, foreign namespaces): keep only
            # its tail text when the parent is XHTML.
            p = elem.getparent()
            if p is not None and isinstance(p.tag, string_or_bytes) and namespace(p.tag) == XHTML_NS \
                    and elem.tail:
                return [elem.tail]
            return ['']
        text = ['']
        style = stylizer.style(elem)
        if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \
           or style['visibility'] == 'hidden':
            if hasattr(elem, 'tail') and elem.tail:
                return [elem.tail]
            return ['']
        tag = barename(elem.tag)
        tag_count = 0
        # Process tags that need special processing and that do not have inner
        # text. Usually these require an argument
        if tag in IMAGE_TAGS:
            if elem.attrib.get('src', None):
                if page.abshref(elem.attrib['src']) not in self.name_map.keys():
                    self.name_map[page.abshref(elem.attrib['src'])] = unique_name('%s' % len(self.name_map.keys()), self.name_map.keys())
                text.append('<IMG SRC="%s">' % self.name_map[page.abshref(elem.attrib['src'])])
        rb_tag = tag.upper() if tag in TAGS else None
        if rb_tag:
            tag_count += 1
            text.append('<%s>' % rb_tag)
            tag_stack.append(rb_tag)
        # Anchors links
        if tag in LINK_TAGS:
            href = elem.get('href')
            if href:
                href = page.abshref(href)
                # External links cannot be represented; only convert
                # intra-book targets.
                if '://' not in href:
                    if '#' not in href:
                        href += '#'
                    if href not in self.link_hrefs.keys():
                        self.link_hrefs[href] = 'calibre_link-%s' % len(self.link_hrefs.keys())
                    href = self.link_hrefs[href]
                    text.append('<A HREF="#%s">' % href)
                    tag_count += 1
                    tag_stack.append('A')
        # Anchor ids
        id_name = elem.get('id')
        if id_name:
            text.append(self.get_anchor(page, id_name))
        # Processes style information
        for s in STYLES:
            style_tag = s[1].get(style[s[0]], None)
            if style_tag:
                style_tag = style_tag.upper()
                tag_count += 1
                text.append('<%s>' % style_tag)
                tag_stack.append(style_tag)
        # Process tags that contain text.
        if hasattr(elem, 'text') and elem.text:
            text.append(prepare_string_for_xml(elem.text))
        for item in elem:
            text += self.dump_text(item, stylizer, page, tag_stack)
        # Close (in reverse order) only the tags this invocation opened.
        close_tag_list = []
        for i in range(0, tag_count):
            close_tag_list.insert(0, tag_stack.pop())
        text += self.close_tags(close_tag_list)
        if hasattr(elem, 'tail') and elem.tail:
            text.append(prepare_string_for_xml(elem.tail))
        return text
    def close_tags(self, tags):
        '''Return closing markup for ``tags``, innermost first.
        Note: consumes (pops) the passed list.'''
        text = ['']
        for i in range(0, len(tags)):
            tag = tags.pop()
            text.append('</%s>' % tag)
        return text
| 7,651 | Python | .py | 192 | 30.052083 | 137 | 0.55033 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,354 | input.py | kovidgoyal_calibre/src/calibre/ebooks/odt/input.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Convert an ODT file into a Open Ebook
'''
import logging
import os
from css_parser import CSSParser
from css_parser.css import CSSRule
from lxml import etree
from odf.draw import Frame as odFrame
from odf.draw import Image as odImage
from odf.namespaces import TEXTNS as odTEXTNS
from odf.odf2xhtml import ODF2XHTML
from odf.opendocument import load as odLoad
from calibre import CurrentDir, walk
from calibre.ebooks.oeb.base import _css_logger
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import as_bytes, string_or_bytes
class Extract(ODF2XHTML):
    '''
    Convert an ODT file into an OEB directory (XHTML + CSS + images + OPF).

    Extends odfpy's ODF2XHTML converter with calibre specific fixups: the
    generated inline CSS is extracted to an external file and cleaned up,
    the markup is massaged so EPUB renderers (ADE in particular) handle it
    correctly, the detected cover image is filtered out of the text flow,
    and an OPF manifest/spine is written next to the extracted content.
    '''

    def extract_pictures(self, zf):
        '''Extract every entry under Pictures/ from the ODT zip archive into
        a Pictures/ directory in the current working directory.'''
        if not os.path.exists('Pictures'):
            os.makedirs('Pictures')
        for name in zf.namelist():
            if name.startswith('Pictures') and name not in {'Pictures', 'Pictures/'}:
                data = zf.read(name)
                with open(name, 'wb') as f:
                    f.write(data)

    def apply_list_starts(self, root, log):
        '''Copy list start values (collected by the base converter into
        self.list_starts, keyed by CSS class selector) onto the matching
        <ol> elements as start attributes.'''
        if not self.list_starts:
            return
        list_starts = frozenset(self.list_starts)
        for ol in root.xpath('//*[local-name() = "ol" and @class]'):
            classes = {'.' + x for x in ol.get('class', '').split()}
            found = classes & list_starts
            if found:
                val = self.list_starts[next(iter(found))]
                ol.set('start', val)

    def fix_markup(self, html, log):
        '''Run all markup/CSS fixups over the generated XHTML and return the
        serialized document as UTF-8 bytes with an XML declaration.'''
        root = safe_xml_fromstring(html)
        self.filter_css(root, log)
        self.extract_css(root, log)
        self.epubify_markup(root, log)
        self.apply_list_starts(root, log)
        html = etree.tostring(root, encoding='utf-8',
                xml_declaration=True)
        return html

    def extract_css(self, root, log):
        '''Move all inline <style type="text/css"> blocks into an external
        odfpy.css file, link it from <head>, and keep a parsed copy of the
        stylesheet in self.css for later per-class rule lookups.'''
        ans = []
        for s in root.xpath('//*[local-name() = "style" and @type="text/css"]'):
            ans.append(s.text)
            s.getparent().remove(s)

        head = root.xpath('//*[local-name() = "head"]')
        if head:
            head = head[0]
            ns = head.nsmap.get(None, '')
            if ns:
                ns = '{%s}'%ns
            etree.SubElement(head, ns+'link', {'type':'text/css',
                'rel':'stylesheet', 'href':'odfpy.css'})

        css = '\n\n'.join(ans)
        parser = CSSParser(loglevel=logging.WARNING,
                log=_css_logger)
        self.css = parser.parseString(css, validate=False)

        with open('odfpy.css', 'wb') as f:
            f.write(css.encode('utf-8'))

    def get_css_for_class(self, cls):
        '''Return the first style rule whose selector is exactly ".cls", or
        None when the class has no dedicated rule.'''
        if not cls:
            return None
        for rule in self.css.cssRules.rulesOfType(CSSRule.STYLE_RULE):
            for sel in rule.selectorList:
                q = sel.selectorText
                if q == '.' + cls:
                    return rule

    def epubify_markup(self, root, log):
        '''Adjust the generated markup so it validates and renders as EPUB
        content (empty titles, <p><div> nesting, image positioning and
        anchored-image alignment).'''
        from calibre.ebooks.oeb.base import XHTML, XPath
        # Fix empty title tags
        for t in XPath('//h:title')(root):
            if not t.text:
                t.text = ' '
        # Fix <p><div> constructs as the asinine epubchecker complains
        # about them
        pdiv = XPath('//h:p/h:div')
        for div in pdiv(root):
            div.getparent().tag = XHTML('div')
        # Remove the position:relative as it causes problems with some epub
        # renderers. Remove display: block on an image inside a div as it is
        # redundant and prevents text-align:center from working in ADE
        # Also ensure that the img is contained in its containing div
        imgpath = XPath('//h:div/h:img[@style]')
        for img in imgpath(root):
            div = img.getparent()
            if len(div) == 1:
                style = div.attrib.get('style', '')
                if style and not style.endswith(';'):
                    style = style + ';'
                style += 'position:static'  # Ensures position of containing div is static
                # Ensure that the img is always contained in its frame
                div.attrib['style'] = style
                img.attrib['style'] = 'max-width: 100%; max-height: 100%'
        # Handle anchored images. The default markup + CSS produced by
        # odf2xhtml works with WebKit but not with ADE. So we convert the
        # common cases of left/right/center aligned block images to work on
        # both webkit and ADE. We detect the case of setting the side margins
        # to auto and map it to an appropriate text-align directive, which
        # works in both WebKit and ADE.
        # https://bugs.launchpad.net/bugs/1063207
        # https://bugs.launchpad.net/calibre/+bug/859343
        imgpath = XPath('descendant::h:div/h:div/h:img')
        for img in imgpath(root):
            div2 = img.getparent()
            div1 = div2.getparent()
            if (len(div1), len(div2)) != (1, 1):
                continue
            cls = div1.get('class', '')
            first_rules = list(filter(None, [self.get_css_for_class(x) for x in
                cls.split()]))
            has_align = False
            for r in first_rules:
                if r.style.getProperty('text-align') is not None:
                    has_align = True
            ml = mr = None
            if not has_align:
                aval = None
                cls = div2.get('class', '')
                rules = list(filter(None, [self.get_css_for_class(x) for x in
                    cls.split()]))
                for r in rules:
                    ml = r.style.getPropertyCSSValue('margin-left') or ml
                    mr = r.style.getPropertyCSSValue('margin-right') or mr
                ml = getattr(ml, 'value', None)
                mr = getattr(mr, 'value', None)
                if ml == mr == 'auto':
                    aval = 'center'
                elif ml == 'auto' and mr != 'auto':
                    aval = 'right'
                elif ml != 'auto' and mr == 'auto':
                    aval = 'left'
                if aval is not None:
                    style = div1.attrib.get('style', '').strip()
                    if style and not style.endswith(';'):
                        style = style + ';'
                    style += 'text-align:%s'%aval
                    has_align = True
                    div1.attrib['style'] = style
                if has_align:
                    # This is needed for ADE, without it the text-align has no
                    # effect
                    style = div2.attrib['style']
                    div2.attrib['style'] = 'display:inline;'+style

    def filter_css(self, root, log):
        '''Rewrite the inline stylesheet via do_filter_css() and append the
        generated replacement classes to the class attribute of affected
        elements.'''
        style = root.xpath('//*[local-name() = "style" and @type="text/css"]')
        if style:
            style = style[0]
            css = style.text
            if css:
                css, sel_map = self.do_filter_css(css)
                if not isinstance(css, str):
                    css = css.decode('utf-8', 'ignore')
                style.text = css
                for x in root.xpath('//*[@class]'):
                    extra = []
                    orig = x.get('class')
                    for cls in orig.split():
                        extra.extend(sel_map.get(cls, []))
                    if extra:
                        x.set('class', orig + ' ' + ' '.join(extra))

    def do_filter_css(self, css):
        '''Collapse rules that use multiple class-only selectors into a
        single generated c_odtN class.

        :return: (rewritten css text, mapping of original class name to the
            list of generated replacement class names).
        '''
        from css_parser import parseString
        from css_parser.css import CSSRule
        sheet = parseString(css, validate=False)
        rules = list(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE))
        sel_map = {}
        count = 0
        for r in rules:
            # Check if we have only class selectors for this rule
            nc = [x for x in r.selectorList if not
                    x.selectorText.startswith('.')]
            if len(r.selectorList) > 1 and not nc:
                # Replace all the class selectors with a single class selector
                # This will be added to the class attribute of all elements
                # that have one of these selectors.
                replace_name = 'c_odt%d'%count
                count += 1
                for sel in r.selectorList:
                    s = sel.selectorText[1:]
                    if s not in sel_map:
                        sel_map[s] = []
                    sel_map[s].append(replace_name)
                r.selectorText = '.'+replace_name
        return sheet.cssText, sel_map

    def search_page_img(self, mi, log):
        '''Warn when the document contains frames anchored to the page: the
        conversion moves all such pictures before the first page.'''
        for frm in self.document.topnode.getElementsByType(odFrame):
            try:
                if frm.getAttrNS(odTEXTNS,'anchor-type') == 'page':
                    log.warn('Document has Pictures anchored to Page, will all end up before first page!')
                    break
            except ValueError:
                pass

    def filter_cover(self, mi, log):
        '''Remove the detected cover image's paragraph from the element tree
        so the cover is not duplicated in the text flow. Only removes the
        paragraph when the frame/image/paragraph nesting is unambiguous.'''
        # filter the Element tree (remove the detected cover)
        if mi.cover and mi.odf_cover_frame:
            for frm in self.document.topnode.getElementsByType(odFrame):
                # search the right frame
                if frm.getAttribute('name') == mi.odf_cover_frame:
                    img = frm.getElementsByType(odImage)
                    # only one draw:image allowed in the draw:frame
                    if len(img) == 1 and img[0].getAttribute('href') == mi.cover:
                        # ok, this is the right frame with the right image
                        # check if there are more children
                        if len(frm.childNodes) != 1:
                            break
                        # check if the parent paragraph more children
                        para = frm.parentNode
                        if para.tagName != 'text:p' or len(para.childNodes) != 1:
                            break
                        # now it should be safe to remove the text:p
                        parent = para.parentNode
                        parent.removeChild(para)
                        log("Removed cover image paragraph from document...")
                    break

    def filter_load(self, odffile, mi, log):
        """ This is an adaption from ODF2XHTML. It adds a step between
        load and parse of the document where the Element tree can be
        modified.
        """
        # first load the odf structure
        self.lines = []
        self._wfunc = self._wlines
        if isinstance(odffile, string_or_bytes) \
                or hasattr(odffile, 'read'):  # Added by Kovid
            self.document = odLoad(odffile)
        else:
            self.document = odffile
        # filter stuff
        self.search_page_img(mi, log)
        try:
            self.filter_cover(mi, log)
        except Exception:
            # Cover filtering is best effort and must never abort the
            # conversion. Fixed: was a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt.
            pass
        # parse the modified tree and generate xhtml
        self._walknode(self.document.topnode)

    def __call__(self, stream, odir, log):
        '''Convert the ODT in ``stream`` into ``odir`` and return the path
        of the generated metadata.opf.'''
        from calibre.ebooks.metadata.odt import get_metadata
        from calibre.ebooks.metadata.opf2 import OPFCreator
        from calibre.utils.zipfile import ZipFile

        if not os.path.exists(odir):
            os.makedirs(odir)
        with CurrentDir(odir):
            log('Extracting ODT file...')
            stream.seek(0)
            mi = get_metadata(stream, 'odt')
            if not mi.title:
                mi.title = _('Unknown')
            if not mi.authors:
                mi.authors = [_('Unknown')]
            self.filter_load(stream, mi, log)
            html = self.xhtml()
            # A blanket img specification like this causes problems
            # with EPUB output as the containing element often has
            # an absolute height and width set that is larger than
            # the available screen real estate
            html = html.replace('img { width: 100%; height: 100%; }', '')
            # odf2xhtml creates empty title tag
            html = html.replace('<title></title>','<title>%s</title>'%(mi.title,))
            try:
                html = self.fix_markup(html, log)
            except Exception:
                # Fixed: was a bare except; the unfiltered markup is still
                # usable, so log and continue.
                log.exception('Failed to filter CSS, conversion may be slow')
            with open('index.xhtml', 'wb') as f:
                f.write(as_bytes(html))
            zf = ZipFile(stream, 'r')
            self.extract_pictures(zf)
            opf = OPFCreator(os.path.abspath(os.getcwd()), mi)
            opf.create_manifest([(os.path.abspath(f2), None) for f2 in
                walk(os.getcwd())])
            opf.create_spine([os.path.abspath('index.xhtml')])
            with open('metadata.opf', 'wb') as f:
                opf.render(f)
            return os.path.abspath('metadata.opf')
| 12,728 | Python | .py | 281 | 32.067616 | 106 | 0.5357 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,355 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/odt/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Handle the Open Document Format
'''
| 185 | Python | .py | 7 | 25 | 56 | 0.697143 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,356 | fb2ml.py | kovidgoyal_calibre/src/calibre/ebooks/fb2/fb2ml.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Transform OEB content into FB2 markup
'''
import re
import textwrap
import uuid
from datetime import datetime
from lxml import etree
from calibre import prepare_string_for_xml
from calibre.constants import __appname__, __version__
from calibre.ebooks.oeb.base import urlnormalize
from calibre.utils.img import save_cover_data_to
from calibre.utils.localization import lang_as_iso639_1
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.binary import as_base64_unicode
from polyglot.builtins import string_or_bytes
from polyglot.urllib import urlparse
class FB2MLizer:
'''
Todo: * Include more FB2 specific tags in the conversion.
* Handle notes and anchor links.
'''
    def __init__(self, log):
        '''
        :param log: Logger used to report conversion progress and errors.
        '''
        self.log = log
        self.reset_state()
def reset_state(self):
# Used to ensure text and tags are always within <p> and </p>
self.in_p = False
# Mapping of image names. OEB allows for images to have the same name but be stored
# in different directories. FB2 images are all in a flat layout so we rename all images
# into a sequential numbering system to ensure there are no collisions between image names.
self.image_hrefs = {}
# Mapping of toc items and their
self.toc = {}
# Used to see whether a new <section> needs to be opened
self.section_level = 0
def extract_content(self, oeb_book, opts):
self.log.info('Converting XHTML to FB2 markup...')
self.oeb_book = oeb_book
self.opts = opts
self.reset_state()
# Used for adding <section>s and <title>s to allow readers
# to generate toc from the document.
if self.opts.sectionize == 'toc':
self.create_flat_toc(self.oeb_book.toc, 1)
return self.fb2mlize_spine()
def fb2mlize_spine(self):
output = (
self.fb2_header(),
self.get_text(),
self.fb2mlize_images(),
self.fb2_footer(),
)
output = self.clean_text('\n'.join(output))
if self.opts.pretty_print:
output = etree.tostring(safe_xml_fromstring(output), encoding='unicode', pretty_print=True)
return '<?xml version="1.0" encoding="UTF-8"?>\n' + output
def clean_text(self, text):
# Remove pointless tags, but keep their contents.
text = re.sub(r'(?mu)<(strong|emphasis|strikethrough|sub|sup)>(\s*)</\1>', r'\2', text)
# Clean up paragraphs endings.
text = re.sub(r'(?ma)\s+</p>', '</p>', text)
# Condense empty paragraphs into a line break.
text = re.sub(r'(?mu)(?:<p></p>\s*){3,}', '<empty-line/>', text)
# Remove empty paragraphs.
text = re.sub(r'(?mu)<p></p>\s*', '', text)
# Put the paragraph following a paragraph on a separate line.
text = re.sub(r'(?mu)</p>\s*<p>', '</p>\n<p>', text)
if self.opts.insert_blank_line:
text = re.sub(r'(?mu)</p>', '</p><empty-line/>', text)
# Clean up title endings.
text = re.sub(r'(?mu)\s+</title>', '</title>', text)
# Remove empty title elements.
text = re.sub(r'(?mu)<title></title>\s*', '', text)
# Put the paragraph following a title on a separate line.
text = re.sub(r'(?mu)</title>\s*<p>', '</title>\n<p>', text)
# Put line breaks between paragraphs on a separate line.
text = re.sub(r'(?mu)</(p|title)>\s*<empty-line/>', r'</\1>\n<empty-line/>', text)
text = re.sub(r'(?mu)<empty-line/>\s*<p>', '<empty-line/>\n<p>', text)
# Remove empty sections.
text = re.sub(r'(?mu)<section>\s*</section>', '', text)
# Clean up sections starts and ends.
text = re.sub(r'(?mu)\s*<section>', '\n<section>', text)
text = re.sub(r'(?mu)<section>\s*', '<section>\n', text)
text = re.sub(r'(?mu)\s*</section>', '\n</section>', text)
text = re.sub(r'(?mu)</section>\s*', '</section>\n', text)
return text
def fb2_header(self):
from calibre.ebooks.oeb.base import OPF
metadata = {}
metadata['title'] = self.oeb_book.metadata.title[0].value
metadata['appname'] = __appname__
metadata['version'] = __version__
metadata['date'] = '%i.%i.%i' % (datetime.now().day, datetime.now().month, datetime.now().year)
if self.oeb_book.metadata.language:
lc = lang_as_iso639_1(self.oeb_book.metadata.language[0].value)
if not lc:
lc = self.oeb_book.metadata.language[0].value
metadata['lang'] = lc or 'en'
else:
metadata['lang'] = 'en'
metadata['id'] = None
metadata['cover'] = self.get_cover()
metadata['genre'] = self.opts.fb2_genre
metadata['author'] = ''
for auth in self.oeb_book.metadata.creator:
author_first = ''
author_middle = ''
author_last = ''
author_parts = auth.value.split(' ')
if len(author_parts) == 1:
author_last = author_parts[0]
elif len(author_parts) == 2:
author_first = author_parts[0]
author_last = author_parts[1]
else:
author_first = author_parts[0]
author_middle = ' '.join(author_parts[1:-1])
author_last = author_parts[-1]
metadata['author'] += '<author>'
metadata['author'] += '<first-name>%s</first-name>' % prepare_string_for_xml(author_first)
if author_middle:
metadata['author'] += '<middle-name>%s</middle-name>' % prepare_string_for_xml(author_middle)
metadata['author'] += '<last-name>%s</last-name>' % prepare_string_for_xml(author_last)
metadata['author'] += '</author>'
if not metadata['author']:
metadata['author'] = '<author><first-name></first-name><last-name></last-name></author>'
metadata['keywords'] = ''
tags = list(map(str, self.oeb_book.metadata.subject))
if tags:
tags = ', '.join(prepare_string_for_xml(x) for x in tags)
metadata['keywords'] = '<keywords>%s</keywords>'%tags
metadata['sequence'] = ''
if self.oeb_book.metadata.series:
index = '1'
if self.oeb_book.metadata.series_index:
index = self.oeb_book.metadata.series_index[0]
metadata['sequence'] = '<sequence name="{}" number="{}"/>'.format(prepare_string_for_xml('%s' % self.oeb_book.metadata.series[0]), index)
year = publisher = isbn = ''
identifiers = self.oeb_book.metadata['identifier']
for x in identifiers:
if x.get(OPF('scheme'), None).lower() == 'uuid' or str(x).startswith('urn:uuid:'):
metadata['id'] = str(x).split(':')[-1]
break
if metadata['id'] is None:
self.log.warn('No UUID identifier found')
metadata['id'] = str(uuid.uuid4())
try:
date = self.oeb_book.metadata['date'][0]
except IndexError:
pass
else:
year = '<year>%s</year>' % prepare_string_for_xml(date.value.partition('-')[0])
try:
publisher = self.oeb_book.metadata['publisher'][0]
except IndexError:
pass
else:
publisher = '<publisher>%s</publisher>' % prepare_string_for_xml(publisher.value)
for x in identifiers:
if x.get(OPF('scheme'), None).lower() == 'isbn':
isbn = '<isbn>%s</isbn>' % prepare_string_for_xml(x.value)
metadata['year'], metadata['isbn'], metadata['publisher'] = year, isbn, publisher
for key, value in metadata.items():
if key not in ('author', 'cover', 'sequence', 'keywords', 'year', 'publisher', 'isbn'):
metadata[key] = prepare_string_for_xml(value)
try:
comments = self.oeb_book.metadata['description'][0]
except Exception:
metadata['comments'] = ''
else:
from calibre.utils.html2text import html2text
metadata['comments'] = f'<annotation><p>{prepare_string_for_xml(html2text(comments.value).strip())}</p></annotation>'
# Keep the indentation level of the description the same as the body.
header = textwrap.dedent('''\
<FictionBook xmlns="http://www.gribuser.ru/xml/fictionbook/2.0" xmlns:l="http://www.w3.org/1999/xlink">
<description>
<title-info>
<genre>%(genre)s</genre>
%(author)s
<book-title>%(title)s</book-title>
%(cover)s
<lang>%(lang)s</lang>
%(keywords)s
%(sequence)s
%(comments)s
</title-info>
<document-info>
%(author)s
<program-used>%(appname)s %(version)s</program-used>
<date>%(date)s</date>
<id>%(id)s</id>
<version>1.0</version>
</document-info>
<publish-info>
%(publisher)s
%(year)s
%(isbn)s
</publish-info>
</description>''') % metadata
# Remove empty lines.
return '\n'.join(filter(str.strip, header.splitlines()))
    def fb2_footer(self):
        '''Return the closing tag of the FB2 document.'''
        return '</FictionBook>'
def get_cover(self):
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
cover_href = None
# Get the raster cover if it's available.
if self.oeb_book.metadata.cover and str(self.oeb_book.metadata.cover[0]) in self.oeb_book.manifest.ids:
id = str(self.oeb_book.metadata.cover[0])
cover_item = self.oeb_book.manifest.ids[id]
if cover_item.media_type in OEB_RASTER_IMAGES:
cover_href = cover_item.href
else:
# Figure out if we have a title page or a cover page
page_name = ''
if 'titlepage' in self.oeb_book.guide:
page_name = 'titlepage'
elif 'cover' in self.oeb_book.guide:
page_name = 'cover'
if page_name:
cover_item = self.oeb_book.manifest.hrefs[self.oeb_book.guide[page_name].href]
# Get the first image in the page
for img in cover_item.xpath('//img'):
cover_href = cover_item.abshref(img.get('src'))
break
if cover_href:
# Only write the image tag if it is in the manifest.
if cover_href in self.oeb_book.manifest.hrefs and cover_href not in self.image_hrefs:
self.image_hrefs[cover_href] = 'img_%s' % len(self.image_hrefs)
return '<coverpage><image l:href="#%s"/></coverpage>' % self.image_hrefs[cover_href]
return ''
def get_text(self):
from calibre.ebooks.oeb.base import XHTML
from calibre.ebooks.oeb.stylizer import Stylizer
text = ['<body>']
# Create main section if there are no others to create
if self.opts.sectionize == 'nothing':
text.append('<section>')
self.section_level += 1
for item in self.oeb_book.spine:
self.log.debug('Converting %s to FictionBook2 XML' % item.href)
stylizer = Stylizer(item.data, item.href, self.oeb_book, self.opts, self.opts.output_profile)
# Start a <section> if we must sectionize each file or if the TOC references this page
page_section_open = False
if self.opts.sectionize == 'files' or None in self.toc.get(item.href, ()):
text.append('<section>')
page_section_open = True
self.section_level += 1
text += self.dump_text(item.data.find(XHTML('body')), stylizer, item)
if page_section_open:
text.append('</section>')
self.section_level -= 1
# Close any open sections
while self.section_level > 0:
text.append('</section>')
self.section_level -= 1
text.append('</body>')
return ''.join(text)
    def fb2mlize_images(self):
        '''
        This function uses the self.image_hrefs dictionary mapping. It is populated by the dump_text function.
        '''
        from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
        images = []
        for item in self.oeb_book.manifest:
            # Don't write the image if it's not referenced in the document's text.
            if item.href not in self.image_hrefs:
                continue
            if item.media_type in OEB_RASTER_IMAGES:
                try:
                    if item.media_type not in ('image/jpeg', 'image/png'):
                        # Transcode anything that is not already JPEG/PNG to
                        # JPEG before embedding.
                        imdata = save_cover_data_to(item.data, compression_quality=70)
                        raw_data = as_base64_unicode(imdata)
                        content_type = 'image/jpeg'
                    else:
                        raw_data = as_base64_unicode(item.data)
                        content_type = item.media_type
                    # Don't put the encoded image on a single line.
                    step = 72
                    data = '\n'.join(raw_data[i:i+step] for i in range(0, len(raw_data), step))
                    images.append(f'<binary id="{self.image_hrefs[item.href]}" content-type="{content_type}">{data}</binary>')
                except Exception as e:
                    # Best effort: a broken image is reported and skipped
                    # rather than aborting the whole conversion.
                    self.log.error('Error: Could not include file %s because '
                        '%s.' % (item.href, e))
        return '\n'.join(images)
def create_flat_toc(self, nodes, level):
for item in nodes:
href, mid, id = item.href.partition('#')
if not id:
self.toc[href] = {None: 'page'}
else:
if not self.toc.get(href, None):
self.toc[href] = {}
self.toc[href][id] = level
self.create_flat_toc(item.nodes, level + 1)
def ensure_p(self):
if self.in_p:
return [], []
else:
self.in_p = True
return ['<p>'], ['p']
def close_open_p(self, tags):
text = ['']
added_p = False
if self.in_p:
# Close all up to p. Close p. Reopen all closed tags including p.
closed_tags = []
tags.reverse()
for t in tags:
text.append('</%s>' % t)
closed_tags.append(t)
if t == 'p':
break
closed_tags.reverse()
for t in closed_tags:
text.append('<%s>' % t)
else:
text.append('<p>')
added_p = True
self.in_p = True
return text, added_p
def handle_simple_tag(self, tag, tags):
s_out = []
s_tags = []
if tag not in tags:
p_out, p_tags = self.ensure_p()
s_out += p_out
s_tags += p_tags
s_out.append('<%s>' % tag)
s_tags.append(tag)
return s_out, s_tags
    def dump_text(self, elem_tree, stylizer, page, tag_stack=[]):
        '''
        Convert an XHTML element tree into a list of FB2 markup strings.

        This function is intended to be used in a recursive manner. dump_text will
        run through all elements in the elem_tree and call itself on each element.

        self.image_hrefs will be populated by calling this function.

        @param elem_tree: etree representation of XHTML content to be transformed.
        @param stylizer: Used to track the style of elements within the tree.
        @param page: OEB page used to determine absolute urls.
        @param tag_stack: List of open FB2 tags to take into account.
            NOTE(review): mutable default argument; benign here because the
            list is never mutated in place (always copied via tag_stack+tags).

        @return: List of string representing the XHTML converted to FB2 markup.
        '''
        from calibre.ebooks.oeb.base import XHTML_NS, barename, namespace
        elem = elem_tree
        # Ensure what we are converting is not a string and that the first tag is part of the XHTML namespace.
        if not isinstance(elem_tree.tag, string_or_bytes) or namespace(elem_tree.tag) != XHTML_NS:
            # Non-XHTML content is dropped, but its tail text (which belongs
            # to the XHTML parent) must be preserved.
            p = elem.getparent()
            if p is not None and isinstance(p.tag, string_or_bytes) and namespace(p.tag) == XHTML_NS \
                    and elem.tail:
                return [elem.tail]
            return []
        style = stylizer.style(elem_tree)
        # Hidden elements contribute nothing but their tail text.
        if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \
           or style['visibility'] == 'hidden':
            if hasattr(elem, 'tail') and elem.tail:
                return [elem.tail]
            return []
        # FB2 generated output.
        fb2_out = []
        # FB2 tags in the order they are opened. This will be used to close the tags.
        tags = []
        # First tag in tree
        tag = barename(elem_tree.tag)
        # Number of blank lines above tag, approximated from the top margin
        # expressed in multiples of the font size.
        try:
            ems = int(round((float(style.marginTop) / style.fontSize) - 1))
            if ems < 0:
                ems = 0
        except:
            ems = 0
        # Convert TOC entries to <title>s and add <section>s
        if self.opts.sectionize == 'toc':
            # A section cannot be a child of any other element than another section,
            # so leave the tag alone if there are parents
            if not tag_stack:
                # There are two reasons to start a new section here: the TOC pointed to
                # this page (then we use the first non-<body> on the page as a <title>), or
                # the TOC pointed to a specific element
                newlevel = 0
                toc_entry = self.toc.get(page.href, None)
                if toc_entry is not None:
                    if None in toc_entry:
                        if tag != 'body' and hasattr(elem_tree, 'text') and elem_tree.text:
                            newlevel = 1
                            # Consume the whole-page entry so only the first
                            # suitable element on the page becomes a title.
                            self.toc[page.href] = None
                    if not newlevel and elem_tree.attrib.get('id', None) is not None:
                        newlevel = toc_entry.get(elem_tree.attrib.get('id', None), None)
                # Start a new section if necessary
                if newlevel:
                    # Close sections until nesting matches the new TOC level.
                    while newlevel <= self.section_level:
                        fb2_out.append('</section>')
                        self.section_level -= 1
                    fb2_out.append('<section>')
                    self.section_level += 1
                    fb2_out.append('<title>')
                    tags.append('title')
            if self.section_level == 0:
                # If none of the prior processing made a section, make one now to be FB2 spec compliant
                fb2_out.append('<section>')
                self.section_level += 1
        # Process the XHTML tag and styles. Converted to an FB2 tag.
        # Use individual if statement not if else. There can be
        # only one XHTML tag but it can have multiple styles.
        if tag == 'img' and elem_tree.attrib.get('src', None):
            # Only write the image tag if it is in the manifest.
            ihref = urlnormalize(page.abshref(elem_tree.attrib['src']))
            if ihref in self.oeb_book.manifest.hrefs:
                if ihref not in self.image_hrefs:
                    self.image_hrefs[ihref] = 'img_%s' % len(self.image_hrefs)
                p_txt, p_tag = self.ensure_p()
                fb2_out += p_txt
                tags += p_tag
                fb2_out.append('<image l:href="#%s"/>' % self.image_hrefs[ihref])
            else:
                self.log.warn('Ignoring image not in manifest: %s' % ihref)
        if tag in ('br', 'hr') or ems >= 1:
            # Vertical whitespace is represented with <empty-line/> elements;
            # they must sit outside any open <p>, so close and reopen it.
            if ems < 1:
                multiplier = 1
            else:
                multiplier = ems
            if self.in_p:
                closed_tags = []
                open_tags = tag_stack+tags
                open_tags.reverse()
                for t in open_tags:
                    fb2_out.append('</%s>' % t)
                    closed_tags.append(t)
                    if t == 'p':
                        break
                fb2_out.append('<empty-line/>' * multiplier)
                closed_tags.reverse()
                for t in closed_tags:
                    fb2_out.append('<%s>' % t)
            else:
                fb2_out.append('<empty-line/>' * multiplier)
        if tag in ('div', 'li', 'p'):
            # Block level elements start a fresh paragraph.
            p_text, added_p = self.close_open_p(tag_stack+tags)
            fb2_out += p_text
            if added_p:
                tags.append('p')
        if tag == 'a' and elem_tree.attrib.get('href', None):
            # Handle only external links for now
            if urlparse(elem_tree.attrib['href']).netloc:
                p_txt, p_tag = self.ensure_p()
                fb2_out += p_txt
                tags += p_tag
                fb2_out.append('<a l:href="%s">' % urlnormalize(elem_tree.attrib['href']))
                tags.append('a')
        if tag == 'b' or style['font-weight'] in ('bold', 'bolder'):
            s_out, s_tags = self.handle_simple_tag('strong', tag_stack+tags)
            fb2_out += s_out
            tags += s_tags
        if tag == 'i' or style['font-style'] == 'italic':
            s_out, s_tags = self.handle_simple_tag('emphasis', tag_stack+tags)
            fb2_out += s_out
            tags += s_tags
        if tag in ('del', 'strike') or style['text-decoration'] == 'line-through':
            s_out, s_tags = self.handle_simple_tag('strikethrough', tag_stack+tags)
            fb2_out += s_out
            tags += s_tags
        if tag == 'sub':
            s_out, s_tags = self.handle_simple_tag('sub', tag_stack+tags)
            fb2_out += s_out
            tags += s_tags
        if tag == 'sup':
            s_out, s_tags = self.handle_simple_tag('sup', tag_stack+tags)
            fb2_out += s_out
            tags += s_tags
        # Process element text.
        if hasattr(elem_tree, 'text') and elem_tree.text:
            if not self.in_p:
                fb2_out.append('<p>')
            fb2_out.append(prepare_string_for_xml(elem_tree.text))
            if not self.in_p:
                fb2_out.append('</p>')
        # Process sub-elements.
        for item in elem_tree:
            fb2_out += self.dump_text(item, stylizer, page, tag_stack+tags)
        # Close open FB2 tags.
        tags.reverse()
        fb2_out += self.close_tags(tags)
        # Process element text that comes after the close of the XHTML tag but before the next XHTML tag.
        if hasattr(elem_tree, 'tail') and elem_tree.tail:
            if not self.in_p:
                fb2_out.append('<p>')
            fb2_out.append(prepare_string_for_xml(elem_tree.tail))
            if not self.in_p:
                fb2_out.append('</p>')
        return fb2_out
def close_tags(self, tags):
text = []
for tag in tags:
text.append('</%s>' % tag)
if tag == 'p':
self.in_p = False
return text
| 23,319 | Python | .py | 500 | 34.122 | 149 | 0.539963 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,357 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/fb2/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
def base64_decode(raw):
    '''
    Decode base64 data, falling back to a lenient decoder (adapted from
    FBReader sources) that skips bytes outside the base64 alphabet when
    the strict python decoder rejects the input.  The fallback keeps
    FBReader's quirk of treating '=' as the value 64 rather than as
    padding, and of emitting a full 3-byte triple for partial groups.
    '''
    from io import BytesIO

    from polyglot.binary import from_base64_bytes

    # First try the python implementation as it is faster
    try:
        return from_base64_bytes(raw)
    except Exception:
        pass

    # Lenient fallback: map each alphabet byte to its 6-bit value.
    alphabet = bytearray(b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=')
    values = {byt: idx for idx, byt in enumerate(alphabet)}
    data = bytearray(raw)
    out = BytesIO()
    pos, end = 0, len(data)
    while pos < end:
        group = 0
        count = 0
        # Gather up to four recognized bytes, ignoring everything else.
        while count < 4 and pos < end:
            val = values.get(data[pos], None)
            pos += 1
            if val is None:
                # Not part of the base64 alphabet: skip it
                continue
            group += val << (6 * (3 - count))
            count += 1
        # Emit the (possibly partial) 24-bit group as three bytes,
        # big-endian, discarding any overflow above bit 23.
        out.write(bytes(((group >> 16) & 0xff, (group >> 8) & 0xff, group & 0xff)))
    return out.getvalue()
| 1,371 | Python | .py | 43 | 22.255814 | 72 | 0.494322 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,358 | stylizer.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/stylizer.py | '''
CSS property propagation class.
'''
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import copy
import logging
import numbers
import os
import re
import unicodedata
from operator import itemgetter
from weakref import WeakKeyDictionary
from xml.dom import SyntaxErr as CSSSyntaxError
from css_parser import CSSParser, parseString, parseStyle, profiles, replaceUrls
from css_parser import log as css_parser_log
from css_parser import profile as cssprofiles
from css_parser.css import CSSFontFaceRule, CSSPageRule, CSSStyleRule, cssproperties
from css_selectors import INAPPROPRIATE_PSEUDO_CLASSES, Select, SelectorError
from tinycss.media3 import CSSMedia3Parser
from calibre import as_unicode, force_unicode
from calibre.ebooks import unit_convert
from calibre.ebooks.oeb.base import CSS_MIME, OEB_STYLES, SVG, XHTML, XHTML_NS, urlnormalize, xpath
from calibre.ebooks.oeb.normalize_css import DEFAULTS, normalizers
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems
css_parser_log.setLevel(logging.WARN)
_html_css_stylesheet = None
def validate_color(col):
    '''Check col against the CSS 2 colour profile; truthy iff valid.'''
    result = cssprofiles.validateWithProfile(
        'color', col, profiles=[profiles.Profiles.CSS_LEVEL_2])
    return result[1]
def html_css_stylesheet():
    '''
    Return the parsed default user agent stylesheet (templates/html.css),
    caching the parsed sheet in the module level _html_css_stylesheet on
    first use.
    '''
    global _html_css_stylesheet
    if _html_css_stylesheet is None:
        with open(P('templates/html.css'), 'rb') as f:
            raw = f.read()
        _html_css_stylesheet = parseString(raw.decode('utf-8'), validate=False)
    return _html_css_stylesheet
# CSS properties that are inherited from the parent element when not
# explicitly specified (the CSS 2.1 inherited set, plus text-shadow).
INHERITED = {
    'azimuth', 'border-collapse', 'border-spacing', 'caption-side', 'color',
    'cursor', 'direction', 'elevation', 'empty-cells', 'font-family',
    'font-size', 'font-style', 'font-variant', 'font-weight', 'letter-spacing',
    'line-height', 'list-style-image', 'list-style-position',
    'list-style-type', 'orphans', 'page-break-inside', 'pitch-range', 'pitch',
    'quotes', 'richness', 'speak-header', 'speak-numeral', 'speak-punctuation',
    'speak', 'speech-rate', 'stress', 'text-align', 'text-indent',
    'text-transform', 'visibility', 'voice-family', 'volume', 'white-space',
    'widows', 'word-spacing', 'text-shadow',
}
# Named absolute font sizes defined by CSS
FONT_SIZE_NAMES = {
    'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
}
# Media types considered applicable when rendering e-books
ALLOWED_MEDIA_TYPES = frozenset({'screen', 'all', 'aural', 'amzn-kf8'})
# Media query features that test device capabilities; queries that use any
# of these are always treated as non-matching (see media_ok below)
IGNORED_MEDIA_FEATURES = frozenset('width min-width max-width height min-height max-height device-width min-device-width max-device-width device-height min-device-height max-device-height aspect-ratio min-aspect-ratio max-aspect-ratio device-aspect-ratio min-device-aspect-ratio max-device-aspect-ratio color min-color max-color color-index min-color-index max-color-index monochrome min-monochrome max-monochrome -webkit-min-device-pixel-ratio resolution min-resolution max-resolution scan grid'.split())  # noqa
def media_ok(raw):
    '''
    Return True if a media description (the value of a media="" attribute
    or of an @media/@import media list) applies to e-book rendering.
    Empty/missing descriptions and unparseable ones are accepted.
    '''
    if not raw:
        return True
    if raw == 'amzn-mobi':  # Fast path for the most common rejection
        return False

    def matches(mq):
        ok = mq.media_type in ALLOWED_MEDIA_TYPES
        # Media queries that test for device specific features always fail
        for feature, expr in mq.expressions:
            if feature in IGNORED_MEDIA_FEATURES:
                ok = False
        return mq.negated ^ ok

    try:
        queries = CSSMedia3Parser().parse_stylesheet('@media %s {}' % raw).rules[0].media
        return any(matches(mq) for mq in queries)
    except Exception:
        pass
    return True
def test_media_ok():
    '''Self tests for media_ok(); exercises accept/reject cases for media
    descriptions, including negation and device-feature queries.'''
    assert media_ok(None)
    assert media_ok('')
    assert not media_ok('amzn-mobi')
    assert media_ok('amzn-kf8')
    assert media_ok('screen')
    assert media_ok('only screen')
    assert not media_ok('not screen')
    assert not media_ok('(device-width:10px)')
    assert media_ok('screen, (device-width:10px)')
    assert not media_ok('screen and (device-width:10px)')
class style_map(dict):
    '''A dict of CSS property -> value that additionally records which of
    the properties were declared with !important.'''

    def __init__(self):
        dict.__init__(self)
        # Names of properties declared with !important priority
        self.important_properties = set()
class StylizerRules:
    '''
    Flattened, specificity-sorted view of a list of stylesheets.

    Collects the style rules (self.rules), the merged @page rule
    (self.page_rule) and the @font-face rules (self.font_face_rules) so
    they can be shared between Stylizer instances for different files of
    the same book (Stylizer caches an instance on the OEB book object and
    reuses it when same_rules() holds).
    '''

    def __init__(self, opts, profile, stylesheets):
        self.opts, self.profile, self.stylesheets = opts, profile, stylesheets
        index = 0
        self.rules = []
        self.page_rule = {}
        self.font_face_rules = []
        for sheet_index, stylesheet in enumerate(stylesheets):
            # The first sheet is the user agent stylesheet; its rules get
            # lower precedence via the sheet_index component of specificity.
            href = stylesheet.href
            for rule in stylesheet.cssRules:
                if rule.type == rule.MEDIA_RULE:
                    # Only descend into @media blocks that apply to e-books
                    if media_ok(rule.media.mediaText):
                        for subrule in rule.cssRules:
                            self.rules.extend(self.flatten_rule(subrule, href, index, is_user_agent_sheet=sheet_index==0))
                            index += 1
                else:
                    self.rules.extend(self.flatten_rule(rule, href, index, is_user_agent_sheet=sheet_index==0))
                    index = index + 1
        self.rules.sort(key=itemgetter(0))  # sort by specificity

    def flatten_rule(self, rule, href, index, is_user_agent_sheet=False):
        '''
        Flatten a single CSS rule.  Style rules become a list of
        (specificity, selector-seq, style-dict, selector-text, href)
        tuples; @page rules are merged into self.page_rule and @font-face
        rules appended to self.font_face_rules.
        '''
        results = []
        sheet_index = 0 if is_user_agent_sheet else 1
        if isinstance(rule, CSSStyleRule):
            style = self.flatten_style(rule.style)
            for selector in rule.selectorList:
                specificity = (sheet_index,) + selector.specificity + (index,)
                text = selector.selectorText
                selector = list(selector.seq)
                results.append((specificity, selector, style, text, href))
        elif isinstance(rule, CSSPageRule):
            style = self.flatten_style(rule.style)
            self.page_rule.update(style)
        elif isinstance(rule, CSSFontFaceRule):
            if rule.style.length > 1:
                # Ignore the meaningless font face rules generated by the
                # benighted MS Word that contain only a font-family declaration
                # and nothing else
                self.font_face_rules.append(rule)
        return results

    def flatten_style(self, cssstyle):
        '''
        Convert a css_parser style declaration into a style_map of
        longhand property -> value, expanding shorthands via the
        normalizers, tracking !important, normalizing named font sizes to
        rem and propagating -epub-writing-mode.
        '''
        style = style_map()
        for prop in cssstyle:
            name = prop.name
            normalizer = normalizers.get(name, None)
            is_important = prop.priority == 'important'
            if normalizer is not None:
                # Shorthand property: expand into its longhand components
                for name, val in normalizer(name, prop.propertyValue).items():
                    style[name] = val
                    if is_important:
                        style.important_properties.add(name)
            elif name == 'text-align':
                style['text-align'] = self._apply_text_align(prop.value)
                if is_important:
                    style.important_properties.add(name)
            else:
                style[name] = prop.value
                if is_important:
                    style.important_properties.add(name)
        if 'font-size' in style:
            size = style['font-size']
            if size == 'normal':
                size = 'medium'
            if size == 'smallest':
                size = 'xx-small'
            if size in FONT_SIZE_NAMES:
                # Convert named sizes to rem using the output profile's tables
                style['font-size'] = "%.1frem" % (self.profile.fnames[size] / float(self.profile.fbase))
        if '-epub-writing-mode' in style:
            # Mirror the epub-specific property onto the standard ones,
            # without clobbering explicit declarations
            for x in ('-webkit-writing-mode', 'writing-mode'):
                style[x] = style.get(x, style['-epub-writing-mode'])
        return style

    def _apply_text_align(self, text):
        # Honour the user's justification override for left/justify values
        if text in ('left', 'justify') and self.opts.change_justification in ('left', 'justify'):
            text = self.opts.change_justification
        return text

    def same_rules(self, opts, profile, stylesheets):
        '''Return True if this instance was built from the same options,
        profile and stylesheets, so it can be reused as-is.'''
        if self.opts != opts:
            # it's unlikely to happen, but better safe than sorry
            return False
        if self.profile != profile:
            return False
        if len(self.stylesheets) != len(stylesheets):
            return False
        for index, stylesheet in enumerate(self.stylesheets):
            if stylesheet != stylesheets[index]:
                return False
        return True
class Stylizer:
    '''
    Computes and caches the effective CSS style for every element of a
    single XHTML file in an OEB book.  Collects all applicable
    stylesheets (user agent, linked, inline, extra/user CSS), flattens
    them via StylizerRules and applies matching rules to the tree; the
    per-element result is obtained with style(elem).
    '''

    STYLESHEETS = WeakKeyDictionary()

    def __init__(self, tree, path, oeb, opts, profile=None,
            extra_css='', user_css='', base_css=''):
        '''
        :param tree: lxml tree of the XHTML file being styled
        :param path: href of that file within the book
        :param oeb: the OEB book object (also used to cache StylizerRules)
        :param opts: conversion options
        :param profile: output profile; defaults to the 'default' profile
        :param extra_css/user_css/base_css: additional raw CSS text
        '''
        self.oeb, self.opts = oeb, opts
        self.profile = profile
        if self.profile is None:
            # Use the default profile. This should really be using
            # opts.output_profile, but I don't want to risk changing it, as
            # doing so might well have hard to debug font size effects.
            from calibre.customize.ui import output_profiles
            for x in output_profiles():
                if x.short_name == 'default':
                    self.profile = x
                    break
        if self.profile is None:
            # Just in case the default profile is removed in the future :)
            self.profile = opts.output_profile
        self.body_font_size = self.profile.fbase
        self.logger = oeb.logger
        item = oeb.manifest.hrefs[path]
        basename = os.path.basename(path)
        cssname = os.path.splitext(basename)[0] + '.css'
        stylesheets = [html_css_stylesheet()]
        if base_css:
            stylesheets.append(parseString(base_css, validate=False))
        style_tags = xpath(tree, '//*[local-name()="style" or local-name()="link"]')
        # Add css_parser parsing profiles from output_profile
        for profile in self.opts.output_profile.extra_css_modules:
            cssprofiles.addProfile(profile['name'],
                                        profile['props'],
                                        profile['macros'])
        parser = CSSParser(fetcher=self._fetch_css_file,
                log=logging.getLogger('calibre.css'))
        for elem in style_tags:
            if (elem.tag in (XHTML('style'), SVG('style')) and elem.get('type', CSS_MIME) in OEB_STYLES and media_ok(elem.get('media'))):
                # Inline <style> element: gather its text (including text of
                # any children, e.g. comment wrappers)
                text = elem.text if elem.text else ''
                for x in elem:
                    t = getattr(x, 'text', None)
                    if t:
                        text += '\n\n' + force_unicode(t, 'utf-8')
                    t = getattr(x, 'tail', None)
                    if t:
                        text += '\n\n' + force_unicode(t, 'utf-8')
                if text:
                    text = oeb.css_preprocessor(text)
                    # We handle @import rules separately
                    parser.setFetcher(lambda x: ('utf-8', b''))
                    stylesheet = parser.parseString(text, href=cssname,
                            validate=False)
                    parser.setFetcher(self._fetch_css_file)
                    for rule in stylesheet.cssRules:
                        if rule.type == rule.IMPORT_RULE:
                            ihref = item.abshref(rule.href)
                            if not media_ok(rule.media.mediaText):
                                continue
                            hrefs = self.oeb.manifest.hrefs
                            if ihref not in hrefs:
                                self.logger.warn('Ignoring missing stylesheet in @import rule:', rule.href)
                                continue
                            sitem = hrefs[ihref]
                            if sitem.media_type not in OEB_STYLES:
                                self.logger.warn('CSS @import of non-CSS file %r' % rule.href)
                                continue
                            stylesheets.append(sitem.data)
                    # Make links to resources absolute, since these rules will
                    # be folded into a stylesheet at the root
                    replaceUrls(stylesheet, item.abshref,
                            ignoreImportRules=True)
                    stylesheets.append(stylesheet)
            elif (elem.tag == XHTML('link') and elem.get('href') and elem.get(
                    'rel', 'stylesheet').lower() == 'stylesheet' and elem.get(
                        'type', CSS_MIME).lower() in OEB_STYLES and media_ok(elem.get('media'))
                 ):
                # External stylesheet linked via <link rel="stylesheet">
                href = urlnormalize(elem.attrib['href'])
                path = item.abshref(href)
                sitem = oeb.manifest.hrefs.get(path, None)
                if sitem is None:
                    self.logger.warn(
                        'Stylesheet %r referenced by file %r not in manifest' %
                        (path, item.href))
                    continue
                if not hasattr(sitem.data, 'cssRules'):
                    self.logger.warn(
                    'Stylesheet %r referenced by file %r is not CSS'%(path,
                        item.href))
                    continue
                stylesheets.append(sitem.data)
        # extra_css/user_css are appended last so they win at equal specificity
        csses = {'extra_css':extra_css, 'user_css':user_css}
        for w, x in csses.items():
            if x:
                try:
                    text = x
                    stylesheet = parser.parseString(text, href=cssname,
                            validate=False)
                    stylesheets.append(stylesheet)
                except Exception:
                    self.logger.exception('Failed to parse %s, ignoring.'%w)
                    self.logger.debug('Bad css: ')
                    self.logger.debug(x)
        # using oeb to store the rules, page rule and font face rules
        # and generating them again if opts, profile or stylesheets are different
        if (not hasattr(self.oeb, 'stylizer_rules')) \
            or not self.oeb.stylizer_rules.same_rules(self.opts, self.profile, stylesheets):
            self.oeb.stylizer_rules = StylizerRules(self.opts, self.profile, stylesheets)
        self.rules = self.oeb.stylizer_rules.rules
        self.page_rule = self.oeb.stylizer_rules.page_rule
        self.font_face_rules = self.oeb.stylizer_rules.font_face_rules
        self.flatten_style = self.oeb.stylizer_rules.flatten_style
        self._styles = {}
        pseudo_pat = re.compile(':{1,2}(%s)' % ('|'.join(INAPPROPRIATE_PSEUDO_CLASSES)), re.I)
        select = Select(tree, ignore_inappropriate_pseudo_classes=True)
        # Apply every rule, in specificity order, to its matching elements
        for _, _, cssdict, text, _ in self.rules:
            fl = pseudo_pat.search(text)
            try:
                matches = tuple(select(text))
            except SelectorError as err:
                self.logger.error(f'Ignoring CSS rule with invalid selector: {text!r} ({as_unicode(err)})')
                continue
            if fl is not None:
                fl = fl.group(1)
                if fl == 'first-letter' and getattr(self.oeb,
                        'plumber_output_format', '').lower() in {'mobi', 'docx'}:
                    # Fake first-letter: wrap the leading punctuation plus
                    # first letter in a synthetic <span> carrying the rule,
                    # since these output formats cannot express ::first-letter
                    for elem in matches:
                        for x in elem.iter('*'):
                            if x.text:
                                punctuation_chars = []
                                text = str(x.text)
                                while text:
                                    category = unicodedata.category(text[0])
                                    if category[0] not in {'P', 'Z'}:
                                        break
                                    punctuation_chars.append(text[0])
                                    text = text[1:]
                                special_text = ''.join(punctuation_chars) + \
                                        (text[0] if text else '')
                                span = x.makeelement('{%s}span' % XHTML_NS)
                                span.text = special_text
                                span.set('data-fake-first-letter', '1')
                                span.tail = text[1:]
                                x.text = None
                                x.insert(0, span)
                                self.style(span)._update_cssdict(cssdict)
                                break
                else:  # Element pseudo-class
                    for elem in matches:
                        self.style(elem)._update_pseudo_class(fl, cssdict)
            else:
                for elem in matches:
                    self.style(elem)._update_cssdict(cssdict)
        # Inline style="" attributes have the highest precedence
        for elem in xpath(tree, '//h:*[@style]'):
            self.style(elem)._apply_style_attr(url_replacer=item.abshref)
        num_pat = re.compile(r'[0-9.]+$')
        # Convert legacy width/height attributes on images into CSS, unless
        # CSS dimensions are already present
        for elem in xpath(tree, '//h:img[@width or @height]'):
            style = self.style(elem)
            # Check if either height or width is not default
            is_styled = style._style.get('width', 'auto') != 'auto' or \
                    style._style.get('height', 'auto') != 'auto'
            if not is_styled:
                # Update img style dimension using width and height
                upd = {}
                for prop in ('width', 'height'):
                    val = elem.get(prop, '').strip()
                    try:
                        del elem.attrib[prop]
                    except:
                        pass
                    if val:
                        if num_pat.match(val) is not None:
                            # Bare numbers are treated as px per HTML rules
                            val += 'px'
                        upd[prop] = val
                if upd:
                    style._update_cssdict(upd)

    def _fetch_css_file(self, path):
        '''css_parser fetcher: resolve an imported stylesheet from the book
        manifest.  Returns (encoding, data) or (None, None) when missing.'''
        hrefs = self.oeb.manifest.hrefs
        if path not in hrefs:
            self.logger.warn('CSS import of missing file %r' % path)
            return (None, None)
        item = hrefs[path]
        if item.media_type not in OEB_STYLES:
            self.logger.warn('CSS import of non-CSS file %r' % path)
            return (None, None)
        data = item.data.cssText
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        return ('utf-8', data)

    def style(self, element):
        '''Return the (cached) Style object for element.'''
        try:
            return self._styles[element]
        except KeyError:
            # Style registers itself in self._styles on construction
            return Style(element, self)

    def stylesheet(self, name, font_scale=None):
        '''Serialize all rules that came from the stylesheet with href
        ``name`` back to CSS text, optionally scaling pt font sizes.'''
        rules = []
        for _, _, style, selector, href in self.rules:
            if href != name:
                continue
            if font_scale and 'font-size' in style and \
                    style['font-size'].endswith('pt'):
                style = copy.copy(style)
                size = float(style['font-size'][:-2])
                style['font-size'] = "%.2fpt" % (size * font_scale)
            style = ';\n    '.join(': '.join(item) for item in style.items())
            rules.append(f'{selector} {{\n    {style};\n}}')
        return '\n'.join(rules)
# Shared empty sentinel used when a plain dict (not a style_map) carries no
# record of !important properties
no_important_properties = frozenset()
# SVG elements that render text; their px font sizes are viewport relative
svg_text_tags = tuple(map(SVG, ('text', 'textPath', 'tref', 'tspan')))
def is_only_number(x: str) -> bool:
    '''True if x parses as a float, i.e. a bare CSS number with no unit.'''
    try:
        float(x)
    except Exception:
        return False
    return True
def is_svg_text_tag(x):
    '''True if x is an SVG text-bearing element (text/textPath/tref/tspan).'''
    tag = getattr(x, 'tag', '')
    return tag in svg_text_tags
class Style:
    '''
    The computed CSS style for a single element.

    Values are resolved lazily: inherited properties fall back to the
    parent element's Style, lengths are converted to pts via the output
    profile, and expensive results (font size, width, height, line
    height, colors) are cached per instance.  Instances register
    themselves in the owning Stylizer's _styles cache on construction.
    '''
    # Properties emitted by MS Word that carry no useful information
    MS_PAT = re.compile(r'^\s*(mso-|panose-|text-underline|tab-interval)')

    # For SVG text elements: the raw px/number font-size, which is
    # relative to the SVG viewport rather than the document
    viewport_relative_font_size: str = ''

    def __init__(self, element, stylizer):
        self._element = element
        self._profile = stylizer.profile
        self._stylizer = stylizer
        self._style = style_map()
        # Lazily computed caches (None/uncomputed until first access)
        self._fontSize = None
        self._width = None
        self._height = None
        self._lineHeight = None
        self._bgcolor = None
        self._fgcolor = None
        self._pseudo_classes = {}
        # Register in the stylizer's cache so style(elem) finds us
        stylizer._styles[element] = self

    def set(self, prop, val):
        '''Set a property directly, bypassing !important handling.'''
        self._style[prop] = val

    def drop(self, prop, default=None):
        '''Remove a property, returning its value or default.'''
        return self._style.pop(prop, default)

    def _update_cssdict(self, cssdict):
        self._update_style(cssdict)

    def _update_style(self, cssdict):
        '''Merge cssdict into this style.  A non-important declaration
        never overrides a previously applied !important one.'''
        current_ip = getattr(self._style, 'important_properties', no_important_properties)
        if current_ip is no_important_properties:
            # Upgrade a plain dict to a style_map so we can track importance
            s = style_map()
            s.update(self._style)
            self._style = s
            current_ip = self._style.important_properties
        update_ip = getattr(cssdict, 'important_properties', no_important_properties)
        for name, val in cssdict.items():
            override = False
            if name in update_ip:
                current_ip.add(name)
                override = True
            elif name not in current_ip:
                override = True
            if override:
                self._style[name] = val

    def _update_pseudo_class(self, name, cssdict):
        '''Accumulate properties for a pseudo-class (e.g. hover) rule.'''
        orig = self._pseudo_classes.get(name, {})
        orig.update(cssdict)
        self._pseudo_classes[name] = orig

    def _apply_style_attr(self, url_replacer=None):
        '''Apply the element's inline style="" attribute, stripping the
        meaningless MS Office properties first.'''
        attrib = self._element.attrib
        if 'style' not in attrib:
            return
        css = attrib['style'].split(';')
        css = filter(None, (x.strip() for x in css))
        css = [y.strip() for y in css]
        css = [y for y in css if self.MS_PAT.match(y) is None]
        css = '; '.join(css)
        try:
            style = parseStyle(css, validate=False)
        except CSSSyntaxError:
            return
        if url_replacer is not None:
            replaceUrls(style, url_replacer, ignoreImportRules=True)
        self._update_style(self._stylizer.flatten_style(style))

    def _has_parent(self):
        try:
            return self._element.getparent() is not None
        except AttributeError:
            return False  # self._element is None

    def _get_parent(self):
        '''Return the parent element's Style, or None at the root.'''
        elem = self._element.getparent()
        if elem is None:
            return None
        return self._stylizer.style(elem)

    def __getitem__(self, name):
        # Prefer a computed property (e.g. fontSize for 'font-size') when
        # one is defined on this class; otherwise convert the raw value
        domname = cssproperties._toDOMname(name)
        if hasattr(self, domname):
            return getattr(self, domname)
        return self._unit_convert(self._get(name))

    def _get(self, name):
        '''Raw value lookup with CSS inheritance and spec defaults.'''
        result = self._style.get(name, None)
        if (result == 'inherit' or (result is None and name in INHERITED and self._has_parent())):
            stylizer = self._stylizer
            result = stylizer.style(self._element.getparent())._get(name)
        if result is None:
            result = DEFAULTS[name]
        return result

    def get(self, name, default=None):
        '''Raw value lookup without inheritance or defaults.'''
        return self._style.get(name, default)

    def _unit_convert(self, value, base=None, font=None):
        'Return value in pts'
        if base is None:
            base = self.width
        # font may legitimately be 0; only fall back for None/''
        if not font and font != 0:
            font = self.fontSize
        return unit_convert(value, base, font, self._profile.dpi, body_font_size=self._stylizer.body_font_size)

    def pt_to_px(self, value):
        '''Convert pts to px at the profile's dpi.'''
        return (self._profile.dpi / 72) * value

    @property
    def color(self):
        # Foreground color, validated; falls back to the CSS default
        if self._fgcolor is None:
            val = self._get('color')
            if val and validate_color(val):
                self._fgcolor = val
            else:
                self._fgcolor = DEFAULTS['color']
        return self._fgcolor

    @property
    def backgroundColor(self):
        '''
        Return the background color by parsing both the background-color and
        background shortcut properties. Note that inheritance/default values
        are not used. None is returned if no background color is set.
        '''
        if self._bgcolor is None:
            col = None
            val = self._style.get('background-color', None)
            if val and validate_color(val):
                col = val
            else:
                val = self._style.get('background', None)
                if val is not None:
                    try:
                        style = parseStyle('background: '+val, validate=False)
                        val = style.getProperty('background').propertyValue
                        try:
                            val = list(val)
                        except:
                            # val is CSSPrimitiveValue
                            val = [val]
                        # Pick the first component of the shorthand that is
                        # a valid color
                        for c in val:
                            c = c.cssText
                            if isinstance(c, bytes):
                                c = c.decode('utf-8', 'replace')
                            if validate_color(c):
                                col = c
                                break
                    except:
                        pass
            if col is None:
                # Cache the negative result as False (None means uncomputed)
                self._bgcolor = False
            else:
                self._bgcolor = col
        return self._bgcolor if self._bgcolor else None

    @property
    def fontSize(self):
        # Computed font size in pts, resolved against the parent's size
        def normalize_fontsize(value, base):
            value = value.replace('"', '').replace("'", '')
            result = None
            factor = None
            if value == 'inherit':
                value = base
            if value in FONT_SIZE_NAMES:
                result = self._profile.fnames[value]
            elif value == 'smaller':
                # One step down the profile's size ladder from base
                factor = 1.0/1.2
                for _, _, size in self._profile.fsizes:
                    if base <= size:
                        break
                    factor = None
                    result = size
            elif value == 'larger':
                # One step up the profile's size ladder from base
                factor = 1.2
                for _, _, size in reversed(self._profile.fsizes):
                    if base >= size:
                        break
                    factor = None
                    result = size
            else:
                result = self._unit_convert(value, base=base, font=base)
                if not isinstance(result, numbers.Number):
                    return base
                if result < 0:
                    # Negative sizes are invalid; treat as 'smaller'
                    result = normalize_fontsize("smaller", base)
            if factor:
                result = factor * base
            return result
        if self._fontSize is None:
            result = None
            parent = self._get_parent()
            if parent is not None:
                base = parent.fontSize
            else:
                base = self._profile.fbase
            if 'font-size' in self._style:
                size = self._style['font-size']
                if is_svg_text_tag(self._element) and (size.endswith('px') or is_only_number(size)):
                    # px inside SVG is viewport relative; remember the raw value
                    self.viewport_relative_font_size = size
                result = normalize_fontsize(size, base)
            else:
                result = base
            self._fontSize = result
        return self._fontSize

    def img_dimension(self, attr, img_size):
        '''Resolve one image dimension ('width' or 'height') in pts, from
        CSS, then the HTML attribute, then the intrinsic img_size (px),
        and clamp by the corresponding max-* property.'''
        ans = None
        parent = self._get_parent()
        if parent is not None:
            base = getattr(parent, attr)
        else:
            base = getattr(self._profile, attr + '_pts')
        x = self._style.get(attr)
        if x is not None:
            if x == 'auto':
                ans = self._unit_convert(str(img_size) + 'px', base=base)
            else:
                x = self._unit_convert(x, base=base)
                if isinstance(x, numbers.Number):
                    ans = x
        if ans is None:
            x = self._element.get(attr)
            if x is not None:
                x = self._unit_convert(x + 'px', base=base)
                if isinstance(x, numbers.Number):
                    ans = x
        if ans is None:
            ans = self._unit_convert(str(img_size) + 'px', base=base)
        maa = self._style.get('max-' + attr)
        if maa is not None:
            x = self._unit_convert(maa, base=base)
            if isinstance(x, numbers.Number) and (ans is None or x < ans):
                ans = x
        return ans

    def img_size(self, width, height):
        ' Return the final size of an <img> given that it points to an image of size widthxheight '
        w, h = self._get('width'), self._get('height')
        answ, ansh = self.img_dimension('width', width), self.img_dimension('height', height)
        # Preserve aspect ratio when only one dimension is specified
        if w == 'auto' and h != 'auto':
            answ = (float(width)/height) * ansh
        elif h == 'auto' and w != 'auto':
            ansh = (float(height)/width) * answ
        return answ, ansh

    @property
    def width(self):
        # Containing-block width in pts; attribute wins over CSS here,
        # clamped by max-width
        if self._width is None:
            width = None
            base = None
            parent = self._get_parent()
            if parent is not None:
                base = parent.width
            else:
                base = self._profile.width_pts
            if 'width' in self._element.attrib:
                width = self._element.attrib['width']
            elif 'width' in self._style:
                width = self._style['width']
            if not width or width == 'auto':
                result = base
            else:
                result = self._unit_convert(width, base=base)
            if isinstance(result, (str, bytes)):
                # Unconvertible value: fall back to the profile width
                result = self._profile.width
            self._width = result
            if 'max-width' in self._style:
                result = self._unit_convert(self._style['max-width'], base=base)
                if isinstance(result, (str, bytes)):
                    result = self._width
                if result < self._width:
                    self._width = result
        return self._width

    @property
    def parent_width(self):
        # Width of the containing block (own width at the root)
        parent = self._get_parent()
        if parent is None:
            return self.width
        return parent.width

    @property
    def height(self):
        # Same resolution strategy as width, using max-height
        if self._height is None:
            height = None
            base = None
            parent = self._get_parent()
            if parent is not None:
                base = parent.height
            else:
                base = self._profile.height_pts
            if 'height' in self._element.attrib:
                height = self._element.attrib['height']
            elif 'height' in self._style:
                height = self._style['height']
            if not height or height == 'auto':
                result = base
            else:
                result = self._unit_convert(height, base=base)
            if isinstance(result, (str, bytes)):
                result = self._profile.height
            self._height = result
            if 'max-height' in self._style:
                result = self._unit_convert(self._style['max-height'], base=base)
                if isinstance(result, (str, bytes)):
                    result = self._height
                if result < self._height:
                    self._height = result
        return self._height

    @property
    def lineHeight(self):
        # Line height in pts; bare numbers multiply the font size,
        # 'normal' is approximated as 1.2
        if self._lineHeight is None:
            result = None
            parent = self._get_parent()
            if 'line-height' in self._style:
                lineh = self._style['line-height']
                if lineh == 'normal':
                    lineh = '1.2'
                try:
                    result = float(lineh) * self.fontSize
                except ValueError:
                    result = self._unit_convert(lineh, base=self.fontSize)
            elif parent is not None:
                # TODO: proper inheritance
                result = parent.lineHeight
            else:
                result = 1.2 * self.fontSize
            self._lineHeight = result
        return self._lineHeight

    @property
    def effective_text_decoration(self):
        '''
        Browsers do this creepy thing with text-decoration where even though the
        property is not inherited, it looks like it is because containing
        blocks apply it. The actual algorithm is utterly ridiculous, see
        http://reference.sitepoint.com/css/text-decoration

        This matters for MOBI output, where text-decoration is mapped to <u>
        and <st> tags. Trying to implement the actual algorithm is too much
        work, so we just use a simple fake that should cover most cases.
        '''
        css = self._style.get('text-decoration', None)
        pcss = None
        parent = self._get_parent()
        if parent is not None:
            pcss = parent._style.get('text-decoration', None)
        if css in ('none', None, 'inherit') and pcss not in (None, 'none'):
            return pcss
        return css

    @property
    def first_vertical_align(self):
        ''' For docx output where tags are not nested, we cannot directly
        simulate the HTML vertical-align rendering model. Instead use the
        approximation of considering the first non-default vertical-align '''
        val = self['vertical-align']
        if val != 'baseline':
            raw_val = self._get('vertical-align')
            if '%' in raw_val:
                # Percentages are relative to the line height
                val = self._unit_convert(raw_val, base=self['line-height'])
            return val
        parent = self._get_parent()
        if parent is not None and 'inline' in parent['display']:
            return parent.first_vertical_align

    # Margin and padding accessors: percentages resolve against the
    # containing block width, per the CSS box model.
    @property
    def marginTop(self):
        return self._unit_convert(
            self._get('margin-top'), base=self.parent_width)

    @property
    def marginBottom(self):
        return self._unit_convert(
            self._get('margin-bottom'), base=self.parent_width)

    @property
    def marginLeft(self):
        return self._unit_convert(
            self._get('margin-left'), base=self.parent_width)

    @property
    def marginRight(self):
        return self._unit_convert(
            self._get('margin-right'), base=self.parent_width)

    @property
    def paddingTop(self):
        return self._unit_convert(
            self._get('padding-top'), base=self.parent_width)

    @property
    def paddingBottom(self):
        return self._unit_convert(
            self._get('padding-bottom'), base=self.parent_width)

    @property
    def paddingLeft(self):
        return self._unit_convert(
            self._get('padding-left'), base=self.parent_width)

    @property
    def paddingRight(self):
        return self._unit_convert(
            self._get('padding-right'), base=self.parent_width)

    def __str__(self):
        items = sorted(iteritems(self._style))
        return '; '.join(f"{key}: {val}" for key, val in items)

    def cssdict(self):
        '''Return a plain dict copy of the raw property values.'''
        return dict(self._style)

    def pseudo_classes(self, filter_css):
        '''Return the non-empty pseudo-class style dicts, optionally with
        the properties named in filter_css removed (on a deep copy).'''
        if filter_css:
            css = copy.deepcopy(self._pseudo_classes)
            for psel, cssdict in iteritems(css):
                for k in filter_css:
                    cssdict.pop(k, None)
        else:
            css = self._pseudo_classes
        return {k:v for k, v in iteritems(css) if v}

    @property
    def is_hidden(self):
        # Only checks this element's own declarations, not inheritance
        return self._style.get('display') == 'none' or self._style.get('visibility') == 'hidden'
| 34,663 | Python | .py | 783 | 31.153257 | 513 | 0.543402 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,359 | normalize_css.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/normalize_css.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
from functools import wraps
from css_parser import CSSParser
from css_parser import profile as cssprofiles
from css_parser.css import PropertyValue
from tinycss.fonts3 import parse_font, serialize_font_family
from calibre.ebooks.oeb.base import css_text
from polyglot.builtins import iteritems, string_or_bytes
# CSS 2.1 initial values for every property; used when expanding shorthand
# properties into their longhand components.
DEFAULTS = {'azimuth': 'center', 'background-attachment': 'scroll', # {{{
        'background-color': 'transparent', 'background-image': 'none',
        'background-position': '0% 0%', 'background-repeat': 'repeat',
        'border-bottom-color': 'currentColor', 'border-bottom-style':
        'none', 'border-bottom-width': 'medium', 'border-collapse':
        'separate', 'border-left-color': 'currentColor',
        'border-left-style': 'none', 'border-left-width': 'medium',
        'border-right-color': 'currentColor', 'border-right-style': 'none',
        'border-right-width': 'medium', 'border-spacing': 0,
        'border-top-color': 'currentColor', 'border-top-style': 'none',
        'border-top-width': 'medium', 'bottom': 'auto', 'caption-side':
        'top', 'clear': 'none', 'clip': 'auto', 'color': 'black',
        'content': 'normal', 'counter-increment': 'none', 'counter-reset':
        'none', 'cue-after': 'none', 'cue-before': 'none', 'cursor':
        'auto', 'direction': 'ltr', 'display': 'inline', 'elevation':
        'level', 'empty-cells': 'show', 'float': 'none', 'font-family':
        'serif', 'font-size': 'medium', 'font-stretch': 'normal', 'font-style': 'normal',
        'font-variant': 'normal', 'font-weight': 'normal', 'height':
        'auto', 'left': 'auto', 'letter-spacing': 'normal', 'line-height':
        'normal', 'list-style-image': 'none', 'list-style-position':
        'outside', 'list-style-type': 'disc', 'margin-bottom': 0,
        'margin-left': 0, 'margin-right': 0, 'margin-top': 0, 'max-height':
        'none', 'max-width': 'none', 'min-height': 0, 'min-width': 0,
        'orphans': '2', 'outline-color': 'invert', 'outline-style': 'none',
        'outline-width': 'medium', 'overflow': 'visible', 'padding-bottom':
        0, 'padding-left': 0, 'padding-right': 0, 'padding-top': 0,
        'page-break-after': 'auto', 'page-break-before': 'auto',
        'page-break-inside': 'auto', 'pause-after': 0, 'pause-before': 0,
        'pitch': 'medium', 'pitch-range': '50', 'play-during': 'auto',
        'position': 'static', 'quotes': "'“' '”' '‘' '’'", 'richness':
        '50', 'right': 'auto', 'speak': 'normal', 'speak-header': 'once',
        'speak-numeral': 'continuous', 'speak-punctuation': 'none',
        'speech-rate': 'medium', 'stress': '50', 'table-layout': 'auto',
        'text-align': 'auto', 'text-decoration': 'none', 'text-indent': 0,
        'text-shadow': 'none', 'text-transform': 'none', 'top': 'auto',
        'unicode-bidi': 'normal', 'vertical-align': 'baseline',
        'visibility': 'visible', 'voice-family': 'default', 'volume':
        'medium', 'white-space': 'normal', 'widows': '2', 'width': 'auto',
        'word-spacing': 'normal', 'z-index': 'auto'}
# }}}
# The four box edges, in the order used for CSS shorthand expansion.
EDGES = ('top', 'right', 'bottom', 'left')
# The three longhand components of each border-<edge> shorthand.
BORDER_PROPS = ('color', 'style', 'width')
def normalize_edge(name, cssvalue):
    """Expand a margin/padding/border-* edge shorthand into four longhands.

    Follows the CSS 1/2/3/4-value rules and returns a dict mapping e.g.
    'margin-top' (or 'border-top-style' for hyphenated names) to values.
    """
    if isinstance(cssvalue, PropertyValue):
        primitives = [css_text(v) for v in cssvalue]
    else:
        primitives = [css_text(cssvalue)]
    count = len(primitives)
    if count == 1:
        values = tuple(primitives) * 4
    elif count == 2:
        vert, horiz = primitives
        values = (vert, horiz, vert, horiz)
    elif count == 3:
        top, horiz, bottom = primitives
        values = (top, horiz, bottom, horiz)
    else:
        values = primitives[:4]
    if '-' in name:
        # e.g. 'border-width' -> 'border-<edge>-width'
        prefix, _, suffix = name.partition('-')
        keys = [f'{prefix}-{edge}-{suffix}' for edge in EDGES]
    else:
        # e.g. 'margin' -> 'margin-<edge>'
        keys = [f'{name}-{edge}' for edge in EDGES]
    return dict(zip(keys, values))
def simple_normalizer(prefix, names, check_inherit=True):
composition = tuple('%s-%s' %(prefix, n) for n in names)
@wraps(normalize_simple_composition)
def wrapper(name, cssvalue):
return normalize_simple_composition(name, cssvalue, composition, check_inherit=check_inherit)
return wrapper
def normalize_simple_composition(name, cssvalue, composition, check_inherit=True):
    """Distribute *cssvalue*'s primitives over the longhands in *composition*.

    Each primitive is assigned to the first longhand it validates against.
    Primitives are consumed from the end of the value list, so when several
    primitives validate for the same longhand the earliest one wins.
    """
    if check_inherit and css_text(cssvalue) == 'inherit':
        return {k: 'inherit' for k in composition}
    style = {k: DEFAULTS[k] for k in composition}
    try:
        primitives = [css_text(v) for v in cssvalue]
    except TypeError:
        primitives = [css_text(cssvalue)]
    for value in reversed(primitives):
        for key in composition:
            if cssprofiles.validate(key, value):
                style[key] = value
                break
    return style
font_composition = ('font-style', 'font-variant', 'font-weight', 'font-size', 'line-height', 'font-family')


def normalize_font(cssvalue, font_family_as_list=False):
    """Expand the 'font' shorthand into its longhand components.

    See https://developer.mozilla.org/en-US/docs/Web/CSS/font
    System font keywords (caption, icon, ...) expand to the defaults.
    """
    val = css_text(cssvalue)
    if val == 'inherit':
        ans = {k: 'inherit' for k in font_composition}
    else:
        ans = {k: DEFAULTS[k] for k in font_composition}
        if val not in {'caption', 'icon', 'menu', 'message-box', 'small-caption', 'status-bar'}:
            ans.update(parse_font(val))
    family = ans['font-family']
    if font_family_as_list:
        if isinstance(family, string_or_bytes):
            ans['font-family'] = [x.strip() for x in family.split(',')]
    elif not isinstance(family, string_or_bytes):
        ans['font-family'] = serialize_font_family(family)
    return ans
def normalize_border(name, cssvalue):
    """Expand the 'border' shorthand into all twelve border-<edge>-<prop> longhands."""
    top = 'border-' + EDGES[0]
    top_style = normalizers[top](top, cssvalue)
    style = dict(top_style)
    # Replicate the top-edge longhands for the remaining three edges.
    for edge in EDGES[1:]:
        for k, v in iteritems(top_style):
            style[k.replace(EDGES[0], edge)] = v
    return style
# Map shorthand property name -> function(name, cssvalue) returning the
# equivalent dict of longhand properties.
normalizers = {
    'list-style': simple_normalizer('list-style', ('type', 'position', 'image')),
    'font': lambda prop, v: normalize_font(v),
    'border': normalize_border,
}
# All edge shorthands follow the same 1/2/3/4-value expansion rules.
for x in ('margin', 'padding', 'border-style', 'border-width', 'border-color'):
    normalizers[x] = normalize_edge
for x in EDGES:
    name = 'border-' + x
    normalizers[name] = simple_normalizer(name, BORDER_PROPS, check_inherit=False)
# Placeholder values used to synthesize a parseable declaration when a
# shorthand must be expanded without an explicit value (normalize_filter_css).
SHORTHAND_DEFAULTS = {
    'margin': '0', 'padding': '0', 'border-style': 'none', 'border-width': '0', 'border-color': 'currentColor',
    'border':'none', 'border-left': 'none', 'border-right':'none', 'border-top': 'none', 'border-bottom': 'none',
    'list-style': 'inherit', 'font': 'inherit',
}
_safe_parser = None
def safe_parser():
    # Lazily create and cache a single lenient (non-validating, quiet)
    # CSSParser instance shared by all callers.
    global _safe_parser
    if _safe_parser is None:
        import logging
        _safe_parser = CSSParser(loglevel=logging.CRITICAL, validate=False)
    return _safe_parser
def normalize_filter_css(props):
    """Expand a collection of property names, adding the longhand components
    of any shorthand in the collection (the shorthands themselves are kept)."""
    ans = set(props)
    parser = safe_parser()
    for prop in props:
        expander = normalizers.get(prop)
        if expander is not None and prop in SHORTHAND_DEFAULTS:
            # Parse a synthetic declaration just to obtain a value object the
            # normalizer can expand.
            dec = parser.parseStyle(f'{prop}: {SHORTHAND_DEFAULTS[prop]}')
            cssvalue = dec.getPropertyCSSValue(dec.item(0))
            ans.update(expander(prop, cssvalue))
    return ans
def condense_edge(vals):
    """Condense four edge longhands into the shortest CSS shorthand value.

    *vals* is an iterable of property objects with ``.name`` (ending in the
    edge) and ``.value``. Returns the shorthand string, or None when the four
    edges are not all present.
    """
    by_edge = {p.name.rpartition('-')[-1]: p.value for p in vals}
    if len(by_edge) != 4 or set(by_edge) != {'left', 'top', 'right', 'bottom'}:
        return
    merged = {}
    for first, second in (('left', 'right'), ('top', 'bottom')):
        if by_edge[first] == by_edge[second]:
            merged[first] = by_edge[first]
        else:
            merged[first] = by_edge[first]
            merged[second] = by_edge[second]
    count = len(merged)
    if count == 4:
        order = ('top', 'right', 'bottom', 'left')
    elif count == 3:
        # Either top==bottom (emit the 4-value form reusing 'top') or
        # left==right (emit the 3-value form).
        order = ('top', 'right', 'top', 'left') if 'right' in merged else ('top', 'left', 'bottom')
    elif merged['top'] == merged['left']:
        return merged['top']
    else:
        order = ('top', 'left')
    return ' '.join(merged[e] for e in order)
def simple_condenser(prefix, func):
    """Wrap *func* so that a successful condensation replaces the longhand
    properties with the single *prefix* shorthand on the style object."""
    @wraps(func)
    def condense_simple(style, props):
        condensed = func(props)
        if condensed is None:
            return
        for prop in props:
            style.removeProperty(prop.name)
        style.setProperty(prefix, condensed)
    return condense_simple
def condense_border(style, props):
    # Condense border longhands in two stages, mutating *style* in place:
    # first each edge's three longhands into 'border-<edge>', then four
    # identical edge shorthands into a single 'border'.
    prop_map = {p.name:p for p in props}
    edge_vals = []
    for edge in EDGES:
        name = 'border-%s' % edge
        vals = []
        for prop in BORDER_PROPS:
            x = prop_map.get(f'{name}-{prop}', None)
            if x is not None:
                vals.append(x)
        if len(vals) == 3:
            # All three longhands present: replace with the edge shorthand.
            for prop in vals:
                style.removeProperty(prop.name)
            style.setProperty(name, ' '.join(x.value for x in vals))
            prop_map[name] = style.getProperty(name)
        x = prop_map.get(name, None)
        if x is not None:
            edge_vals.append(x)
    if len(edge_vals) == 4 and len({x.value for x in edge_vals}) == 1:
        # All four edges identical: collapse to the single 'border' shorthand.
        for prop in edge_vals:
            style.removeProperty(prop.name)
        style.setProperty('border', edge_vals[0].value)
condensers = {'margin': simple_condenser('margin', condense_edge), 'padding': simple_condenser('padding', condense_edge), 'border': condense_border}


def condense_rule(style):
    """Condense margin/padding/border longhands in *style* into shorthands,
    in place. Properties with a non-empty priority (!important) are left alone."""
    expanded = {'margin-': [], 'padding-': [], 'border-': []}
    for prop in style.getProperties():
        pname = prop.name
        if not pname:
            continue
        for prefix in expanded:
            if pname.startswith(prefix):
                expanded[prefix].append(prop)
                break
    for prefix, props in iteritems(expanded):
        if len(props) > 1 and {p.priority for p in props} == {''}:
            condensers[prefix[:-1]](style, props)
def condense_sheet(sheet):
    # Apply condense_rule() to every style rule in the stylesheet, in place.
    for rule in sheet.cssRules:
        if rule.type == rule.STYLE_RULE:
            condense_rule(rule.style)
def test_normalization(return_tests=False): # {{{
    # Self tests for the shorthand normalization and condensation code above.
    # When return_tests is True the unittest suite is returned for collection
    # by an external runner instead of being run directly.
    import unittest
    from itertools import product
    from css_parser import parseStyle
    class TestNormalization(unittest.TestCase):
        longMessage = True
        maxDiff = None
        def test_font_normalization(self):
            def font_dict(expected):
                ans = {k:DEFAULTS[k] for k in font_composition} if expected else {}
                ans.update(expected)
                return ans
            for raw, expected in iteritems({
                'some_font': {'font-family':'some_font'}, 'inherit':{k:'inherit' for k in font_composition},
                '1.2pt/1.4 A_Font': {'font-family':'A_Font', 'font-size':'1.2pt', 'line-height':'1.4'},
                'bad font': {'font-family':'"bad font"'}, '10% serif': {'font-family':'serif', 'font-size':'10%'},
                '12px "My Font", serif': {'font-family':'"My Font", serif', 'font-size': '12px'},
                'normal 0.6em/135% arial,sans-serif': {'font-family': 'arial, sans-serif', 'font-size': '0.6em', 'line-height':'135%', 'font-style':'normal'},
                'bold italic large serif': {'font-family':'serif', 'font-weight':'bold', 'font-style':'italic', 'font-size':'large'},
                'bold italic small-caps larger/normal serif':
                {'font-family':'serif', 'font-weight':'bold', 'font-style':'italic', 'font-size':'larger',
                    'line-height':'normal', 'font-variant':'small-caps'},
                '2em A B': {'font-family': '"A B"', 'font-size': '2em'},
            }):
                val = tuple(parseStyle('font: %s' % raw, validate=False))[0].propertyValue
                style = normalizers['font']('font', val)
                self.assertDictEqual(font_dict(expected), style, raw)
        def test_border_normalization(self):
            def border_edge_dict(expected, edge='right'):
                ans = {f'border-{edge}-{x}': DEFAULTS[f'border-{edge}-{x}'] for x in ('style', 'width', 'color')}
                for x, v in iteritems(expected):
                    ans[f'border-{edge}-{x}'] = v
                return ans
            def border_dict(expected):
                ans = {}
                for edge in EDGES:
                    ans.update(border_edge_dict(expected, edge))
                return ans
            def border_val_dict(expected, val='color'):
                ans = {f'border-{edge}-{val}': DEFAULTS[f'border-{edge}-{val}'] for edge in EDGES}
                for edge in EDGES:
                    ans[f'border-{edge}-{val}'] = expected
                return ans
            for raw, expected in iteritems({
                'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
                '1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
                '2em groove': {'width':'2em', 'style':'groove'},
            }):
                for edge in EDGES:
                    br = 'border-%s' % edge
                    val = tuple(parseStyle(f'{br}: {raw}', validate=False))[0].propertyValue
                    self.assertDictEqual(border_edge_dict(expected, edge), normalizers[br](br, val))
            for raw, expected in iteritems({
                'solid 1px red': {'color':'red', 'width':'1px', 'style':'solid'},
                '1px': {'width': '1px'}, '#aaa': {'color': '#aaa'},
                'thin groove': {'width':'thin', 'style':'groove'},
            }):
                val = tuple(parseStyle('{}: {}'.format('border', raw), validate=False))[0].propertyValue
                self.assertDictEqual(border_dict(expected), normalizers['border']('border', val))
            for name, val in iteritems({
                'width': '10%', 'color': 'rgb(0, 1, 1)', 'style': 'double',
            }):
                cval = tuple(parseStyle(f'border-{name}: {val}', validate=False))[0].propertyValue
                self.assertDictEqual(border_val_dict(val, name), normalizers['border-'+name]('border-'+name, cval))
        def test_edge_normalization(self):
            def edge_dict(prefix, expected):
                return {f'{prefix}-{edge}' : x for edge, x in zip(EDGES, expected)}
            for raw, expected in iteritems({
                '2px': ('2px', '2px', '2px', '2px'),
                '1em 2em': ('1em', '2em', '1em', '2em'),
                '1em 2em 3em': ('1em', '2em', '3em', '2em'),
                '1 2 3 4': ('1', '2', '3', '4'),
            }):
                for prefix in ('margin', 'padding'):
                    cval = tuple(parseStyle(f'{prefix}: {raw}', validate=False))[0].propertyValue
                    self.assertDictEqual(edge_dict(prefix, expected), normalizers[prefix](prefix, cval))
        def test_list_style_normalization(self):
            def ls_dict(expected):
                ans = {'list-style-%s' % x : DEFAULTS['list-style-%s' % x] for x in ('type', 'image', 'position')}
                for k, v in iteritems(expected):
                    ans['list-style-%s' % k] = v
                return ans
            for raw, expected in iteritems({
                'url(http://www.example.com/images/list.png)': {'image': 'url(http://www.example.com/images/list.png)'},
                'inside square': {'position':'inside', 'type':'square'},
                'upper-roman url(img) outside': {'position':'outside', 'type':'upper-roman', 'image':'url(img)'},
            }):
                cval = tuple(parseStyle('list-style: %s' % raw, validate=False))[0].propertyValue
                self.assertDictEqual(ls_dict(expected), normalizers['list-style']('list-style', cval))
        def test_filter_css_normalization(self):
            ae = self.assertEqual
            ae({'font'} | set(font_composition), normalize_filter_css({'font'}))
            for p in ('margin', 'padding'):
                ae({p} | {p + '-' + x for x in EDGES}, normalize_filter_css({p}))
            bvals = {f'border-{edge}-{x}' for edge in EDGES for x in BORDER_PROPS}
            ae(bvals | {'border'}, normalize_filter_css({'border'}))
            for x in BORDER_PROPS:
                sbvals = {f'border-{e}-{x}' for e in EDGES}
                ae(sbvals | {'border-%s' % x}, normalize_filter_css({'border-%s' % x}))
            for e in EDGES:
                sbvals = {f'border-{e}-{x}' for x in BORDER_PROPS}
                ae(sbvals | {'border-%s' % e}, normalize_filter_css({'border-%s' % e}))
            ae({'list-style', 'list-style-image', 'list-style-type', 'list-style-position'}, normalize_filter_css({'list-style'}))
        def test_edge_condensation(self):
            for s, v in iteritems({
                (1, 1, 3) : None,
                (1, 2, 3, 4) : '2pt 3pt 4pt 1pt',
                (1, 2, 3, 2) : '2pt 3pt 2pt 1pt',
                (1, 2, 1, 3) : '2pt 1pt 3pt',
                (1, 2, 1, 2) : '2pt 1pt',
                (1, 1, 1, 1) : '1pt',
                ('2%', '2%', '2%', '2%') : '2%',
                tuple('0 0 0 0'.split()) : '0',
            }):
                for prefix in ('margin', 'padding'):
                    css = {f'{prefix}-{x}' : str(y)+'pt' if isinstance(y, numbers.Number) else y
                        for x, y in zip(('left', 'top', 'right', 'bottom'), s)}
                    css = '; '.join((f'{k}:{v}' for k, v in iteritems(css)))
                    style = parseStyle(css)
                    condense_rule(style)
                    val = getattr(style.getProperty(prefix), 'value', None)
                    self.assertEqual(v, val)
                    if val is not None:
                        for edge in EDGES:
                            self.assertFalse(getattr(style.getProperty(f'{prefix}-{edge}'), 'value', None))
        def test_border_condensation(self):
            vals = 'red solid 5px'
            css = '; '.join(f'border-{edge}-{p}: {v}' for edge in EDGES for p, v in zip(BORDER_PROPS, vals.split()))
            style = parseStyle(css)
            condense_rule(style)
            for e, p in product(EDGES, BORDER_PROPS):
                self.assertFalse(style.getProperty(f'border-{e}-{p}'))
                self.assertFalse(style.getProperty('border-%s' % e))
                self.assertFalse(style.getProperty('border-%s' % p))
            self.assertEqual(style.getProperty('border').value, vals)
            css = '; '.join(f'border-{edge}-{p}: {v}' for edge in ('top',) for p, v in zip(BORDER_PROPS, vals.split()))
            style = parseStyle(css)
            condense_rule(style)
            self.assertEqual(css_text(style).rstrip(';'), 'border-top: %s' % vals)
            css += ';' + '; '.join(f'border-{edge}-{p}: {v}' for edge in ('right', 'left', 'bottom') for p, v in
                    zip(BORDER_PROPS, vals.replace('red', 'green').split()))
            style = parseStyle(css)
            condense_rule(style)
            self.assertEqual(len(style.getProperties()), 4)
            self.assertEqual(style.getProperty('border-top').value, vals)
            self.assertEqual(style.getProperty('border-left').value, vals.replace('red', 'green'))
    tests = unittest.defaultTestLoader.loadTestsFromTestCase(TestNormalization)
    if return_tests:
        return tests
    unittest.TextTestRunner(verbosity=4).run(tests)
# }}}
if __name__ == '__main__':
    # Run the normalization self tests when executed as a script.
    test_normalization()
| 19,739 | Python | .py | 377 | 41.257294 | 158 | 0.553235 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,360 | writer.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/writer.py | '''
Directory output OEBBook writer.
'''
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import os
from calibre.ebooks.oeb.base import OPF_MIME, DirContainer, OEBError, xml2str
__all__ = ['OEBWriter']
class OEBWriter:
    """Write an :class:`OEBBook` out as a folder of files plus OPF metadata."""

    DEFAULT_PROFILE = 'PRS505'
    """Default renderer profile for content written with this Writer."""

    TRANSFORMS = []
    """List of transforms to apply to content written with this Writer."""

    def __init__(self, version='2.0', page_map=False, pretty_print=False):
        # version: OPF version string ('1.2' or '2.0')
        # page_map: whether to emit an Adobe page-map file
        self.version = version
        self.page_map = page_map
        self.pretty_print = pretty_print

    @classmethod
    def config(cls, cfg):
        """Add any book-writing options to the :class:`Config` object
        :param:`cfg`.
        """
        oeb = cfg.add_group('oeb', _('OPF/NCX/etc. generation options.'))
        oeb('opf_version', ['--opf-version'], default='2.0',
            choices=['1.2', '2.0'],
            help=_('OPF version to generate. Default is %default.'))
        oeb('adobe_page_map', ['--adobe-page-map'], default=False,
            help=_('Generate an Adobe "page-map" file if pagination '
                   'information is available.'))
        return cfg

    @classmethod
    def generate(cls, opts):
        """Generate a Writer instance from command-line options."""
        return cls(version=opts.opf_version,
                   page_map=opts.adobe_page_map,
                   pretty_print=opts.pretty_print)

    def __call__(self, oeb, path):
        """
        Write the book in the :class:`OEBBook` object :param:`oeb` to a folder
        at :param:`path`.
        """
        # A path ending in .opf names the OPF file; the book goes into its
        # parent folder.
        opfname = None
        if os.path.splitext(path)[1].lower() == '.opf':
            opfname = os.path.basename(path)
            path = os.path.dirname(path)
        if not os.path.isdir(path):
            os.mkdir(path)
        output = DirContainer(path, oeb.log)
        for item in oeb.manifest.values():
            output.write(item.href, item.bytes_representation)
        # Only the major version digit selects the OPF serialization.
        major = int(self.version[0])
        if major == 1:
            metadata = oeb.to_opf1()
        elif major == 2:
            metadata = oeb.to_opf2(page_map=self.page_map)
        else:
            raise OEBError("Unrecognized OPF version %r" % self.version)
        for mime, (href, data) in metadata.items():
            if opfname and mime == OPF_MIME:
                href = opfname
            output.write(href, xml2str(data, pretty_print=self.pretty_print))
| 2,666 | Python | .py | 65 | 32.4 | 78 | 0.600077 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,361 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
| 92 | Python | .py | 2 | 45 | 66 | 0.655556 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,362 | base.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/base.py | '''
Basic support for manipulating OEB 1.x/2.0 content and metadata.
'''
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
__docformat__ = 'restructuredtext en'
import logging
import numbers
import os
import re
import sys
from collections import defaultdict
from itertools import count
from operator import attrgetter
from typing import Optional
from lxml import etree, html
from calibre import as_unicode, force_unicode, get_types_map, isbytestring
from calibre.constants import __version__, filesystem_encoding
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.conversion.preprocess import CSSPreProcessor
from calibre.ebooks.oeb.parse_utils import XHTML, XHTML_NS, NotHTML, barename, namespace, parse_html
from calibre.translations.dynamic import translate
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.icu import numeric_sort_key
from calibre.utils.icu import title_case as icu_title
from calibre.utils.localization import __
from calibre.utils.short_uuid import uuid4
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import codepoint_to_chr, iteritems, itervalues, string_or_bytes
from polyglot.urllib import unquote as urlunquote
from polyglot.urllib import urldefrag, urljoin, urlparse, urlunparse
# XML namespace URIs used throughout OEB processing, plus the prefix maps
# used for XPath evaluation (XPNSMAP) and OPF serialization.
XML_NS = 'http://www.w3.org/XML/1998/namespace'
OEB_DOC_NS = 'http://openebook.org/namespaces/oeb-document/1.0/'
OPF1_NS = 'http://openebook.org/namespaces/oeb-package/1.0/'
OPF2_NS = 'http://www.idpf.org/2007/opf'
OPF_NSES = {OPF1_NS, OPF2_NS}
DC09_NS = 'http://purl.org/metadata/dublin_core'
DC10_NS = 'http://purl.org/dc/elements/1.0/'
DC11_NS = 'http://purl.org/dc/elements/1.1/'
DC_NSES = {DC09_NS, DC10_NS, DC11_NS}
XSI_NS = 'http://www.w3.org/2001/XMLSchema-instance'
DCTERMS_NS = 'http://purl.org/dc/terms/'
NCX_NS = 'http://www.daisy.org/z3986/2005/ncx/'
SVG_NS = 'http://www.w3.org/2000/svg'
XLINK_NS = 'http://www.w3.org/1999/xlink'
CALIBRE_NS = 'http://calibre.kovidgoyal.net/2009/metadata'
RE_NS = 'http://exslt.org/regular-expressions'
MBP_NS = 'http://www.mobipocket.com'
EPUB_NS = 'http://www.idpf.org/2007/ops'
MATHML_NS = 'http://www.w3.org/1998/Math/MathML'
SMIL_NS = 'http://www.w3.org/ns/SMIL'
# Prefix -> namespace map used by XPath()/xpath() below.
XPNSMAP = {
    'h': XHTML_NS, 'o1': OPF1_NS, 'o2': OPF2_NS, 'd09': DC09_NS,
    'd10': DC10_NS, 'd11': DC11_NS, 'xsi': XSI_NS, 'dt': DCTERMS_NS,
    'ncx': NCX_NS, 'svg': SVG_NS, 'xl': XLINK_NS, 're': RE_NS,
    'mathml': MATHML_NS, 'mbp': MBP_NS, 'calibre': CALIBRE_NS,
    'epub':EPUB_NS, 'smil': SMIL_NS,
}
OPF1_NSMAP = {'dc': DC11_NS, 'oebpackage': OPF1_NS}
OPF2_NSMAP = {'opf': OPF2_NS, 'dc': DC11_NS, 'dcterms': DCTERMS_NS,
              'xsi': XSI_NS, 'calibre': CALIBRE_NS}
# Helpers that wrap a local name in Clark ({namespace}name) notation for each
# commonly used namespace.
def XML(name):
    return f'{{{XML_NS}}}{name}'
def OPF(name):
    return f'{{{OPF2_NS}}}{name}'
def DC(name):
    return f'{{{DC11_NS}}}{name}'
def XSI(name):
    return f'{{{XSI_NS}}}{name}'
def DCTERMS(name):
    return f'{{{DCTERMS_NS}}}{name}'
def NCX(name):
    return f'{{{NCX_NS}}}{name}'
def SVG(name):
    return f'{{{SVG_NS}}}{name}'
def XLINK(name):
    return f'{{{XLINK_NS}}}{name}'
def SMIL(name):
    return f'{{{SMIL_NS}}}{name}'
def EPUB(name):
    return f'{{{EPUB_NS}}}{name}'
def CALIBRE(name):
    return f'{{{CALIBRE_NS}}}{name}'
# Patterns for extracting url(...) and @import links from CSS text, and for
# splitting the space separated <object archive="..."> attribute.
_css_url_re = re.compile(r'url\s*\([\'"]{0,1}(.*?)[\'"]{0,1}\)', re.I)
_css_import_re = re.compile(r'@import "(.*?)"')
_archive_re = re.compile(r'[^ ]+')
# Tags that should not be self closed in epub output
self_closing_bad_tags = {'a', 'abbr', 'address', 'article', 'aside', 'audio', 'b',
    'bdo', 'blockquote', 'body', 'button', 'cite', 'code', 'dd', 'del', 'details',
    'dfn', 'div', 'dl', 'dt', 'em', 'fieldset', 'figcaption', 'figure', 'footer',
    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'i', 'iframe', 'ins', 'kbd',
    'label', 'legend', 'li', 'map', 'mark', 'meter', 'nav', 'ol', 'output', 'p',
    'pre', 'progress', 'q', 'rp', 'rt', 'samp', 'section', 'select', 'small',
    'span', 'strong', 'sub', 'summary', 'sup', 'textarea', 'time', 'ul', 'var',
    'video', 'title', 'script', 'style'}
def css_text(x):
    """Return x.cssText as str, decoding from UTF-8 (errors replaced) when
    the underlying css_parser object yields bytes."""
    text = x.cssText
    if isinstance(text, bytes):
        return text.decode('utf-8', 'replace')
    return text
def as_string_type(pat, for_unicode):
    """Coerce *pat* to str when *for_unicode* is true, else to UTF-8 bytes."""
    if for_unicode:
        return pat.decode('utf-8') if isinstance(pat, bytes) else pat
    return pat.encode('utf-8') if isinstance(pat, str) else pat
def self_closing_pat(for_unicode):
    """Return a compiled regex matching self-closed forms of tags that must
    not be self closed in EPUB output.

    The str and bytes flavours are compiled lazily and cached as attributes
    on this function.
    """
    attr = 'unicode_ans' if for_unicode else 'bytes_ans'
    cached = getattr(self_closing_pat, attr, None)
    if cached is not None:
        return cached
    pat = r'<(?P<tag>%s)(?=[\s/])(?P<arg>[^>]*)/>' % '|'.join(self_closing_bad_tags)
    compiled = re.compile(as_string_type(pat, for_unicode), flags=re.IGNORECASE)
    setattr(self_closing_pat, attr, compiled)
    return compiled
def close_self_closing_tags(raw):
    """Expand self-closed forms like <div/> into explicit <div></div> pairs;
    works on both str and bytes input."""
    for_unicode = isinstance(raw, str)
    replacement = as_string_type(r'<\g<tag>\g<arg>></\g<tag>>', for_unicode)
    return self_closing_pat(for_unicode).sub(replacement, raw)
def uuid_id():
    # A fresh short-uuid prefixed with 'u' so it is always a valid XML id
    # (ids must not start with a digit).
    return 'u' + uuid4()
def itercsslinks(raw):
    """Yield (url, offset) for every url(...) and then every @import "..."
    occurrence in the CSS text *raw*."""
    for pattern in (_css_url_re, _css_import_re):
        for match in pattern.finditer(raw):
            yield match.group(1), match.start(1)
# HTML attributes that carry links, plus XLink hrefs and a few extras.
_link_attrs = set(html.defs.link_attrs) | {XLINK('href'), 'poster', 'altimg'}
def iterlinks(root, find_links_in_css=True):
    '''
    Iterate over all links in a OEB Document.
    Yields 4-tuples (element, attribute, link, pos); attribute is None for
    links found inside element text (e.g. <style> content), and pos is the
    character offset of the link within the attribute value or text.
    :param root: A valid lxml.etree element.
    '''
    assert etree.iselement(root)
    for el in root.iter('*'):
        try:
            tag = barename(el.tag).lower()
        except Exception:
            # Comments/PIs and non-string tags have no barename.
            continue
        attribs = el.attrib
        if tag == 'object':
            codebase = None
            # <object> tags have attributes that are relative to
            # codebase
            if 'codebase' in attribs:
                codebase = el.get('codebase')
                yield (el, 'codebase', codebase, 0)
            for attrib in 'classid', 'data':
                if attrib in attribs:
                    value = el.get(attrib)
                    if codebase is not None:
                        value = urljoin(codebase, value)
                    yield (el, attrib, value, 0)
            if 'archive' in attribs:
                # archive is a space separated list of URLs.
                for match in _archive_re.finditer(el.get('archive')):
                    value = match.group(0)
                    if codebase is not None:
                        value = urljoin(codebase, value)
                    yield (el, 'archive', value, match.start())
        else:
            for attr in attribs:
                if attr in _link_attrs:
                    yield (el, attr, attribs[attr], 0)
        if not find_links_in_css:
            continue
        # Links inside inline <style> elements and style="" attributes.
        if tag == 'style' and el.text:
            for match in _css_url_re.finditer(el.text):
                yield (el, None, match.group(1), match.start(1))
            for match in _css_import_re.finditer(el.text):
                yield (el, None, match.group(1), match.start(1))
        if 'style' in attribs:
            for match in _css_url_re.finditer(attribs['style']):
                yield (el, 'style', match.group(1), match.start(1))
def make_links_absolute(root, base_url):
    '''
    Make all links in the document absolute, given the
    ``base_url`` for the document (the full URL where the document
    came from)
    '''
    rewrite_links(root, lambda href: urljoin(base_url, href))
def resolve_base_href(root):
    """Resolve any <base href="..."> in *root*: drop the <base> tags and make
    every link absolute relative to the last base href found (matching
    lxml.html semantics)."""
    base_href = None
    basetags = root.xpath('//base[@href]|//h:base[@href]',
            namespaces=XPNSMAP)
    for b in basetags:
        base_href = b.get('href')
        b.drop_tree()  # requires lxml.html elements
    if not base_href:
        return
    # make_links_absolute() in this module takes only (root, base_url);
    # passing resolve_base_href=False (copied from lxml's method API)
    # raised TypeError whenever a <base> tag was present.
    make_links_absolute(root, base_href)
def rewrite_links(root, link_repl_func, resolve_base_href=False):
    '''
    Rewrite all the links in the document. For each link
    ``link_repl_func(link)`` will be called, and the return value
    will replace the old link.
    Note that links may not be absolute (unless you first called
    ``make_links_absolute()``), and may be internal (e.g.,
    ``'#anchor'``). They can also be values like
    ``'mailto:email'`` or ``'javascript:expr'``.
    If the ``link_repl_func`` returns None, the attribute or
    tag text will be removed completely.
    '''
    from css_parser import CSSParser, log, replaceUrls
    log.setLevel(logging.WARN)
    log.raiseExceptions = False
    if resolve_base_href:
        # The boolean parameter shadows the module-level resolve_base_href()
        # function; calling the parameter itself (as this code previously
        # did) raised TypeError. Look the function up in module namespace.
        globals()['resolve_base_href'](root)
    for el, attrib, link, pos in iterlinks(root, find_links_in_css=False):
        new_link = link_repl_func(link.strip())
        if new_link == link:
            continue
        if new_link is None:
            # Remove the attribute or element content
            if attrib is None:
                el.text = ''
            else:
                del el.attrib[attrib]
            continue
        if attrib is None:
            # Link embedded in element text: splice the replacement in.
            new = el.text[:pos] + new_link + el.text[pos+len(link):]
            el.text = new
        else:
            cur = el.attrib[attrib]
            if not pos and len(cur) == len(link):
                # Most common case
                el.attrib[attrib] = new_link
            else:
                new = cur[:pos] + new_link + cur[pos+len(link):]
                el.attrib[attrib] = new
    # Rewrite URLs inside CSS (<style> elements and style attributes) using a
    # lenient parser that never fetches external resources.
    parser = CSSParser(raiseExceptions=False, log=_css_logger,
            fetcher=lambda x:(None, ''))
    for el in root.iter(etree.Element):
        try:
            tag = el.tag
        except UnicodeDecodeError:
            continue
        if tag in (XHTML('style'), SVG('style')) and el.text and \
                (_css_url_re.search(el.text) is not None or '@import' in
                        el.text):
            stylesheet = parser.parseString(el.text, validate=False)
            replaceUrls(stylesheet, link_repl_func)
            repl = css_text(stylesheet)
            el.text = '\n'+ clean_xml_chars(repl) + '\n'
        text = el.get('style')
        if text and _css_url_re.search(text) is not None:
            try:
                stext = parser.parseStyle(text, validate=False)
            except Exception:
                # Parsing errors are raised by css_parser
                continue
            replaceUrls(stext, link_repl_func)
            repl = css_text(stext).replace('\n', ' ').replace('\r',
                    ' ')
            el.set('style', repl)
# Media type constants and the sets used to classify manifest items.
types_map = get_types_map()
EPUB_MIME = types_map['.epub']
XHTML_MIME = types_map['.xhtml']
CSS_MIME = types_map['.css']
NCX_MIME = types_map['.ncx']
OPF_MIME = types_map['.opf']
PAGE_MAP_MIME = 'application/oebps-page-map+xml'
OEB_DOC_MIME = 'text/x-oeb1-document'
OEB_CSS_MIME = 'text/x-oeb1-css'
OPENTYPE_MIME = types_map['.otf']
GIF_MIME = types_map['.gif']
JPEG_MIME = types_map['.jpeg']
PNG_MIME = types_map['.png']
SVG_MIME = types_map['.svg']
WEBP_MIME = types_map['.webp']
BINARY_MIME = 'application/octet-stream'
XHTML_CSS_NAMESPACE = '@namespace "%s";\n' % XHTML_NS
OEB_STYLES = {CSS_MIME, OEB_CSS_MIME, 'text/x-oeb-css', 'xhtml/css'}
OEB_DOCS = {XHTML_MIME, 'text/html', OEB_DOC_MIME,
            'text/x-oeb-document'}
OEB_RASTER_IMAGES = {GIF_MIME, JPEG_MIME, PNG_MIME, WEBP_MIME}
OEB_IMAGES = {GIF_MIME, JPEG_MIME, PNG_MIME, SVG_MIME}
MS_COVER_TYPE = 'other.ms-coverimage-standard'
# Regexes for entity references, whitespace collapsing, Clark-notation and
# prefix:name detection, XML declarations and CSS url() values.
ENTITY_RE = re.compile(r'&([a-zA-Z_:][a-zA-Z0-9.-_:]+);')
COLLAPSE_RE = re.compile(r'[ \t\r\n\v]+')
QNAME_RE = re.compile(r'^[{][^{}]+[}][^{}]+$')
PREFIXNAME_RE = re.compile(r'^[^:]+[:][^:]+')
XMLDECL_RE = re.compile(r'^\s*<[?]xml.*?[?]>')
CSSURL_RE = re.compile(r'''url[(](?P<q>["']?)(?P<url>[^)]+)(?P=q)[)]''')
def element(parent, *args, **kwargs):
    """Create an element: a SubElement of *parent*, or a free-standing
    Element when parent is None."""
    if parent is None:
        return etree.Element(*args, **kwargs)
    return etree.SubElement(parent, *args, **kwargs)
def prefixname(name, nsrmap):
    """Convert a Clark-notation name to prefix:local form using the reverse
    namespace map *nsrmap*; names that are not qnames or whose namespace is
    unknown are returned unchanged."""
    if not isqname(name):
        return name
    ns = namespace(name)
    try:
        prefix = nsrmap[ns]
    except KeyError:
        return name
    local = barename(name)
    # An empty prefix maps into the default namespace.
    return f'{prefix}:{local}' if prefix else local
def isprefixname(name):
    """True if *name* looks like prefix:local; falsy input is returned as-is."""
    if not name:
        return name
    return PREFIXNAME_RE.match(name) is not None
def qname(name, nsmap):
    """Convert prefix:local to Clark notation using *nsmap*; names without a
    prefix or with an unknown prefix pass through unchanged."""
    if not isprefixname(name):
        return name
    prefix, _, local = name.partition(':')
    if prefix not in nsmap:
        return name
    return f'{{{nsmap[prefix]}}}{local}'
def isqname(name):
    """True if *name* is in Clark ({uri}local) notation; falsy input is
    returned as-is."""
    if not name:
        return name
    return QNAME_RE.match(name) is not None
def XPath(expr):
    # Compile an XPath expression bound to the standard OEB prefixes.
    return etree.XPath(expr, namespaces=XPNSMAP)
def xpath(elem, expr):
    # Evaluate an XPath expression on *elem* with the standard OEB prefixes.
    return elem.xpath(expr, namespaces=XPNSMAP)
def xml2str(root, pretty_print=False, strip_comments=False, with_tail=True):
    """Serialize *root* to UTF-8 bytes with an XML declaration.

    When comments are kept, '--' inside them is rewritten to '__' because it
    trips up Adobe Digital Editions; with strip_comments they are removed
    entirely after serialization.
    """
    if not strip_comments:
        # -- in comments trips up adobe digital editions
        for comment in root.iterdescendants(etree.Comment):
            if comment.text and '--' in comment.text:
                comment.text = comment.text.replace('--', '__')
    raw = etree.tostring(root, encoding='utf-8', xml_declaration=True,
                         pretty_print=pretty_print, with_tail=with_tail)
    if strip_comments:
        raw = re.compile(br'<!--.*?-->', re.DOTALL).sub(b'', raw)
    return raw
def xml2text(elem, pretty_print=False, method='text'):
    """Serialize *elem* to str using the given lxml output method (text by
    default), never including its tail."""
    return etree.tostring(
        elem, method=method, encoding='unicode',
        with_tail=False, pretty_print=pretty_print)
def escape_cdata(root):
    # Wrap inline <style>/<script> content containing markup-significant
    # characters in CDATA sections so serialization does not escape them;
    # any embedded ']]>' is defused first.
    pat = re.compile(r'[<>&]')
    for elem in root.iterdescendants('{%s}style' % XHTML_NS, '{%s}script' % XHTML_NS):
        if elem.text and pat.search(elem.text) is not None:
            elem.text = etree.CDATA(elem.text.replace(']]>', r'\]\]\>'))
def serialize(data, media_type, pretty_print=False):
    """Serialize *data* to bytes for writing into a container.

    Accepts an lxml element, str, a css_parser object (anything with
    cssText), raw bytes, or None (which becomes b'').
    """
    if isinstance(data, etree._Element):
        is_oeb_doc = media_type in OEB_DOCS
        if is_oeb_doc:
            escape_cdata(data)
        raw = xml2str(data, pretty_print=pretty_print)
        if is_oeb_doc:
            # Convert self closing div|span|a|video|audio|iframe|etc tags
            # to normally closed ones, as they are interpreted
            # incorrectly by some browser based renderers
            raw = close_self_closing_tags(raw)
        return raw
    if isinstance(data, str):
        return data.encode('utf-8')
    if hasattr(data, 'cssText'):
        from calibre.ebooks.oeb.polish.utils import setup_css_parser_serialization
        setup_css_parser_serialization()
        text = data.cssText
        if isinstance(text, str):
            text = text.encode('utf-8')
        return text + b'\n'
    return b'' if data is None else bytes(data)
# Characters safe to leave unquoted in IRIs, in both str and bytes form;
# URL_UNSAFE holds the complements, indexed by int(isinstance(href, bytes)).
ASCII_CHARS = frozenset(codepoint_to_chr(x) for x in range(128))
UNIBYTE_CHARS = frozenset(x.encode('ascii') for x in ASCII_CHARS)
USAFE = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        'abcdefghijklmnopqrstuvwxyz'
        '0123456789' '_.-/~')
URL_SAFE = frozenset(USAFE)
URL_SAFE_BYTES = frozenset(USAFE.encode('ascii'))
URL_UNSAFE = [ASCII_CHARS - URL_SAFE, UNIBYTE_CHARS - URL_SAFE_BYTES]
del USAFE
def urlquote(href):
    """ Quote URL-unsafe characters, allowing IRI-safe characters.
    That is, this function returns valid IRIs not valid URIs. In particular,
    IRIs can contain non-ascii characters.

    Accepts either str or bytes and returns the same type.
    """
    result = []
    isbytes = isinstance(href, bytes)
    unsafe = URL_UNSAFE[int(isbytes)]
    esc, join = '%%%02x', ''
    if isbytes:
        esc, join = esc.encode('ascii'), b''
        # Iterate over length-1 byte strings: iterating a bytes object in
        # Python 3 yields ints, which would never match the frozenset of
        # single-byte strings in URL_UNSAFE[1] and cannot be b''.join()ed.
        chars = (href[i:i+1] for i in range(len(href)))
    else:
        chars = href
    for char in chars:
        if char in unsafe:
            char = esc % ord(char)
        result.append(char)
    return join.join(result)
def urlnormalize(href):
    """Convert a URL into normalized form, with all and only URL-unsafe
    characters URL quoted.
    """
    try:
        parts = urlparse(href)
    except ValueError as e:
        raise ValueError(f'Failed to parse the URL: {href!r} with underlying error: {as_unicode(e)}')
    if not parts.scheme or parts.scheme == 'file':
        # Local reference: keep only the path and fragment components
        path, frag = urldefrag(href)
        parts = ('', '', path, '', '', frag)
    normalized = []
    for part in parts:
        part = part.replace('\\', '/')
        # Unquote then requote so already-quoted input is not double-quoted
        normalized.append(urlquote(urlunquote(part)))
    return urlunparse(normalized)
def extract(elem):
    """
    Removes this element from the tree, including its children and
    text. The tail text is joined to the previous element or
    parent.
    """
    parent = elem.getparent()
    if parent is None:
        return
    tail = elem.tail
    if tail:
        sibling = elem.getprevious()
        if sibling is None:
            # No preceding sibling: tail attaches to the parent's text
            parent.text = (parent.text or '') + tail
        else:
            sibling.tail = (sibling.tail or '') + tail
    parent.remove(elem)
class DummyHandler(logging.Handler):
    """Routes WARNING-and-above log records into a calibre Log object.

    The target log is attached via the :attr:`log` attribute; while it is
    None, records are silently dropped.
    """

    def __init__(self):
        super().__init__(logging.WARNING)
        self.setFormatter(logging.Formatter('%(message)s'))
        self.log = None

    def emit(self, record):
        if self.log is None:
            return
        msg = self.format(record)
        if record.levelno >= logging.ERROR:
            self.log.error(msg)
        else:
            self.log.warn(msg)
# Dedicated logger for css_parser messages; its records are forwarded to the
# currently attached calibre Log (if any) through a DummyHandler instance.
_css_logger = logging.getLogger('calibre.css')
_css_logger.setLevel(logging.WARNING)
_css_log_handler = DummyHandler()
_css_logger.addHandler(_css_log_handler)
class OEBError(Exception):
    """Generic OEB-processing error.

    Base exception raised by the OEB data model code.
    """
class NullContainer:
    """An empty container.
    For use with book formats which do not support container-like access.
    """

    def __init__(self, log):
        self.log = log

    def exists(self, path):
        """Nothing exists in a NullContainer."""
        return False

    def namelist(self):
        """A NullContainer contains no files."""
        return []

    def read(self, path):
        """Reading is always an error."""
        raise OEBError('Attempt to read from NullContainer')

    def write(self, path):
        """Writing is always an error."""
        raise OEBError('Attempt to write to NullContainer')
class DirContainer:
    """Filesystem directory container.

    Wraps a directory on disk (or the directory holding an OPF file, when
    constructed with a path to one) behind the read/write/exists/namelist
    container interface. Paths passed to the public methods are URL-quoted
    book-internal hrefs; they are unquoted before touching the filesystem.
    """

    def __init__(self, path, log, ignore_opf=False):
        # *path* may be a directory or an OPF file; in the latter case the
        # OPF's directory becomes the root. Unless ignore_opf is True, the
        # first *.opf found in the tree is remembered in self.opfname.
        self.log = log
        if isbytestring(path):
            path = path.decode(filesystem_encoding)
        self.opfname = None
        ext = os.path.splitext(path)[1].lower()
        if ext == '.opf':
            self.opfname = os.path.basename(path)
            self.rootdir = os.path.dirname(path)
            return
        self.rootdir = path
        if not ignore_opf:
            for path in self.namelist():
                ext = os.path.splitext(path)[1].lower()
                if ext == '.opf':
                    self.opfname = path
                    return

    def _unquote(self, path):
        # unquote must run on a bytestring and will return a bytestring
        # If it runs on a unicode object, it returns a double encoded unicode
        # string: unquote(u'%C3%A4') != unquote(b'%C3%A4').decode('utf-8')
        # and the latter is correct
        if isinstance(path, str):
            path = path.encode('utf-8')
        return urlunquote(path).decode('utf-8')

    def read(self, path):
        """Return the raw bytes of the file at href *path* (the remembered
        OPF when *path* is None)."""
        if path is None:
            path = self.opfname
        path = os.path.join(self.rootdir, self._unquote(path))
        with open(path, 'rb') as f:
            return f.read()

    def write(self, path, data):
        """Write *data* to href *path*, creating parent directories as
        needed. Returns the number of bytes written."""
        path = os.path.join(self.rootdir, self._unquote(path))
        dir = os.path.dirname(path)
        if not os.path.isdir(dir):
            os.makedirs(dir)
        with open(path, 'wb') as f:
            return f.write(data)

    def exists(self, path):
        """Return True only if href *path* resolves to an existing file."""
        if not path:
            return False
        try:
            path = os.path.join(self.rootdir, self._unquote(path))
        except ValueError: # Happens if path contains quoted special chars
            return False
        try:
            return os.path.isfile(path)
        except UnicodeEncodeError:
            # On linux, if LANG is unset, the os.stat call tries to encode the
            # unicode path using ASCII
            # To replicate try:
            # LANG=en_US.ASCII python -c "import os; os.stat(u'Espa\xf1a')"
            return os.path.isfile(path.encode(filesystem_encoding))

    def namelist(self):
        """Walk the root directory and return all file paths (including the
        rootdir prefix), with backslashes normalized to '/'. Filenames that
        cannot be decoded are silently skipped."""
        names = []
        base = self.rootdir
        for root, dirs, files in os.walk(base):
            for fname in files:
                fname = os.path.join(root, fname)
                if isinstance(fname, bytes):
                    try:
                        fname = fname.decode(filesystem_encoding)
                    except Exception:
                        try:
                            fname = fname.decode('utf-8')
                        except Exception:
                            continue
                fname = fname.replace('\\', '/')
                names.append(fname)
        return names
class Metadata:
    """A collection of OEB data model metadata.
    Provides access to the list of items associated with a particular metadata
    term via the term's local name using either Python container or attribute
    syntax. Return an empty list for any terms with no currently associated
    metadata items.
    """
    # Dublin Core terms; matching bare/DC-namespaced terms are coerced to DC
    DC_TERMS = {'contributor', 'coverage', 'creator', 'date',
                'description', 'format', 'identifier', 'language',
                'publisher', 'relation', 'rights', 'source',
                'subject', 'title', 'type'}
    # calibre-specific terms; coerced into the calibre namespace
    CALIBRE_TERMS = {'series', 'series_index', 'rating', 'timestamp',
                     'publication_type', 'title_sort'}
    # Map of bare attribute names to their namespaced OPF 2.0 equivalents
    OPF_ATTRS = {'role': OPF('role'), 'file-as': OPF('file-as'),
                 'scheme': OPF('scheme'), 'event': OPF('event'),
                 'type': XSI('type'), 'lang': XML('lang'), 'id': 'id'}
    OPF1_NSMAP = {'dc': DC11_NS, 'oebpackage': OPF1_NS}
    OPF2_NSMAP = {'opf': OPF2_NS, 'dc': DC11_NS, 'dcterms': DCTERMS_NS,
                  'xsi': XSI_NS, 'calibre': CALIBRE_NS}

    class Item:
        """An item of OEB data model metadata.
        The metadata term or name may be accessed via the :attr:`term` or
        :attr:`name` attributes. The metadata value or content may be accessed
        via the :attr:`value` or :attr:`content` attributes, or via Unicode or
        string representations of the object.
        OEB data model metadata attributes may be accessed either via their
        fully-qualified names using the Python container access syntax, or via
        their local names using Python attribute syntax. Only attributes
        allowed by the OPF 2.0 specification are supported.
        """

        class Attribute:
            """Smart accessor for allowed OEB metadata item attributes."""

            def __init__(self, attr, allowed=None):
                if not callable(attr):
                    # Wrap a fixed attribute name in a constant callable
                    attr_, attr = attr, lambda term: attr_
                self.attr = attr
                self.allowed = allowed

            def term_attr(self, obj):
                # Resolve the concrete attribute name for obj's term and
                # enforce the allowed-terms restriction, if any
                term = obj.term
                if namespace(term) != DC11_NS:
                    term = OPF('meta')
                allowed = self.allowed
                if allowed is not None and term not in allowed:
                    raise AttributeError(
                        'attribute {!r} not valid for metadata term {!r}'.format(
                            self.attr(term), barename(obj.term)))
                return self.attr(term)

            def __get__(self, obj, cls):
                if obj is None:
                    return None
                return obj.attrib.get(self.term_attr(obj), '')

            def __set__(self, obj, value):
                obj.attrib[self.term_attr(obj)] = value

        def __init__(self, term, value, attrib={}, nsmap={}, **kwargs):
            self.attrib = attrib = dict(attrib)
            self.nsmap = nsmap = dict(nsmap)
            attrib.update(kwargs)
            if namespace(term) == OPF2_NS:
                term = barename(term)
            ns = namespace(term)
            local = barename(term).lower()
            if local in Metadata.DC_TERMS and (not ns or ns in DC_NSES):
                # Anything looking like Dublin Core is coerced
                term = DC(local)
            elif local in Metadata.CALIBRE_TERMS and ns in (CALIBRE_NS, ''):
                # Ditto for Calibre-specific metadata
                term = CALIBRE(local)
            self.term = term
            self.value = value
            # Normalize attribute names to their namespaced OPF 2.0 forms
            for attr, value in tuple(iteritems(attrib)):
                if isprefixname(value):
                    attrib[attr] = qname(value, nsmap)
                nsattr = Metadata.OPF_ATTRS.get(attr, attr)
                if nsattr == OPF('scheme') and namespace(term) != DC11_NS:
                    # The opf:meta element takes @scheme, not @opf:scheme
                    nsattr = 'scheme'
                if attr != nsattr:
                    attrib[nsattr] = attrib.pop(attr)

        @property
        def name(self):
            return self.term

        @property
        def content(self):
            return self.value

        @content.setter
        def content(self, value):
            self.value = value

        scheme = Attribute(lambda term: 'scheme' if
                           term == OPF('meta') else OPF('scheme'),
                           [DC('identifier'), OPF('meta')])
        file_as = Attribute(OPF('file-as'), [DC('creator'), DC('contributor'),
                                             DC('title')])
        role = Attribute(OPF('role'), [DC('creator'), DC('contributor')])
        event = Attribute(OPF('event'), [DC('date')])
        id = Attribute('id')
        type = Attribute(XSI('type'), [DC('date'), DC('format'),
                                       DC('type')])
        lang = Attribute(XML('lang'), [DC('contributor'), DC('coverage'),
                                       DC('creator'), DC('publisher'),
                                       DC('relation'), DC('rights'),
                                       DC('source'), DC('subject'),
                                       OPF('meta')])

        def __getitem__(self, key):
            return self.attrib[key]

        def __setitem__(self, key, value):
            self.attrib[key] = value

        def __contains__(self, key):
            return key in self.attrib

        def get(self, key, default=None):
            return self.attrib.get(key, default)

        def __repr__(self):
            return 'Item(term=%r, value=%r, attrib=%r)' \
                % (barename(self.term), self.value, self.attrib)

        def __str__(self):
            return as_unicode(self.value)

        def to_opf1(self, dcmeta=None, xmeta=None, nsrmap={}):
            """Serialize this item as OPF 1.x: DC terms go into dc-metadata,
            everything else into x-metadata as a <meta> element."""
            attrib = {}
            for key, value in self.attrib.items():
                if namespace(key) == OPF2_NS:
                    key = barename(key)
                attrib[key] = prefixname(value, nsrmap)
            if namespace(self.term) == DC11_NS:
                name = DC(icu_title(barename(self.term)))
                elem = element(dcmeta, name, attrib=attrib)
                elem.text = self.value
            else:
                elem = element(xmeta, 'meta', attrib=attrib)
                elem.attrib['name'] = prefixname(self.term, nsrmap)
                elem.attrib['content'] = prefixname(self.value, nsrmap)
            return elem

        def to_opf2(self, parent=None, nsrmap={}):
            """Serialize this item as OPF 2.0: DC terms become namespaced
            elements, everything else a <opf:meta> element."""
            attrib = {}
            for key, value in self.attrib.items():
                attrib[key] = prefixname(value, nsrmap)
            if namespace(self.term) == DC11_NS:
                elem = element(parent, self.term, attrib=attrib)
                try:
                    elem.text = self.value
                except:
                    # Non-string values cannot be assigned as element text
                    elem.text = repr(self.value)
            else:
                elem = element(parent, OPF('meta'), attrib=attrib)
                elem.attrib['name'] = prefixname(self.term, nsrmap)
                elem.attrib['content'] = prefixname(self.value, nsrmap)
            return elem

    def __init__(self, oeb):
        self.oeb = oeb
        # Maps bare term name -> list of Item objects
        self.items = defaultdict(list)
        self.primary_writing_mode = None

    def add(self, term, value, attrib={}, nsmap={}, **kwargs):
        """Add a new metadata item."""
        item = self.Item(term, value, attrib, nsmap, **kwargs)
        items = self.items[barename(item.term)]
        items.append(item)
        return item

    def iterkeys(self):
        yield from self.items
    __iter__ = iterkeys

    def clear(self, key):
        """Remove all items for *key*, clearing the list in place so that
        existing references to it remain valid."""
        l = self.items[key]
        del l[:]

    def filter(self, key, predicate):
        """Remove, in place, all items for *key* that match *predicate*."""
        l = self.items[key]
        l[:] = [x for x in l if not predicate(x)]

    def __getitem__(self, key):
        return self.items[key]

    def __contains__(self, key):
        return key in self.items

    def __getattr__(self, term):
        return self.items[term]

    @property
    def _nsmap(self):
        # Union of the namespace maps of every item
        nsmap = {}
        for term in self.items:
            for item in self.items[term]:
                nsmap.update(item.nsmap)
        return nsmap

    @property
    def _opf1_nsmap(self):
        nsmap = self._nsmap
        # Collect the keys first: deleting from a dict while iterating over
        # its items() view raises RuntimeError on Python 3
        for key in [k for k, v in nsmap.items()
                    if v in OPF_NSES or v in DC_NSES]:
            del nsmap[key]
        return nsmap

    @property
    def _opf2_nsmap(self):
        nsmap = self._nsmap
        nsmap.update(OPF2_NSMAP)
        return nsmap

    def to_opf1(self, parent=None):
        """Serialize all metadata as an OPF 1.x <metadata> element."""
        nsmap = self._opf1_nsmap
        nsrmap = {value: key for key, value in iteritems(nsmap)}
        elem = element(parent, 'metadata', nsmap=nsmap)
        dcmeta = element(elem, 'dc-metadata', nsmap=OPF1_NSMAP)
        xmeta = element(elem, 'x-metadata')
        for term in self.items:
            for item in self.items[term]:
                item.to_opf1(dcmeta, xmeta, nsrmap=nsrmap)
        if 'ms-chaptertour' not in self.items:
            chaptertour = self.Item('ms-chaptertour', 'chaptertour')
            chaptertour.to_opf1(dcmeta, xmeta, nsrmap=nsrmap)
        return elem

    def to_opf2(self, parent=None):
        """Serialize all metadata as an OPF 2.0 <metadata> element."""
        nsmap = self._opf2_nsmap
        nsrmap = {value: key for key, value in iteritems(nsmap)}
        elem = element(parent, OPF('metadata'), nsmap=nsmap)
        for term in self.items:
            for item in self.items[term]:
                item.to_opf2(elem, nsrmap=nsrmap)
        if self.primary_writing_mode:
            elem.append(elem.makeelement(OPF('meta'), attrib={'name':'primary-writing-mode', 'content':self.primary_writing_mode}))
        return elem
class Manifest:
    """Collection of files composing an OEB data model book.
    Provides access to the content of the files composing the book and
    attributes associated with those files, including their internal paths,
    unique identifiers, and MIME types.
    Itself acts as a :class:`set` of manifest items, and provides the following
    instance data member for dictionary-like access:
    :attr:`ids`: A dictionary in which the keys are the unique identifiers of
        the manifest items and the values are the items themselves.
    :attr:`hrefs`: A dictionary in which the keys are the internal paths of the
        manifest items and the values are the items themselves.
    """

    class Item:
        """An OEB data model book content file.
        Provides the following data members for accessing the file content and
        metadata associated with this particular file.
        :attr:`id`: Unique identifier.
        :attr:`href`: Book-internal path.
        :attr:`media_type`: MIME type of the file content.
        :attr:`fallback`: Unique id of any fallback manifest item associated
            with this manifest item.
        :attr:`spine_position`: Display/reading order index for book textual
            content. `None` for manifest items which are not part of the
            book's textual content.
        :attr:`linear`: `True` for textual content items which are part of the
            primary linear reading order and `False` for textual content items
            which are not (such as footnotes). Meaningless for items which
            have a :attr:`spine_position` of `None`.
        """

        def __init__(self, oeb, id, href, media_type,
                     fallback=None, loader=str, data=None):
            if href:
                href = str(href)
            self.oeb = oeb
            self.id = id
            self.href = self.path = urlnormalize(href)
            self.media_type = media_type
            self.fallback = fallback
            self.override_css_fetch = None
            self.resolve_css_imports = True
            self.spine_position = None
            self.linear = True
            # With neither explicit data nor loader, read lazily from the
            # book's container
            if loader is None and data is None:
                loader = oeb.container.read
            self._loader = loader
            # Parsed/cached content; populated on first access of .data
            self._data = data

        def __repr__(self):
            return 'Item(id=%r, href=%r, media_type=%r)' \
                % (self.id, self.href, self.media_type)

        # Parsing {{{
        def _parse_xml(self, data):
            # Decode and parse as generic XML; returns None for empty input
            if not data:
                return
            data = xml_to_unicode(data, strip_encoding_pats=True,
                    assume_utf8=True, resolve_entities=True)[0]
            return safe_xml_fromstring(data)

        def _parse_xhtml(self, data):
            # Parse as (X)HTML; falls back to plain XML parsing for content
            # that turns out not to be HTML (e.g. NCX files)
            orig_data = data
            fname = urlunquote(self.href)
            self.oeb.log.debug('Parsing', fname, '...')
            self.oeb.html_preprocessor.current_href = self.href
            try:
                data = parse_html(data, log=self.oeb.log,
                        decoder=self.oeb.decode,
                        preprocessor=self.oeb.html_preprocessor,
                        filename=fname, non_html_file_tags={'ncx'})
            except NotHTML:
                return self._parse_xml(orig_data)
            return data

        def _parse_txt(self, data):
            # Plain text claiming to contain <html> is parsed as XHTML;
            # otherwise it is treated as markdown and converted
            has_html = '<html>'
            if isinstance(data, bytes):
                has_html = has_html.encode('ascii')
            if has_html in data:
                return self._parse_xhtml(data)
            self.oeb.log.debug('Converting', self.href, '...')
            from calibre.ebooks.txt.processor import convert_markdown
            title = self.oeb.metadata.title
            if title:
                title = str(title[0])
            else:
                title = _('Unknown')
            return self._parse_xhtml(convert_markdown(data, title=title))

        def _parse_css(self, data):
            # Parse into a css_parser stylesheet, resolving @imports (unless
            # disabled) and dropping @page rules
            from css_parser import CSSParser, log, resolveImports
            from css_parser.css import CSSRule
            log.setLevel(logging.WARN)
            log.raiseExceptions = False
            self.oeb.log.debug('Parsing', self.href, '...')
            data = self.oeb.decode(data)
            data = self.oeb.css_preprocessor(data, add_namespace=False)
            parser = CSSParser(loglevel=logging.WARNING,
                               fetcher=self.override_css_fetch or self._fetch_css,
                               log=_css_logger)
            data = parser.parseString(data, href=self.href, validate=False)
            if self.resolve_css_imports:
                data = resolveImports(data)
            for rule in tuple(data.cssRules.rulesOfType(CSSRule.PAGE_RULE)):
                data.cssRules.remove(rule)
            return data

        def _fetch_css(self, path):
            # css_parser fetcher: resolve @import targets from the manifest
            hrefs = self.oeb.manifest.hrefs
            if path not in hrefs:
                self.oeb.logger.warn('CSS import of missing file %r' % path)
                return (None, None)
            item = hrefs[path]
            if item.media_type not in OEB_STYLES:
                self.oeb.logger.warn('CSS import of non-CSS file %r' % path)
                return (None, None)
            data = item.data.cssText
            enc = None if isinstance(data, str) else 'utf-8'
            return (enc, data)
        # }}}

        @property
        def data_as_bytes_or_none(self) -> Optional[bytes]:
            # Raw (unparsed) content via the loader; None when no loader
            if self._loader is None:
                return None
            return self._loader(getattr(self, 'html_input_href', self.href))

        @property
        def data(self):
            """Provides MIME type sensitive access to the manifest
            entry's associated content.
            - XHTML, HTML, and variant content is parsed as necessary to
              convert and return as an lxml.etree element in the XHTML
              namespace.
            - XML content is parsed and returned as an lxml.etree element.
            - CSS and CSS-variant content is parsed and returned as a css_parser
              CSS DOM stylesheet.
            - All other content is returned as a :class:`str` or :class:`bytes`
              object with no special parsing.
            """
            data = self._data
            if data is None:
                data = self.data_as_bytes_or_none
            try:
                mt = self.media_type.lower()
            except Exception:
                mt = 'application/octet-stream'
            if not isinstance(data, string_or_bytes):
                pass  # already parsed
            elif mt in OEB_DOCS:
                data = self._parse_xhtml(data)
            elif mt[-4:] in ('+xml', '/xml'):
                data = self._parse_xml(data)
            elif mt in OEB_STYLES:
                data = self._parse_css(data)
            elif mt == 'text/plain':
                self.oeb.log.warn('%s contains data in TXT format'%self.href,
                        'converting to HTML')
                data = self._parse_txt(data)
                self.media_type = XHTML_MIME
            self._data = data
            return data

        @data.setter
        def data(self, value):
            self._data = value

        @data.deleter
        def data(self):
            self._data = None

        def reparse_css(self):
            # Re-run CSS parsing on the current serialized form
            self._data = self._parse_css(str(self))

        def unload_data_from_memory(self, memory=None):
            """Drop in-memory bytes, replacing them with a loader that
            re-reads from a temporary file (or from *memory*, a path to an
            existing file), so the data stays lazily available."""
            if isinstance(self._data, bytes):
                if memory is None:
                    from calibre.ptempfile import PersistentTemporaryFile
                    pt = PersistentTemporaryFile(suffix='_oeb_base_mem_unloader.img')
                    with pt:
                        pt.write(self._data)
                    self.oeb._temp_files.append(pt.name)

                    def loader(*args):
                        with open(pt.name, 'rb') as f:
                            ans = f.read()
                        os.remove(pt.name)
                        return ans
                    self._loader = loader
                else:
                    def loader2(*args):
                        with open(memory, 'rb') as f:
                            ans = f.read()
                        return ans
                    self._loader = loader2
                self._data = None

        @property
        def unicode_representation(self):
            # Serialize the (possibly parsed) data to a str
            data = self.data
            if isinstance(data, etree._Element):
                return xml2text(data, pretty_print=self.oeb.pretty_print)
            if isinstance(data, str):
                return data
            if hasattr(data, 'cssText'):
                return css_text(data)
            return str(data)

        @property
        def bytes_representation(self):
            # Serialize the (possibly parsed) data to bytes
            return serialize(self.data, self.media_type, pretty_print=self.oeb.pretty_print)

        def __str__(self):
            return self.unicode_representation

        # Items compare and hash by identity
        def __eq__(self, other):
            return self is other

        def __ne__(self, other):
            return self is not other

        def __hash__(self):
            return id(self)

        @property
        def sort_key(self):
            # Order by spine position (non-spine items last), then media
            # type, then natural href order, then id
            href = self.href
            if isinstance(href, bytes):
                href = force_unicode(href)
            sp = self.spine_position if isinstance(self.spine_position, numbers.Number) else sys.maxsize
            return sp, (self.media_type or '').lower(), numeric_sort_key(href), self.id

        def relhref(self, href):
            """Convert the URL provided in :param:`href` from a book-absolute
            reference to a reference relative to this manifest item.
            """
            return rel_href(self.href, href)

        def abshref(self, href):
            """Convert the URL provided in :param:`href` from a reference
            relative to this manifest item to a book-absolute reference.
            """
            try:
                purl = urlparse(href)
            except ValueError:
                return href
            scheme = purl.scheme
            if scheme and scheme != 'file':
                # External URL: return unchanged
                return href
            purl = list(purl)
            purl[0] = ''
            href = urlunparse(purl)
            path, frag = urldefrag(href)
            if not path:
                # Fragment-only reference points into this item itself
                if frag:
                    return '#'.join((self.href, frag))
                else:
                    return self.href
            if '/' not in self.href:
                return href
            dirname = os.path.dirname(self.href)
            href = os.path.join(dirname, href)
            href = os.path.normpath(href).replace('\\', '/')
            return href

    def __init__(self, oeb):
        self.oeb = oeb
        self.items = set()
        # id -> Item and href -> Item lookup tables
        self.ids = {}
        self.hrefs = {}

    def add(self, id, href, media_type, fallback=None, loader=None, data=None):
        """Add a new item to the book manifest.
        The item's :param:`id`, :param:`href`, and :param:`media_type` are all
        required. A :param:`fallback` item-id is required for any items with a
        MIME type which is not one of the OPS core media types. Either the
        item's data itself may be provided with :param:`data`, or a loader
        function for the data may be provided with :param:`loader`, or the
        item's data may later be set manually via the :attr:`data` attribute.
        """
        item = self.Item(
            self.oeb, id, href, media_type, fallback, loader, data)
        self.items.add(item)
        self.ids[item.id] = item
        self.hrefs[item.href] = item
        return item

    def remove(self, item):
        """Removes :param:`item` from the manifest."""
        # Accepts either an Item or an item id
        if item in self.ids:
            item = self.ids[item]
        del self.ids[item.id]
        if item.href in self.hrefs:
            del self.hrefs[item.href]
        self.items.remove(item)
        if item in self.oeb.spine:
            self.oeb.spine.remove(item)

    def remove_duplicate_item(self, item):
        # Like remove(), but leaves self.hrefs and the spine untouched
        if item in self.ids:
            item = self.ids[item]
        del self.ids[item.id]
        self.items.remove(item)

    def generate(self, id=None, href=None):
        """Generate a new unique identifier and/or internal path for use in
        creating a new manifest item, using the provided :param:`id` and/or
        :param:`href` as bases.
        Returns an two-tuple of the new id and path. If either :param:`id` or
        :param:`href` are `None` then the corresponding item in the return
        tuple will also be `None`.
        """
        if id is not None:
            base = id
            index = 1
            while id in self.ids:
                id = base + str(index)
                index += 1
        if href is not None:
            href = urlnormalize(href)
            base, ext = os.path.splitext(href)
            index = 1
            # hrefs are compared case-insensitively
            lhrefs = {x.lower() for x in self.hrefs}
            while href.lower() in lhrefs:
                href = base + str(index) + ext
                index += 1
        return id, str(href)

    def __iter__(self):
        yield from self.items

    def __len__(self):
        return len(self.items)

    def values(self):
        return list(self.items)

    def __contains__(self, item):
        return item in self.items

    def to_opf1(self, parent=None):
        """Serialize as an OPF 1.x <manifest> element."""
        elem = element(parent, 'manifest')
        for item in self.items:
            media_type = item.media_type
            if media_type in OEB_DOCS:
                media_type = OEB_DOC_MIME
            elif media_type in OEB_STYLES:
                media_type = OEB_CSS_MIME
            attrib = {'id': item.id, 'href': urlunquote(item.href),
                      'media-type': media_type}
            if item.fallback:
                attrib['fallback'] = item.fallback
            element(elem, 'item', attrib=attrib)
        return elem

    def to_opf2(self, parent=None):
        """Serialize as an OPF 2.0 <manifest> element, items in sort order."""
        elem = element(parent, OPF('manifest'))
        for item in sorted(self.items, key=attrgetter('sort_key')):
            media_type = item.media_type
            if media_type in OEB_DOCS:
                media_type = XHTML_MIME
            elif media_type in OEB_STYLES:
                media_type = CSS_MIME
            attrib = {'id': item.id, 'href': urlunquote(item.href),
                      'media-type': media_type}
            if item.fallback:
                attrib['fallback'] = item.fallback
            element(elem, OPF('item'), attrib=attrib)
        return elem

    @property
    def main_stylesheet(self):
        # Defaults to the first CSS item found, unless explicitly set
        ans = getattr(self, '_main_stylesheet', None)
        if ans is None:
            for item in self:
                if item.media_type.lower() in OEB_STYLES:
                    ans = item
                    break
        return ans

    @main_stylesheet.setter
    def main_stylesheet(self, item):
        self._main_stylesheet = item
class Spine:
    """Collection of manifest items composing an OEB data model book's main
    textual content.
    The spine manages which manifest items compose the book's main textual
    content and the sequence in which they appear. Provides Python container
    access as a list-like object.
    """

    def __init__(self, oeb):
        self.oeb = oeb
        self.items = []
        self.page_progression_direction = None

    def _linear(self, linear):
        # Normalize the various spellings of the OPF linear attribute to a
        # bool; unrecognized values are passed through unchanged.
        if isinstance(linear, string_or_bytes):
            linear = linear.lower()
        if linear in ('no', 'false'):
            return False
        if linear is None or linear in ('yes', 'true'):
            return True
        return linear

    def _renumber(self, start):
        # Reassign spine_position for every item at or after *start*
        for pos in range(start, len(self.items)):
            self.items[pos].spine_position = pos

    def add(self, item, linear=None):
        """Append :param:`item` to the end of the `Spine`."""
        item.spine_position = len(self.items)
        self.items.append(item)
        item.linear = self._linear(linear)
        return item

    def insert(self, index, item, linear):
        """Insert :param:`item` at position :param:`index` in the `Spine`."""
        item.linear = self._linear(linear)
        self.items.insert(index, item)
        self._renumber(index)
        return item

    def remove(self, item):
        """Remove :param:`item` from the `Spine`."""
        pos = item.spine_position
        del self.items[pos]
        self._renumber(pos)
        item.spine_position = None

    def index(self, item):
        """Return the position of *item* in the spine, or -1 if absent."""
        for pos, candidate in enumerate(self):
            if candidate == item:
                return pos
        return -1

    def __iter__(self):
        yield from self.items

    def __getitem__(self, index):
        return self.items[index]

    def __len__(self):
        return len(self.items)

    def __contains__(self, item):
        return item in self.items

    def to_opf1(self, parent=None):
        """Serialize as an OPF 1.x <spine> (linear items only)."""
        elem = element(parent, 'spine')
        for item in self.items:
            if item.linear:
                element(elem, 'itemref', attrib={'idref': item.id})
        return elem

    def to_opf2(self, parent=None):
        """Serialize as an OPF 2.0 <spine>."""
        elem = element(parent, OPF('spine'))
        for item in self.items:
            attrib = {'idref': item.id}
            if not item.linear:
                attrib['linear'] = 'no'
            element(elem, OPF('itemref'), attrib=attrib)
        return elem
class Guide:
    """Collection of references to standard frequently-occurring sections
    within an OEB data model book.
    Provides dictionary-like access, in which the keys are the OEB reference
    type identifiers and the values are `Reference` objects.
    """

    class Reference:
        """Reference to a standard book section.
        Provides the following instance data members:
        :attr:`type`: Reference type identifier, as chosen from the list
            allowed in the OPF 2.0 specification.
        :attr:`title`: Human-readable section title.
        :attr:`href`: Book-internal URL of the referenced section. May include
            a fragment identifier.
        """
        _TYPES_TITLES = [('cover', __('Cover')),
                         ('title-page', __('Title page')),
                         ('toc', __('Table of Contents')),
                         ('index', __('Index')),
                         ('glossary', __('Glossary')),
                         ('acknowledgements', __('Acknowledgements')),
                         ('bibliography', __('Bibliography')),
                         ('colophon', __('Colophon')),
                         ('copyright-page', __('Copyright')),
                         ('dedication', __('Dedication')),
                         ('epigraph', __('Epigraph')),
                         ('foreword', __('Foreword')),
                         ('loi', __('List of illustrations')),
                         ('lot', __('List of tables')),
                         ('notes', __('Notes')),
                         ('preface', __('Preface')),
                         ('text', __('Main text'))]
        TITLES = dict(_TYPES_TITLES)
        TYPES = frozenset(TITLES)
        # Canonical ordering of standard reference types
        ORDER = {t: i for i, (t, _) in enumerate(_TYPES_TITLES)}

        def __init__(self, oeb, type, title, href):
            self.oeb = oeb
            lowered = type.lower()
            if lowered in self.TYPES:
                type = lowered
            elif type not in self.TYPES and \
                    not type.startswith('other.'):
                # Non-standard types must carry the 'other.' prefix
                type = 'other.' + type
            if not title and type in self.TITLES:
                # Fall back to the translated standard title for this type
                title = oeb.translate(self.TITLES[type])
            self.type = type
            self.title = title
            self.href = urlnormalize(href)

        def __repr__(self):
            return 'Reference(type=%r, title=%r, href=%r)' \
                % (self.type, self.title, self.href)

        @property
        def item(self):
            """The manifest item associated with this reference."""
            path = urldefrag(self.href)[0]
            return self.oeb.manifest.hrefs.get(path, None)

    def __init__(self, oeb):
        self.oeb = oeb
        self.refs = {}

    def add(self, type, title, href):
        """Add a new reference to the `Guide`."""
        if href:
            href = str(href)
        ref = self.Reference(self.oeb, type, title, href)
        self.refs[type] = ref
        return ref

    def remove(self, type):
        """Remove and return the reference for *type* (None if absent)."""
        return self.refs.pop(type, None)

    def remove_by_href(self, href):
        """Remove every reference whose target is *href*."""
        doomed = [t for t, ref in self.refs.items() if ref.href == href]
        for t in doomed:
            self.remove(t)

    def iterkeys(self):
        yield from self.refs
    __iter__ = iterkeys

    def values(self):
        """All references, sorted into canonical guide order."""
        return sorted(self.refs.values(),
                      key=lambda ref: ref.ORDER.get(ref.type, 10000))

    def items(self):
        yield from self.refs.items()

    def __getitem__(self, key):
        return self.refs[key]

    def get(self, key):
        return self.refs.get(key)

    def __delitem__(self, key):
        del self.refs[key]

    def __contains__(self, key):
        return key in self.refs

    def __len__(self):
        return len(self.refs)

    def to_opf1(self, parent=None):
        """Serialize as an OPF 1.x <guide> element."""
        elem = element(parent, 'guide')
        for ref in self.refs.values():
            attrib = {'type': ref.type, 'href': urlunquote(ref.href)}
            if ref.title:
                attrib['title'] = ref.title
            element(elem, 'reference', attrib=attrib)
        return elem

    def to_opf2(self, parent=None):
        """Serialize as an OPF 2.0 <guide> element (omitted when empty)."""
        if not len(self):
            return
        elem = element(parent, OPF('guide'))
        for ref in self.refs.values():
            attrib = {'type': ref.type, 'href': urlunquote(ref.href)}
            if ref.title:
                attrib['title'] = ref.title
            element(elem, OPF('reference'), attrib=attrib)
        return elem
class TOC:
    """Represents a hierarchical table of contents or navigation tree for
    accessing arbitrary semantic sections within an OEB data model book.
    Acts as a node within the navigation tree. Provides list-like access to
    sub-nodes. Provides the follow node instance data attributes:
    :attr:`title`: The title of this navigation node.
    :attr:`href`: Book-internal URL referenced by this node.
    :attr:`klass`: Optional semantic class referenced by this node.
    :attr:`id`: Option unique identifier for this node.
    :attr:`author`: Optional author attribution for periodicals <mbp:>
    :attr:`description`: Optional description attribute for periodicals <mbp:>
    :attr:`toc_thumbnail`: Optional toc thumbnail image
    """

    def __init__(self, title=None, href=None, klass=None, id=None,
            play_order=None, author=None, description=None, toc_thumbnail=None):
        self.title = title
        self.href = urlnormalize(href) if href else href
        self.klass = klass
        self.id = id
        self.nodes = []
        # Temporarily 0 so next_play_order() sees a defined value on self
        self.play_order = 0
        if play_order is None:
            play_order = self.next_play_order()
        self.play_order = play_order
        self.author = author
        self.description = description
        self.toc_thumbnail = toc_thumbnail

    def add(self, title, href, klass=None, id=None, play_order=0, author=None, description=None, toc_thumbnail=None):
        """Create and return a new sub-node of this node."""
        node = TOC(title, href, klass, id, play_order, author, description, toc_thumbnail)
        self.nodes.append(node)
        return node

    def remove(self, node):
        """Remove *node* from this subtree (searched recursively,
        depth-first). Returns True if it was found and removed."""
        for child in self.nodes:
            if child is node:
                self.nodes.remove(child)
                return True
            else:
                if child.remove(node):
                    return True
        return False

    def iter(self):
        """Iterate over this node and all descendants in depth-first order."""
        yield self
        for child in self.nodes:
            yield from child.iter()

    def count(self):
        # Number of descendant nodes (self excluded)
        return len(list(self.iter())) - 1

    def next_play_order(self):
        # One greater than the maximum play_order anywhere in this subtree
        entries = [x.play_order for x in self.iter()]
        base = max(entries) if entries else 0
        return base+1

    def has_href(self, href):
        """True if any node in this subtree references *href*."""
        for x in self.iter():
            if x.href == href:
                return True
        return False

    def has_text(self, text):
        """True if any node in this subtree has title *text*
        (case-insensitive)."""
        for x in self.iter():
            if x.title and x.title.lower() == text.lower():
                return True
        return False

    def iterdescendants(self, breadth_first=False):
        """Iterate over all descendant nodes, depth-first by default or
        breadth-first when *breadth_first* is True."""
        if breadth_first:
            for child in self.nodes:
                yield child
            for child in self.nodes:
                yield from child.iterdescendants(breadth_first=True)
        else:
            for child in self.nodes:
                yield from child.iter()

    def __iter__(self):
        """Iterate over all immediate child nodes."""
        yield from self.nodes

    def __getitem__(self, index):
        return self.nodes[index]

    def autolayer(self):
        """Make sequences of children pointing to the same content file into
        children of the first node referencing that file.
        """
        prev = None
        for node in list(self.nodes):
            if prev and urldefrag(prev.href)[0] == urldefrag(node.href)[0]:
                self.nodes.remove(node)
                prev.nodes.append(node)
            else:
                prev = node

    def depth(self):
        """The maximum depth of the navigation tree rooted at this node."""
        try:
            return max(node.depth() for node in self.nodes) + 1
        except ValueError:
            # max() on no children: this is a leaf
            return 1

    def get_lines(self, lvl=0):
        # Indented one-line-per-node textual dump of the subtree
        ans = [('\t'*lvl) + 'TOC: %s --> %s'%(self.title, self.href)]
        for child in self:
            ans.extend(child.get_lines(lvl+1))
        return ans

    def __str__(self):
        return '\n'.join(self.get_lines())

    def to_opf1(self, tour):
        """Serialize children recursively as OPF 1.x tour <site> elements."""
        for node in self.nodes:
            element(tour, 'site', attrib={
                'title': node.title, 'href': urlunquote(node.href)})
            node.to_opf1(tour)
        return tour

    def to_ncx(self, parent=None):
        """Serialize children recursively as NCX <navPoint> elements."""
        if parent is None:
            parent = etree.Element(NCX('navMap'))
        for node in self.nodes:
            id = node.id or uuid_id()
            po = node.play_order
            # NCX playOrder values start at 1
            if po == 0:
                po = 1
            attrib = {'id': id, 'playOrder': str(po)}
            if node.klass:
                attrib['class'] = node.klass
            point = element(parent, NCX('navPoint'), attrib=attrib)
            label = etree.SubElement(point, NCX('navLabel'))
            title = node.title
            if title:
                title = re.sub(r'\s+', ' ', title)
            element(label, NCX('text')).text = title
            # Do not unescape this URL as ADE requires it to be escaped to
            # handle semi colons and other special characters in the file names
            element(point, NCX('content'), src=node.href)
            node.to_ncx(point)
        return parent

    def rationalize_play_orders(self):
        '''
        Ensure that all nodes with the same play_order have the same href and
        with different play_orders have different hrefs.
        '''
        def po_node(n):
            # First other node sharing n's play_order, in document order
            for x in self.iter():
                if x is n:
                    return
                if x.play_order == n.play_order:
                    return x

        def href_node(n):
            # First other node sharing n's href, in document order
            for x in self.iter():
                if x is n:
                    return
                if x.href == n.href:
                    return x

        for x in self.iter():
            y = po_node(x)
            if y is not None:
                if x.href != y.href:
                    x.play_order = getattr(href_node(x), 'play_order',
                            self.next_play_order())
            y = href_node(x)
            if y is not None:
                x.play_order = y.play_order
class PageList:
    """Collection of named "pages" to mapped positions within an OEB data model
    book's textual content.
    Provides list-like access to the pages.
    """

    class Page:
        """Represents a mapping between a page name and a position within
        the book content.
        Provides the following instance data attributes:
        :attr:`name`: The name of this page. Generally a number.
        :attr:`href`: Book-internal URL at which point this page begins.
        :attr:`type`: Must be one of 'front' (for prefatory pages, as commonly
        labeled in print with small-case Roman numerals), 'normal' (for
        standard pages, as commonly labeled in print with Arabic numerals),
        or 'special' (for other pages, as commonly not labeled in any
        fashion in print, such as the cover and title pages).
        :attr:`klass`: Optional semantic class of this page.
        :attr:`id`: Optional unique identifier for this page.
        """
        TYPES = {'front', 'normal', 'special'}

        def __init__(self, name, href, type='normal', klass=None, id=None):
            self.name = str(name)
            self.href = urlnormalize(href)
            # Unknown page types are silently coerced to 'normal'
            self.type = type if type in self.TYPES else 'normal'
            self.id = id
            self.klass = klass

    def __init__(self):
        self.pages = []

    def add(self, name, href, type='normal', klass=None, id=None):
        """Create a new page and add it to the `PageList`."""
        page = self.Page(name, href, type, klass, id)
        self.pages.append(page)
        return page

    def __len__(self):
        return len(self.pages)

    def __iter__(self):
        yield from self.pages

    def __getitem__(self, index):
        return self.pages[index]

    def pop(self, index=-1):
        return self.pages.pop(index)

    def remove(self, page):
        return self.pages.remove(page)

    def to_ncx(self, parent=None):
        # Serialize all pages as an NCX <pageList>; each type ('front',
        # 'normal', 'special') gets its own 1-based value counter.
        plist = element(parent, NCX('pageList'), id=uuid_id())
        values = {t: count(1) for t in ('front', 'normal', 'special')}
        for page in self.pages:
            id = page.id or uuid_id()
            type = page.type
            value = str(next(values[type]))
            # playOrder is emitted as a '0' placeholder here; presumably
            # recomputed later (see OEBBook._update_playorder) -- confirm.
            attrib = {'id': id, 'value': value, 'type': type, 'playOrder': '0'}
            if page.klass:
                attrib['class'] = page.klass
            ptarget = element(plist, NCX('pageTarget'), attrib=attrib)
            label = element(ptarget, NCX('navLabel'))
            element(label, NCX('text')).text = page.name
            element(ptarget, NCX('content'), src=page.href)
        return plist

    def to_page_map(self):
        # Serialize all pages as an Adobe-style page-map XML document.
        pmap = etree.Element(OPF('page-map'), nsmap={None: OPF2_NS})
        for page in self.pages:
            element(pmap, OPF('page'), name=page.name, href=page.href)
        return pmap
class OEBBook:
    """Representation of a book in the IDPF OEB data model."""
    COVER_SVG_XP = XPath('h:body//svg:svg[position() = 1]')
    COVER_OBJECT_XP = XPath('h:body//h:object[@data][position() = 1]')

    def __init__(self, logger,
            html_preprocessor,
            css_preprocessor=CSSPreProcessor(),
            encoding='utf-8', pretty_print=False,
            input_encoding='utf-8'):
        """Create empty book. Arguments:
        :param:`encoding`: Default encoding for textual content read
            from an external container.
        :param:`pretty_print`: Whether or not the canonical string form
            of XML markup is pretty-printed.
        :param html_preprocessor: A callable that takes a unicode object
            and returns a unicode object. Will be called on all html files
            before they are parsed.
        :param css_preprocessor: A callable that takes a unicode object
            and returns a unicode object. Will be called on all CSS files
            before they are parsed.
        :param:`logger`: A Log object to use for logging all messages
            related to the processing of this book.  It is accessible
            via the instance data members :attr:`logger,log`.
        It provides the following public instance data members for
        accessing various parts of the OEB data model:
        :attr:`metadata`: Metadata such as title, author name(s), etc.
        :attr:`manifest`: Manifest of all files included in the book,
            including MIME types and fallback information.
        :attr:`spine`: In-order list of manifest items which compose
            the textual content of the book.
        :attr:`guide`: Collection of references to standard positions
            within the text, such as the cover, preface, etc.
        :attr:`toc`: Hierarchical table of contents.
        :attr:`pages`: List of "pages," such as indexed to a print edition of
            the same text.
        """
        _css_log_handler.log = logger
        self.encoding = encoding
        self.input_encoding = input_encoding
        self.html_preprocessor = html_preprocessor
        self.css_preprocessor = css_preprocessor
        self.pretty_print = pretty_print
        self.logger = self.log = logger
        self.version = '2.0'
        self.container = NullContainer(self.log)
        self.metadata = Metadata(self)
        self.uid = None
        self.manifest = Manifest(self)
        self.spine = Spine(self)
        self.guide = Guide(self)
        self.toc = TOC()
        self.pages = PageList()
        self.auto_generated_toc = True
        self._temp_files = []

    def clean_temp_files(self):
        # Best-effort removal of temporary files created for this book;
        # failures (missing/locked files) are deliberately ignored.
        for path in self._temp_files:
            try:
                os.remove(path)
            except Exception:
                pass

    @classmethod
    def generate(cls, opts):
        """Generate an OEBBook instance from command-line options."""
        # NOTE(review): cls() is called without the required logger and
        # html_preprocessor arguments; this appears unused or dependent on a
        # subclass override -- confirm before relying on it.
        encoding = opts.encoding
        pretty_print = opts.pretty_print
        return cls(encoding=encoding, pretty_print=pretty_print)

    def translate(self, text):
        """Translate :param:`text` into the book's primary language."""
        lang = str(self.metadata.language[0])
        lang = lang.split('-', 1)[0].lower()
        return translate(lang, text)

    def decode(self, data):
        """Automatically decode :param:`data` into a `unicode` object."""
        def fix_data(d):
            # Normalize all line endings to '\n'
            return d.replace('\r\n', '\n').replace('\r', '\n')
        if isinstance(data, str):
            return fix_data(data)
        # Try a byte-order mark first, then the declared input encoding,
        # then UTF-8, and finally calibre's heuristic sniffer.
        bom_enc = None
        if data[:4] in (b'\0\0\xfe\xff', b'\xff\xfe\0\0'):
            bom_enc = {b'\0\0\xfe\xff':'utf-32-be',
                    b'\xff\xfe\0\0':'utf-32-le'}[data[:4]]
            data = data[4:]
        elif data[:2] in (b'\xff\xfe', b'\xfe\xff'):
            # BUGFIX: the big-endian key was previously the *text* string
            # 'b\xfe\xff' (the b prefix inside the quotes), so a UTF-16-BE
            # BOM raised KeyError instead of selecting 'utf-16-be'.
            bom_enc = {b'\xff\xfe':'utf-16-le', b'\xfe\xff':'utf-16-be'}[data[:2]]
            data = data[2:]
        elif data[:3] == b'\xef\xbb\xbf':
            bom_enc = 'utf-8'
            data = data[3:]
        if bom_enc is not None:
            try:
                return fix_data(data.decode(bom_enc))
            except UnicodeDecodeError:
                pass
        if self.input_encoding:
            try:
                return fix_data(data.decode(self.input_encoding, 'replace'))
            except UnicodeDecodeError:
                pass
        try:
            return fix_data(data.decode('utf-8'))
        except UnicodeDecodeError:
            pass
        data, _ = xml_to_unicode(data)
        return fix_data(data)

    def to_opf1(self):
        """Produce OPF 1.2 representing the book's metadata and structure.
        Returns a dictionary in which the keys are MIME types and the values
        are tuples of (default) filenames and lxml.etree element structures.
        """
        package = etree.Element('package',
            attrib={'unique-identifier': self.uid.id})
        self.metadata.to_opf1(package)
        self.manifest.to_opf1(package)
        self.spine.to_opf1(package)
        tours = element(package, 'tours')
        tour = element(tours, 'tour',
            attrib={'id': 'chaptertour', 'title': 'Chapter Tour'})
        self.toc.to_opf1(tour)
        self.guide.to_opf1(package)
        return {OPF_MIME: ('content.opf', package)}

    def _update_playorder(self, ncx):
        # Assign sequential playOrder values to NCX nodes, walking the spine
        # in reading order and matching against hrefs referenced by the NCX.
        hrefs = set(map(urlnormalize, xpath(ncx, '//ncx:content/@src')))
        playorder = {}
        next_order = 1  # renamed from 'next' to avoid shadowing the builtin
        selector = XPath('h:body//*[@id or @name]')
        for item in self.spine:
            base = item.href
            if base in hrefs:
                playorder[base] = next_order
                next_order += 1
            for elem in selector(item.data):
                added = False
                for attr in ('id', 'name'):
                    id = elem.get(attr)
                    if not id:
                        continue
                    href = '#'.join([base, id])
                    if href in hrefs:
                        playorder[href] = next_order
                        added = True
                if added:
                    next_order += 1
        selector = XPath('ncx:content/@src')
        for i, elem in enumerate(xpath(ncx, '//*[@playOrder and ./ncx:content[@src]]')):
            href = urlnormalize(selector(elem)[0])
            # Fall back to the element's document position when unmatched
            order = playorder.get(href, i)
            elem.attrib['playOrder'] = str(order)
        return

    def _to_ncx(self):
        # Build the complete NCX document (head, docTitle, navMap and an
        # optional pageList) for this book.
        try:
            lang = str(self.metadata.language[0])
        except IndexError:
            lang = 'en'
        lang = lang.replace('_', '-')
        ncx = etree.Element(NCX('ncx'),
            attrib={'version': '2005-1', XML('lang'): lang},
            nsmap={None: NCX_NS})
        head = etree.SubElement(ncx, NCX('head'))
        etree.SubElement(head, NCX('meta'),
            name='dtb:uid', content=str(self.uid))
        etree.SubElement(head, NCX('meta'),
            name='dtb:depth', content=str(self.toc.depth()))
        generator = ''.join(['calibre (', __version__, ')'])
        etree.SubElement(head, NCX('meta'),
            name='dtb:generator', content=generator)
        etree.SubElement(head, NCX('meta'),
            name='dtb:totalPageCount', content=str(len(self.pages)))
        maxpnum = etree.SubElement(head, NCX('meta'),
            name='dtb:maxPageNumber', content='0')
        title = etree.SubElement(ncx, NCX('docTitle'))
        text = etree.SubElement(title, NCX('text'))
        text.text = str(self.metadata.title[0])
        navmap = etree.SubElement(ncx, NCX('navMap'))
        self.toc.to_ncx(navmap)
        if len(self.pages) > 0:
            plist = self.pages.to_ncx(ncx)
            value = max(int(x) for x in xpath(plist, '//@value'))
            maxpnum.attrib['content'] = str(value)
        self._update_playorder(ncx)
        return ncx

    def to_opf2(self, page_map=False):
        """Produce OPF 2.0 representing the book's metadata and structure.
        Returns a dictionary in which the keys are MIME types and the values
        are tuples of (default) filenames and lxml.etree element structures.
        """
        results = {}
        package = etree.Element(OPF('package'),
            attrib={'version': '2.0', 'unique-identifier': self.uid.id},
            nsmap={None: OPF2_NS})
        self.metadata.to_opf2(package)
        manifest = self.manifest.to_opf2(package)
        spine = self.spine.to_opf2(package)
        self.guide.to_opf2(package)
        results[OPF_MIME] = ('content.opf', package)
        id, href = self.manifest.generate('ncx', 'toc.ncx')
        etree.SubElement(manifest, OPF('item'), id=id, href=href,
                         attrib={'media-type': NCX_MIME})
        spine.attrib['toc'] = id
        results[NCX_MIME] = (href, self._to_ncx())
        if page_map and len(self.pages) > 0:
            id, href = self.manifest.generate('page-map', 'page-map.xml')
            etree.SubElement(manifest, OPF('item'), id=id, href=href,
                             attrib={'media-type': PAGE_MAP_MIME})
            spine.attrib['page-map'] = id
            results[PAGE_MAP_MIME] = (href, self.pages.to_page_map())
        if self.spine.page_progression_direction in {'ltr', 'rtl'}:
            spine.attrib['page-progression-direction'] = self.spine.page_progression_direction
        return results
def rel_href(base_href, href):
    """Convert the URL provided in :param:`href` to a URL relative to the URL
    in :param:`base_href` """
    # Absolute URLs (with a scheme) pass through untouched
    if urlparse(href).scheme:
        return href
    # A base with no directory component cannot make href any more relative
    if '/' not in base_href:
        return href
    base_dir = os.path.dirname(os.path.normpath(base_href)).replace(os.sep, '/')
    base = [seg for seg in base_dir.split('/') if seg and seg != '.']
    # Collapse internal '..' segments against the segment before them;
    # a leading '..' cannot be resolved, so stop there.
    while '..' in base:
        idx = base.index('..')
        if idx == 0:
            break
        del base[idx - 1:idx + 1]
    if not base:
        return href
    target, frag = urldefrag(href)
    target_parts = target.split('/')
    # Length of the shared leading path between base and target
    shared = 0
    limit = min(len(base), len(target_parts))
    while shared < limit and base[shared] == target_parts[shared]:
        shared += 1
    ups = ['..'] * (len(base) - shared)
    rel = '/'.join(ups + target_parts[shared:])
    if frag:
        rel = rel + '#' + frag
    return rel
| 72,281 | Python | .py | 1,710 | 31.711111 | 131 | 0.566475 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,363 | reader.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/reader.py | """
Container-/OPF-based input OEBBook reader.
"""
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import copy
import io
import os
import re
import sys
import uuid
from collections import defaultdict
from lxml import etree
from calibre import guess_type, xml_replace_entities
from calibre.constants import __appname__, __version__
from calibre.ebooks.oeb.base import (
BINARY_MIME,
COLLAPSE_RE,
DC11_NS,
DC_NSES,
JPEG_MIME,
MS_COVER_TYPE,
NCX_MIME,
OEB_DOCS,
OEB_IMAGES,
OEB_STYLES,
OPF,
OPF1_NS,
OPF2_NS,
OPF2_NSMAP,
PAGE_MAP_MIME,
SVG_MIME,
XHTML_MIME,
XMLDECL_RE,
DirContainer,
OEBBook,
OEBError,
XPath,
barename,
iterlinks,
namespace,
urlnormalize,
xml2text,
xpath,
)
from calibre.ebooks.oeb.writer import OEBWriter
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.localization import __, get_lang
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.urllib import unquote, urldefrag, urlparse
__all__ = ['OEBReader']
class OEBReader:
    """Read an OEBPS 1.x or OPF/OPS 2.0 file collection."""
    COVER_SVG_XP = XPath('h:body//svg:svg[position() = 1]')
    COVER_OBJECT_XP = XPath('h:body//h:object[@data][position() = 1]')
    Container = DirContainer
    """Container type used to access book files.  Override in sub-classes."""
    DEFAULT_PROFILE = 'PRS505'
    """Default renderer profile for content read with this Reader."""
    TRANSFORMS = []
    """List of transforms to apply to content read with this Reader."""

    @classmethod
    def config(cls, cfg):
        """Add any book-reading options to the :class:`Config` object
        :param:`cfg`.
        """
        return

    @classmethod
    def generate(cls, opts):
        """Generate a Reader instance from command-line options."""
        return cls()

    def __call__(self, oeb, path):
        """Read the book at :param:`path` into the :class:`OEBBook` object
        :param:`oeb`.
        """
        self.oeb = oeb
        self.logger = self.log = oeb.logger
        oeb.container = self.Container(path, self.logger)
        oeb.container.log = oeb.log
        opf = self._read_opf()
        self._all_from_opf(opf)
        return oeb

    def _clean_opf(self, opf):
        # Normalize an OPF 1.x/2.0 tree into a clean OPF 2.0 tree: move
        # bare/OPF-1 tags into the OPF 2 namespace, hoist Dublin Core
        # elements into the DC 1.1 namespace and rebuild the package root.
        nsmap = {}
        for elem in opf.iter(tag=etree.Element):
            nsmap.update(elem.nsmap)
        for elem in opf.iter(tag=etree.Element):
            if namespace(elem.tag) in ('', OPF1_NS) and ':' not in barename(elem.tag):
                elem.tag = OPF(barename(elem.tag))
        nsmap.update(OPF2_NSMAP)
        attrib = dict(opf.attrib)
        # A literal 'xmlns:' attribute (broken producer output) becomes the
        # default namespace declaration
        if xmlns := attrib.pop('xmlns:', None):
            attrib['xmlns'] = xmlns
        nroot = etree.Element(OPF('package'),
            nsmap={None: OPF2_NS}, attrib=attrib)
        metadata = etree.SubElement(nroot, OPF('metadata'), nsmap=nsmap)
        ignored = (OPF('dc-metadata'), OPF('x-metadata'))
        for elem in xpath(opf, 'o2:metadata//*'):
            if elem.tag in ignored:
                continue
            if namespace(elem.tag) in DC_NSES:
                tag = barename(elem.tag).lower()
                elem.tag = f'{{{DC11_NS}}}{tag}'
            if elem.tag.startswith('dc:'):
                tag = elem.tag.partition(':')[-1].lower()
                elem.tag = f'{{{DC11_NS}}}{tag}'
            metadata.append(elem)
        for element in xpath(opf, 'o2:metadata//o2:meta'):
            metadata.append(element)
        for tag in ('o2:manifest', 'o2:spine', 'o2:tours', 'o2:guide'):
            for element in xpath(opf, tag):
                nroot.append(element)
        return nroot

    def _read_opf(self):
        # Read and parse the OPF from the container, progressively relaxing
        # (entity replacement, dropping broken <tours>) on syntax errors.
        data = self.oeb.container.read(None)
        data = self.oeb.decode(data)
        data = XMLDECL_RE.sub('', data)
        data = re.sub(r'http://openebook.org/namespaces/oeb-package/1.0(/*)',
                OPF1_NS, data)
        try:
            opf = safe_xml_fromstring(data)
        except etree.XMLSyntaxError:
            data = xml_replace_entities(clean_xml_chars(data), encoding=None)
            try:
                opf = safe_xml_fromstring(data)
                self.logger.warn('OPF contains invalid HTML named entities')
            except etree.XMLSyntaxError:
                data = re.sub(r'(?is)<tours>.+</tours>', '', data)
                data = data.replace('<dc-metadata>',
                    '<dc-metadata xmlns:dc="http://purl.org/metadata/dublin_core">')
                opf = safe_xml_fromstring(data)
                self.logger.warn('OPF contains invalid tours section')
        ns = namespace(opf.tag)
        if ns not in ('', OPF1_NS, OPF2_NS):
            raise OEBError('Invalid namespace %r for OPF document' % ns)
        opf = self._clean_opf(opf)
        return opf

    def _metadata_from_opf(self, opf):
        # Populate self.oeb.metadata from the OPF, filling in defaults for
        # language, book producer, title, author and a fresh uuid identifier.
        from calibre.ebooks.metadata.opf2 import OPF
        from calibre.ebooks.oeb.transforms.metadata import meta_info_to_oeb_metadata
        stream = io.BytesIO(etree.tostring(opf, xml_declaration=True, encoding='utf-8'))
        o = OPF(stream)
        pwm = o.primary_writing_mode
        if pwm:
            self.oeb.metadata.primary_writing_mode = pwm
        mi = o.to_book_metadata()
        if not mi.language:
            mi.language = get_lang().replace('_', '-')
        self.oeb.metadata.add('language', mi.language)
        if not mi.book_producer:
            mi.book_producer = '%(a)s (%(v)s) [http://%(a)s-ebook.com]'%\
                dict(a=__appname__, v=__version__)
        meta_info_to_oeb_metadata(mi, self.oeb.metadata, self.logger)
        m = self.oeb.metadata
        m.add('identifier', str(uuid.uuid4()), id='uuid_id', scheme='uuid')
        self.oeb.uid = self.oeb.metadata.identifier[-1]
        if not m.title:
            m.add('title', self.oeb.translate(__('Unknown')))
        has_aut = False
        for x in m.creator:
            if getattr(x, 'role', '').lower() in ('', 'aut'):
                has_aut = True
                break
        if not has_aut:
            m.add('creator', self.oeb.translate(__('Unknown')), role='aut')

    def _manifest_prune_invalid(self):
        '''
        Remove items from manifest that contain invalid data. This prevents
        catastrophic conversion failure, when a few files contain corrupted
        data.
        '''
        bad = []
        check = OEB_DOCS.union(OEB_STYLES)
        for item in list(self.oeb.manifest.values()):
            if item.media_type in check:
                try:
                    # Accessing item.data triggers the parse; failures mark
                    # the item as bad
                    item.data
                except KeyboardInterrupt:
                    raise
                except:
                    self.logger.exception('Failed to parse content in %s'%
                            item.href)
                    bad.append(item)
                    self.oeb.manifest.remove(item)
        return bad

    def _manifest_add_missing(self, invalid):
        # Walk all parseable manifest items, discover referenced files that
        # exist in the container but are missing from the manifest, and add
        # them; unparseable items are removed at the end.
        # NOTE(review): the 'invalid' parameter is immediately overwritten by
        # 'invalid = set()' below, so the caller's list of bad items is
        # ignored here -- confirm whether that is intentional.
        import css_parser
        manifest = self.oeb.manifest
        known = set(manifest.hrefs)
        unchecked = set(manifest.values())
        cdoc = OEB_DOCS|OEB_STYLES
        invalid = set()
        while unchecked:
            new = set()
            for item in unchecked:
                data = None
                if (item.media_type in cdoc or item.media_type[-4:] in ('/xml', '+xml')):
                    try:
                        data = item.data
                    except:
                        self.oeb.log.exception('Failed to read from manifest '
                                'entry with id: %s, ignoring'%item.id)
                        invalid.add(item)
                        continue
                if data is None:
                    continue
                if (item.media_type in OEB_DOCS or item.media_type[-4:] in ('/xml', '+xml')):
                    hrefs = [r[2] for r in iterlinks(data)]
                    for href in hrefs:
                        if isinstance(href, bytes):
                            href = href.decode('utf-8')
                        href, _ = urldefrag(href)
                        if not href:
                            continue
                        try:
                            href = item.abshref(urlnormalize(href))
                            scheme = urlparse(href).scheme
                        except:
                            self.oeb.log.exception(
                                'Skipping invalid href: %r'%href)
                            continue
                        if not scheme and href not in known:
                            new.add(href)
                elif item.media_type in OEB_STYLES:
                    try:
                        urls = list(css_parser.getUrls(data))
                    except:
                        urls = []
                    for url in urls:
                        href, _ = urldefrag(url)
                        href = item.abshref(urlnormalize(href))
                        scheme = urlparse(href).scheme
                        if not scheme and href not in known:
                            new.add(href)
            unchecked.clear()
            warned = set()
            for href in new:
                known.add(href)
                is_invalid = False
                for item in invalid:
                    if href == item.abshref(urlnormalize(href)):
                        is_invalid = True
                        break
                if is_invalid:
                    continue
                if not self.oeb.container.exists(href):
                    if href not in warned:
                        self.logger.warn('Referenced file %r not found' % href)
                        warned.add(href)
                    continue
                if href not in warned:
                    self.logger.warn('Referenced file %r not in manifest' % href)
                    warned.add(href)
                id, _ = manifest.generate(id='added')
                guessed = guess_type(href)[0]
                media_type = guessed or BINARY_MIME
                added = manifest.add(id, href, media_type)
                unchecked.add(added)
            for item in invalid:
                self.oeb.manifest.remove(item)

    def _manifest_from_opf(self, opf):
        # Build the manifest from OPF <item> elements, guessing/normalizing
        # media types and skipping duplicates and missing files.
        manifest = self.oeb.manifest
        for elem in xpath(opf, '/o2:package/o2:manifest/o2:item'):
            id = elem.get('id')
            href = elem.get('href')
            media_type = elem.get('media-type', None)
            if media_type is None:
                media_type = elem.get('mediatype', None)
            if not media_type or media_type == 'text/xml':
                guessed = guess_type(href)[0]
                media_type = guessed or media_type or BINARY_MIME
            if hasattr(media_type, 'lower'):
                media_type = media_type.lower()
            fallback = elem.get('fallback')
            if href in manifest.hrefs:
                self.logger.warn('Duplicate manifest entry for %r' % href)
                continue
            if not self.oeb.container.exists(href):
                self.logger.warn('Manifest item %r not found' % href)
                continue
            if id in manifest.ids:
                self.logger.warn('Duplicate manifest id %r' % id)
                id, href = manifest.generate(id, href)
            manifest.add(id, href, media_type, fallback)
        invalid = self._manifest_prune_invalid()
        self._manifest_add_missing(invalid)

    def _spine_add_extra(self):
        # Find documents reachable via hyperlinks from spine items but not
        # themselves in the spine, and append them as non-linear items.
        manifest = self.oeb.manifest
        spine = self.oeb.spine
        unchecked = set(spine)
        selector = XPath('h:body//h:a/@href')
        extras = set()
        while unchecked:
            new = set()
            for item in unchecked:
                if item.media_type not in OEB_DOCS:
                    # TODO: handle fallback chains
                    continue
                for href in selector(item.data):
                    href, _ = urldefrag(href)
                    if not href:
                        continue
                    try:
                        href = item.abshref(urlnormalize(href))
                    except ValueError:  # Malformed URL
                        continue
                    if href not in manifest.hrefs:
                        continue
                    found = manifest.hrefs[href]
                    if found.media_type not in OEB_DOCS or \
                       found in spine or found in extras:
                        continue
                    new.add(found)
            extras.update(new)
            unchecked = new
        version = int(self.oeb.version[0])
        removed_items_to_ignore = getattr(self.oeb, 'removed_items_to_ignore', ())
        for item in extras:
            if item.href in removed_items_to_ignore:
                continue
            if version >= 2:
                # OPF 2.0 requires all textual content to be in the spine
                self.logger.warn(
                    'Spine-referenced file %r not in spine' % item.href)
            spine.add(item, linear=False)

    def _spine_from_opf(self, opf):
        # Build the spine from OPF <itemref> elements, dropping references
        # to missing items and to items that are not XML documents.
        spine = self.oeb.spine
        manifest = self.oeb.manifest
        for elem in xpath(opf, '/o2:package/o2:spine/o2:itemref'):
            idref = elem.get('idref')
            if idref not in manifest.ids:
                self.logger.warn('Spine item %r not found' % idref)
                continue
            item = manifest.ids[idref]
            if item.media_type.lower() in OEB_DOCS and hasattr(item.data, 'xpath') and not getattr(item.data, 'tag', '').endswith('}ncx'):
                spine.add(item, elem.get('linear'))
            else:
                if hasattr(item.data, 'tag') and item.data.tag and item.data.tag.endswith('}html'):
                    # Mislabeled media type on an actual HTML document
                    item.media_type = XHTML_MIME
                    spine.add(item, elem.get('linear'))
                else:
                    self.oeb.log.warn('The item %s is not a XML document.'
                            ' Removing it from spine.'%item.href)
        if len(spine) == 0:
            raise OEBError("Spine is empty")
        self._spine_add_extra()
        for val in xpath(opf, '/o2:package/o2:spine/@page-progression-direction'):
            if val in {'ltr', 'rtl'}:
                spine.page_progression_direction = val

    def _guide_from_opf(self, opf):
        # Build the guide from OPF <reference> elements, tolerating
        # case-mismatched hrefs.
        guide = self.oeb.guide
        manifest = self.oeb.manifest
        for elem in xpath(opf, '/o2:package/o2:guide/o2:reference'):
            ref_href = elem.get('href')
            path = urlnormalize(urldefrag(ref_href)[0])
            if path not in manifest.hrefs:
                corrected_href = None
                for href in manifest.hrefs:
                    if href.lower() == path.lower():
                        corrected_href = href
                        break
                if corrected_href is None:
                    self.logger.warn('Guide reference %r not found' % ref_href)
                    continue
                ref_href = corrected_href
            typ = elem.get('type')
            if typ not in guide:
                guide.add(typ, elem.get('title'), ref_href)

    def _find_ncx(self, opf):
        # Locate the NCX item (via the spine's toc attribute or by media
        # type), remove it from the manifest and return it, or None.
        result = xpath(opf, '/o2:package/o2:spine/@toc')
        if result:
            id = result[0]
            if id not in self.oeb.manifest.ids:
                return None
            item = self.oeb.manifest.ids[id]
            self.oeb.manifest.remove(item)
            return item
        for item in self.oeb.manifest.values():
            if item.media_type == NCX_MIME:
                self.oeb.manifest.remove(item)
                return item
        return None

    def _toc_from_navpoint(self, item, toc, navpoint):
        # Recursively convert NCX <navPoint> children of *navpoint* into TOC
        # nodes under *toc*.
        children = xpath(navpoint, 'ncx:navPoint')
        for child in children:
            title = ''.join(xpath(child, 'ncx:navLabel/ncx:text/text()'))
            title = COLLAPSE_RE.sub(' ', title.strip())
            href = xpath(child, 'ncx:content/@src')
            if not title:
                # Untitled nodes are flattened: recurse but add no node
                self._toc_from_navpoint(item, toc, child)
                continue
            if (not href or not href[0]) and not xpath(child, 'ncx:navPoint'):
                # This node is useless
                continue
            href = item.abshref(urlnormalize(href[0])) if href and href[0] else ''
            path, _ = urldefrag(href)
            if path and path not in self.oeb.manifest.hrefs:
                path = urlnormalize(path)
            if href and path not in self.oeb.manifest.hrefs:
                self.logger.warn('TOC reference %r not found' % href)
                gc = xpath(child, 'ncx:navPoint')
                if not gc:
                    # This node is useless
                    continue
            id = child.get('id')
            klass = child.get('class', 'chapter')
            try:
                po = int(child.get('playOrder', self.oeb.toc.next_play_order()))
            except:
                po = self.oeb.toc.next_play_order()
            authorElement = xpath(child,
                    'descendant::calibre:meta[@name = "author"]')
            if authorElement:
                author = authorElement[0].text
            else:
                author = None
            descriptionElement = xpath(child,
                    'descendant::calibre:meta[@name = "description"]')
            if descriptionElement:
                description = etree.tostring(descriptionElement[0],
                method='text', encoding='unicode').strip()
                if not description:
                    description = None
            else:
                description = None
            index_image = xpath(child,
                    'descendant::calibre:meta[@name = "toc_thumbnail"]')
            toc_thumbnail = (index_image[0].text if index_image else None)
            if not toc_thumbnail or not toc_thumbnail.strip():
                toc_thumbnail = None
            node = toc.add(title, href, id=id, klass=klass,
                    play_order=po, description=description, author=author,
                           toc_thumbnail=toc_thumbnail)
            self._toc_from_navpoint(item, node, child)

    def _toc_from_ncx(self, item):
        # Populate the TOC from an NCX document; returns True on success.
        if (item is None) or (item.data is None):
            return False
        self.log.debug('Reading TOC from NCX...')
        ncx = item.data
        title = ''.join(xpath(ncx, 'ncx:docTitle/ncx:text/text()'))
        title = COLLAPSE_RE.sub(' ', title.strip())
        title = title or str(self.oeb.metadata.title[0])
        toc = self.oeb.toc
        toc.title = title
        navmaps = xpath(ncx, 'ncx:navMap')
        for navmap in navmaps:
            self._toc_from_navpoint(item, toc, navmap)
        return True

    def _toc_from_tour(self, opf):
        # Populate the TOC from an OPF 1.x <tour>; returns True on success.
        result = xpath(opf, 'o2:tours/o2:tour')
        if not result:
            return False
        self.log.debug('Reading TOC from tour...')
        tour = result[0]
        toc = self.oeb.toc
        toc.title = tour.get('title')
        sites = xpath(tour, 'o2:site')
        for site in sites:
            title = site.get('title')
            href = site.get('href')
            if not title or not href:
                continue
            path, _ = urldefrag(urlnormalize(href))
            if path not in self.oeb.manifest.hrefs:
                self.logger.warn('TOC reference %r not found' % href)
                continue
            id = site.get('id')
            toc.add(title, href, id=id)
        return True

    def _toc_from_html(self, opf):
        # Populate the TOC from the guide's 'toc' HTML document by scanning
        # its hyperlinks; returns True on success.
        if 'toc' not in self.oeb.guide:
            return False
        self.log.debug('Reading TOC from HTML...')
        itempath, frag = urldefrag(self.oeb.guide['toc'].href)
        item = self.oeb.manifest.hrefs[itempath]
        html = item.data
        if frag:
            elems = xpath(html, './/*[@id="%s"]' % frag)
            if not elems:
                elems = xpath(html, './/*[@name="%s"]' % frag)
            elem = elems[0] if elems else html
            while elem != html and not xpath(elem, './/h:a[@href]'):
                elem = elem.getparent()
            html = elem
        titles = defaultdict(list)
        order = []
        for anchor in xpath(html, './/h:a[@href]'):
            href = anchor.attrib['href']
            href = item.abshref(urlnormalize(href))
            path, frag = urldefrag(href)
            if path not in self.oeb.manifest.hrefs:
                continue
            title = xml2text(anchor)
            title = COLLAPSE_RE.sub(' ', title.strip())
            if href not in titles:
                order.append(href)
            titles[href].append(title)
        toc = self.oeb.toc
        for href in order:
            toc.add(' '.join(titles[href]), href)
        return True

    def _toc_from_spine(self, opf):
        # Last resort: synthesize a TOC from spine item titles, falling back
        # to first headings when titles are not unique.
        self.log.warn('Generating default TOC from spine...')
        toc = self.oeb.toc
        titles = []
        headers = []
        for item in self.oeb.spine:
            if not item.linear:
                continue
            html = item.data
            title = ''.join(xpath(html, '/h:html/h:head/h:title/text()'))
            title = COLLAPSE_RE.sub(' ', title.strip())
            if title:
                titles.append(title)
            headers.append('(unlabled)')
            for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'strong'):
                expr = '/h:html/h:body//h:%s[position()=1]/text()'
                header = ''.join(xpath(html, expr % tag))
                header = COLLAPSE_RE.sub(' ', header.strip())
                if header:
                    headers[-1] = header
                    break
        use = titles
        if len(titles) > len(set(titles)):
            use = headers
        for title, item in zip(use, self.oeb.spine):
            if not item.linear:
                continue
            toc.add(title, item.href)
        return True

    def _toc_from_opf(self, opf, item):
        # Try each TOC source in priority order: NCX, HTML, tour, spine.
        self.oeb.auto_generated_toc = False
        if self._toc_from_ncx(item):
            return
        # Prefer HTML to tour based TOC, since several LIT files
        # have good HTML TOCs but bad tour based TOCs
        if self._toc_from_html(opf):
            return
        if self._toc_from_tour(opf):
            return
        self._toc_from_spine(opf)
        self.oeb.auto_generated_toc = True

    def _pages_from_ncx(self, opf, item):
        # Populate the page list from an NCX <pageList>; True on success.
        if item is None:
            return False
        ncx = item.data
        if ncx is None:
            return False
        ptargets = xpath(ncx, 'ncx:pageList/ncx:pageTarget')
        if not ptargets:
            return False
        pages = self.oeb.pages
        for ptarget in ptargets:
            name = ''.join(xpath(ptarget, 'ncx:navLabel/ncx:text/text()'))
            name = COLLAPSE_RE.sub(' ', name.strip())
            href = xpath(ptarget, 'ncx:content/@src')
            if not href:
                continue
            href = item.abshref(urlnormalize(href[0]))
            id = ptarget.get('id')
            type = ptarget.get('type', 'normal')
            klass = ptarget.get('class')
            pages.add(name, href, type=type, id=id, klass=klass)
        return True

    def _find_page_map(self, opf):
        # Locate the Adobe page-map item (spine attribute or media type),
        # remove it from the manifest and return it, or None.
        result = xpath(opf, '/o2:package/o2:spine/@page-map')
        if result:
            id = result[0]
            if id not in self.oeb.manifest.ids:
                return None
            item = self.oeb.manifest.ids[id]
            self.oeb.manifest.remove(item)
            return item
        for item in self.oeb.manifest.values():
            if item.media_type == PAGE_MAP_MIME:
                self.oeb.manifest.remove(item)
                return item
        return None

    def _pages_from_page_map(self, opf):
        # Populate the page list from an Adobe page-map; True on success.
        item = self._find_page_map(opf)
        if item is None:
            return False
        pmap = item.data
        pages = self.oeb.pages
        for page in xpath(pmap, 'o2:page'):
            name = page.get('name', '')
            href = page.get('href')
            if not href:
                continue
            name = COLLAPSE_RE.sub(' ', name.strip())
            href = item.abshref(urlnormalize(href))
            type = 'normal'
            if not name:
                type = 'special'
            elif name.lower().strip('ivxlcdm') == '':
                # Name made entirely of Roman-numeral letters: front matter
                type = 'front'
            pages.add(name, href, type=type)
        return True

    def _pages_from_opf(self, opf, item):
        # Prefer NCX page targets, then an Adobe page-map.
        if self._pages_from_ncx(opf, item):
            return
        if self._pages_from_page_map(opf):
            return
        return

    def _cover_from_html(self, hcover):
        # Render the given HTML cover document to a JPEG image and add it to
        # the manifest.
        from calibre.ebooks import render_html_svg_workaround
        with TemporaryDirectory('_html_cover') as tdir:
            writer = OEBWriter()
            writer(self.oeb, tdir)
            path = os.path.join(tdir, unquote(hcover.href))
            data = render_html_svg_workaround(path, self.logger, root=tdir)
            if not data:
                data = b''
        id, href = self.oeb.manifest.generate('cover', 'cover.jpg')
        item = self.oeb.manifest.add(id, href, JPEG_MIME, data=data)
        return item

    def _locate_cover_image(self):
        # Find the cover image: metadata cover id, guide entries, an inline
        # SVG or <object>, or finally by rendering the first spine document.
        if self.oeb.metadata.cover:
            id = str(self.oeb.metadata.cover[0])
            item = self.oeb.manifest.ids.get(id, None)
            if item is not None and item.media_type in OEB_IMAGES:
                return item
            else:
                self.logger.warn('Invalid cover image @id %r' % id)
        hcover = self.oeb.spine[0]
        if 'cover' in self.oeb.guide:
            href = self.oeb.guide['cover'].href
            item = self.oeb.manifest.hrefs[href]
            media_type = item.media_type
            if media_type in OEB_IMAGES:
                return item
            elif media_type in OEB_DOCS:
                hcover = item
        html = hcover.data
        if MS_COVER_TYPE in self.oeb.guide:
            href = self.oeb.guide[MS_COVER_TYPE].href
            item = self.oeb.manifest.hrefs.get(href, None)
            if item is not None and item.media_type in OEB_IMAGES:
                return item
        if self.COVER_SVG_XP(html):
            svg = copy.deepcopy(self.COVER_SVG_XP(html)[0])
            href = os.path.splitext(hcover.href)[0] + '.svg'
            id, href = self.oeb.manifest.generate(hcover.id, href)
            item = self.oeb.manifest.add(id, href, SVG_MIME, data=svg)
            return item
        if self.COVER_OBJECT_XP(html):
            object = self.COVER_OBJECT_XP(html)[0]
            href = hcover.abshref(object.get('data'))
            item = self.oeb.manifest.hrefs.get(href, None)
            if item is not None and item.media_type in OEB_IMAGES:
                return item
        return self._cover_from_html(hcover)

    def _ensure_cover_image(self):
        # Guarantee that metadata has a cover entry pointing at a real image.
        cover = self._locate_cover_image()
        if self.oeb.metadata.cover:
            self.oeb.metadata.cover[0].value = cover.id
            return
        self.oeb.metadata.add('cover', cover.id)

    def _manifest_remove_duplicates(self):
        # Drop manifest items that share an href with another item, keeping
        # any copy that is referenced from the spine.
        seen = set()
        dups = set()
        for item in self.oeb.manifest:
            if item.href in seen:
                dups.add(item.href)
            seen.add(item.href)

        for href in dups:
            items = [x for x in self.oeb.manifest if x.href == href]
            for x in items:
                if x not in self.oeb.spine:
                    self.oeb.log.warn('Removing duplicate manifest item with id:', x.id)
                    self.oeb.manifest.remove_duplicate_item(x)

    def _all_from_opf(self, opf):
        # Master sequence: populate every part of the OEB data model from the
        # cleaned OPF tree.
        self.oeb.version = opf.get('version', '1.2')
        self._metadata_from_opf(opf)
        self._manifest_from_opf(opf)
        self._spine_from_opf(opf)
        self._manifest_remove_duplicates()
        self._guide_from_opf(opf)
        item = self._find_ncx(opf)
        self._toc_from_opf(opf, item)
        self._pages_from_opf(opf, item)
        # self._ensure_cover_image()
def main(argv=sys.argv):
    """Command-line driver: read each argument as an OEB book and dump its
    OPF 1.2 and OPF 2.0 serializations to stdout."""
    reader = OEBReader()
    for path in argv[1:]:
        book = reader(OEBBook(), path)
        for _, doc in book.to_opf1().values():
            print(etree.tostring(doc, pretty_print=True))
        for _, doc in book.to_opf2(page_map=True).values():
            print(etree.tostring(doc, pretty_print=True))
    return 0


if __name__ == '__main__':
    sys.exit(main())
| 28,491 | Python | .py | 693 | 28.510823 | 138 | 0.531034 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,364 | parse_utils.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/parse_utils.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from lxml import etree, html
from calibre import force_unicode, xml_replace_entities
from calibre.constants import filesystem_encoding
from calibre.ebooks.chardet import strip_encoding_declarations, xml_to_unicode
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import iteritems, itervalues, string_or_bytes
RECOVER_PARSER = etree.XMLParser(recover=True, no_network=True, resolve_entities=False)
XHTML_NS = 'http://www.w3.org/1999/xhtml'
XMLNS_NS = 'http://www.w3.org/2000/xmlns/'
class NotHTML(Exception):
    '''Raised when parsed data turns out not to be an (X)HTML document.

    The offending root tag is preserved on the exception so callers can
    decide how to handle the file.
    '''

    def __init__(self, root_tag):
        super().__init__('Data is not HTML')
        self.root_tag = root_tag
def barename(name):
    '''Return the local part of a Clark-notation name, e.g. "{ns}tag" -> "tag".'''
    return name.split('}')[-1]
def namespace(name):
    '''Return the namespace URI of a Clark-notation name, or the empty
    string when the name carries no namespace.'''
    head, sep, _tag = name.rpartition('}')
    return head[1:] if sep else ''
def XHTML(name):
    '''Qualify *name* with the XHTML namespace in Clark notation.'''
    return '{%s}%s' % (XHTML_NS, name)
def xpath(elem, expr):
    '''Evaluate XPath *expr* on *elem* with the ``h`` prefix bound to XHTML.'''
    return elem.xpath(expr, namespaces={'h': XHTML_NS})
def XPath(expr):
    '''Compile *expr* into a reusable lxml XPath with the ``h`` prefix bound
    to the XHTML namespace.'''
    return etree.XPath(expr, namespaces={'h': XHTML_NS})
# Matches the charset-declaring <meta http-equiv="Content-Type"> element.
META_XP = XPath('/h:html/h:head/h:meta[@http-equiv="Content-Type"]')
def merge_multiple_html_heads_and_bodies(root, log=None):
    '''Merge all <head> and <body> elements under *root* into a single
    <head> followed by a single <body>.

    Returns *root* unchanged when there is at most one of each.
    '''
    heads, bodies = xpath(root, '//h:head'), xpath(root, '//h:body')
    if not (len(heads) > 1 or len(bodies) > 1):
        return root
    # Iterate over a snapshot: removing children while iterating the live
    # lxml element skips every other child, leaving stale elements behind.
    for child in list(root):
        root.remove(child)
    head = root.makeelement(XHTML('head'))
    body = root.makeelement(XHTML('body'))
    for h in heads:
        for x in h:
            head.append(x)
    for b in bodies:
        for x in b:
            body.append(x)
    for x in (head, body):
        root.append(x)
    if log is not None:
        log.warn('Merging multiple <head> and <body> sections')
    return root
def clone_element(elem, nsmap={}, in_context=True):
    '''Return a copy of *elem* (tag, attributes, text, tail and children)
    created with the given namespace map.  When *in_context* is True the
    clone is built by the root element's makeelement so it shares the
    original tree's context.'''
    if in_context:
        factory = elem.getroottree().getroot().makeelement
    else:
        factory = etree.Element
    clone = factory(elem.tag, attrib=elem.attrib, nsmap=nsmap)
    clone.text, clone.tail = elem.text, elem.tail
    clone.extend(elem)
    return clone
def node_depth(node):
    '''Return the number of ancestors *node* has (0 for the root element).'''
    depth = 0
    parent = node.getparent()
    while parent is not None:
        depth += 1
        parent = parent.getparent()
    return depth
def html5_parse(data, max_nesting_depth=100):
    '''Parse *data* with the HTML 5 algorithm, raising ValueError when the
    resulting tree has leaf elements nested deeper than *max_nesting_depth*.'''
    from html5_parser import parse

    from calibre.utils.cleantext import clean_xml_chars
    tree = parse(clean_xml_chars(data), maybe_xhtml=True, keep_doctype=False, sanitize_names=True)
    # The HTML 5 algorithm can produce pathologically deep trees; guard
    # against that by checking the depth of every leaf element.
    for node in tree.iterdescendants():
        if isinstance(node.tag, string_or_bytes) and not len(node):  # Leaf node
            if node_depth(node) > max_nesting_depth:
                raise ValueError('HTML 5 parsing resulted in a tree with nesting'
                                 ' depth > %d'%max_nesting_depth)
    return tree
def _html4_parse(data):
    '''Parse *data* with lxml's forgiving HTML parser, then round-trip the
    result through the XML parser so a proper XML tree is returned.'''
    root = html.fromstring(data)
    root.attrib.pop('xmlns', None)
    # '--' inside a comment is illegal in XML; strip stray dashes so the
    # re-serialized document parses cleanly.
    for comment in root.iter(tag=etree.Comment):
        if comment.text:
            comment.text = comment.text.strip('-')
    serialized = etree.tostring(root, encoding='unicode')
    return safe_xml_fromstring(serialized)
def clean_word_doc(data, log):
    '''Strip empty Microsoft Office namespaced tags from *data*.

    Such tags are not rendered by browsers but, once chewed on by an HTML
    parser, can turn into renderable elements like <p/>.
    '''
    prefixes = [m.group(1) for m in re.finditer(r'xmlns:(\S+?)=".*?microsoft.*?"', data)]
    if prefixes:
        log.warn('Found microsoft markup, cleaning...')
        alternation = '|'.join(prefixes)
        # Empty open/close pairs, e.g. <o:p></o:p>
        data = re.sub(r'<(%s):([a-zA-Z0-9]+)[^>/]*?></\1:\2>' % alternation, '', data, flags=re.DOTALL)
        # Self-closing empty tags, e.g. <o:p/>
        data = re.sub(r'<(%s):([a-zA-Z0-9]+)[^>/]*?/>' % alternation, '', data)
    return data
def ensure_namespace_prefixes(node, nsmap):
    '''Return *node* (cloned if necessary) whose namespace map contains the
    prefix->URI entries from *nsmap*, dropping any other prefixes that were
    bound to those same URIs.'''
    wanted_uris = frozenset(itervalues(nsmap))
    merged = {prefix: uri for prefix, uri in iteritems(node.nsmap) if uri not in wanted_uris}
    merged.update(nsmap)
    if merged != dict(node.nsmap):
        node = clone_element(node, nsmap=merged, in_context=False)
    return node
class HTML5Doc(ValueError):
    '''Signals that a document must be handled by the HTML 5 parser.'''
def check_for_html5(prefix, root):
    '''Raise HTML5Doc for un-namespaced HTML 5 documents containing SVG;
    those need the HTML 5 parser to be handled correctly.'''
    has_html5_doctype = re.search(r'<!DOCTYPE\s+html\s*>', prefix, re.IGNORECASE) is not None
    if has_html5_doctype and root.xpath('//svg'):
        raise HTML5Doc('This document appears to be un-namespaced HTML 5, should be parsed by the HTML 5 parser')
def parse_html(data, log=None, decoder=None, preprocessor=None,
        filename='<string>', non_html_file_tags=frozenset()):
    '''Parse HTML *data* into a well-formed XHTML lxml tree.

    Tries progressively more forgiving parsers (strict XML, entity-replaced
    XML, HTML 5, lxml HTML 4), then normalizes the result: forces the XHTML
    namespace, merges duplicate <head>/<body> sections, guarantees <head>,
    <title>, a charset <meta> and <body> exist, and strips markup known to
    break renderers.

    :param data: the document, as bytes or str
    :param decoder: optional callable used to decode byte input
    :param preprocessor: optional callable applied to the decoded text
    :param non_html_file_tags: root tag names for which NotHTML is raised
        instead of coercing the document into an <html> wrapper
    '''
    if log is None:
        from calibre.utils.logging import default_log
        log = default_log
    filename = force_unicode(filename, enc=filesystem_encoding)
    if not isinstance(data, str):
        if decoder is not None:
            data = decoder(data)
        else:
            data = xml_to_unicode(data)[0]
    data = strip_encoding_declarations(data)
    # Remove DOCTYPE declaration as it messes up parsing
    # In particular, it causes tostring to insert xmlns
    # declarations, which messes up the coercing logic
    pre = ''
    idx = data.find('<html')
    if idx == -1:
        idx = data.find('<HTML')
    has_html4_doctype = False
    if idx > -1:
        pre = data[:idx]
        data = data[idx:]
        if '<!DOCTYPE' in pre:  # Handle user defined entities
            # kindlegen produces invalid xhtml with uppercase attribute names
            # if fed HTML 4 with uppercase attribute names, so try to detect
            # and compensate for that.
            has_html4_doctype = re.search(r'<!DOCTYPE\s+[^>]+HTML\s+4.0[^.]+>', pre) is not None
            # Process private entities
            user_entities = {}
            for match in re.finditer(r'<!ENTITY\s+(\S+)\s+([^>]+)', pre):
                val = match.group(2)
                if val.startswith('"') and val.endswith('"'):
                    val = val[1:-1]
                user_entities[match.group(1)] = val
            if user_entities:
                pat = re.compile(r'&(%s);'%('|'.join(list(user_entities.keys()))))
                data = pat.sub(lambda m:user_entities[m.group(1)], data)
    if preprocessor is not None:
        data = preprocessor(data)
    # There could be null bytes in data if it had &#0; entities in it
    data = data.replace('\0', '')
    data = raw = clean_word_doc(data, log)
    # Try with more & more drastic measures to parse
    try:
        data = safe_xml_fromstring(data, recover=False)
        check_for_html5(pre, data)
    except (HTML5Doc, etree.XMLSyntaxError):
        log.debug('Initial parse failed, using more'
                ' forgiving parsers')
        raw = data = xml_replace_entities(raw)
        try:
            data = safe_xml_fromstring(data, recover=False)
            check_for_html5(pre, data)
        except (HTML5Doc, etree.XMLSyntaxError):
            log.debug('Parsing %s as HTML' % filename)
            data = raw
            try:
                data = html5_parse(data)
            except Exception:
                log.exception(
                    'HTML 5 parsing failed, falling back to older parsers')
                data = _html4_parse(data)

    if has_html4_doctype or data.tag == 'HTML' or (len(data) and (data[-1].get('LANG') or data[-1].get('DIR'))):
        # Lower case all tag and attribute names
        data.tag = data.tag.lower()
        for x in data.iterdescendants():
            try:
                x.tag = x.tag.lower()
                for key, val in list(iteritems(x.attrib)):
                    del x.attrib[key]
                    key = key.lower()
                    x.attrib[key] = val
            except:
                pass

    if barename(data.tag) != 'html':
        if barename(data.tag) in non_html_file_tags:
            raise NotHTML(data.tag)
        log.warn('File %r does not appear to be (X)HTML'%filename)
        nroot = safe_xml_fromstring('<html></html>')
        has_body = False
        for child in list(data):
            if isinstance(child.tag, (str, bytes)) and barename(child.tag) == 'body':
                has_body = True
                break
        parent = nroot
        if not has_body:
            log.warn('File %r appears to be a HTML fragment'%filename)
            nroot = safe_xml_fromstring('<html><body/></html>')
            parent = nroot[0]
        # Re-parent the entire original tree under the synthesized wrapper
        for child in list(data.iter()):
            oparent = child.getparent()
            if oparent is not None:
                oparent.remove(child)
            parent.append(child)
        data = nroot

    # Force into the XHTML namespace
    if not namespace(data.tag):
        log.warn('Forcing', filename, 'into XHTML namespace')
        data.attrib['xmlns'] = XHTML_NS
        data = etree.tostring(data, encoding='unicode')

        try:
            data = safe_xml_fromstring(data, recover=False)
        except:
            data = data.replace(':=', '=').replace(':>', '>')
            data = data.replace('<http:/>', '')
            try:
                data = safe_xml_fromstring(data, recover=False)
            except etree.XMLSyntaxError:
                log.warn('Stripping comments from %s'%
                        filename)
                data = re.compile(r'<!--.*?-->', re.DOTALL).sub('',
                        data)
                data = data.replace(
                    "<?xml version='1.0' encoding='utf-8'?><o:p></o:p>",
                    '')
                data = data.replace("<?xml version='1.0' encoding='utf-8'??>", '')
                try:
                    data = safe_xml_fromstring(data)
                except etree.XMLSyntaxError:
                    log.warn('Stripping meta tags from %s'% filename)
                    data = re.sub(r'<meta\s+[^>]+?>', '', data)
                    data = safe_xml_fromstring(data)
    elif namespace(data.tag) != XHTML_NS:
        # OEB_DOC_NS, but possibly others
        ns = namespace(data.tag)
        attrib = dict(data.attrib)
        nroot = etree.Element(XHTML('html'),
            nsmap={None: XHTML_NS}, attrib=attrib)
        for elem in data.iterdescendants():
            if isinstance(elem.tag, string_or_bytes) and \
                namespace(elem.tag) == ns:
                elem.tag = XHTML(barename(elem.tag))
        for elem in data:
            nroot.append(elem)
        data = nroot

    # Remove non default prefixes referring to the XHTML namespace
    data = ensure_namespace_prefixes(data, {None: XHTML_NS})

    data = merge_multiple_html_heads_and_bodies(data, log)
    # Ensure has a <head/>
    head = xpath(data, '/h:html/h:head')
    head = head[0] if head else None
    if head is None:
        log.warn('File %s missing <head/> element' % filename)
        head = etree.Element(XHTML('head'))
        data.insert(0, head)
        title = etree.SubElement(head, XHTML('title'))
        title.text = _('Unknown')
    elif not xpath(data, '/h:html/h:head/h:title'):
        title = etree.SubElement(head, XHTML('title'))
        title.text = _('Unknown')
    # Ensure <title> is not empty
    title = xpath(data, '/h:html/h:head/h:title')[0]
    if not title.text or not title.text.strip():
        title.text = _('Unknown')
    # Remove any encoding-specifying <meta/> elements
    for meta in META_XP(data):
        meta.getparent().remove(meta)
    meta = etree.SubElement(head, XHTML('meta'),
        attrib={'http-equiv': 'Content-Type'})
    meta.set('content', 'text/html; charset=utf-8')  # Ensure content is second attribute

    # Ensure has a <body/>
    if not xpath(data, '/h:html/h:body'):
        body = xpath(data, '//h:body')
        if body:
            body = body[0]
            body.getparent().remove(body)
            data.append(body)
        else:
            log.warn('File %s missing <body/> element' % filename)
            etree.SubElement(data, XHTML('body'))

    # Remove microsoft office markup
    r = [x for x in data.iterdescendants(etree.Element) if 'microsoft-com' in x.tag]
    for x in r:
        x.tag = XHTML('span')

    def remove_elem(a):
        # Remove element *a* while splicing its tail text into the previous
        # sibling's tail (or the parent's text when a is the first child).
        p = a.getparent()
        idx = p.index(a) -1
        p.remove(a)
        if a.tail:
            if idx < 0:
                if p.text is None:
                    p.text = ''
                p.text += a.tail
            else:
                if p[idx].tail is None:
                    p[idx].tail = ''
                p[idx].tail += a.tail

    # Remove hyperlinks with no content as they cause rendering
    # artifacts in browser based renderers
    # Also remove empty <b>, <u> and <i> tags
    for a in xpath(data, '//h:a[@href]|//h:i|//h:b|//h:u'):
        if a.get('id', None) is None and a.get('name', None) is None \
                and len(a) == 0 and not a.text:
            remove_elem(a)

    # Convert <br>s with content into paragraphs as ADE can't handle
    # them
    for br in xpath(data, '//h:br'):
        if len(br) > 0 or br.text:
            br.tag = XHTML('div')

    # Remove any stray text in the <head> section and format it nicely
    data.text = '\n  '
    head = xpath(data, '//h:head')
    if head:
        head = head[0]
        head.text = '\n    '
        head.tail = '\n  '
        for child in head:
            child.tail = '\n    '
        child.tail = '\n  '

    return data
| 13,522 | Python | .py | 328 | 32.04878 | 117 | 0.581252 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,365 | bookmarks.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/iterator/bookmarks.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import numbers
import os
from io import BytesIO
from calibre.utils.zipfile import safe_replace
from polyglot.builtins import as_unicode
BM_FIELD_SEP = '*|!|?|*'
BM_LEGACY_ESC = 'esc-text-%&*#%(){}ads19-end-esc'
def parse_bookmarks(raw):
    '''Yield bookmark dicts parsed from serialized bookmark data.

    Understands both the old "title^spine#pos" records and the newer
    CFI-style records whose fields are joined by BM_FIELD_SEP.
    '''
    raw = as_unicode(raw)
    for line in raw.splitlines():
        if '^' in line:
            # Legacy record: everything before the last '^' is the title
            title, _sep, ref = line.rpartition('^')
            try:
                spine, _, pos = ref.partition('#')
                spine = int(spine.strip())
            except Exception:
                continue
            yield {'type': 'legacy', 'title': title, 'spine': spine, 'pos': pos}
        elif BM_FIELD_SEP in line:
            try:
                title, spine, pos = line.strip().split(BM_FIELD_SEP)
                spine = int(spine)
            except Exception:
                continue
            # Undo the escaping applied at serialization time
            pos = pos.replace(BM_LEGACY_ESC, '^')
            # A float pos is a scroll fraction rather than a CFI string
            try:
                pos = float(pos)
            except Exception:
                pass
            yield {'type': 'cfi', 'title': title, 'pos': pos, 'spine': spine}
class BookmarksMixin:
    '''Bookmark persistence for ebook iterators.

    Bookmarks live in the dynamic config, keyed by the book's path, and for
    EPUB files are optionally mirrored into META-INF/calibre_bookmarks.txt
    inside the book itself.  Expects the host class to provide
    ``self.config``, ``self.pathtoebook`` and ``self.base``.
    '''

    def __init__(self, copy_bookmarks_to_file=True):
        self.copy_bookmarks_to_file = copy_bookmarks_to_file

    def parse_bookmarks(self, raw):
        # Append every bookmark parsed from *raw* to self.bookmarks
        for bm in parse_bookmarks(raw):
            self.bookmarks.append(bm)

    def serialize_bookmarks(self, bookmarks):
        '''Serialize *bookmarks* to the newline-separated text format
        understood by the module-level parse_bookmarks().'''
        dat = []
        for bm in bookmarks:
            if bm['type'] == 'legacy':
                rec = '%s^%d#%s'%(bm['title'], bm['spine'], bm['pos'])
            else:
                pos = bm['pos']
                if isinstance(pos, numbers.Number):
                    pos = str(pos)
                else:
                    # Escape the legacy '^' separator so round-tripping is safe
                    pos = pos.replace('^', BM_LEGACY_ESC)
                rec = BM_FIELD_SEP.join([bm['title'], str(bm['spine']), pos])
            dat.append(rec)
        return ('\n'.join(dat) +'\n')

    def read_bookmarks(self):
        '''Load bookmarks from the config, falling back to the copy stored
        inside the (exploded) ebook.'''
        self.bookmarks = []
        raw = self.config['bookmarks_'+self.pathtoebook] or ''
        if not raw:
            # Look for bookmarks saved inside the ebook
            bmfile = os.path.join(self.base, 'META-INF', 'calibre_bookmarks.txt')
            if os.path.exists(bmfile):
                with open(bmfile, 'rb') as f:
                    raw = f.read()
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        self.parse_bookmarks(raw)

    def save_bookmarks(self, bookmarks=None, no_copy_to_file=False):
        '''Persist bookmarks to the config and, for writable EPUB files,
        into the book itself (best effort).'''
        if bookmarks is None:
            bookmarks = self.bookmarks
        dat = self.serialize_bookmarks(bookmarks)
        self.config['bookmarks_'+self.pathtoebook] = dat
        if not no_copy_to_file and self.copy_bookmarks_to_file and os.path.splitext(
                self.pathtoebook)[1].lower() == '.epub' and os.access(self.pathtoebook, os.W_OK):
            try:
                with open(self.pathtoebook, 'r+b') as zf:
                    safe_replace(zf, 'META-INF/calibre_bookmarks.txt',
                            BytesIO(dat.encode('utf-8')),
                            add_missing=True)
            except OSError:
                # Writing into the book is best effort only
                return

    def add_bookmark(self, bm, no_copy_to_file=False):
        # Replace any existing bookmark with the same title
        self.bookmarks = [x for x in self.bookmarks if x['title'] !=
                bm['title']]
        self.bookmarks.append(bm)
        self.save_bookmarks(no_copy_to_file=no_copy_to_file)

    def set_bookmarks(self, bookmarks):
        self.bookmarks = bookmarks
27,366 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/iterator/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import sys
from calibre.customize.ui import available_input_formats
def is_supported(path):
    '''Return True when the extension of *path* can be opened by the viewer.'''
    ext = os.path.splitext(path)[1].replace('.', '').lower()
    # Normalize htm/xhtml style variants to plain 'html'
    ext = re.sub(r'(x{0,1})htm(l{0,1})', 'html', ext)
    return ext == 'kepub' or ext in available_input_formats()
class UnsupportedFormatError(Exception):
    '''Raised when asked to open a book in a format no input plugin handles.'''

    def __init__(self, fmt):
        super().__init__(_('%s format books are not supported')%fmt.upper())
def EbookIterator(*args, **kwargs):
    'For backwards compatibility'
    # Defer the heavy import until the iterator is actually requested
    from calibre.ebooks.oeb.iterator.book import EbookIterator as RealIterator
    return RealIterator(*args, **kwargs)
def get_preprocess_html(path_to_ebook, output=None):
    '''Run the conversion pipeline far enough to capture the preprocessed
    HTML of each spine item and write it (UTF-8 encoded, separated by dashed
    lines) to *output*, or to stdout when *output* is None.'''
    from calibre.ebooks.conversion.plumber import Plumber, set_regex_wizard_callback
    from calibre.ptempfile import TemporaryDirectory
    from calibre.utils.logging import DevNull
    raw = {}
    # The wizard callback stores each spine item's preprocessed HTML keyed
    # by href as the pipeline runs
    set_regex_wizard_callback(raw.__setitem__)
    with TemporaryDirectory('_regex_wiz') as tdir:
        pl = Plumber(path_to_ebook, os.path.join(tdir, 'a.epub'), DevNull(), for_regex_wizard=True)
        pl.run()
        items = [raw[item.href] for item in pl.oeb.spine if item.href in raw]

    # NOTE(review): when output is None this writes bytes to sys.stdout (and
    # closes it on exit) -- presumably only invoked where stdout accepts
    # bytes; verify callers before changing.
    with (sys.stdout if output is None else open(output, 'wb')) as out:
        for html in items:
            out.write(html.encode('utf-8'))
            out.write(b'\n\n' + b'-'*80 + b'\n\n')
| 1,514 | Python | .py | 33 | 40.818182 | 99 | 0.683027 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,367 | spine.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/iterator/spine.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
from collections import namedtuple
from functools import partial
from operator import attrgetter
from calibre import guess_type, replace_entities
from calibre.ebooks.chardet import xml_to_unicode
def character_count(html):
    ''' Return the number of "significant" text characters in a HTML string. '''
    collapse_ws = re.compile(r'\s+')
    total = 0
    # Each match is a text run between a '>' and the next '<'
    for m in re.finditer(r'>[^<]+<', html):
        # Collapse whitespace runs, then discount the surrounding > and <
        total += len(collapse_ws.sub(' ', m.group())) - 2
    return total
def anchor_map(html):
    ''' Return map of all anchor names to their offsets in the html '''
    offsets = {}
    for m in re.finditer(
            r'''(?:id|name)\s*=\s*['"]([^'"]+)['"]''', html):
        # Keep the first occurrence when an anchor name is repeated
        offsets.setdefault(m.group(1), m.start())
    return offsets
def all_links(html):
    ''' Return set of all links in the file '''
    pat = re.compile(
        r'''<\s*[Aa]\s+.*?[hH][Rr][Ee][Ff]\s*=\s*(['"])(.+?)\1''',
        re.MULTILINE | re.DOTALL)
    return {replace_entities(m.group(2)) for m in pat.finditer(html)}
class SpineItem(str):
    '''A spine entry: the path to the file (the string value) augmented with
    statistics (character count, anchor map, links, page numbers) used by
    viewers.'''

    def __new__(cls, path, mime_type=None, read_anchor_map=True,
            run_char_count=True, from_epub=False, read_links=True):
        # Strip a fragment from the path if the fragment-less file exists
        ppath = path.partition('#')[0]
        if not os.path.exists(path) and os.path.exists(ppath):
            path = ppath
        obj = super().__new__(cls, path)
        with open(path, 'rb') as f:
            raw = f.read()
        if from_epub:
            # According to the spec, HTML in EPUB must be encoded in utf-8 or
            # utf-16. Furthermore, there exist epub files produced by the usual
            # incompetents that have utf-8 encoded HTML files that contain
            # incorrect encoding declarations. See
            # http://www.idpf.org/epub/20/spec/OPS_2.0.1_draft.htm#Section1.4.1.2
            # http://www.idpf.org/epub/30/spec/epub30-publications.html#confreq-xml-enc
            # https://bugs.launchpad.net/bugs/1188843
            # So we first decode with utf-8 and only if that fails we try xml_to_unicode. This
            # is the same algorithm as that used by the conversion pipeline (modulo
            # some BOM based detection). Sigh.
            try:
                raw, obj.encoding = raw.decode('utf-8'), 'utf-8'
            except UnicodeDecodeError:
                raw, obj.encoding = xml_to_unicode(raw)
        else:
            raw, obj.encoding = xml_to_unicode(raw)
        obj.character_count = character_count(raw) if run_char_count else 10000
        obj.anchor_map = anchor_map(raw) if read_anchor_map else {}
        obj.all_links = all_links(raw) if read_links else set()
        obj.verified_links = set()
        # Pagination fields; -1 until filled in by the iterator
        obj.start_page = -1
        obj.pages = -1
        obj.max_page = -1
        obj.index_entries = []
        if mime_type is None:
            mime_type = guess_type(obj)[0]
        obj.mime_type = mime_type
        # None means unknown; comics set this to True per page
        obj.is_single_page = None
        return obj
class IndexEntry:
    '''A TOC entry resolved against the spine: records where in the spine
    (and at which anchor) the entry starts, and — after find_end() — where
    it ends.'''

    def __init__(self, spine, toc_entry, num):
        self.num = num
        self.text = toc_entry.text or _('Unknown')
        self.key = toc_entry.abspath
        self.anchor = self.start_anchor = toc_entry.fragment or None
        try:
            self.spine_pos = spine.index(self.key)
        except ValueError:
            # TOC points at a file that is not in the spine
            self.spine_pos = -1
        self.anchor_pos = 0
        if self.spine_pos > -1:
            self.anchor_pos = spine[self.spine_pos].anchor_map.get(self.anchor,
                    0)
        # Depth of this entry in the TOC tree
        self.depth = 0
        p = toc_entry.parent
        while p is not None:
            self.depth += 1
            p = p.parent
        self.sort_key = (self.spine_pos, self.anchor_pos)
        self.spine_count = len(spine)

    def find_end(self, all_entries):
        '''Set end_spine_pos/end_anchor to the start of the next entry at the
        same or shallower depth (assumes *all_entries* is sorted by sort_key).'''
        potential_enders = [i for i in all_entries if
                i.depth <= self.depth and
                (
                    (i.spine_pos == self.spine_pos and i.anchor_pos > self.anchor_pos) or
                    i.spine_pos > self.spine_pos
                )]
        if potential_enders:
            # potential_enders is sorted by (spine_pos, anchor_pos)
            end = potential_enders[0]
            self.end_spine_pos = end.spine_pos
            self.end_anchor = end.anchor
        else:
            # Last entry: runs to the end of the book
            self.end_spine_pos = self.spine_count - 1
            self.end_anchor = None
def create_indexing_data(spine, toc):
    '''Resolve every TOC entry against *spine* and attach, to each spine
    item, the (entry, start_anchor, end_anchor) tuples of the TOC entries
    that touch it.'''
    if not toc:
        return
    f = partial(IndexEntry, spine)
    index_entries = list(map(f,
        (t for t in toc.flat() if t is not toc),
        (i-1 for i, t in enumerate(toc.flat()) if t is not toc)
        ))
    index_entries.sort(key=attrgetter('sort_key'))
    # find_end relies on index_entries being sorted by sort_key
    for entry in index_entries:
        entry.find_end(index_entries)

    ie = namedtuple('IndexEntry', 'entry start_anchor end_anchor')

    for spine_pos, spine_item in enumerate(spine):
        for i in index_entries:
            if i.end_spine_pos < spine_pos or i.spine_pos > spine_pos:
                continue  # Does not touch this file
            start = i.anchor if i.spine_pos == spine_pos else None
            end = i.end_anchor if i.spine_pos == spine_pos else None
            spine_item.index_entries.append(ie(i, start, end))
| 5,386 | Python | .py | 126 | 33.65873 | 101 | 0.591751 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,368 | book.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/iterator/book.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Iterate over the HTML files in an ebook. Useful for writing viewers.
'''
import math
import os
import re
from functools import partial
from calibre import guess_type, prepare_string_for_xml
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ebooks.oeb.base import urlparse, urlunquote
from calibre.ebooks.oeb.iterator.bookmarks import BookmarksMixin
from calibre.ebooks.oeb.iterator.spine import SpineItem, create_indexing_data
from calibre.ebooks.oeb.transforms.cover import CoverManager
from calibre.ptempfile import PersistentTemporaryDirectory, remove_dir
from calibre.utils.config import DynamicConfig
from calibre.utils.logging import default_log
from calibre.utils.tdir_in_cache import tdir_in_cache
TITLEPAGE = CoverManager.SVG_TEMPLATE.replace(
'__ar__', 'none').replace('__viewbox__', '0 0 600 800'
).replace('__width__', '600').replace('__height__', '800')
class FakeOpts:
    '''Bare-bones substitute for a conversion options object.'''
    input_encoding = None
    verbose = 0
    max_levels = 5
    breadth_first = False
def write_oebbook(oeb, path):
    '''Serialize *oeb* to *path* with OEBWriter and return the path of the
    OPF file that was written (None when none is found).'''
    from calibre import walk
    from calibre.ebooks.oeb.writer import OEBWriter
    OEBWriter()(oeb, path)
    for candidate in walk(path):
        if candidate.endswith('.opf'):
            return candidate
def extract_book(pathtoebook, tdir, log=None, view_kepub=False, processed=False, only_input_plugin=False):
    '''Explode *pathtoebook* into *tdir* via the conversion pipeline's input
    plugin and return (book_format, path_to_opf, input_fmt).'''
    from calibre.ebooks.conversion.plumber import Plumber, create_oebbook
    from calibre.utils.logging import default_log
    log = log or default_log
    plumber = Plumber(pathtoebook, tdir, log, view_kepub=view_kepub)
    plumber.setup_options()
    if pathtoebook.lower().endswith('.opf'):
        plumber.opts.dont_package = True
    if hasattr(plumber.opts, 'no_process'):
        plumber.opts.no_process = True

    plumber.input_plugin.for_viewer = True
    with plumber.input_plugin, open(plumber.input, 'rb') as inf:
        pathtoopf = plumber.input_plugin(inf,
            plumber.opts, plumber.input_fmt, log, {}, tdir)

        if not only_input_plugin:
            # Run the HTML preprocess/parsing from the conversion pipeline as
            # well
            # NOTE(review): due to or/and precedence the hasattr() check only
            # applies to the pdb/pdf/rb branch, not to `processed` -- confirm
            # this is intentional.
            if (processed or plumber.input_fmt.lower() in {'pdb', 'pdf', 'rb'} and
                    not hasattr(pathtoopf, 'manifest')):
                if hasattr(pathtoopf, 'manifest'):
                    pathtoopf = write_oebbook(pathtoopf, tdir)
                pathtoopf = create_oebbook(log, pathtoopf, plumber.opts)

        # An in-memory OEB book must be written to disk before returning
        if hasattr(pathtoopf, 'manifest'):
            pathtoopf = write_oebbook(pathtoopf, tdir)

    book_format = os.path.splitext(pathtoebook)[1][1:].upper()
    if getattr(plumber.input_plugin, 'is_kf8', False):
        fs = ':joint' if getattr(plumber.input_plugin, 'mobi_is_joint', False) else ''
        book_format = 'KF8' + fs
    return book_format, pathtoopf, plumber.input_fmt
def run_extract_book(*args, **kwargs):
    '''Run extract_book() in a forked worker process and return its result.'''
    from calibre.utils.ipc.simple_worker import fork_job
    job = fork_job('calibre.ebooks.oeb.iterator.book', 'extract_book', args=args, kwargs=kwargs, timeout=3000, no_output=True)
    return job['result']
class EbookIterator(BookmarksMixin):
    '''Context manager that explodes an ebook into a temporary directory and
    exposes its spine, TOC, pagination data and bookmarks for viewers.

    Usage: ``with EbookIterator(path) as it: ...`` — the temporary directory
    is removed on exit.
    '''

    # Heuristic used to estimate page counts from character counts
    CHARACTERS_PER_PAGE = 1000

    def __init__(self, pathtoebook, log=None, copy_bookmarks_to_file=True, use_tdir_in_cache=False):
        BookmarksMixin.__init__(self, copy_bookmarks_to_file=copy_bookmarks_to_file)
        self.use_tdir_in_cache = use_tdir_in_cache
        self.log = log or default_log
        pathtoebook = pathtoebook.strip()
        self.pathtoebook = os.path.abspath(pathtoebook)
        self.config = DynamicConfig(name='iterator')
        ext = os.path.splitext(pathtoebook)[1].replace('.', '').lower()
        ext = re.sub(r'(x{0,1})htm(l{0,1})', 'html', ext)
        self.ebook_ext = ext.replace('original_', '')

    def search(self, text, index, backwards=False):
        '''Return the spine index (after/before *index*) of the first file
        whose visible text contains *text* (case insensitive), or None.'''
        from calibre.ebooks.oeb.polish.parsing import parse
        pmap = [(i, path) for i, path in enumerate(self.spine)]
        if backwards:
            pmap.reverse()
        q = text.lower()
        for i, path in pmap:
            if (backwards and i < index) or (not backwards and i > index):
                with open(path, 'rb') as f:
                    raw = f.read().decode(path.encoding)
                root = parse(raw)
                fragments = []

                def serialize(elem):
                    # Collect lowercased text, skipping script/style/del subtrees
                    if elem.text:
                        fragments.append(elem.text.lower())
                    if elem.tail:
                        fragments.append(elem.tail.lower())
                    for child in elem.iterchildren():
                        if hasattr(getattr(child, 'tag', None), 'rpartition') and child.tag.rpartition('}')[-1] not in {'script', 'style', 'del'}:
                            serialize(child)
                        elif getattr(child, 'tail', None):
                            fragments.append(child.tail.lower())
                for body in root.xpath('//*[local-name() = "body"]'):
                    body.tail = None
                    serialize(body)
                if q in ''.join(fragments):
                    return i

    def __enter__(self, processed=False, only_input_plugin=False,
                  run_char_count=True, read_anchor_map=True, view_kepub=False, read_links=True):
        ''' Convert an ebook file into an exploded OEB book suitable for
        display in viewers/preprocessing etc. '''
        self.delete_on_exit = []
        if self.use_tdir_in_cache:
            self._tdir = tdir_in_cache('ev')
        else:
            self._tdir = PersistentTemporaryDirectory('_ebook_iter')
        self.base = os.path.realpath(self._tdir)
        self.book_format, self.pathtoopf, input_fmt = run_extract_book(
            self.pathtoebook, self.base, only_input_plugin=only_input_plugin, view_kepub=view_kepub, processed=processed)
        self.opf = OPF(self.pathtoopf, os.path.dirname(self.pathtoopf))
        self.mi = self.opf.to_book_metadata()
        self.language = None
        if self.mi.languages:
            self.language = self.mi.languages[0].lower()
        self.spine = []
        Spiny = partial(SpineItem, read_anchor_map=read_anchor_map, read_links=read_links,
                run_char_count=run_char_count, from_epub=self.book_format == 'EPUB')
        if input_fmt.lower() == 'htmlz':
            self.spine.append(Spiny(os.path.join(os.path.dirname(self.pathtoopf), 'index.html'), mime_type='text/html'))
        else:
            # Linear spine items first, then the non-linear ones
            ordered = [i for i in self.opf.spine if i.is_linear] + \
                      [i for i in self.opf.spine if not i.is_linear]
            is_comic = input_fmt.lower() in {'cbc', 'cbz', 'cbr', 'cb7'}
            for i in ordered:
                spath = i.path
                mt = None
                if i.idref is not None:
                    mt = self.opf.manifest.type_for_id(i.idref)
                if mt is None:
                    mt = guess_type(spath)[0]
                try:
                    self.spine.append(Spiny(spath, mime_type=mt))
                    if is_comic:
                        self.spine[-1].is_single_page = True
                except:
                    self.log.warn('Missing spine item:', repr(spath))

        cover = self.opf.cover
        if cover and self.ebook_ext in {'lit', 'mobi', 'prc', 'opf', 'fb2',
                                        'azw', 'azw3', 'docx', 'htmlz'}:
            # Synthesize a titlepage HTML file wrapping the cover image and
            # prepend it to the spine
            cfile = os.path.join(self.base, 'calibre_iterator_cover.html')
            rcpath = os.path.relpath(cover, self.base).replace(os.sep, '/')
            chtml = (TITLEPAGE%prepare_string_for_xml(rcpath, True)).encode('utf-8')
            with open(cfile, 'wb') as f:
                f.write(chtml)
            self.spine[0:0] = [Spiny(cfile,
                mime_type='application/xhtml+xml')]
            self.delete_on_exit.append(cfile)

        if self.opf.path_to_html_toc is not None and \
           self.opf.path_to_html_toc not in self.spine:
            try:
                self.spine.append(Spiny(self.opf.path_to_html_toc))
            except:
                import traceback
                traceback.print_exc()

        # Estimate page counts and assign page ranges to each spine item
        sizes = [i.character_count for i in self.spine]
        self.pages = [math.ceil(i/float(self.CHARACTERS_PER_PAGE)) for i in sizes]
        for p, s in zip(self.pages, self.spine):
            s.pages = p
        start = 1

        for s in self.spine:
            s.start_page = start
            start += s.pages
            s.max_page = s.start_page + s.pages - 1
        self.toc = self.opf.toc
        if read_anchor_map:
            create_indexing_data(self.spine, self.toc)

        self.verify_links()

        self.read_bookmarks()

        return self

    def verify_links(self):
        '''Record, on each spine item, the intra-book links that resolve to
        an existing spine file (and anchor, when present).'''
        spine_paths = {s:s for s in self.spine}
        for item in self.spine:
            base = os.path.dirname(item)
            for link in item.all_links:
                try:
                    p = urlparse(urlunquote(link))
                except Exception:
                    continue
                if not p.scheme and not p.netloc:
                    path = os.path.abspath(os.path.join(base, p.path)) if p.path else item
                    try:
                        path = spine_paths[path]
                    except Exception:
                        continue
                    if not p.fragment or p.fragment in path.anchor_map:
                        item.verified_links.add((path, p.fragment))

    def __exit__(self, *args):
        # Clean up the exploded book and any synthesized files
        remove_dir(self._tdir)
        for x in self.delete_on_exit:
            try:
                os.remove(x)
            except:
                pass
27,369 | split.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/split.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import copy
import os
import re
from calibre.ebooks.oeb.base import OEB_DOCS, OPF, XHTML, XPNSMAP, XPath, barename
from calibre.ebooks.oeb.polish.errors import MalformedMarkup
from calibre.ebooks.oeb.polish.replace import LinkRebaser
from calibre.ebooks.oeb.polish.toc import node_from_loc
from polyglot.builtins import iteritems, string_or_bytes
from polyglot.urllib import urlparse
class AbortError(ValueError):
    '''Raised to abort a split operation that cannot be performed.'''
def in_table(node):
    '''Return True when *node* is a table element or has one as an ancestor.'''
    current = node
    while current is not None:
        if current.tag.endswith('}table'):
            return True
        current = current.getparent()
    return False
def adjust_split_point(split_point, log):
    '''
    Move the split point up its ancestor chain if it has no content
    before it. This handles the common case:
    <div id="chapter1"><h2>Chapter 1</h2>...</div> with a page break on the
    h2.
    '''
    node = split_point
    while True:
        parent = node.getparent()
        # Stop climbing at the document root or a body/html container
        if parent is None or barename(parent.tag) in {'body', 'html'}:
            break
        # Stop if the parent has text, or siblings, before this node
        if parent.text and parent.text.strip():
            break
        if parent.index(node) > 0:
            break
        node = parent

    if node is not split_point:
        log.debug('Adjusted split point to ancestor')

    return node
def get_body(root):
    '''Return the first XHTML <body> child of *root*, or None when absent.'''
    return root.find('h:body', namespaces=XPNSMAP)
def do_split(split_point, log, before=True):
    '''
    Split tree into a *before* and an *after* tree at ``split_point``.

    :param split_point: The Element at which to split
    :param before: If True tree is split before split_point, otherwise after split_point
    :return: before_tree, after_tree
    '''
    if before:
        # We cannot adjust for after since moving an after split point to a
        # parent will cause breakage if the parent contains any content
        # after the original split point
        split_point = adjust_split_point(split_point, log)
    tree = split_point.getroottree()
    path = tree.getpath(split_point)

    # Work on two full copies of the tree; locate the split point in each
    # via its XPath
    tree, tree2 = copy.deepcopy(tree), copy.deepcopy(tree)
    root, root2 = tree.getroot(), tree2.getroot()
    body, body2 = map(get_body, (root, root2))
    split_point = root.xpath(path)[0]
    split_point2 = root2.xpath(path)[0]

    def nix_element(elem, top=True):
        # Remove elem unless top is False in which case replace elem by its
        # children
        parent = elem.getparent()
        if top:
            parent.remove(elem)
        else:
            index = parent.index(elem)
            parent[index:index+1] = list(elem.iterchildren())

    # Tree 1: remove the split point (or everything after it) and all
    # following content
    hit_split_point = False
    keep_descendants = False
    split_point_descendants = frozenset(split_point.iterdescendants())
    for elem in tuple(body.iterdescendants()):
        if elem is split_point:
            hit_split_point = True
            if before:
                nix_element(elem)
            else:
                # We want to keep the descendants of the split point in
                # Tree 1
                keep_descendants = True
                # We want the split point element, but not its tail
                elem.tail = '\n'

            continue
        if hit_split_point:
            if keep_descendants:
                if elem in split_point_descendants:
                    # elem is a descendant keep it
                    continue
                else:
                    # We are out of split_point, so prevent further set
                    # lookups of split_point_descendants
                    keep_descendants = False
            nix_element(elem)

    # Tree 2: remove everything up to (and possibly including) the split
    # point, preserving ancestors for their inheritable CSS
    ancestors = frozenset(XPath('ancestor::*')(split_point2))
    for elem in tuple(body2.iterdescendants()):
        if elem is split_point2:
            if not before:
                # Keep the split point element's tail, if it contains non-whitespace
                # text
                tail = elem.tail
                if tail and not tail.isspace():
                    parent = elem.getparent()
                    idx = parent.index(elem)
                    if idx == 0:
                        parent.text = (parent.text or '') + tail
                    else:
                        sib = parent[idx-1]
                        sib.tail = (sib.tail or '') + tail
            # Remove the element itself
            nix_element(elem)
            break
        if elem in ancestors:
            # We have to preserve the ancestors as they could have CSS
            # styles that are inherited/applicable, like font or
            # width. So we only remove the text, if any.
            elem.text = '\n'
        else:
            nix_element(elem, top=False)
    body2.text = '\n'

    return tree, tree2
class SplitLinkReplacer:
    '''Callable URL rewriter used after splitting a file: links into the top
    half whose anchors now live in the bottom half are redirected there.
    ``replaced`` records whether any rewrite happened.'''

    def __init__(self, base, bottom_anchors, top_name, bottom_name, container):
        self.bottom_anchors, self.bottom_name = bottom_anchors, bottom_name
        self.container, self.top_name = container, top_name
        self.base = base
        self.replaced = False

    def __call__(self, url):
        if url and url.startswith('#'):
            # Fragment-only links stay inside the current file
            return url
        if self.container.href_to_name(url, self.base) != self.top_name:
            return url
        parts = urlparse(url)
        if parts.fragment and parts.fragment in self.bottom_anchors:
            url = self.container.name_to_href(self.bottom_name, self.base) + '#' + parts.fragment
            self.replaced = True
        return url
def split(container, name, loc_or_xpath, before=True, totals=None):
    '''
    Split the file specified by name at the position specified by loc_or_xpath.
    Splitting automatically migrates all links and references to the affected
    files.

    :param loc_or_xpath: Should be an XPath expression such as
    //h:div[@id="split_here"]. Can also be a *loc* which is used internally to
    implement splitting in the preview panel.
    :param before: If True the split occurs before the identified element otherwise after it.
    :param totals: Used internally
    '''
    root = container.parsed(name)
    if isinstance(loc_or_xpath, str):
        split_point = root.xpath(loc_or_xpath)[0]
    else:
        try:
            split_point = node_from_loc(root, loc_or_xpath, totals=totals)
        except MalformedMarkup:
            # The webkit HTML parser and the container parser have yielded
            # different node counts, this can happen if the file is valid XML
            # but contains constructs like nested <p> tags. So force parse it
            # with the HTML 5 parser and try again.
            raw = container.raw_data(name)
            root = container.parse_xhtml(raw, fname=name, force_html5_parse=True)
            try:
                split_point = node_from_loc(root, loc_or_xpath, totals=totals)
            except MalformedMarkup:
                raise MalformedMarkup(_('The file %s has malformed markup. Try running the Fix HTML tool'
                                        ' before splitting') % name)
            container.replace(name, root)
    if in_table(split_point):
        raise AbortError('Cannot split inside tables')
    if split_point.tag.endswith('}body'):
        raise AbortError('Cannot split on the <body> tag')
    # do_split() returns two trees: everything before and everything after the split point
    tree1, tree2 = do_split(split_point, container.log, before=before)
    root1, root2 = tree1.getroot(), tree2.getroot()
    # All anchor targets in each half; '' represents a bare link to the file itself
    anchors_in_top = frozenset(root1.xpath('//*/@id')) | frozenset(root1.xpath('//*/@name')) | {''}
    anchors_in_bottom = frozenset(root2.xpath('//*/@id')) | frozenset(root2.xpath('//*/@name'))
    # Generate a unique name of the form base_splitN.ext for the bottom half
    base, ext = name.rpartition('.')[0::2]
    base = re.sub(r'_split\d+$', '', base)
    nname, s = None, 0
    while not nname or container.exists(nname):
        s += 1
        nname = '%s_split%d.%s' % (base, s, ext)
    manifest_item = container.generate_item(nname, media_type=container.mime_map[name])
    bottom_name = container.href_to_name(manifest_item.get('href'), container.opf_name)

    # Fix links in the split trees
    for r in (root1, root2):
        for a in r.xpath('//*[@href]'):
            url = a.get('href')
            if url.startswith('#'):
                fname = name
            else:
                fname = container.href_to_name(url, name)
            if fname == name:
                purl = urlparse(url)
                if purl.fragment in anchors_in_top:
                    # Target stayed in the top half: cross-file link from the
                    # bottom half, local fragment link from the top half
                    if r is root2:
                        a.set('href', f'{container.name_to_href(name, bottom_name)}#{purl.fragment}')
                    else:
                        a.set('href', '#' + purl.fragment)
                elif purl.fragment in anchors_in_bottom:
                    # Target moved to the bottom half
                    if r is root1:
                        a.set('href', f'{container.name_to_href(bottom_name, name)}#{purl.fragment}')
                    else:
                        a.set('href', '#' + purl.fragment)

    # Fix all links in the container that point to anchors in the bottom tree
    for fname, media_type in iteritems(container.mime_map):
        if fname not in {name, bottom_name}:
            repl = SplitLinkReplacer(fname, anchors_in_bottom, name, bottom_name, container)
            container.replace_links(fname, repl)

    container.replace(name, root1)
    container.replace(bottom_name, root2)

    # Insert the bottom half into the spine immediately after the original file
    spine = container.opf_xpath('//opf:spine')[0]
    for spine_item, spine_name, linear in container.spine_iter:
        if spine_name == name:
            break
    index = spine.index(spine_item) + 1

    si = spine.makeelement(OPF('itemref'), idref=manifest_item.get('id'))
    if not linear:
        si.set('linear', 'no')
    container.insert_into_xml(spine, si, index=index)
    container.dirty(container.opf_name)
    return bottom_name
def multisplit(container, name, xpath, before=True):
    '''
    Split the specified file at multiple locations (all tags that match the specified XPath expression). See also: :func:`split`.

    Splitting automatically migrates all links and references to the affected
    files.

    :param before: If True the splits occur before the identified element otherwise after it.
    '''
    root = container.parsed(name)
    nodes = root.xpath(xpath, namespaces=XPNSMAP)
    if not nodes:
        raise AbortError(_('The expression %s did not match any nodes') % xpath)
    # Validate every split point up front so no partial splitting happens
    for node in nodes:
        if in_table(node):
            raise AbortError('Cannot split inside tables')
        if node.tag.endswith('}body'):
            raise AbortError('Cannot split on the <body> tag')
    # Mark each split point with a temporary attribute, since repeated calls to
    # split() re-parse the files and invalidate node references
    for pos, node in enumerate(nodes):
        node.set('calibre-split-point', str(pos))
    current, all_names = name, [name]
    for pos in range(len(nodes)):
        current = split(container, current, '//*[@calibre-split-point="%d"]' % pos, before=before)
        all_names.append(current)
    # Clean up the temporary markers in every resulting file
    for fname in all_names:
        for node in container.parsed(fname).xpath('//*[@calibre-split-point]'):
            node.attrib.pop('calibre-split-point')
        container.dirty(fname)
    return all_names[1:]
class MergeLinkReplacer:

    '''Link rewriter for use with Container.replace_links(): redirects links
    that point into any merged file to the corresponding (possibly renamed)
    anchor in the master file. Sets self.replaced when a URL is rewritten.'''

    def __init__(self, base, anchor_map, master, container):
        self.container, self.anchor_map = container, anchor_map
        self.master = master
        self.base = base
        self.replaced = False

    def __call__(self, url):
        # Fragment-only links are internal to *base* and never need fixing
        if url and url.startswith('#'):
            return url
        target = self.container.href_to_name(url, self.base)
        amap = self.anchor_map.get(target, None)
        if amap is None:
            return url
        frag = urlparse(url).fragment or ''
        # Anchors may have been renamed during the merge to stay unique
        frag = amap.get(frag, frag)
        self.replaced = True
        return self.container.name_to_href(self.master, self.base) + '#' + frag
def add_text(body, text):
    # Append *text* at the very end of *body*: as the tail of the last child
    # when there are children, otherwise as the body's own text.
    if len(body) > 0:
        last = body[-1]
        last.tail = (last.tail or '') + text
    else:
        body.text = (body.text or '') + text
def all_anchors(root):
    # Every value that can be the target of a fragment link in this tree:
    # both id and (legacy) name attributes count.
    ids = root.xpath('//*/@id')
    names = root.xpath('//*/@name')
    return set(ids) | set(names)
def all_stylesheets(container, name):
    # Yield the container names of all stylesheets linked from the <head> of
    # the HTML file *name*. NOTE(review): *name* is deliberately reassigned to
    # the previous link's resolved name and used as the base for the next
    # href resolution — preserved as-is from the original implementation.
    for link in XPath('//h:head/h:link[@href]')(container.parsed(name)):
        name = container.href_to_name(link.get('href'), name)
        if link.get('type', 'text/css') == 'text/css':
            yield name
def unique_anchor(seen_anchors, current):
    """Return *current*, suffixed with _1, _2, ... if needed, so that the
    result does not collide with anything in *seen_anchors*."""
    candidate, n = current, 0
    while candidate in seen_anchors:
        n += 1
        candidate = '%s_%d' % (current, n)
    return candidate
def remove_name_attributes(root):
    # Drop the legacy name attribute wherever an id is already present...
    for elem in root.xpath('//*[@id and @name]'):
        elem.attrib.pop('name')
    # ...then convert any remaining name attributes into id attributes
    for elem in root.xpath('//*[@name]'):
        val = elem.attrib.pop('name')
        elem.set('id', val)
def merge_html(container, names, master, insert_page_breaks=False):
    # Merge the HTML files in *names* into *master*, migrating stylesheets,
    # anchors and links. Returns a map of merged file name -> the anchor in
    # master marking where that file's content begins.
    p = container.parsed
    root = p(master)

    # Ensure master has a <head>
    head = root.find('h:head', namespaces=XPNSMAP)
    if head is None:
        head = root.makeelement(XHTML('head'))
        container.insert_into_xml(root, head, 0)

    seen_anchors = all_anchors(root)
    seen_stylesheets = set(all_stylesheets(container, master))
    # Merged content is appended to the last <body> of master
    master_body = p(master).findall('h:body', namespaces=XPNSMAP)[-1]
    master_base = os.path.dirname(master)
    # Per-file map of old anchor -> renamed anchor (for collision resolution)
    anchor_map = {n:{} for n in names if n != master}
    first_anchor_map = {}

    for name in names:
        if name == master:
            continue
        # Insert new stylesheets into master
        for sheet in all_stylesheets(container, name):
            if sheet not in seen_stylesheets:
                seen_stylesheets.add(sheet)
                link = head.makeelement(XHTML('link'), rel='stylesheet', type='text/css', href=container.name_to_href(sheet, master))
                container.insert_into_xml(head, link)

        # Rebase links if master is in a different directory
        if os.path.dirname(name) != master_base:
            container.replace_links(name, LinkRebaser(container, name, master))

        root = p(name)
        # Flatten all bodies into a list of (text, element, element, ...) to append
        children = []
        for body in p(name).findall('h:body', namespaces=XPNSMAP):
            children.append(body.text if body.text and body.text.strip() else '\n\n')
            children.extend(body)

        # Find the first element child; it will carry the file's start anchor
        first_child = ''
        for first_child in children:
            if not isinstance(first_child, string_or_bytes):
                break
        if isinstance(first_child, string_or_bytes):
            # body contained only text, no tags
            # NOTE: `body` here is the last body from the loop above
            first_child = body.makeelement(XHTML('p'))
            first_child.text, children[0] = children[0], first_child

        amap = anchor_map[name]
        remove_name_attributes(root)

        # Rename any anchors that collide with anchors already in master
        for elem in root.xpath('//*[@id]'):
            val = elem.get('id')
            if not val:
                continue
            if val in seen_anchors:
                nval = unique_anchor(seen_anchors, val)
                elem.set('id', nval)
                amap[val] = nval
            else:
                seen_anchors.add(val)

        # Ensure the first element has an id so links to the file itself can be migrated
        if 'id' not in first_child.attrib:
            first_child.set('id', unique_anchor(seen_anchors, 'top'))
            seen_anchors.add(first_child.get('id'))
        first_anchor_map[name] = first_child.get('id')
        if insert_page_breaks:
            first_child.set('style', first_child.get('style', '') + '; page-break-before: always')
        # '' maps bare links to this file to its start anchor
        amap[''] = first_child.get('id')

        # Fix links that point to local changed anchors
        for a in XPath('//h:a[starts-with(@href, "#")]')(root):
            q = a.get('href')[1:]
            if q in amap:
                a.set('href', '#' + amap[q])

        # Append the merged content to master's body
        for child in children:
            if isinstance(child, string_or_bytes):
                add_text(master_body, child)
            else:
                master_body.append(copy.deepcopy(child))

        container.remove_item(name, remove_from_guide=False)

    # Fix all links in the container that point to merged files
    for fname, media_type in iteritems(container.mime_map):
        repl = MergeLinkReplacer(fname, anchor_map, master, container)
        container.replace_links(fname, repl)

    return first_anchor_map
def merge_css(container, names, master):
    """Merge the CSS files in *names* into *master* and rewrite the <link>
    tags of all HTML files so they reference the master sheet instead of the
    removed sheets."""
    p = container.parsed
    msheet = p(master)
    master_base = os.path.dirname(master)
    merged = set()

    for name in names:
        if name == master:
            continue
        # Rebase links if master is in a different directory
        if os.path.dirname(name) != master_base:
            container.replace_links(name, LinkRebaser(container, name, master))
        sheet = p(name)

        # Remove charset rules. Use a plain for loop: the original used a list
        # comprehension purely for its side effects, which builds a pointless
        # list of None values.
        cr = [r for r in sheet.cssRules if r.type == r.CHARSET_RULE]
        for r in cr:
            sheet.deleteRule(sheet.cssRules.index(r))

        for rule in sheet.cssRules:
            msheet.add(rule)
        container.remove_item(name)
        merged.add(name)

    # Remove links to merged stylesheets in the html files, replacing with a
    # link to the master sheet
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_DOCS:
            removed = False
            root = p(name)
            for link in XPath('//h:link[@href]')(root):
                q = container.href_to_name(link.get('href'), name)
                if q in merged:
                    container.remove_from_xml(link)
                    removed = True
            if removed:
                container.dirty(name)
            # Only add a link to master if the file does not already have one
            if removed and master not in set(all_stylesheets(container, name)):
                head = root.find('h:head', namespaces=XPNSMAP)
                if head is not None:
                    link = head.makeelement(XHTML('link'), type='text/css', rel='stylesheet', href=container.name_to_href(master, name))
                    container.insert_into_xml(head, link)
def merge(container, category, names, master):
    '''
    Merge the specified files into a single file, automatically migrating all
    links and references to the affected files. The file must all either be HTML or CSS files.

    :param category: Must be either ``'text'`` for HTML files or ``'styles'`` for CSS files
    :param names: The list of files to be merged
    :param master: Which of the merged files is the *master* file, that is, the file that will remain after merging.
    '''
    handlers = {'text': merge_html, 'styles': merge_css}
    handler = handlers.get(category)
    if handler is None:
        raise AbortError('Cannot merge files of type: %s' % category)
    if len(names) < 2:
        raise AbortError('Must specify at least two files to be merged')
    if master not in names:
        raise AbortError('The master file (%s) must be one of the files being merged' % master)
    handler(container, names, master)
    container.dirty(master)
| 18,872 | Python | .py | 429 | 34.421911 | 136 | 0.606211 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,370 | errors.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/errors.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks import DRMError as _DRMError
class InvalidBook(ValueError):
    # Raised when a path does not contain a recognizable e-book container
    pass


class DRMError(_DRMError):

    def __init__(self):
        # Localized message shown when a DRM-locked book is opened for editing
        super().__init__(_('This file is locked with DRM. It cannot be edited.'))


class MalformedMarkup(ValueError):
    # Raised when HTML/XML markup is too broken to be processed
    pass


class UnsupportedContainerType(Exception):
    # Raised when the book's container format cannot be edited
    pass
| 477 | Python | .py | 14 | 30.5 | 81 | 0.718404 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,371 | download.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/download.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import mimetypes
import os
import posixpath
import re
import shutil
from collections import defaultdict
from contextlib import closing
from functools import partial
from io import BytesIO
from multiprocessing.dummy import Pool
from tempfile import NamedTemporaryFile
from calibre import as_unicode, browser
from calibre import sanitize_file_name as sanitize_file_name_base
from calibre.constants import iswindows
from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, barename, iterlinks
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.ptempfile import TemporaryDirectory
from calibre.web import get_download_filename_from_response
from polyglot.binary import from_base64_bytes
from polyglot.builtins import iteritems
from polyglot.urllib import unquote, urlparse
def is_external(url):
    """Return True if *url* points outside the book, i.e. it has one of the
    downloadable URL schemes."""
    try:
        scheme = urlparse(url).scheme
    except Exception:
        return False
    return scheme in ('http', 'https', 'file', 'ftp', 'data')
def iterhtmllinks(container, name):
    # Yield (element, attribute, url) for every external resource referenced
    # by the HTML file *name*. <a> tags are skipped: hyperlinks are not
    # resources to be downloaded.
    for el, attr, link, pos in iterlinks(container.parsed(name)):
        tag = barename(el.tag).lower()
        if tag != 'a' and is_external(link):
            yield el, attr, link
def get_external_resources(container):
    # Map every external URL referenced by the book to the list of files
    # (HTML or CSS) that reference it.
    ans = defaultdict(list)
    for name, media_type in iteritems(container.mime_map):
        if not (container.has_name(name) and container.exists(name)):
            continue
        if media_type in OEB_DOCS:
            for el, attr, link in iterhtmllinks(container, name):
                ans[link].append(name)
        elif media_type in OEB_STYLES:
            for link in container.iterlinks(name, get_line_numbers=False):
                if is_external(link):
                    ans[link].append(name)
    return dict(ans)
def get_filename(original_url_parsed, response):
    # Choose a filename for a downloaded resource: prefer the server-supplied
    # name (Content-Disposition), fall back to the URL path's last component.
    ans = get_download_filename_from_response(response) or posixpath.basename(original_url_parsed.path) or 'unknown'
    headers = response.info()
    try:
        # First parameter of the Content-Type header, e.g. 'image/png'
        ct = headers.get_params()[0][0].lower()
    except Exception:
        ct = ''
    if ct:
        mt = guess_type(ans)
        if mt != ct:
            # The filename's extension disagrees with the declared
            # Content-Type: append an extension matching the Content-Type
            exts = mimetypes.guess_all_extensions(ct)
            if exts:
                ans += exts[0]
    return ans
def get_content_length(response):
    """Return the Content-Length of *response* as an int, or -1 when the
    header is missing or unparseable."""
    header = response.info().get('Content-Length')
    try:
        return int(header)
    except Exception:
        return -1
class ProgressTracker:

    """Wraps a writable file object and reports download progress after every
    write. close/flush/name are delegated straight to the wrapped file."""

    def __init__(self, fobj, url, sz, progress_report):
        self.fobj = fobj
        self.url, self.sz = url, sz
        self.progress_report = progress_report
        self.close, self.flush, self.name = fobj.close, fobj.flush, fobj.name

    def write(self, data):
        written = self.fobj.write(data)
        # Progress reporting is best effort; never let it break the download
        try:
            self.progress_report(self.url, self.fobj.tell(), self.sz)
        except Exception:
            pass
        return written
def sanitize_file_name(x):
    # Imported lazily to avoid a circular import with the check/parsing module
    from calibre.ebooks.oeb.polish.check.parsing import make_filename_safe
    x = sanitize_file_name_base(x)
    # Collapse runs of dots so the name cannot contain '..'
    while '..' in x:
        x = x.replace('..', '.')
    return make_filename_safe(x)
def download_one(tdir, timeout, progress_report, data_uri_map, url):
    """Download a single external resource (http(s)/ftp, file: or data: URL)
    into a temporary file inside *tdir*.

    Returns (True, (url, suggested_filename, path_to_downloaded_file, mime_type))
    on success, or (False, (url, error_message)) on failure. data_uri_map
    caches already-seen data: URI payloads so identical ones are downloaded
    only once.
    """
    try:
        purl = urlparse(url)
        data_url_key = None
        with NamedTemporaryFile(dir=tdir, delete=False) as df:
            if purl.scheme == 'file':
                path = unquote(purl.path)
                if iswindows and path.startswith('/'):
                    # file:///C:/foo yields a spurious leading slash on windows
                    path = path[1:]
                src = open(path, 'rb')
                filename = os.path.basename(path)
                sz = (src.seek(0, os.SEEK_END), src.tell(), src.seek(0))[1]
            elif purl.scheme == 'data':
                prefix, payload = purl.path.split(',', 1)
                parts = prefix.split(';')
                if parts and parts[-1].lower() == 'base64':
                    payload = re.sub(r'\s+', '', payload)
                    payload = from_base64_bytes(payload)
                else:
                    payload = payload.encode('utf-8')
                # Derive a filename from the media type in the data: prefix.
                # This must happen before the seen_before early return below,
                # which previously referenced filename before assignment
                # (UnboundLocalError).
                ext = 'unknown'
                for x in parts:
                    if '=' not in x and '/' in x:
                        exts = mimetypes.guess_all_extensions(x)
                        if exts:
                            ext = exts[0]
                            break
                filename = 'data-uri.' + ext
                seen_before = data_uri_map.get(payload)
                if seen_before is not None:
                    return True, (url, filename, seen_before, guess_type(seen_before))
                data_url_key = payload
                src = BytesIO(payload)
                sz = len(payload)
            else:
                src = browser().open(url, timeout=timeout)
                filename = get_filename(purl, src)
                sz = get_content_length(src)
            progress_report(url, 0, sz)
            dest = ProgressTracker(df, url, sz, progress_report)
            with closing(src):
                shutil.copyfileobj(src, dest)
            if data_url_key is not None:
                data_uri_map[data_url_key] = dest.name
            filename = sanitize_file_name(filename)
            mt = guess_type(filename)
            if mt in OEB_DOCS:
                # Include the offending filename in the message (the original
                # string was garbled)
                raise ValueError(f'The external resource {url} looks like a HTML document ({filename})')
            if not mt or mt == 'application/octet-stream' or '.' not in filename:
                raise ValueError(f'The external resource {url} is not of a known type')
            return True, (url, filename, dest.name, mt)
    except Exception as err:
        return False, (url, as_unicode(err))
def download_external_resources(container, urls, timeout=60, progress_report=lambda url, done, total: None):
    # Download all *urls* in parallel (10 worker threads) and add each
    # successful download to *container*. Returns (replacements, failures):
    # url -> new container name, and url -> error message.
    failures = {}
    replacements = {}
    data_uri_map = {}
    with TemporaryDirectory('editor-download') as tdir:
        pool = Pool(10)
        with closing(pool):
            for ok, result in pool.imap_unordered(partial(download_one, tdir, timeout, progress_report, data_uri_map), urls):
                if ok:
                    url, suggested_filename, downloaded_file, mt = result
                    with open(downloaded_file, 'rb') as src:
                        name = container.add_file(suggested_filename, src, mt, modify_name_if_needed=True)
                    replacements[url] = name
                else:
                    url, err = result
                    failures[url] = err
    return replacements, failures
def replacer(url_map):
    """Return a callable suitable for Container.replace_links() that rewrites
    URLs according to *url_map*. The callable's ``replaced`` attribute records
    whether any URL was actually changed."""
    def replace(url):
        r = url_map.get(url)
        # Only count an actual rewrite. Previously a URL missing from the map
        # (r is None) also set the flag, since None != url is always True,
        # wrongly marking untouched files as changed.
        replace.replaced |= r is not None and r != url
        return url if r is None else r
    replace.replaced = False
    return replace
def replace_resources(container, urls, replacements):
    # Rewrite links in every file that referenced a successfully downloaded
    # URL so they point at the newly added local resource. Returns True if
    # anything was changed.
    per_file_maps = defaultdict(dict)
    changed = False
    for url, names in iteritems(urls):
        new_name = replacements.get(url)
        if new_name is None:
            continue
        for name in names:
            per_file_maps[name][url] = container.name_to_href(new_name, name)
    for name, url_map in iteritems(per_file_maps):
        link_replacer = replacer(url_map)
        container.replace_links(name, link_replacer)
        changed |= link_replacer.replaced
    return changed
| 7,272 | Python | .py | 175 | 31.462857 | 125 | 0.6 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,372 | import_book.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/import_book.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import sys
from calibre.ebooks.conversion.plumber import Plumber
from calibre.ebooks.epub import initialize_container
from calibre.ebooks.oeb.polish.container import OEB_DOCS, OEB_STYLES, Container
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.logging import default_log
from polyglot.builtins import iteritems
# File extensions that can be imported by import_book_as_epub()
IMPORTABLE = {'htm', 'xhtml', 'html', 'xhtm', 'docx'}


def auto_fill_manifest(container):
    # Add a manifest entry for every file in the container that needs one but
    # is missing from the OPF manifest
    manifest_id_map = container.manifest_id_map
    manifest_name_map = {v:k for k, v in iteritems(manifest_id_map)}
    for name, mt in iteritems(container.mime_map):
        if name not in manifest_name_map and not container.ok_to_be_unmanifested(name):
            mitem = container.generate_item(name, unique_href=False)
            gname = container.href_to_name(mitem.get('href'), container.opf_name)
            if gname != name:
                # Sanity check: with unique_href=False the generated href must
                # map straight back to the original name
                raise ValueError('This should never happen (gname={!r}, name={!r}, href={!r})'.format(gname, name, mitem.get('href')))
            manifest_name_map[name] = mitem.get('id')
            manifest_id_map[mitem.get('id')] = name
def import_book_as_epub(srcpath, destpath, log=default_log):
    # Run the conversion pipeline's input stage on srcpath and package the
    # result as an (unprocessed) EPUB at destpath
    if not destpath.lower().endswith('.epub'):
        raise ValueError('Can only import books into the EPUB format, not %s' % (os.path.basename(destpath)))
    with TemporaryDirectory('eei') as tdir:
        tdir = os.path.abspath(os.path.realpath(tdir))  # Needed to handle the multiple levels of symlinks for /tmp on OS X
        plumber = Plumber(srcpath, tdir, log)
        plumber.setup_options()
        if srcpath.lower().endswith('.opf'):
            plumber.opts.dont_package = True
        if hasattr(plumber.opts, 'no_process'):
            plumber.opts.no_process = True
        plumber.input_plugin.for_viewer = True
        with plumber.input_plugin, open(plumber.input, 'rb') as inf:
            pathtoopf = plumber.input_plugin(inf, plumber.opts, plumber.input_fmt, log, {}, tdir)
        if hasattr(pathtoopf, 'manifest'):
            # The input plugin returned an OEB book object, serialize it to disk
            from calibre.ebooks.oeb.iterator.book import write_oebbook
            pathtoopf = write_oebbook(pathtoopf, tdir)

        c = Container(tdir, pathtoopf, log)
        auto_fill_manifest(c)
        # Auto fix all HTML/CSS
        for name, mt in iteritems(c.mime_map):
            if mt in set(OEB_DOCS) | set(OEB_STYLES):
                # parsed() + dirty() forces a re-serialization, which normalizes the markup
                c.parsed(name)
                c.dirty(name)
        c.commit()

        zf = initialize_container(destpath, opf_name=c.opf_name)
        with zf:
            for name in c.name_path_map:
                zf.writestr(name, c.raw_data(name, decode=False))


if __name__ == '__main__':
    # Usage: python import_book.py <src> <dest.epub>
    import_book_as_epub(sys.argv[-2], sys.argv[-1])
| 2,814 | Python | .py | 54 | 44.037037 | 134 | 0.659141 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,373 | spell.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/spell.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
from collections import Counter, defaultdict
from calibre import replace_entities
from calibre.ebooks.oeb.base import barename
from calibre.ebooks.oeb.polish.container import OPF_NAMESPACES, get_container
from calibre.ebooks.oeb.polish.parsing import parse
from calibre.ebooks.oeb.polish.toc import find_existing_nav_toc, find_existing_ncx_toc
from calibre.spell.break_iterator import index_of, split_into_words
from calibre.spell.dictionary import parse_lang_code
from calibre.utils.icu import ord_string
from polyglot.builtins import iteritems
# Lazily created module-wide singleton, see patterns()
_patterns = None


class Patterns:
    # Compiled regular expressions shared by the spell checking code

    __slots__ = ('sanitize_invisible_pat', 'split_pat', 'digit_pat', 'fr_elision_pat')

    def __init__(self):
        import regex
        # Remove soft hyphens/zero width spaces/control codes
        self.sanitize_invisible_pat = regex.compile(
            r'[\u00ad\u200b\u200c\u200d\ufeff\0-\x08\x0b\x0c\x0e-\x1f\x7f]', regex.VERSION1 | regex.UNICODE)
        # Splits text on runs of non-word characters
        self.split_pat = regex.compile(
            r'\W+', flags=regex.VERSION1 | regex.WORD | regex.FULLCASE | regex.UNICODE)
        # Matches purely numeric tokens, which are never spell checked
        self.digit_pat = regex.compile(
            r'^\d+$', flags=regex.VERSION1 | regex.WORD | regex.UNICODE)
        # French words with prefixes are reduced to the stem word, so that the
        # words appear only once in the word list
        self.fr_elision_pat = regex.compile(
            "^(?:l|d|m|t|s|j|c|ç|lorsqu|puisqu|quoiqu|qu)['’]", flags=regex.UNICODE | regex.VERSION1 | regex.IGNORECASE)
def patterns():
    """Return the module-wide Patterns singleton, creating it on first use."""
    global _patterns
    p = _patterns
    if p is None:
        p = _patterns = Patterns()
    return p
class CharCounter:

    """Accumulates per-codepoint occurrence counts plus the set of files each
    codepoint appears in."""

    def __init__(self):
        self.counter = Counter()  # codepoint -> number of occurrences
        self.chars = defaultdict(set)  # codepoint -> set of file names
        # Alias so callers can feed counts straight into the Counter
        self.update = self.counter.update
class Location:

    """A single occurrence of a word: the file, node and attribute it came
    from, plus any elided prefix stripped from it (e.g. French l')."""

    __slots__ = ('file_name', 'sourceline', 'original_word', 'location_node', 'node_item', 'elided_prefix')

    def __init__(self, file_name=None, elided_prefix='', original_word=None, location_node=None, node_item=(None, None)):
        self.file_name = file_name
        self.elided_prefix = elided_prefix
        self.original_word = original_word
        self.location_node = location_node
        self.node_item = node_item
        self.sourceline = location_node.sourceline

    def __repr__(self):
        return f'{self.original_word} @ {self.file_name}:{self.sourceline}'
    __str__ = __repr__

    def replace(self, new_word):
        # Re-attach the elided prefix that was stripped before spell checking
        self.original_word = self.elided_prefix + new_word
# Running count of all words seen in the file currently being processed
file_word_count = 0


def filter_words(word):
    """Return False for tokens that should not be spell checked: empty
    strings and purely numeric tokens."""
    if not word:
        return False
    return patterns().digit_pat.match(word) is None
def get_words(text, lang):
    """Split *text* into spell-checkable words for *lang*, updating the
    module-level per-file word count along the way."""
    global file_word_count
    try:
        candidates = split_into_words(str(text), lang)
    except (TypeError, ValueError):
        return ()
    file_word_count += len(candidates)
    return [w for w in candidates if filter_words(w)]
def add_words(text, node, words, file_name, locale, node_item):
    """Collect all spell-checkable words in *text* into the *words* dict,
    keyed by (sanitized_word, locale), recording a Location for each
    occurrence. words[None] accumulates the total word count."""
    candidates = get_words(text, locale.langcode)
    if not candidates:
        return
    p = patterns()
    is_fr = locale.langcode == 'fra'
    for word in candidates:
        sword = p.sanitize_invisible_pat.sub('', word).strip()
        elided_prefix = ''
        if is_fr:
            m = p.fr_elision_pat.match(sword)
            # Strip a French elision prefix (l', d', ...) only when something
            # remains after it. The previous guard compared len(sword) against
            # the always-empty elided_prefix, which was vacuous and allowed a
            # word equal to its prefix to be reduced to the empty string.
            if m is not None and len(sword) > len(m.group()):
                elided_prefix = m.group()
                sword = sword[len(elided_prefix):]
        loc = Location(file_name, elided_prefix, word, node, node_item)
        words[(sword, locale)].append(loc)
        words[None] += 1
def add_chars(text, counter, file_name):
    """Record the codepoints of *text* in the CharCounter *counter*,
    attributing each codepoint to *file_name*."""
    if not text:
        return
    if isinstance(text, bytes):
        text = text.decode('utf-8', 'ignore')
    counts = Counter(ord_string(text))
    counter.update(counts)
    for codepoint in counts:
        counter.chars[codepoint].add(file_name)
def add_words_from_attr(node, attr, words, file_name, locale):
    # Collect words from the value of attribute *attr* on *node*
    text = node.get(attr, None)
    if text:
        add_words(text, node, words, file_name, locale, (True, attr))


def count_chars_in_attr(node, attr, counter, file_name, locale):
    # Count characters in the value of attribute *attr* on *node*
    text = node.get(attr, None)
    if text:
        add_chars(text, counter, file_name)


def add_words_from_text(node, attr, words, file_name, locale):
    # Collect words from node.text or node.tail (attr is 'text' or 'tail')
    add_words(getattr(node, attr), node, words, file_name, locale, (False, attr))


def count_chars_in_text(node, attr, counter, file_name, locale):
    # Count characters in node.text or node.tail (attr is 'text' or 'tail')
    add_chars(getattr(node, attr), counter, file_name)
def add_words_from_escaped_html(text, words, file_name, node, attr, locale):
    # *text* contains escaped HTML markup (e.g. the OPF <description>).
    # Unescape and parse it, collect words from the resulting tree, then
    # re-point every Location at the original *node*/*attr* so replacements
    # can be written back to the escaped source.
    text = replace_entities(text)
    root = parse('<html><body><div>%s</div></body></html>' % text, decoder=lambda x:x.decode('utf-8'))
    ewords = defaultdict(list)
    # ewords[None] holds the running word count, mirroring get_all_words()
    ewords[None] = 0
    read_words_from_html(root, ewords, file_name, locale)
    words[None] += ewords.pop(None)
    for k, locs in iteritems(ewords):
        for loc in locs:
            loc.location_node, loc.node_item = node, (False, attr)
        words[k].extend(locs)
def count_chars_in_escaped_html(text, counter, file_name, node, attr, locale):
    # node and attr are accepted for signature parity with
    # add_words_from_escaped_html(), but are not needed when only counting.
    markup = replace_entities(text)
    root = parse('<html><body><div>%s</div></body></html>' % markup, decoder=lambda raw: raw.decode('utf-8'))
    count_chars_in_html(root, counter, file_name, locale)
# Clark notation name of the opf:file-as attribute
_opf_file_as = '{%s}file-as' % OPF_NAMESPACES['opf']
# OPF metadata elements whose textual content is spell checked
opf_spell_tags = {'title', 'creator', 'subject', 'description', 'publisher'}

# We can only use barename() for tag names and simple attribute checks so that
# this code matches up with the syntax highlighter base spell checking


def read_words_from_opf(root, words, file_name, book_locale):
    # Collect spell-checkable words from the OPF metadata elements
    for tag in root.iterdescendants('*'):
        if barename(tag.tag) in opf_spell_tags:
            if barename(tag.tag) == 'description':
                # The description's content is escaped HTML markup
                if tag.text:
                    add_words_from_escaped_html(tag.text, words, file_name, tag, 'text', book_locale)
                for child in tag:
                    if child.tail:
                        add_words_from_escaped_html(child.tail, words, file_name, child, 'tail', book_locale)
            else:
                if tag.text:
                    add_words_from_text(tag, 'text', words, file_name, book_locale)
                for child in tag:
                    if child.tail:
                        add_words_from_text(child, 'tail', words, file_name, book_locale)
            add_words_from_attr(tag, _opf_file_as, words, file_name, book_locale)
def count_chars_in_opf(root, counter, file_name, book_locale):
    """Count the characters of all spell-checked OPF metadata fields,
    mirroring the traversal of read_words_from_opf()."""
    for tag in root.iterdescendants('*'):
        if barename(tag.tag) in opf_spell_tags:
            if barename(tag.tag) == 'description':
                if tag.text:
                    count_chars_in_escaped_html(tag.text, counter, file_name, tag, 'text', book_locale)
                for child in tag:
                    if child.tail:
                        # Pass child (not tag) for parity with read_words_from_opf()
                        count_chars_in_escaped_html(child.tail, counter, file_name, child, 'tail', book_locale)
            else:
                if tag.text:
                    count_chars_in_text(tag, 'text', counter, file_name, book_locale)
                for child in tag:
                    if child.tail:
                        # Fixed: previously passed `tag` here, counting the
                        # parent's tail once per child instead of the child's
                        # own tail text
                        count_chars_in_text(child, 'tail', counter, file_name, book_locale)
            count_chars_in_attr(tag, _opf_file_as, counter, file_name, book_locale)
# NCX elements whose textual content is spell checked
ncx_spell_tags = {'text'}
# All XML (non-HTML) tag names subject to spell checking
xml_spell_tags = opf_spell_tags | ncx_spell_tags


def read_words_from_ncx(root, words, file_name, book_locale):
    # Collect words from all <text> elements of the NCX table of contents
    for tag in root.xpath('//*[local-name()="text"]'):
        if tag.text is not None:
            add_words_from_text(tag, 'text', words, file_name, book_locale)


def count_chars_in_ncx(root, counter, file_name, book_locale):
    # Count characters in all <text> elements of the NCX table of contents
    for tag in root.xpath('//*[local-name()="text"]'):
        if tag.text is not None:
            count_chars_in_text(tag, 'text', counter, file_name, book_locale)
# HTML elements whose content is never spell checked
html_spell_tags = {'script', 'style', 'link'}


def read_words_from_html_tag(tag, words, file_name, parent_locale, locale):
    # Words in tag.text belong to *tag* itself (checked in *locale*); words in
    # tag.tail belong to the parent element (checked in *parent_locale*)
    if tag.text is not None and isinstance(tag.tag, str) and barename(tag.tag) not in html_spell_tags:
        add_words_from_text(tag, 'text', words, file_name, locale)
    for attr in {'alt', 'title'}:
        add_words_from_attr(tag, attr, words, file_name, locale)
    if tag.tail is not None and tag.getparent() is not None and barename(tag.getparent().tag) not in html_spell_tags:
        add_words_from_text(tag, 'tail', words, file_name, parent_locale)


def count_chars_in_html_tag(tag, counter, file_name, parent_locale, locale):
    # Mirrors read_words_from_html_tag(), but only counts characters
    if tag.text is not None and isinstance(tag.tag, str) and barename(tag.tag) not in html_spell_tags:
        count_chars_in_text(tag, 'text', counter, file_name, locale)
    for attr in {'alt', 'title'}:
        count_chars_in_attr(tag, attr, counter, file_name, locale)
    if tag.tail is not None and tag.getparent() is not None and barename(tag.getparent().tag) not in html_spell_tags:
        count_chars_in_text(tag, 'tail', counter, file_name, parent_locale)
def locale_from_tag(tag):
    # Return the locale explicitly declared on *tag*, checking the plain lang
    # attribute first and then xml:lang, or None when neither yields a
    # parseable language code. (Deduplicates the two previously identical
    # branches without changing the lookup order.)
    a = tag.attrib
    for attr in ('lang', '{http://www.w3.org/XML/1998/namespace}lang'):
        if attr not in a:
            continue
        try:
            loc = parse_lang_code(tag.get(attr))
        except ValueError:
            loc = None
        if loc is not None:
            return loc
def read_words_from_html(root, words, file_name, book_locale):
    # Walk the whole tree iteratively, tracking the effective language at
    # each node: lang/xml:lang attributes override the inherited locale
    stack = [(root, book_locale)]
    while stack:
        parent, parent_locale = stack.pop()
        locale = locale_from_tag(parent) or parent_locale
        read_words_from_html_tag(parent, words, file_name, parent_locale, locale)
        stack.extend((tag, locale) for tag in parent)


def count_chars_in_html(root, counter, file_name, book_locale):
    # Same traversal as read_words_from_html(), but only counting characters
    stack = [(root, book_locale)]
    while stack:
        parent, parent_locale = stack.pop()
        locale = locale_from_tag(parent) or parent_locale
        count_chars_in_html_tag(parent, counter, file_name, parent_locale, locale)
        stack.extend((tag, locale) for tag in parent)
def group_sort(locations):
    """Sort locations by the order in which their files were first seen, and
    within a file by source line number."""
    file_order = {}
    for loc in locations:
        file_order.setdefault(loc.file_name, len(file_order))
    return sorted(locations, key=lambda loc: (file_order[loc.file_name], loc.sourceline or 0))
def get_checkable_file_names(container):
    """Return (file_names, ncx_toc): every file whose text is spell checked
    (spine, OPF, and the ToC files), plus the name of the NCX ToC when the
    book has a usable one (else None)."""
    file_names = [name for name, linear in container.spine_names]
    file_names.append(container.opf_name)
    ncx_toc = find_existing_ncx_toc(container)
    if ncx_toc is not None and container.exists(ncx_toc) and ncx_toc not in file_names:
        file_names.append(ncx_toc)
    else:
        ncx_toc = None
    nav_toc = find_existing_nav_toc(container)
    if nav_toc is not None and container.exists(nav_toc) and nav_toc not in file_names:
        file_names.append(nav_toc)
    return file_names, ncx_toc
def root_is_excluded_from_spell_check(root):
    """Return True if any direct child of *root* contains the magic marker
    text 'calibre-no-spell-check' (case-insensitive)."""
    marker = 'calibre-no-spell-check'
    return any(
        (getattr(child, 'text', '') or '').strip().lower() == marker
        for child in root
    )
def get_all_words(container, book_locale, get_word_count=False, excluded_files=(), file_words_counts=None):
    # Collect every spell-checkable word in the book. Returns a dict mapping
    # (word, locale) -> sorted list of Locations; with get_word_count=True the
    # total number of words seen is returned as well. Per-file totals are
    # stored into file_words_counts (via the module-global file_word_count
    # that the reader functions increment).
    global file_word_count
    if file_words_counts is None:
        file_words_counts = {}
    words = defaultdict(list)
    # words[None] accumulates the total count of words seen
    words[None] = 0
    file_names, ncx_toc = get_checkable_file_names(container)
    for file_name in file_names:
        if not container.exists(file_name) or file_name in excluded_files:
            continue
        root = container.parsed(file_name)
        if root_is_excluded_from_spell_check(root):
            continue
        file_word_count = 0
        if file_name == container.opf_name:
            read_words_from_opf(root, words, file_name, book_locale)
        elif file_name == ncx_toc:
            read_words_from_ncx(root, words, file_name, book_locale)
        elif hasattr(root, 'xpath'):
            read_words_from_html(root, words, file_name, book_locale)
        file_words_counts[file_name] = file_word_count
    file_word_count = 0
    count = words.pop(None)
    ans = {k:group_sort(v) for k, v in iteritems(words)}
    if get_word_count:
        return count, ans
    return ans
def count_all_chars(container, book_locale):
    # Count every character used in the book's spell-checked text, recording
    # in the returned CharCounter which files each character appears in
    ans = CharCounter()
    file_names, ncx_toc = get_checkable_file_names(container)
    for file_name in file_names:
        if not container.exists(file_name):
            continue
        root = container.parsed(file_name)
        if file_name == container.opf_name:
            count_chars_in_opf(root, ans, file_name, book_locale)
        elif file_name == ncx_toc:
            count_chars_in_ncx(root, ans, file_name, book_locale)
        elif hasattr(root, 'xpath'):
            count_chars_in_html(root, ans, file_name, book_locale)
    return ans
def merge_locations(locs1, locs2):
    # Combine two location lists, restoring the canonical grouped ordering
    return group_sort(locs1 + locs2)


def replace(text, original_word, new_word, lang):
    # Replace every locale-aware occurrence of original_word in text with
    # new_word. Returns (new_text, changed).
    indices = []
    original_word, new_word, text = str(original_word), str(new_word), str(text)
    q = text
    offset = 0
    # First collect the absolute index of every occurrence; index_of() finds
    # word boundaries using the locale's break iterator
    while True:
        idx = index_of(original_word, q, lang=lang)
        if idx == -1:
            break
        indices.append(offset + idx)
        offset += idx + len(original_word)
        q = text[offset:]
    # Substitute from the end so earlier indices remain valid
    for idx in reversed(indices):
        text = text[:idx] + new_word + text[idx+len(original_word):]
    return text, bool(indices)
def replace_word(container, new_word, locations, locale, undo_cache=None):
    '''
    Replace the word at each of ``locations`` with ``new_word``, writing the
    modified trees back into ``container``. Returns the set of file names that
    were changed. If ``undo_cache`` (a dict) is given, the pre-replacement
    text for every touched node/attribute is stored in it so the edit can be
    reverted with undo_replace_word().
    '''
    changed = set()
    for loc in locations:
        node = loc.location_node
        # node_item says where the word lives: an XML attribute (is_attr is
        # True) or a plain attribute of the node object (e.g. text/tail)
        is_attr, attr = loc.node_item
        if is_attr:
            text = node.get(attr)
        else:
            text = getattr(node, attr)
        # elided_prefix restores leading characters stripped when the word
        # was originally indexed
        replacement = loc.elided_prefix + new_word
        rtext, replaced = replace(text, loc.original_word, replacement, locale.langcode)
        if replaced:
            if undo_cache is not None:
                undo_cache[(loc.file_name, node, is_attr, attr)] = text
            if is_attr:
                node.set(attr, rtext)
            else:
                setattr(node, attr, rtext)
            container.replace(loc.file_name, node.getroottree().getroot())
            changed.add(loc.file_name)
    return changed
def undo_replace_word(container, undo_cache):
    ''' Revert edits made by replace_word(), restoring the cached original
    text on each node and writing the trees back into the container.
    Returns the set of file names that were restored. '''
    changed = set()
    for (file_name, node, is_attr, attr), text in undo_cache.items():
        if is_attr:
            node.set(attr, text)
        else:
            setattr(node, attr, text)
        container.replace(file_name, node.getroottree().getroot())
        changed.add(file_name)
    return changed
if __name__ == '__main__':
    # Ad-hoc debugging CLI: open the book given as the last command line
    # argument and pretty-print its spell-check word list.
    import pprint
    from calibre.gui2.tweak_book import dictionaries, set_book_locale
    container = get_container(sys.argv[-1], tweak_mode=True)
    set_book_locale(container.mi.language)
    pprint.pprint(get_all_words(container, dictionaries.default_locale))
| 15,116 | Python | .py | 323 | 38.718266 | 121 | 0.639301 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,374 | images.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/images.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
import os
from functools import partial
from threading import Event, Thread
from calibre import detect_ncpus, filesystem_encoding, force_unicode, human_readable
from polyglot.builtins import iteritems
from polyglot.queue import Empty, Queue
class Worker(Thread):
    '''
    Daemon thread that repeatedly pulls (name, path, mime_type) jobs off a
    shared queue, compresses the image file at path in place, and records the
    outcome in the shared ``results`` dict as
    ``name -> (True, (size_before, size_after))`` on success or
    ``name -> (False, traceback_text)`` on failure.
    '''
    daemon = True

    def __init__(self, abort, name, queue, results, jpeg_quality, webp_quality, progress_callback):
        '''
        :param abort: threading.Event; when set, the worker stops taking jobs
        :param queue: queue of (name, path, mime_type) jobs; task_done() is called per job
        :param results: shared dict to store per-image outcomes in
        :param jpeg_quality: lossy JPEG quality, or None for lossless optimization
        :param webp_quality: lossy WebP quality, or None for lossless optimization
        :param progress_callback: called with the name of every finished job
        '''
        Thread.__init__(self, name=name)
        self.queue, self.results = queue, results
        self.progress_callback = progress_callback
        self.jpeg_quality = jpeg_quality
        self.webp_quality = webp_quality
        self.abort = abort
        # The thread starts working as soon as it is constructed
        self.start()

    def run(self):
        # Drain the queue until it is empty or an abort is requested
        while not self.abort.is_set():
            try:
                name, path, mt = self.queue.get_nowait()
            except Empty:
                break
            try:
                self.compress(name, path, mt)
            except Exception:
                import traceback
                self.results[name] = (False, traceback.format_exc())
            finally:
                # Report progress best-effort: a failing callback must not
                # kill the worker or skip task_done()
                try:
                    self.progress_callback(name)
                except Exception:
                    import traceback
                    traceback.print_exc()
                self.queue.task_done()

    def compress(self, name, path, mime_type):
        from calibre.utils.img import encode_jpeg, encode_webp, optimize_jpeg, optimize_png, optimize_webp
        # Pick the codec: lossless optimize_* when no quality was given,
        # otherwise lossy re-encoding at the requested quality
        if 'png' in mime_type:
            func = optimize_png
        elif 'webp' in mime_type:
            if self.webp_quality is None:
                func = optimize_webp
            else:
                # Fixed: previously passed self.jpeg_quality to the WebP
                # encoder, ignoring the configured webp_quality entirely
                func = partial(encode_webp, quality=self.webp_quality)
        elif self.jpeg_quality is None:
            func = optimize_jpeg
        else:
            func = partial(encode_jpeg, quality=self.jpeg_quality)
        before = os.path.getsize(path)
        with open(path, 'rb') as f:
            old_data = f.read()
        func(path)
        after = os.path.getsize(path)
        # If "compression" did not shrink the file, restore the original bytes
        if after >= before:
            with open(path, 'wb') as f:
                f.write(old_data)
            after = before
        self.results[name] = (True, (before, after))
def get_compressible_images(container):
    ''' The set of canonical names of all raster images in the manifest whose
    format we know how to compress (PNG, JPEG and WebP). '''
    type_map = container.manifest_type_map
    compressible = set()
    for media_type in ('image/png', 'image/jpg', 'image/jpeg', 'image/webp'):
        compressible.update(type_map.get(media_type, ()))
    return compressible
def compress_images(container, report=None, names=None, jpeg_quality=None, webp_quality=None, progress_callback=lambda n, t, name:True):
    '''
    Compress all compressible images in the book, using a pool of worker
    threads (one per CPU, at most one per image).

    :param report: optional callable receiving human readable progress strings
    :param names: optional collection restricting which images are processed
    :param jpeg_quality: lossy JPEG quality, or None for lossless optimization
    :param webp_quality: lossy WebP quality, or None for lossless optimization
    :param progress_callback: called as (num_done, total, name); return False to abort
    :return: (changed, results) where results maps name -> (ok, payload)
    '''
    images = get_compressible_images(container)
    if names is not None:
        images &= set(names)
    results = {}
    queue = Queue()
    abort = Event()
    seen = set()
    num_to_process = 0
    # De-duplicate by case-normalized path so the same underlying file is
    # only compressed once even if referenced under several names
    for name in sorted(images):
        path = os.path.abspath(container.get_file_path_for_processing(name))
        path_key = os.path.normcase(path)
        if path_key not in seen:
            num_to_process += 1
            queue.put((name, path, container.mime_map[name]))
            seen.add(path_key)
    def pc(name):
        keep_going = progress_callback(len(results), num_to_process, name)
        if not keep_going:
            abort.set()
    progress_callback(0, num_to_process, '')
    # Workers start running in their constructor; the list itself is unused
    [Worker(abort, 'CompressImage%d' % i, queue, results, jpeg_quality, webp_quality, pc) for i in range(min(detect_ncpus(), num_to_process))]
    # Block until every queued job has called task_done()
    queue.join()
    before_total = after_total = 0
    processed_num = 0
    changed = False
    for name, (ok, res) in iteritems(results):
        name = force_unicode(name, filesystem_encoding)
        if ok:
            before, after = res
            if before != after:
                changed = True
                processed_num += 1
            before_total += before
            after_total += after
            if report:
                if before != after:
                    report(_('{0} compressed from {1} to {2} bytes [{3:.1%} reduction]').format(
                        name, human_readable(before), human_readable(after), (before - after)/before))
                else:
                    report(_('{0} could not be further compressed').format(name))
        else:
            # NOTE(review): unlike the success branch, this calls report()
            # without checking it for None — a failed image with report=None
            # would raise TypeError. Confirm whether callers always pass report.
            report(_('Failed to process {0} with error:').format(name))
            report(res)
    if report:
        if changed:
            report('')
            report(_('Total image filesize reduced from {0} to {1} [{2:.1%} reduction, {3} images changed]').format(
                human_readable(before_total), human_readable(after_total), (before_total - after_total)/before_total, processed_num))
        else:
            report(_('Images are already fully optimized'))
    return changed, results
| 4,784 | Python | .py | 117 | 30.74359 | 142 | 0.586933 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,375 | container.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/container.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2013, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import hashlib
import logging
import os
import re
import shutil
import sys
import unicodedata
import uuid
from collections import defaultdict
from io import BytesIO
from itertools import count
from css_parser import getUrls, replaceUrls
from calibre import CurrentDir, walk
from calibre.constants import iswindows
from calibre.customize.ui import plugin_for_input_format, plugin_for_output_format
from calibre.ebooks import escape_xpath_attr
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.conversion.plugins.epub_input import ADOBE_OBFUSCATION, IDPF_OBFUSCATION, decrypt_font_data
from calibre.ebooks.conversion.preprocess import CSSPreProcessor as cssp
from calibre.ebooks.conversion.preprocess import HTMLPreProcessor
from calibre.ebooks.metadata.opf3 import CALIBRE_PREFIX, ensure_prefix, items_with_property, read_prefixes
from calibre.ebooks.metadata.utils import parse_opf_version
from calibre.ebooks.mobi import MobiError
from calibre.ebooks.mobi.reader.headers import MetadataHeader
from calibre.ebooks.oeb.base import (
DC11_NS,
OEB_DOCS,
OEB_STYLES,
OPF,
OPF2_NS,
Manifest,
itercsslinks,
iterlinks,
rewrite_links,
serialize,
urlquote,
urlunquote,
)
from calibre.ebooks.oeb.parse_utils import NotHTML, parse_html
from calibre.ebooks.oeb.polish.errors import DRMError, InvalidBook
from calibre.ebooks.oeb.polish.parsing import parse as parse_html_tweak
from calibre.ebooks.oeb.polish.utils import OEB_FONTS, CommentFinder, PositionFinder, adjust_mime_for_epub, guess_type, parse_css
from calibre.ptempfile import PersistentTemporaryDirectory, PersistentTemporaryFile
from calibre.utils.filenames import hardlink_file, nlinks_file, retry_on_fail
from calibre.utils.ipc.simple_worker import WorkerError, fork_job
from calibre.utils.logging import default_log
from calibre.utils.xml_parse import safe_xml_fromstring
from calibre.utils.zipfile import ZipFile
from polyglot.builtins import iteritems
from polyglot.urllib import urlparse
exists, join, relpath = os.path.exists, os.path.join, os.path.relpath
OPF_NAMESPACES = {'opf':OPF2_NS, 'dc':DC11_NS}
null = object()
OEB_FONTS # for plugin compat
class CSSPreProcessor(cssp):
    ''' Minimal CSS preprocessor for the container: overrides __call__ to
    apply only the Microsoft-specific cleanup substitution. MS_PAT and
    ms_sub are presumably defined on the conversion preprocessor base class
    (not visible here) — TODO confirm. '''
    def __call__(self, data):
        return self.MS_PAT.sub(self.ms_sub, data)
def clone_dir(src, dest):
    ''' Clone a folder using hard links for the files, dest must already exist '''
    for x in os.listdir(src):
        dpath = os.path.join(dest, x)
        spath = os.path.join(src, x)
        if os.path.isdir(spath):
            os.mkdir(dpath)
            # Recurse into sub-folders
            clone_dir(spath, dpath)
        else:
            try:
                hardlink_file(spath, dpath)
            except Exception:
                # Hard linking can fail (cross-device links, filesystems
                # without hard link support); fall back to a full copy.
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are no longer swallowed.
                shutil.copy2(spath, dpath)
def clone_container(container, dest_dir):
    ' Efficiently clone a container using hard links '
    dest_dir = os.path.abspath(os.path.realpath(dest_dir))
    cloned_state = container.clone_data(dest_dir)
    cls = type(container)
    # The base Container constructor takes (rootpath, opfpath, log) while
    # subclasses take (path, log); both accept clone_data as a keyword
    ctor_args = [None, None] if cls is Container else [None]
    ctor_args.append(container.log)
    return cls(*ctor_args, clone_data=cloned_state)
def name_to_abspath(name, root):
    ''' Convert a canonical book name (POSIX separators) into an absolute
    OS-dependent path under root. '''
    parts = name.split('/')
    return os.path.abspath(os.path.join(root, *parts))
def abspath_to_name(path, root):
    ''' Convert an absolute OS path into a canonical book name relative to
    root, always using POSIX separators. '''
    rel = os.path.relpath(os.path.abspath(path), root)
    return rel.replace(os.sep, '/')
def name_to_href(name, root, base=None, quote=urlquote):
    ''' Convert a canonical name into an href relative to base (a directory
    derived from the name ``base``, or root when base is None), URL-quoted
    with the given quote function. '''
    target = name_to_abspath(name, root)
    if base is None:
        start = root
    else:
        start = os.path.dirname(name_to_abspath(base, root))
    rel = relpath(target, start).replace(os.sep, '/')
    return quote(rel)
def href_to_name(href, root, base=None):
    '''
    Convert an href (relative to the file named ``base``, or to ``root`` when
    base is None) into a canonical book name. Returns None for hrefs that
    cannot refer to a file inside the book: absolute/scheme URLs,
    fragment-only links, malformed URLs and Windows-unsafe paths.
    '''
    base = root if base is None else os.path.dirname(name_to_abspath(base, root))
    try:
        purl = urlparse(href)
    except ValueError:
        return None
    if purl.scheme or not purl.path:
        return None
    href = urlunquote(purl.path)
    if iswindows and ':' in href:
        # path manipulations on windows fail for paths with : in them, so we
        # assume all such paths are invalid/absolute paths.
        return None
    fullpath = os.path.join(base, *href.split('/'))
    try:
        # Normalize to NFC to match the container's canonical name form
        return unicodedata.normalize('NFC', abspath_to_name(fullpath, root))
    except ValueError:
        return None
def seconds_to_timestamp(duration: float) -> str:
    '''
    Convert a duration in seconds into an HH:MM:SS[.mmm] timestamp (SMIL
    style clock value). The fractional part is zero padded to three digits:
    1.05 seconds renders as 00:00:01.050, not 00:00:01.50 (which would be
    read as 500ms).
    '''
    seconds = int(duration)
    float_part = int((duration - seconds) * 1000)
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    ans = f'{hours:02d}:{minutes:02d}:{seconds:02d}'
    if float_part:
        # Pad to exactly three digits: this field is milliseconds
        ans += f'.{float_part:03d}'
    return ans
class ContainerBase: # {{{
    '''
    A base class that implements just the parsing methods. Useful to create
    virtual containers for testing.
    '''
    #: The mode used to parse HTML and CSS (polishing uses tweak_mode=False and the editor uses tweak_mode=True)
    tweak_mode = False
    def __init__(self, log):
        self.log = log
        # name -> parsed object (lxml tree / css_parser stylesheet) cache
        self.parsed_cache = {}
        # name -> mime type
        self.mime_map = {}
        # name -> encoding used when the file was last decoded
        self.encoding_map = {}
        self.html_preprocessor = HTMLPreProcessor()
        self.css_preprocessor = CSSPreProcessor()
    def guess_type(self, name):
        ' Return the expected mimetype for the specified file name based on its extension. '
        # opf_version_parsed is provided by subclasses (e.g. Container)
        return adjust_mime_for_epub(filename=name, opf_version=self.opf_version_parsed)
    def decode(self, data, normalize_to_nfc=True):
        """
        Automatically decode ``data`` into a ``unicode`` object.

        Tries, in order: an explicit BOM, UTF-8, and finally the heuristics in
        xml_to_unicode(). Sets self.used_encoding to the encoding that worked
        and normalizes all line endings to ``\\n``.

        :param normalize_to_nfc: Normalize returned unicode to the NFC normal form as is required by both the EPUB and AZW3 formats.
        """
        def fix_data(d):
            # Normalize Windows/Mac line endings to Unix
            return d.replace('\r\n', '\n').replace('\r', '\n')
        if isinstance(data, str):
            return fix_data(data)
        bom_enc = None
        # Detect and strip a UTF-32/UTF-16/UTF-8 byte order mark, if any
        if data[:4] in {b'\0\0\xfe\xff', b'\xff\xfe\0\0'}:
            bom_enc = {b'\0\0\xfe\xff':'utf-32-be',
                    b'\xff\xfe\0\0':'utf-32-le'}[data[:4]]
            data = data[4:]
        elif data[:2] in {b'\xff\xfe', b'\xfe\xff'}:
            bom_enc = {b'\xff\xfe':'utf-16-le', b'\xfe\xff':'utf-16-be'}[data[:2]]
            data = data[2:]
        elif data[:3] == b'\xef\xbb\xbf':
            bom_enc = 'utf-8'
            data = data[3:]
        if bom_enc is not None:
            try:
                self.used_encoding = bom_enc
                return fix_data(data.decode(bom_enc))
            except UnicodeDecodeError:
                pass
        # No (usable) BOM: try UTF-8 before falling back to heuristics
        try:
            self.used_encoding = 'utf-8'
            return fix_data(data.decode('utf-8'))
        except UnicodeDecodeError:
            pass
        data, self.used_encoding = xml_to_unicode(data)
        if normalize_to_nfc:
            data = unicodedata.normalize('NFC', data)
        return fix_data(data)
    def parse_xml(self, data):
        ''' Parse raw bytes as XML, returning an lxml root element (NFC normalized). '''
        data, self.used_encoding = xml_to_unicode(
            data, strip_encoding_pats=True, assume_utf8=True, resolve_entities=True)
        data = unicodedata.normalize('NFC', data)
        return safe_xml_fromstring(data)
    def parse_xhtml(self, data, fname='<string>', force_html5_parse=False):
        ''' Parse raw bytes as (X)HTML. In tweak mode the editor's parser is
        used; otherwise fall back to plain XML parsing for non-HTML data. '''
        if self.tweak_mode:
            return parse_html_tweak(data, log=self.log, decoder=self.decode, force_html5_parse=force_html5_parse)
        else:
            try:
                return parse_html(
                    data, log=self.log, decoder=self.decode,
                    preprocessor=self.html_preprocessor, filename=fname,
                    non_html_file_tags={'ncx'})
            except NotHTML:
                return self.parse_xml(data)
    def parse_css(self, data, fname='<string>', is_declaration=False):
        ''' Parse raw bytes as a CSS stylesheet (or a bare declaration block). '''
        return parse_css(data, fname=fname, is_declaration=is_declaration, decode=self.decode, log_level=logging.WARNING,
                         css_preprocessor=(None if self.tweak_mode else self.css_preprocessor))
# }}}
class Container(ContainerBase): # {{{
'''
A container represents an open e-book as a folder full of files and an
OPF file. There are two important concepts:
* The root folder. This is the base of the e-book. All the e-books
files are inside this folder or in its sub-folders.
* Names: These are paths to the books' files relative to the root
folder. They always contain POSIX separators and are unquoted. They
can be thought of as canonical identifiers for files in the book.
Most methods on the container object work with names. Names are always
in the NFC Unicode normal form.
* Clones: the container object supports efficient on-disk cloning, which is used to
implement checkpoints in the e-book editor. In order to make this work, you should
never access files on the filesystem directly. Instead, use :meth:`raw_data` or
:meth:`open` to read/write to component files in the book.
When converting between hrefs and names use the methods provided by this
class, they assume all hrefs are quoted.
'''
    #: The type of book (epub for EPUB files and azw3 for AZW3 files)
    book_type = 'oeb'
    #: If this container represents an unzipped book (a directory)
    is_dir = False
    # Capability flags — presumably overridden by format-specific subclasses
    # (not visible here) to advertise what the format supports. TODO confirm.
    SUPPORTS_TITLEPAGES = True
    SUPPORTS_FILENAMES = True
    @property
    def book_type_for_display(self):
        ' The book type in upper case, suitable for showing to users. '
        return self.book_type.upper()
    def __init__(self, rootpath, opfpath, log, clone_data=None):
        '''
        :param rootpath: base folder of the unzipped book
        :param opfpath: absolute path of the OPF file; used to pick the right
            OPF when more than one .opf exists in the book
        :param clone_data: internal — state produced by :meth:`clone_data` on
            another container; when given, rootpath/opfpath are not scanned
        '''
        ContainerBase.__init__(self, log)
        self.root = clone_data['root'] if clone_data is not None else os.path.abspath(rootpath)
        self.name_path_map = {}
        self.dirtied = set()
        self.pretty_print = set()
        self.cloned = False
        # Per-name caches that rename()/clone machinery must keep in sync
        self.cache_names = ('parsed_cache', 'mime_map', 'name_path_map', 'encoding_map', 'dirtied', 'pretty_print')
        self.href_to_name_cache = {}
        if clone_data is not None:
            # Fast path: adopt the pre-computed maps from the source container
            self.cloned = True
            for x in ('name_path_map', 'opf_name', 'mime_map', 'pretty_print', 'encoding_map', 'tweak_mode'):
                setattr(self, x, clone_data[x])
            self.opf_dir = os.path.dirname(self.name_path_map[self.opf_name])
            return
        # Map of relative paths with '/' separators from root of unzipped ePub
        # to absolute paths on filesystem with os-specific separators
        opfpath = os.path.abspath(os.path.realpath(opfpath))
        all_opf_files = []
        for dirpath, _dirnames, filenames in os.walk(self.root):
            for f in filenames:
                path = join(dirpath, f)
                name = self.abspath_to_name(path)
                self.name_path_map[name] = path
                self.mime_map[name] = guess_type(path)
                # Special case if we have stumbled onto the opf
                if path == opfpath:
                    self.opf_name = name
                    self.opf_dir = os.path.dirname(path)
                    self.mime_map[name] = guess_type('a.opf')
                if path.lower().endswith('.opf'):
                    all_opf_files.append((name, os.path.dirname(path)))
        if not hasattr(self, 'opf_name') and all_opf_files:
            # The requested opfpath was not found; fall back to the first OPF
            # encountered anywhere in the book
            self.opf_name, self.opf_dir = all_opf_files[0]
            self.mime_map[self.opf_name] = guess_type('a.opf')
        if not hasattr(self, 'opf_name'):
            raise InvalidBook('Could not locate opf file: %r'%opfpath)
        # Update mime map with data from the OPF
        self.refresh_mime_map()
    def refresh_mime_map(self):
        ''' Sync mime_map with the media-type declarations in the OPF
        manifest, which take precedence over extension-based guesses. '''
        for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
            href = item.get('href')
            try:
                name = self.href_to_name(href, self.opf_name)
            except ValueError:
                continue # special filenames such as CON on windows cause relpath to fail
            mt = item.get('media-type')
            if name in self.mime_map and name != self.opf_name and mt:
                # some epubs include the opf in the manifest with an incorrect mime type
                self.mime_map[name] = mt
def data_for_clone(self, dest_dir=None):
dest_dir = dest_dir or self.root
return {
'root': dest_dir,
'opf_name': self.opf_name,
'mime_map': self.mime_map.copy(),
'pretty_print': set(self.pretty_print),
'encoding_map': self.encoding_map.copy(),
'tweak_mode': self.tweak_mode,
'name_path_map': {
name:os.path.join(dest_dir, os.path.relpath(path, self.root))
for name, path in iteritems(self.name_path_map)}
}
    def clone_data(self, dest_dir):
        ''' Flush pending changes to disk, hard-link the book's files into
        dest_dir and return the state dict needed to construct the clone. '''
        # Explicitly use Container.commit so subclass overrides (which may
        # re-zip the book, etc.) are bypassed for cloning
        Container.commit(self, keep_parsed=False)
        self.cloned = True
        clone_dir(self.root, dest_dir)
        return self.data_for_clone(dest_dir)
def add_name_to_manifest(self, name, process_manifest_item=None):
' Add an entry to the manifest for a file with the specified name. Returns the manifest id. '
all_ids = {x.get('id') for x in self.opf_xpath('//*[@id]')}
c = 0
item_id = 'id'
while item_id in all_ids:
c += 1
item_id = 'id' + '%d'%c
manifest = self.opf_xpath('//opf:manifest')[0]
href = self.name_to_href(name, self.opf_name)
item = manifest.makeelement(OPF('item'),
id=item_id, href=href)
item.set('media-type', self.mime_map[name])
self.insert_into_xml(manifest, item)
if process_manifest_item is not None:
process_manifest_item(item)
self.dirty(self.opf_name)
return item_id
def manifest_has_name(self, name):
''' Return True if the manifest has an entry corresponding to name '''
all_names = {self.href_to_name(x.get('href'), self.opf_name) for x in self.opf_xpath('//opf:manifest/opf:item[@href]')}
return name in all_names
def make_name_unique(self, name):
''' Ensure that `name` does not already exist in this book. If it does, return a modified version that does not exist. '''
counter = count()
while self.has_name_case_insensitive(name) or self.manifest_has_name(name):
c = next(counter) + 1
base, ext = name.rpartition('.')[::2]
if c > 1:
base = base.rpartition('-')[0]
name = '%s-%d.%s' % (base, c, ext)
return name
    def add_file(self, name, data, media_type=None, spine_index=None, modify_name_if_needed=False, process_manifest_item=None):
        ''' Add a file to this container. Entries for the file are
        automatically created in the OPF manifest and spine
        (if the file is a text document)

        :param data: the file contents as bytes, or a file-like object to copy from
        :param spine_index: position for the spine entry (text documents only)
        :param modify_name_if_needed: if True, silently pick a unique name instead of raising
        :param process_manifest_item: optional callable applied to the new manifest element
        :return: the (possibly modified) canonical name of the added file
        '''
        if '..' in name:
            raise ValueError('Names are not allowed to have .. in them')
        href = self.name_to_href(name, self.opf_name)
        if self.has_name_case_insensitive(name) or self.manifest_has_name(name):
            if not modify_name_if_needed:
                raise ValueError(('A file with the name %s already exists' % name) if self.has_name_case_insensitive(name) else
                    ('An item with the href %s already exists in the manifest' % href))
            name = self.make_name_unique(name)
            href = self.name_to_href(name, self.opf_name)
        path = self.name_to_abspath(name)
        base = os.path.dirname(path)
        if not os.path.exists(base):
            os.makedirs(base)
        with open(path, 'wb') as f:
            if hasattr(data, 'read'):
                shutil.copyfileobj(data, f)
            else:
                f.write(data)
        mt = media_type or self.guess_type(name)
        self.name_path_map[name] = path
        self.mime_map[name] = mt
        if self.ok_to_be_unmanifested(name):
            return name
        item_id = self.add_name_to_manifest(name, process_manifest_item=process_manifest_item)
        if mt in OEB_DOCS:
            # Text documents also get an entry in the spine
            manifest = self.opf_xpath('//opf:manifest')[0]
            spine = self.opf_xpath('//opf:spine')[0]
            si = manifest.makeelement(OPF('itemref'), idref=item_id)
            self.insert_into_xml(spine, si, index=spine_index)
        return name
    def rename(self, current_name, new_name):
        ''' Renames a file from current_name to new_name. It automatically
        rebases all links inside the file if the folder the file is in
        changes. Note however, that links are not updated in the other files
        that could reference this file. This is for performance, such updates
        should be done once, in bulk. '''
        if current_name in self.names_that_must_not_be_changed:
            raise ValueError('Renaming of %s is not allowed' % current_name)
        if self.exists(new_name) and (new_name == current_name or new_name.lower() != current_name.lower()):
            # The destination exists and does not differ from the current name only by case
            raise ValueError(f'Cannot rename {current_name} to {new_name} as {new_name} already exists')
        new_path = self.name_to_abspath(new_name)
        base = os.path.dirname(new_path)
        if os.path.isfile(base):
            raise ValueError(f'Cannot rename {current_name} to {new_name} as {base} is a file')
        if not os.path.exists(base):
            os.makedirs(base)
        old_path = parent_dir = self.name_to_abspath(current_name)
        # Flush any in-memory modifications before moving the file on disk
        self.commit_item(current_name)
        os.rename(old_path, new_path)
        # Remove empty directories left behind, walking up from the old
        # location; the loop stops at the first non-empty directory because
        # rmdir raises OSError on it
        while parent_dir:
            parent_dir = os.path.dirname(parent_dir)
            try:
                os.rmdir(parent_dir)
            except OSError:
                break
        # Migrate all per-name state to the new name
        for x in ('mime_map', 'encoding_map'):
            x = getattr(self, x)
            if current_name in x:
                x[new_name] = x[current_name]
        self.name_path_map[new_name] = new_path
        for x in self.cache_names:
            x = getattr(self, x)
            try:
                x.pop(current_name, None)
            except TypeError:
                # cache_names contains sets as well as dicts
                x.discard(current_name)
        if current_name == self.opf_name:
            self.opf_name = new_name
        if os.path.dirname(old_path) != os.path.dirname(new_path):
            # Folder changed: rebase relative links inside the moved file
            from calibre.ebooks.oeb.polish.replace import LinkRebaser
            repl = LinkRebaser(self, current_name, new_name)
            self.replace_links(new_name, repl)
        self.dirty(new_name)
    def replace_links(self, name, replace_func):
        ''' Replace all links in name using replace_func, which must be a
        callable that accepts a URL and returns the replaced URL. It must also
        have a 'replaced' attribute that is set to True if any actual
        replacement is done. Convenient ways of creating such callables are
        using the :class:`LinkReplacer` and :class:`LinkRebaser` classes. '''
        media_type = self.mime_map.get(name, guess_type(name))
        # Dispatch on file kind; replace_func.file_type lets the callable
        # adapt to the kind of document its URLs come from
        if name == self.opf_name:
            replace_func.file_type = 'opf'
            for elem in self.opf_xpath('//*[@href]'):
                elem.set('href', replace_func(elem.get('href')))
        elif media_type.lower() in OEB_DOCS:
            replace_func.file_type = 'text'
            rewrite_links(self.parsed(name), replace_func)
        elif media_type.lower() in OEB_STYLES:
            replace_func.file_type = 'style'
            replaceUrls(self.parsed(name), replace_func)
        elif media_type.lower() == guess_type('toc.ncx'):
            replace_func.file_type = 'ncx'
            for elem in self.parsed(name).xpath('//*[@src]'):
                elem.set('src', replace_func(elem.get('src')))
        if replace_func.replaced:
            self.dirty(name)
        return replace_func.replaced
    def iterlinks(self, name, get_line_numbers=True):
        ''' Iterate over all links in name. If get_line_numbers is True the
        yields results of the form (link, line_number, offset). Where
        line_number is the line_number at which the link occurs and offset is
        the number of characters from the start of the line. Note that offset
        could actually encompass several lines if not zero. '''
        media_type = self.mime_map.get(name, guess_type(name))
        if name == self.opf_name:
            for elem in self.opf_xpath('//*[@href]'):
                yield (elem.get('href'), elem.sourceline, 0) if get_line_numbers else elem.get('href')
        elif media_type.lower() in OEB_DOCS:
            for el, attr, link, pos in iterlinks(self.parsed(name)):
                yield (link, el.sourceline, pos) if get_line_numbers else link
        elif media_type.lower() in OEB_STYLES:
            if get_line_numbers:
                # css_parser does not track source positions, so re-scan the
                # raw CSS text for urls, skipping those inside comments
                with self.open(name, 'rb') as f:
                    raw = self.decode(f.read()).replace('\r\n', '\n').replace('\r', '\n')
                position = PositionFinder(raw)
                is_in_comment = CommentFinder(raw)
                for link, offset in itercsslinks(raw):
                    if not is_in_comment(offset):
                        lnum, col = position(offset)
                        yield link, lnum, col
            else:
                for link in getUrls(self.parsed(name)):
                    yield link
        elif media_type.lower() == guess_type('toc.ncx'):
            for elem in self.parsed(name).xpath('//*[@src]'):
                yield (elem.get('src'), elem.sourceline, 0) if get_line_numbers else elem.get('src')
    def abspath_to_name(self, fullpath, root=None):
        '''
        Convert an absolute path to a canonical name relative to :attr:`root`

        :param root: The base folder. By default the root for this container object is used.
        :return: The canonical name, NFC-normalized, with POSIX separators.
        '''
        # OS X silently changes all file names to NFD form. The EPUB
        # spec requires all text including filenames to be in NFC form.
        # The proper fix is to implement a VFS that maps between
        # canonical names and their file system representation, however,
        # I dont have the time for that now. Note that the container
        # ensures that all text files are normalized to NFC when
        # decoding them anyway, so there should be no mismatch between
        # names in the text and NFC canonical file names.
        return unicodedata.normalize('NFC', abspath_to_name(fullpath, root or self.root))
    def name_to_abspath(self, name):
        ' Convert a canonical name to an absolute OS dependent path '
        # Delegates to the module-level helper, rooted at this container
        return name_to_abspath(name, self.root)
    def exists(self, name):
        ''' True iff a file/folder corresponding to the canonical name exists. Note
        that this function suffers from the limitations of the underlying OS
        filesystem, in particular case (in)sensitivity. So on a case
        insensitive filesystem this will return True even if the case of name
        is different from the case of the underlying filesystem file. See also :meth:`has_name`'''
        return os.path.exists(self.name_to_abspath(name))
def href_to_name(self, href, base=None):
'''
Convert an href (relative to base) to a name. base must be a name or
None, in which case self.root is used.
'''
key = href, base
ans = self.href_to_name_cache.get(key, null)
if ans is null:
ans = self.href_to_name_cache[key] = href_to_name(href, self.root, base=base)
return ans
    def name_to_href(self, name, base=None):
        '''Convert a name to a href relative to base, which must be a name or
        None in which case self.root is used as the base'''
        return name_to_href(name, self.root, base=base)
    def opf_xpath(self, expr):
        ' Convenience method to evaluate an XPath expression on the OPF file, has the opf: and dc: namespace prefixes pre-defined. '
        return self.opf.xpath(expr, namespaces=OPF_NAMESPACES)
    def has_name(self, name):
        ''' Return True iff a file with the same canonical name as that specified exists. Unlike :meth:`exists` this method is always case-sensitive. '''
        # NOTE: for a falsy name this returns the name itself ('' or None),
        # not False — callers use the result in a boolean context
        return name and name in self.name_path_map
def has_name_and_is_not_empty(self, name):
if not self.has_name(name):
return False
try:
return os.path.getsize(self.name_path_map[name]) > 0
except OSError:
return False
def has_name_case_insensitive(self, name):
if not name:
return False
name = name.lower()
for q in self.name_path_map:
if q.lower() == name:
return True
return False
    def relpath(self, path, base=None):
        '''Convert an absolute path (with os separators) to a path relative to
        base (defaults to self.root). The relative path is *not* a name. Use
        :meth:`abspath_to_name` for that.'''
        return relpath(path, base or self.root)
    def ok_to_be_unmanifested(self, name):
        ' Return True if name is allowed to be present in the book without a manifest entry. '
        return name in self.names_that_need_not_be_manifested
    @property
    def names_that_need_not_be_manifested(self):
        ' Set of names that are allowed to be missing from the manifest. Depends on the e-book file format. '
        return {self.opf_name}
    @property
    def names_that_must_not_be_removed(self):
        ' Set of names that must never be deleted from the container. Depends on the e-book file format. '
        return {self.opf_name}
    @property
    def names_that_must_not_be_changed(self):
        ' Set of names that must never be renamed. Depends on the e-book file format. '
        return set()
def parse(self, path, mime):
with open(path, 'rb') as src:
data = src.read()
if mime in OEB_DOCS:
data = self.parse_xhtml(data, self.relpath(path))
elif mime[-4:] in {'+xml', '/xml'}:
data = self.parse_xml(data)
elif mime in OEB_STYLES:
data = self.parse_css(data, self.relpath(path))
return data
    def raw_data(self, name, decode=True, normalize_to_nfc=True):
        '''
        Return the raw data corresponding to the file specified by name

        :param decode: If True and the file has a text based MIME type, decode it and return a unicode object instead of raw bytes.
        :param normalize_to_nfc: If True the returned unicode object is normalized to the NFC normal form as is required for the EPUB and AZW3 file formats.
        '''
        with self.open(name) as nf:
            ans = nf.read()
        mime = self.mime_map.get(name, guess_type(name))
        # Only known-textual mime types are decoded; everything else stays bytes
        if decode and (mime in OEB_STYLES or mime in OEB_DOCS or mime == 'text/plain' or mime[-4:] in {'+xml', '/xml'}):
            ans = self.decode(ans, normalize_to_nfc=normalize_to_nfc)
        return ans
    def parsed(self, name):
        ''' Return a parsed representation of the file specified by name. For
        HTML and XML files an lxml tree is returned. For CSS files a css_parser
        stylesheet is returned. Note that parsed objects are cached for
        performance. If you make any changes to the parsed object, you must
        call :meth:`dirty` so that the container knows to update the cache. See also :meth:`replace`.'''
        ans = self.parsed_cache.get(name, None)
        if ans is None:
            # used_encoding is set as a side effect of decoding during parse
            self.used_encoding = None
            mime = self.mime_map.get(name, guess_type(name))
            ans = self.parse(self.name_path_map[name], mime)
            self.parsed_cache[name] = ans
            self.encoding_map[name] = self.used_encoding
        return ans
    def replace(self, name, obj):
        '''
        Replace the parsed object corresponding to name with obj, which must be
        a similar object, i.e. an lxml tree for HTML/XML or a css_parser
        stylesheet for a CSS file.
        '''
        # Overwrites any cached parse and marks the file as needing a write-out
        self.parsed_cache[name] = obj
        self.dirty(name)
    @property
    def opf(self):
        ' The parsed OPF file '
        return self.parsed(self.opf_name)
    @property
    def mi(self):
        ''' The metadata of this book as a Metadata object. Note that this
        object is constructed on the fly every time this property is requested,
        so use it sparingly. '''
        from calibre.ebooks.metadata.opf2 import OPF as O
        # Serialize the in-memory OPF so unsaved modifications are reflected
        mi = self.serialize_item(self.opf_name)
        return O(BytesIO(mi), basedir=self.opf_dir, unquote_urls=False,
                populate_spine=False).to_book_metadata()
@property
def opf_version(self):
' The version set on the OPF\'s <package> element '
try:
return self.opf_xpath('//opf:package/@version')[0]
except IndexError:
return ''
    @property
    def opf_version_parsed(self):
        ' The version set on the OPF\'s <package> element as a tuple of integers '
        return parse_opf_version(self.opf_version)
    @property
    def manifest_items(self):
        ' All manifest item elements that have both href and id attributes. '
        return self.opf_xpath('//opf:manifest/opf:item[@href and @id]')
@property
def manifest_id_map(self):
' Mapping of manifest id to canonical names '
return {item.get('id'):self.href_to_name(item.get('href'), self.opf_name) for item in self.manifest_items}
@property
def manifest_type_map(self):
' Mapping of manifest media-type to list of canonical names of that media-type '
ans = defaultdict(list)
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
ans[item.get('media-type').lower()].append(self.href_to_name(
item.get('href'), self.opf_name))
return {mt:tuple(v) for mt, v in iteritems(ans)}
    def manifest_items_with_property(self, property_name):
        ' All manifest items that have the specified property '
        prefixes = read_prefixes(self.opf)
        # Yields canonical names; items without an href are skipped
        for item in items_with_property(self.opf, property_name, prefixes):
            href = item.get('href')
            if href:
                yield self.href_to_name(item.get('href'), self.opf_name)
    def manifest_items_of_type(self, predicate):
        ''' The names of all manifest items whose media-type matches predicate.
        `predicate` can be a set, a list, a string or a function taking a single
        argument, which will be called with the media-type. '''
        # str is checked first because strings also implement __contains__,
        # but a string predicate means exact equality, not substring matching
        if isinstance(predicate, str):
            predicate = predicate.__eq__
        elif hasattr(predicate, '__contains__'):
            predicate = predicate.__contains__
        for mt, names in iteritems(self.manifest_type_map):
            if predicate(mt):
                yield from names
def apply_unique_properties(self, name, *properties):
''' Ensure that the specified properties are set on only the manifest item
identified by name. You can pass None as the name to remove the
property from all items. '''
properties = frozenset(properties)
removed_names, added_names = [], []
for p in properties:
if p.startswith('calibre:'):
ensure_prefix(self.opf, None, 'calibre', CALIBRE_PREFIX)
break
for item in self.opf_xpath('//opf:manifest/opf:item'):
iname = self.href_to_name(item.get('href'), self.opf_name)
props = (item.get('properties') or '').split()
lprops = {p.lower() for p in props}
for prop in properties:
if prop.lower() in lprops:
if name != iname:
removed_names.append(iname)
props = [p for p in props if p.lower() != prop]
if props:
item.set('properties', ' '.join(props))
else:
del item.attrib['properties']
else:
if name == iname:
added_names.append(iname)
props.append(prop)
item.set('properties', ' '.join(props))
self.dirty(self.opf_name)
return removed_names, added_names
def add_properties(self, name, *properties):
    ''' Add the specified properties to the manifest item identified by name.
    Returns True if the item was found and updated, False otherwise. '''
    wanted = frozenset(properties)
    if not wanted:
        return True
    if any(p.startswith('calibre:') for p in wanted):
        # calibre-specific properties need the calibre prefix declared
        ensure_prefix(self.opf, None, 'calibre', CALIBRE_PREFIX)
    for item in self.opf_xpath('//opf:manifest/opf:item'):
        iname = self.href_to_name(item.get('href'), self.opf_name)
        if iname == name:
            merged = frozenset((item.get('properties') or '').split()) | wanted
            item.set('properties', ' '.join(merged))
            return True
    return False
@property
def guide_type_map(self):
    ' Mapping of guide type to canonical name '
    ans = {}
    # Later duplicates of the same type overwrite earlier ones, as in a
    # dict comprehension over the same iteration order
    for ref in self.opf_xpath('//opf:guide/opf:reference[@href and @type]'):
        ans[ref.get('type', '')] = self.href_to_name(ref.get('href'), self.opf_name)
    return ans
@property
def spine_iter(self):
    ''' An iterator that yields item, name is_linear for every item in the
    books' spine. item is the lxml element, name is the canonical file name
    and is_linear is True if the item is linear. See also: :attr:`spine_names` and :attr:`spine_items`. '''
    manifest_id_map = self.manifest_id_map
    non_linear = []
    for item in self.opf_xpath('//opf:spine/opf:itemref[@idref]'):
        idref = item.get('idref')
        name = manifest_id_map.get(idref, None)
        path = self.name_path_map.get(name, None)
        # itemrefs whose idref does not resolve to an existing file are skipped
        if path:
            if item.get('linear', 'yes') == 'yes':
                # Linear items are yielded immediately, in document order
                yield item, name, True
            else:
                # Non-linear items are deferred and yielded after all linear ones
                non_linear.append((item, name))
    for item, name in non_linear:
        yield item, name, False
def index_in_spine(self, name):
    '''Return the zero-based spine index of the item whose manifest entry
    resolves to *name*, or None if it is not in the spine.'''
    id_map = self.manifest_id_map
    for idx, itemref in enumerate(self.opf_xpath('//opf:spine/opf:itemref[@idref]')):
        if id_map.get(itemref.get('idref'), None) == name:
            return idx
@property
def spine_names(self):
    ''' An iterator yielding name and is_linear for every item in the
    books' spine. See also: :attr:`spine_iter` and :attr:`spine_items`. '''
    return ((name, linear) for item, name, linear in self.spine_iter)
@property
def spine_items(self):
    ''' An iterator yielding the filesystem path for every item in the
    books' spine. See also: :attr:`spine_iter` and :attr:`spine_names`. '''
    return (self.name_path_map[name] for name, linear in self.spine_names)
def remove_from_spine(self, spine_items, remove_if_no_longer_in_spine=True):
    '''
    Remove the specified items (by canonical name) from the spine. If ``remove_if_no_longer_in_spine``
    is True, the items are also deleted from the book, not just from the spine.

    :param spine_items: iterable of (name, remove) pairs; must be in the same
        order as the spine, as it is zipped against :attr:`spine_iter`.
    '''
    nixed = set()
    for (name, remove), (item, xname, linear) in zip(spine_items, self.spine_iter):
        if remove and name == xname:
            self.remove_from_xml(item)
            nixed.add(name)
    if remove_if_no_longer_in_spine:
        # Remove from the book if no longer in spine
        # (an item may appear in the spine more than once)
        nixed -= {name for name, linear in self.spine_names}
        for name in nixed:
            self.remove_item(name)
def set_spine(self, spine_items):
    ''' Set the spine to be spine_items where spine_items is an iterable of
    the form (name, linear). Will raise an error if one of the names is not
    present in the manifest. '''
    imap = self.manifest_id_map
    # Invert into a name -> manifest id mapping
    imap = {name:item_id for item_id, name in iteritems(imap)}
    items = [item for item, name, linear in self.spine_iter]
    # Remember existing inter-element whitespace so indentation is preserved
    tail, last_tail = (items[0].tail, items[-1].tail) if items else ('\n ', '\n ')
    for i in items:
        self.remove_from_xml(i)
    spine = self.opf_xpath('//opf:spine')[0]
    spine.text = tail
    for name, linear in spine_items:
        i = spine.makeelement('{%s}itemref' % OPF_NAMESPACES['opf'], nsmap={'opf':OPF_NAMESPACES['opf']})
        i.tail = tail
        # KeyError here means the name was not present in the manifest
        i.set('idref', imap[name])
        spine.append(i)
        if not linear:
            i.set('linear', 'no')
    if len(spine) > 0:
        spine[-1].tail = last_tail
    self.dirty(self.opf_name)
def remove_item(self, name, remove_from_guide=True):
    '''
    Remove the item identified by name from this container. This removes all
    references to the item in the OPF manifest, guide and spine as well as from
    any internal caches.
    '''
    removed = set()
    # Remove the manifest entry (or entries) and remember their ids
    for elem in self.opf_xpath('//opf:manifest/opf:item[@href]'):
        if self.href_to_name(elem.get('href'), self.opf_name) == name:
            id_ = elem.get('id', None)
            if id_ is not None:
                removed.add(id_)
            self.remove_from_xml(elem)
            self.dirty(self.opf_name)
    if removed:
        # Drop every reference to the removed manifest ids
        for spine in self.opf_xpath('//opf:spine'):
            tocref = spine.attrib.get('toc', None)
            if tocref and tocref in removed:
                spine.attrib.pop('toc', None)
                self.dirty(self.opf_name)
        for item in self.opf_xpath('//opf:spine/opf:itemref[@idref]'):
            idref = item.get('idref')
            if idref in removed:
                self.remove_from_xml(item)
                self.dirty(self.opf_name)
        # Legacy EPUB 2 cover pointer
        for meta in self.opf_xpath('//opf:meta[@name="cover" and @content]'):
            if meta.get('content') in removed:
                self.remove_from_xml(meta)
                self.dirty(self.opf_name)
        # EPUB 3 refines metadata that targets a removed id
        for meta in self.opf_xpath('//opf:meta[@refines]'):
            q = meta.get('refines')
            if q.startswith('#') and q[1:] in removed:
                self.remove_from_xml(meta)
                self.dirty(self.opf_name)
    if remove_from_guide:
        for item in self.opf_xpath('//opf:guide/opf:reference[@href]'):
            if self.href_to_name(item.get('href'), self.opf_name) == name:
                self.remove_from_xml(item)
                self.dirty(self.opf_name)
    # Finally delete the file itself and purge internal caches
    path = self.name_path_map.pop(name, None)
    if path and os.path.exists(path):
        os.remove(path)
    self.mime_map.pop(name, None)
    self.parsed_cache.pop(name, None)
    self.dirtied.discard(name)
def set_media_overlay_durations(self, duration_map=None):
    '''Replace all media:duration meta entries in the OPF with values from
    *duration_map* (a mapping of manifest item id to duration in seconds);
    a total duration entry for the whole book is written as well.'''
    self.dirty(self.opf_name)
    # Remove all existing duration entries before writing new ones
    for meta in self.opf_xpath('//opf:meta[@property="media:duration"]'):
        self.remove_from_xml(meta)
    metadata = self.opf_xpath('//opf:metadata')[0]
    total_duration = 0
    for item_id, duration in (duration_map or {}).items():
        meta = metadata.makeelement(OPF('meta'), property="media:duration", refines="#" + item_id)
        meta.text = seconds_to_timestamp(duration)
        self.insert_into_xml(metadata, meta)
        total_duration += duration
    if duration_map:
        # A media:duration without @refines gives the total for the book
        meta = metadata.makeelement(OPF('meta'), property="media:duration")
        meta.text = seconds_to_timestamp(total_duration)
        self.insert_into_xml(metadata, meta)
def dirty(self, name):
    ''' Mark the parsed object corresponding to name as dirty. See also: :meth:`parsed`. '''
    # The actual write to disk happens later, via commit()/commit_item()
    self.dirtied.add(name)
def remove_from_xml(self, item):
    'Removes item from parent, fixing indentation (works only with self closing items)'
    parent = item.getparent()
    idx = parent.index(item)
    if idx == 0:
        # We are removing the first item - only care about adjusting
        # the tail if this was the only child
        if len(parent) == 1:
            parent.text = item.tail
    else:
        # Make sure the preceding item has this tail
        parent[idx-1].tail = item.tail
    parent.remove(item)
    return item
def insert_into_xml(self, parent, item, index=None):
    '''Insert item into parent (or append if index is None), fixing
    indentation. Only works with self closing items.'''
    if index is None:
        parent.append(item)
    else:
        parent.insert(index, item)
    idx = parent.index(item)
    if idx == 0:
        # New first child: inherit the indentation stored in parent.text
        item.tail = parent.text
        # If this is the only child of this parent element, we need a
        # little extra work as we have gone from a self-closing <foo />
        # element to <foo><item /></foo>
        if len(parent) == 1:
            sibling = parent.getprevious()
            if sibling is None:
                # Give up!
                return
            parent.text = sibling.text
            item.tail = sibling.tail
    else:
        # Take over the previous sibling's tail (its trailing whitespace)
        item.tail = parent[idx-1].tail
        if idx == len(parent)-1:
            parent[idx-1].tail = parent.text
def opf_get_or_create(self, name):
    ''' Convenience method to either return the first XML element with the
    specified name or create it under the opf:package element and then
    return it, if it does not already exist. '''
    existing = self.opf_xpath('//opf:' + name)
    if existing:
        return existing[0]
    # Not present: create it as a direct child of the package element
    self.dirty(self.opf_name)
    package = self.opf_xpath('//opf:package')[0]
    elem = package.makeelement(OPF(name))
    elem.tail = '\n'
    package.append(elem)
    return elem
def generate_item(self, name, id_prefix=None, media_type=None, unique_href=True):
    '''Add an item to the manifest with href derived from the given
    name. Ensures uniqueness of href and id automatically. Returns
    generated item.'''
    id_prefix = id_prefix or 'id'
    media_type = media_type or self.guess_type(name)
    if unique_href:
        name = self.make_name_unique(name)
    href = self.name_to_href(name, self.opf_name)
    base, ext = href.rpartition('.')[0::2]  # NOTE(review): base/ext appear unused; base is rebound below
    all_ids = {x.get('id') for x in self.opf_xpath('//*[@id]')}
    if id_prefix.endswith('-'):
        # A prefix ending in '-' forces a numeric suffix to be appended
        all_ids.add(id_prefix)
    c = 0
    item_id = id_prefix
    while item_id in all_ids:
        c += 1
        item_id = f'{id_prefix}{c}'
    manifest = self.opf_xpath('//opf:manifest')[0]
    item = manifest.makeelement(OPF('item'),
                                id=item_id, href=href)
    item.set('media-type', media_type)
    self.insert_into_xml(manifest, item)
    self.dirty(self.opf_name)
    name = self.href_to_name(href, self.opf_name)
    self.name_path_map[name] = path = self.name_to_abspath(name)
    self.mime_map[name] = media_type
    # Ensure that the file corresponding to the newly created item exists
    # otherwise cloned containers will fail when they try to get the number
    # of links to the file
    base = os.path.dirname(path)
    if not os.path.exists(base):
        os.makedirs(base)
    open(path, 'wb').close()
    return item
def format_opf(self):
    '''Tidy the OPF metadata block: normalize whitespace, drop empty
    calibre-specific meta entries and reorder cover meta attributes.'''
    try:
        mdata = self.opf_xpath('//opf:metadata')[0]
    except IndexError:
        pass
    else:
        mdata.text = '\n '
        remove = set()
        for child in mdata:
            child.tail = '\n '
            try:
                # Empty calibre: meta entries carry no information, drop them
                if (child.get('name', '').startswith('calibre:'
                    ) and child.get('content', '').strip() in {'{}', ''}):
                    remove.add(child)
            except AttributeError:
                continue  # Happens for XML comments
        for child in remove:
            mdata.remove(child)
        if len(mdata) > 0:
            mdata[-1].tail = '\n '
    # Ensure name comes before content, needed for Nooks
    for meta in self.opf_xpath('//opf:meta[@name="cover"]'):
        if 'content' in meta.attrib:
            meta.set('content', meta.attrib.pop('content'))
def serialize_item(self, name):
    ''' Convert a parsed object (identified by canonical name) into a bytestring. See :meth:`parsed`. '''
    data = root = self.parsed(name)
    if name == self.opf_name:
        # Tidy up the OPF before serializing it
        self.format_opf()
    data = serialize(data, self.mime_map[name], pretty_print=name in
                     self.pretty_print)
    if name == self.opf_name and root.nsmap.get(None) == OPF2_NS:
        # Needed as I can't get lxml to output opf:role and
        # not output <opf:metadata> as well
        data = re.sub(br'(<[/]{0,1})opf:', br'\1', data)
    return data
def commit_item(self, name, keep_parsed=False):
    ''' Commit a parsed object to disk (it is serialized and written to the
    underlying file). If ``keep_parsed`` is True the parsed representation
    is retained in the cache. See also: :meth:`parsed` '''
    if name not in self.parsed_cache:
        return
    data = self.serialize_item(name)
    self.dirtied.discard(name)
    if not keep_parsed:
        self.parsed_cache.pop(name)
    dest = self.name_path_map[name]
    if self.cloned and nlinks_file(dest) > 1:
        # Decouple this file from its hardlinks before overwriting it,
        # so cloned containers are not affected
        os.unlink(dest)
    with open(dest, 'wb') as f:
        f.write(data)
def filesize(self, name):
    ''' Return the size in bytes of the file represented by the specified
    canonical name. Automatically handles dirtied parsed objects. See also:
    :meth:`parsed` '''
    if name in self.dirtied:
        # Flush pending changes so the on-disk size is accurate, while
        # keeping the parsed representation cached
        self.commit_item(name, keep_parsed=True)
    return os.path.getsize(self.name_to_abspath(name))
def get_file_path_for_processing(self, name, allow_modification=True):
    ''' Similar to open() except that it returns a file path, instead of an open file object. '''
    if name in self.dirtied:
        # Flush pending changes and drop the parsed representation, since
        # the caller may modify the file behind our back
        self.commit_item(name)
    self.parsed_cache.pop(name, False)
    path = self.name_to_abspath(name)
    base = os.path.dirname(path)
    if not os.path.exists(base):
        os.makedirs(base)
    else:
        if self.cloned and allow_modification and os.path.exists(path) and nlinks_file(path) > 1:
            # Decouple this file from its links
            temp = path + 'xxx'
            shutil.copyfile(path, temp)
            if iswindows:
                # Unlinking can fail transiently on windows if the file is in use
                retry_on_fail(os.unlink, path)
            else:
                os.unlink(path)
            os.rename(temp, path)
    return path
def open(self, name, mode='rb'):
    ''' Open the file pointed to by name for direct read/write. Note that
    this will commit the file if it is dirtied and remove it from the parse
    cache. You must finish with this file before accessing the parsed
    version of it again, or bad things will happen. '''
    # Any mode other than plain read implies the file may be modified
    return open(self.get_file_path_for_processing(name, mode not in {'r', 'rb'}), mode)
def commit(self, outpath=None, keep_parsed=False):
    '''
    Commit all dirtied parsed objects to the filesystem and write out the e-book file at outpath.

    :param outpath: The path to write the saved e-book file to. If None, the path of the original book file is used.
    :param keep_parsed: If True the parsed representations of committed items are kept in the cache.
    '''
    # Fix: the docstring previously documented a non-existent ``output``
    # parameter; the actual parameter is ``outpath`` (used by subclasses).
    # Iterate over a snapshot since commit_item mutates self.dirtied.
    for name in tuple(self.dirtied):
        self.commit_item(name, keep_parsed=keep_parsed)
def compare_to(self, other):
    '''Compare the files in this container to those in *other*, returning a
    human readable description of the differences (an empty string means no
    byte level differences were found).'''
    if set(self.name_path_map) != set(other.name_path_map):
        return 'Set of files is not the same'
    problems = []
    for name, path in iteritems(self.name_path_map):
        with open(path, 'rb') as mine, open(other.name_path_map[name], 'rb') as theirs:
            if mine.read() != theirs.read():
                problems.append('The file %s is not the same'%name)
    return '\n'.join(problems)
# }}}
# EPUB {{{
class InvalidEpub(InvalidBook):
    ''' Raised when an EPUB file is structurally invalid. '''
    pass
class ObfuscationKeyMissing(InvalidEpub):
    ''' Raised when obfuscated fonts are present but no de-obfuscation key can be found. '''
    pass
# XML namespace of the OCF container spec (used for META-INF/container.xml)
OCF_NS = 'urn:oasis:names:tc:opendocument:xmlns:container'
VCS_IGNORE_FILES = frozenset('.gitignore .hgignore .agignore .bzrignore'.split())
VCS_DIRS = frozenset(('.git', '.hg', '.svn', '.bzr'))
def walk_dir(basedir):
for dirpath, dirnames, filenames in os.walk(basedir):
for vcsdir in VCS_DIRS:
try:
dirnames.remove(vcsdir)
except Exception:
pass
is_root = os.path.abspath(os.path.normcase(dirpath)) == os.path.abspath(os.path.normcase(basedir))
yield is_root, dirpath, None
for fname in filenames:
if fname not in VCS_IGNORE_FILES:
yield is_root, dirpath, fname
class EpubContainer(Container):
    '''Container for EPUB books.

    Adds EPUB specific handling on top of :class:`Container`: the OCF
    META-INF/container.xml file, obfuscated (encrypted) fonts, and support
    for both zipped EPUB files and unpacked directory layouts.
    '''

    book_type = 'epub'

    @property
    def book_type_for_display(self):
        # Human friendly form, e.g. "EPUB 2" or "EPUB 3.1"
        ans = self.book_type.upper()
        try:
            v = self.opf_version_parsed
        except Exception:
            pass
        else:
            try:
                if v.major == 2:
                    ans += ' 2'
                else:
                    if not v.minor:
                        ans += f' {v.major}'
                    else:
                        ans += f' {v.major}.{v.minor}'
            except Exception:
                pass
        return ans

    # Known META-INF files; the value records whether the file is required
    META_INF = {
        'container.xml': True,
        'manifest.xml': False,
        'encryption.xml': False,
        'metadata.xml': False,
        'signatures.xml': False,
        'rights.xml': False,
    }

    def __init__(self, pathtoepub, log, clone_data=None, tdir=None):
        if clone_data is not None:
            # Fast path: rebuild state from the output of clone_data()
            super().__init__(None, None, log, clone_data=clone_data)
            for x in ('pathtoepub', 'obfuscated_fonts', 'is_dir'):
                setattr(self, x, clone_data[x])
            return
        self.pathtoepub = pathtoepub
        if tdir is None:
            tdir = PersistentTemporaryDirectory('_epub_container')
        tdir = os.path.abspath(os.path.realpath(tdir))
        self.root = tdir
        self.is_dir = os.path.isdir(pathtoepub)
        if self.is_dir:
            # Unpacked (directory) EPUB: copy the tree, skipping VCS artifacts
            for is_root, dirpath, fname in walk_dir(self.pathtoepub):
                if is_root:
                    base = tdir
                else:
                    base = os.path.join(tdir, os.path.relpath(dirpath, self.pathtoepub))
                if fname is None:
                    os.mkdir(base)
                if fname is not None:
                    shutil.copy(os.path.join(dirpath, fname), os.path.join(base, fname))
        else:
            # Zipped EPUB: extract it into the working directory
            with open(self.pathtoepub, 'rb') as stream:
                try:
                    zf = ZipFile(stream)
                    zf.extractall(tdir)
                except:
                    log.exception('EPUB appears to be invalid ZIP file, trying a'
                            ' more forgiving ZIP parser')
                    from calibre.utils.localunzip import extractall
                    stream.seek(0)
                    extractall(stream, path=tdir)
            # The mimetype file is regenerated on commit, do not keep it
            try:
                os.remove(join(tdir, 'mimetype'))
            except OSError:
                pass
        # Ensure all filenames are in NFC normalized form
        # has no effect on HFS+ filesystems as they always store filenames
        # in NFD form
        for filename in walk(self.root):
            n = unicodedata.normalize('NFC', filename)
            if n != filename:
                # Two-step rename, since a direct rename between names that
                # differ only in normalization may be a no-op
                s = filename + 'suff1x'
                os.rename(filename, s)
                os.rename(s, n)
        container_path = join(self.root, 'META-INF', 'container.xml')
        if not exists(container_path):
            raise InvalidEpub('No META-INF/container.xml in epub')
        with open(container_path, 'rb') as cf:
            container = safe_xml_fromstring(cf.read())
        # Locate the OPF via the OCF rootfile declaration
        opf_files = container.xpath((
            r'child::ocf:rootfiles/ocf:rootfile'
            '[@media-type="%s" and @full-path]'%guess_type('a.opf')
        ), namespaces={'ocf':OCF_NS}
        )
        if not opf_files:
            raise InvalidEpub('META-INF/container.xml contains no link to OPF file')
        opf_path = os.path.join(self.root, *(urlunquote(opf_files[0].get('full-path')).split('/')))
        if not exists(opf_path):
            raise InvalidEpub('OPF file does not exist at location pointed to'
                    ' by META-INF/container.xml')
        super().__init__(tdir, opf_path, log)
        self.obfuscated_fonts = {}
        if 'META-INF/encryption.xml' in self.name_path_map:
            # De-obfuscate fonts so they can be edited; re-obfuscated on commit
            self.process_encryption()
        self.parsed_cache['META-INF/container.xml'] = container

    def clone_data(self, dest_dir):
        # Extend the base clone data with EPUB specific state
        ans = super().clone_data(dest_dir)
        ans['pathtoepub'] = self.pathtoepub
        ans['obfuscated_fonts'] = self.obfuscated_fonts.copy()
        ans['is_dir'] = self.is_dir
        return ans

    def rename(self, old_name, new_name):
        '''Rename a file, updating container.xml (for the OPF) and
        encryption.xml (for obfuscated fonts) as needed.'''
        is_opf = old_name == self.opf_name
        super().rename(old_name, new_name)
        if is_opf:
            for elem in self.parsed('META-INF/container.xml').xpath((
                r'child::ocf:rootfiles/ocf:rootfile'
                '[@media-type="%s" and @full-path]'%guess_type('a.opf')
            ), namespaces={'ocf':OCF_NS}
            ):
                # The asinine epubcheck cannot handle quoted filenames in
                # container.xml
                elem.set('full-path', self.opf_name)
            self.dirty('META-INF/container.xml')
        if old_name in self.obfuscated_fonts:
            self.obfuscated_fonts[new_name] = self.obfuscated_fonts.pop(old_name)
            enc = self.parsed('META-INF/encryption.xml')
            for cr in enc.xpath('//*[local-name()="CipherReference" and @URI]'):
                if self.href_to_name(cr.get('URI')) == old_name:
                    cr.set('URI', self.name_to_href(new_name))
                    self.dirty('META-INF/encryption.xml')

    @property
    def names_that_need_not_be_manifested(self):
        return super().names_that_need_not_be_manifested | {'META-INF/' + x for x in self.META_INF}

    def ok_to_be_unmanifested(self, name):
        # Anything inside META-INF may be absent from the OPF manifest
        return name in self.names_that_need_not_be_manifested or name.startswith('META-INF/')

    @property
    def names_that_must_not_be_removed(self):
        return super().names_that_must_not_be_removed | {'META-INF/container.xml'}

    @property
    def names_that_must_not_be_changed(self):
        return super().names_that_must_not_be_changed | {'META-INF/' + x for x in self.META_INF}

    def remove_item(self, name, remove_from_guide=True):
        # Handle removal of obfuscated fonts
        if name == 'META-INF/encryption.xml':
            self.obfuscated_fonts.clear()
        if name in self.obfuscated_fonts:
            self.obfuscated_fonts.pop(name, None)
            enc = self.parsed('META-INF/encryption.xml')
            for em in enc.xpath('//*[local-name()="EncryptionMethod" and @Algorithm]'):
                alg = em.get('Algorithm')
                if alg not in {ADOBE_OBFUSCATION, IDPF_OBFUSCATION}:
                    continue
                try:
                    cr = em.getparent().xpath('descendant::*[local-name()="CipherReference" and @URI]')[0]
                except (IndexError, ValueError, KeyError):
                    continue
                if name == self.href_to_name(cr.get('URI')):
                    # Drop the encryption entry for the removed font
                    self.remove_from_xml(em.getparent())
                    self.dirty('META-INF/encryption.xml')
        super().remove_item(name, remove_from_guide=remove_from_guide)

    def read_raw_unique_identifier(self):
        '''Return (package_id, raw_unique_identifier, idpf_key). idpf_key is
        the sha1 based font de-obfuscation key derived from the unique id.'''
        package_id = raw_unique_identifier = idpf_key = None
        for attrib, val in iteritems(self.opf.attrib):
            if attrib.endswith('unique-identifier'):
                package_id = val
                break
        if package_id is not None:
            for elem in self.opf_xpath('//*[@id=%s]'%escape_xpath_attr(package_id)):
                if elem.text:
                    raw_unique_identifier = elem.text
                    break
        if raw_unique_identifier is not None:
            idpf_key = raw_unique_identifier
            # Strip whitespace then sha1, per the IDPF font obfuscation algorithm
            idpf_key = re.sub('[\u0020\u0009\u000d\u000a]', '', idpf_key)
            idpf_key = hashlib.sha1(idpf_key.encode('utf-8')).digest()
        return package_id, raw_unique_identifier, idpf_key

    def iter_encryption_entries(self):
        '''Yield (EncryptionMethod, CipherReference) element pairs from
        META-INF/encryption.xml; CipherReference may be None.'''
        if 'META-INF/encryption.xml' in self.name_path_map:
            enc = self.parsed('META-INF/encryption.xml')
            for em in enc.xpath('//*[local-name()="EncryptionMethod" and @Algorithm]'):
                try:
                    cr = em.getparent().xpath('descendant::*[local-name()="CipherReference" and @URI]')[0]
                except Exception:
                    cr = None
                yield em, cr

    def process_encryption(self):
        '''De-obfuscate all obfuscated fonts, remembering key and algorithm
        so they can be re-obfuscated on commit. Raises DRMError for any
        non-font-obfuscation encryption entry.'''
        fonts = {}
        for em, cr in self.iter_encryption_entries():
            alg = em.get('Algorithm')
            if alg not in {ADOBE_OBFUSCATION, IDPF_OBFUSCATION}:
                # Anything other than font obfuscation is real DRM
                raise DRMError()
            if cr is None:
                continue
            name = self.href_to_name(cr.get('URI'))
            path = self.name_path_map.get(name, None)
            if path is not None:
                fonts[name] = alg
        package_id, raw_unique_identifier, idpf_key = self.read_raw_unique_identifier()
        # The Adobe scheme derives its key from a uuid identifier in the metadata
        key = None
        for item in self.opf_xpath('//*[local-name()="metadata"]/*'
                                   '[local-name()="identifier"]'):
            scheme = None
            for xkey in item.attrib.keys():
                if xkey.endswith('scheme'):
                    scheme = item.get(xkey)
            if (scheme and scheme.lower() == 'uuid') or \
                    (item.text and item.text.startswith('urn:uuid:')):
                try:
                    key = item.text.rpartition(':')[-1]
                    key = uuid.UUID(key).bytes
                except Exception:
                    self.log.exception('Failed to parse obfuscation key')
                    key = None
        for font, alg in iteritems(fonts):
            tkey = key if alg == ADOBE_OBFUSCATION else idpf_key
            if not tkey:
                raise ObfuscationKeyMissing('Failed to find obfuscation key')
            raw = self.raw_data(font, decode=False)
            raw = decrypt_font_data(tkey, raw, alg)
            with self.open(font, 'wb') as f:
                f.write(raw)
            self.obfuscated_fonts[font] = (alg, tkey)

    def update_modified_timestamp(self):
        # EPUB 3 requires a dcterms:modified timestamp in the OPF
        from calibre.ebooks.metadata.opf3 import set_last_modified_in_opf
        set_last_modified_in_opf(self.opf)
        self.dirty(self.opf_name)

    def commit(self, outpath=None, keep_parsed=False):
        '''Write all dirty files, then save the book either back into its
        source directory (unpacked EPUB) or as a rebuilt zip archive.'''
        if self.opf_version_parsed.major == 3:
            self.update_modified_timestamp()
        super().commit(keep_parsed=keep_parsed)
        container_path = join(self.root, 'META-INF', 'container.xml')
        if not exists(container_path):
            raise InvalidEpub('No META-INF/container.xml in EPUB, this typically happens if the temporary files calibre'
                              ' is using are deleted by some other program while calibre is running')
        # Re-obfuscate any fonts that were de-obfuscated on open; remember
        # the de-obfuscated data so the working copy can be restored after
        restore_fonts = {}
        for name in self.obfuscated_fonts:
            if name not in self.name_path_map:
                continue
            alg, key = self.obfuscated_fonts[name]
            # Decrypting and encrypting are the same operation (XOR with key)
            restore_fonts[name] = data = self.raw_data(name, decode=False)
            with self.open(name, 'wb') as f:
                f.write(decrypt_font_data(key, data, alg))
        if outpath is None:
            outpath = self.pathtoepub
        if self.is_dir:
            # First remove items from the source dir that do not exist any more
            for is_root, dirpath, fname in walk_dir(self.pathtoepub):
                if fname is not None:
                    if is_root and fname == 'mimetype':
                        continue
                    base = self.root if is_root else os.path.join(self.root, os.path.relpath(dirpath, self.pathtoepub))
                    fpath = os.path.join(base, fname)
                    if not os.path.exists(fpath):
                        os.remove(os.path.join(dirpath, fname))
                        try:
                            os.rmdir(dirpath)
                        except OSError as err:
                            if err.errno != errno.ENOTEMPTY:
                                raise
            # Now copy over everything from root to source dir
            for dirpath, dirnames, filenames in os.walk(self.root):
                is_root = os.path.abspath(os.path.normcase(dirpath)) == os.path.abspath(os.path.normcase(self.root))
                base = self.pathtoepub if is_root else os.path.join(self.pathtoepub, os.path.relpath(dirpath, self.root))
                try:
                    os.mkdir(base)
                except OSError as err:
                    if err.errno != errno.EEXIST:
                        raise
                for fname in filenames:
                    with open(os.path.join(dirpath, fname), 'rb') as src, open(os.path.join(base, fname), 'wb') as dest:
                        shutil.copyfileobj(src, dest)
        else:
            # Zipped EPUB: regenerate the mimetype file and rebuild the zip
            from calibre.ebooks.tweak import zip_rebuilder
            with open(join(self.root, 'mimetype'), 'wb') as f:
                et = guess_type('a.epub')
                if not isinstance(et, bytes):
                    et = et.encode('ascii')
                f.write(et)
            zip_rebuilder(self.root, outpath)
        # Restore the de-obfuscated fonts in the working copy
        for name, data in iteritems(restore_fonts):
            with self.open(name, 'wb') as f:
                f.write(data)

    @property
    def path_to_ebook(self):
        return self.pathtoepub

    @path_to_ebook.setter
    def path_to_ebook(self, val):
        self.pathtoepub = val
# }}}
# AZW3 {{{
class InvalidMobi(InvalidBook):
    ''' Raised when a MOBI file cannot be edited (not MOBI, DRMed, no KF8, ...). '''
    pass
def do_explode(path, dest):
    '''Explode the KF8 MOBI at *path* into the directory *dest*.

    Runs in a worker process (see AZW3Container.__init__). Returns the
    absolute path to the extracted OPF and the list of obfuscated fonts.
    '''
    from calibre.ebooks.mobi.reader.mobi6 import MobiReader
    from calibre.ebooks.mobi.reader.mobi8 import Mobi8Reader
    with open(path, 'rb') as stream:
        mr = MobiReader(stream, default_log, None, None)
        with CurrentDir(dest):
            # for_tweak=True produces an editable, OPF based layout
            mr = Mobi8Reader(mr, default_log, for_tweak=True)
            opf = os.path.abspath(mr())
            obfuscated_fonts = mr.encrypted_fonts
    return opf, obfuscated_fonts
def opf_to_azw3(opf, outpath, container):
    '''Convert an exploded book, identified by its OPF file, back into an
    AZW3 file written to *outpath*, using the regular conversion pipeline in
    passthrough mode.'''
    from calibre.ebooks.conversion.plumber import Plumber, create_oebbook
    from calibre.ebooks.mobi.tweak import set_cover

    class Item(Manifest.Item):

        def _parse_css(self, data):
            # The default CSS parser used by oeb.base inserts the h namespace
            # and resolves all @import rules. We dont want that.
            return container.parse_css(data)

    def specialize(oeb):
        oeb.manifest.Item = Item

    plumber = Plumber(opf, outpath, container.log)
    plumber.setup_options()
    inp = plugin_for_input_format('azw3')
    outp = plugin_for_output_format('azw3')
    # Passthrough: do not transform the book content during conversion
    plumber.opts.mobi_passthrough = True
    plumber.opts.keep_ligatures = True
    oeb = create_oebbook(container.log, opf, plumber.opts, specialize=specialize)
    set_cover(oeb)
    outp.convert(oeb, outpath, inp, plumber.opts, container.log)
def epub_to_azw3(epub, outpath=None):
    '''Convert the EPUB at *epub* into an AZW3 file at *outpath* (defaults
    to the EPUB path with an .azw3 extension).'''
    container = get_container(epub, tweak_mode=True)
    changed = False
    for item in container.opf_xpath('//opf:manifest/opf:item[@properties and @href]'):
        p = item.get('properties').split()
        if 'cover-image' in p:
            # KF8 uses the guide cover reference, so synthesize one from the
            # EPUB 3 cover-image property if the guide lacks it
            href = item.get('href')
            guides = container.opf_xpath('//opf:guide')
            if not guides:
                guides = (container.opf.makeelement(OPF('guide')),)
                container.opf.append(guides[0])
            for guide in guides:
                for child in guide:
                    if child.get('type') == 'cover':
                        break
                else:
                    # for/else: no existing cover reference in this guide
                    guide.append(guide.makeelement(OPF('reference'), type='cover', href=href))
                    changed = True
            break
        elif 'calibre:title-page' in p:
            # AZW3 has no titlepage support, drop the item
            item.getparent().remove(item)
    if changed:
        container.dirty(container.opf_name)
        container.commit_item(container.opf_name)
    outpath = outpath or (epub.rpartition('.')[0] + '.azw3')
    opf_to_azw3(container.name_to_abspath(container.opf_name), outpath, container)
class AZW3Container(Container):
    '''Container for AZW3 (KF8) books. The book is exploded into an OPF
    based layout by a worker process on open, and rebuilt from it on commit.'''

    book_type = 'azw3'
    # The KF8 format supports neither titlepages nor arbitrary file names
    SUPPORTS_TITLEPAGES = False
    SUPPORTS_FILENAMES = False

    def __init__(self, pathtoazw3, log, clone_data=None, tdir=None):
        if clone_data is not None:
            # Fast path: rebuild state from the output of clone_data()
            super().__init__(None, None, log, clone_data=clone_data)
            for x in ('pathtoazw3', 'obfuscated_fonts'):
                setattr(self, x, clone_data[x])
            return
        self.pathtoazw3 = pathtoazw3
        if tdir is None:
            tdir = PersistentTemporaryDirectory('_azw3_container')
        tdir = os.path.abspath(os.path.realpath(tdir))
        self.root = tdir
        # Validate the file before the (expensive) explode step
        with open(pathtoazw3, 'rb') as stream:
            raw = stream.read(3)
            if raw == b'TPZ':
                raise InvalidMobi(_('This is not a MOBI file. It is a Topaz file.'))
            try:
                header = MetadataHeader(stream, default_log)
            except MobiError:
                raise InvalidMobi(_('This is not a MOBI file.'))
            if header.encryption_type != 0:
                raise DRMError()
            kf8_type = header.kf8_type
            if kf8_type is None:
                raise InvalidMobi(_('This MOBI file does not contain a KF8 format '
                        'book. KF8 is the new format from Amazon. calibre can '
                        'only edit MOBI files that contain KF8 books. Older '
                        'MOBI files without KF8 are not editable.'))
            if kf8_type == 'joint':
                raise InvalidMobi(_('This MOBI file contains both KF8 and '
                    'older Mobi6 data. calibre can only edit MOBI files '
                    'that contain only KF8 data.'))
        try:
            # Explode in a worker process, as it uses a lot of memory
            opf_path, obfuscated_fonts = fork_job(
                'calibre.ebooks.oeb.polish.container', 'do_explode',
                args=(pathtoazw3, tdir), no_output=True)['result']
        except WorkerError as e:
            log(e.orig_tb)
            raise InvalidMobi('Failed to explode MOBI')
        super().__init__(tdir, opf_path, log)
        self.obfuscated_fonts = {x.replace(os.sep, '/') for x in obfuscated_fonts}

    def clone_data(self, dest_dir):
        # Extend the base clone data with AZW3 specific state
        ans = super().clone_data(dest_dir)
        ans['pathtoazw3'] = self.pathtoazw3
        ans['obfuscated_fonts'] = self.obfuscated_fonts.copy()
        return ans

    def commit(self, outpath=None, keep_parsed=False):
        # Write dirty files, then rebuild the AZW3 from the exploded OPF
        super().commit(keep_parsed=keep_parsed)
        if outpath is None:
            outpath = self.pathtoazw3
        opf_to_azw3(self.name_path_map[self.opf_name], outpath, self)

    @property
    def path_to_ebook(self):
        return self.pathtoazw3

    @path_to_ebook.setter
    def path_to_ebook(self, val):
        self.pathtoazw3 = val

    @property
    def names_that_must_not_be_changed(self):
        # Renaming is unsupported for this format: protect every file
        return set(self.name_path_map)
# }}}
def get_container(path, log=None, tdir=None, tweak_mode=False):
    '''Open the book at *path* and return the appropriate Container
    (AZW3Container for MOBI-family files, EpubContainer otherwise).'''
    if log is None:
        log = default_log
    try:
        isdir = os.path.isdir(path)
    except Exception:
        isdir = False
    own_tdir = not tdir
    ext = path.rpartition('.')[-1].lower()
    if ext in {'azw3', 'mobi', 'original_azw3', 'original_mobi'} and not isdir:
        ebook_cls = AZW3Container
    else:
        ebook_cls = EpubContainer
    if own_tdir:
        tdir = PersistentTemporaryDirectory(f'_{ebook_cls.book_type}_container')
    try:
        ebook = ebook_cls(path, log, tdir=tdir)
        ebook.tweak_mode = tweak_mode
    except BaseException:
        # Only clean up temporary directories we created ourselves
        if own_tdir:
            shutil.rmtree(tdir, ignore_errors=True)
        raise
    return ebook
def test_roundtrip():
    '''Manual sanity check: open the book named on the command line, save it
    unchanged to a temporary file, open the result twice and print any
    byte-level differences between the two copies.'''
    ebook = get_container(sys.argv[-1])
    p = PersistentTemporaryFile(suffix='.'+sys.argv[-1].rpartition('.')[-1])
    p.close()
    ebook.commit(outpath=p.name)
    ebook2 = get_container(p.name)
    ebook3 = get_container(p.name)
    diff = ebook3.compare_to(ebook2)
    # Fix: compare_to() returns a string (empty when identical), never None,
    # so the old `is not None` check printed a spurious blank line for
    # identical books. Only print when there is an actual difference.
    if diff:
        print(diff)


if __name__ == '__main__':
    test_roundtrip()
| 69,169 | Python | .py | 1,463 | 36.327409 | 156 | 0.595201 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,376 | report.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/report.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import posixpath
import time
import types
from collections import defaultdict, namedtuple
from itertools import chain
from calibre import force_unicode, prepare_string_for_xml
from calibre.ebooks.oeb.base import XPath, xml2text
from calibre.ebooks.oeb.polish.container import OEB_DOCS, OEB_STYLES
from calibre.ebooks.oeb.polish.spell import count_all_chars, get_all_words
from calibre.ebooks.oeb.polish.utils import OEB_FONTS
from calibre.utils.icu import numeric_sort_key, safe_chr
from calibre.utils.imghdr import identify
from css_selectors import Select, SelectorError
from polyglot.builtins import iteritems
# Report record describing one file in the book; word_count is -1 when unknown
File = namedtuple('File', 'name dir basename size category word_count')
def get_category(name, mt):
    '''Classify a book file for the report, primarily by media type *mt*,
    falling back to the file extension of *name* for some well known cases.'''
    if mt.startswith('image/'):
        category = 'image'
    elif mt in OEB_FONTS:
        category = 'font'
    elif mt in OEB_STYLES:
        category = 'style'
    elif mt in OEB_DOCS:
        category = 'text'
    else:
        category = 'misc'
    ext = name.rpartition('.')[-1].lower()
    if ext in {'ttf', 'otf', 'woff', 'woff2'}:
        # Probably wrong mimetype in the OPF
        category = 'font'
    elif ext == 'opf':
        category = 'opf'
    elif ext == 'ncx':
        category = 'toc'
    return category
def safe_size(container, name):
    '''Return the on-disk size in bytes of *name*, or 0 on any error.'''
    try:
        path = container.name_to_abspath(name)
        size = os.path.getsize(path)
    except Exception:
        size = 0
    return size
def safe_img_data(container, name, mt):
    '''Return (width, height) in pixels for the image *name*; (0, 0) for SVG
    (no intrinsic pixel size) or when identification fails.'''
    if 'svg' in mt:
        return 0, 0
    try:
        _, width, height = identify(container.name_to_abspath(name))
    except Exception:
        return 0, 0
    return width, height
def files_data(container, *args):
    '''Yield a File record for every file in the container. Word counts come
    from the module level file_words_counts mapping, -1 when unavailable.'''
    counts = file_words_counts or {}
    for name in container.name_path_map:
        yield File(
            name,
            posixpath.dirname(name),
            posixpath.basename(name),
            safe_size(container, name),
            get_category(name, container.mime_map.get(name, '')),
            counts.get(name, -1),
        )
# Report record for one image; usage is the sorted set of places linking to it
Image = namedtuple('Image', 'name mime_type usage size basename id width height')
# A place in the book a link/reference occurs at
LinkLocation = namedtuple('LinkLocation', 'name line_number text_on_line')
def sort_locations(container, locations):
    '''Sort link locations by spine position, then file name, then line
    number; files not in the spine sort last.'''
    spine_order = {name: idx for idx, (name, is_linear) in enumerate(container.spine_names)}

    def key(loc):
        return (spine_order.get(loc.name, len(spine_order)), numeric_sort_key(loc.name), loc.line_number)

    return sorted(locations, key=key)
def safe_href_to_name(container, href, base):
    '''container.href_to_name() that returns None instead of raising
    ValueError (which happens for absolute paths on windows).'''
    try:
        return container.href_to_name(href, base)
    except ValueError:
        return None
def images_data(container, *args):
    '''Return a tuple of Image records for every image in the book,
    including all the locations each image is referenced from.'''
    image_usage = defaultdict(set)
    link_sources = OEB_STYLES | OEB_DOCS
    # First pass: collect links to images from all HTML and CSS files
    for name, mt in iteritems(container.mime_map):
        if mt in link_sources:
            for href, line_number, offset in container.iterlinks(name):
                target = safe_href_to_name(container, href, name)
                if target and container.exists(target):
                    # NOTE: mt is deliberately rebound here; the outer loop's
                    # value is not needed after the link_sources test above
                    mt = container.mime_map.get(target)
                    if mt and mt.startswith('image/'):
                        image_usage[target].add(LinkLocation(name, line_number, href))
    # Second pass: build one record per image file
    image_data = []
    for name, mt in iteritems(container.mime_map):
        if mt.startswith('image/') and container.exists(name):
            image_data.append(Image(name, mt, sort_locations(container, image_usage.get(name, set())), safe_size(container, name),
                posixpath.basename(name), len(image_data), *safe_img_data(container, name, mt)))
    return tuple(image_data)
def description_for_anchor(elem):
    '''Return a short (at most 30 character) description of *elem*, taken
    from its title attribute, its text, its first child's text or its full
    text, in that order. Returns None if nothing suitable is found.'''
    def check(x, min_len=4):
        # Accept a candidate only if, once stripped, it is at least min_len long
        if x:
            x = x.strip()
            if len(x) >= min_len:
                return x[:30]
    desc = check(elem.get('title'))
    if desc is not None:
        return desc
    desc = check(elem.text)
    if desc is not None:
        return desc
    if len(elem) > 0:
        desc = check(elem[0].text)
        if desc is not None:
            return desc
    # Get full text for tags that have only a few descendants
    for i, x in enumerate(elem.iterdescendants('*')):
        if i > 5:
            break
    else:
        # for/else: only runs when the loop was not broken, i.e. the element
        # has few enough descendants for full-text extraction to be cheap
        desc = check(xml2text(elem), min_len=1)
        if desc is not None:
            return desc
def create_anchor_map(root, pat, name):
    # Map of anchor (id attribute or <a name>) -> (location, description) for
    # every potential link target in the document.
    ans = {}
    for elem in pat(root):
        anchor = elem.get('id') or elem.get('name')
        if anchor and anchor not in ans:
            ans[anchor] = (LinkLocation(name, elem.sourceline, anchor), description_for_anchor(elem))
    return ans
Anchor = namedtuple('Anchor', 'id location text')
L = namedtuple('Link', 'location text is_external href path_ok anchor_ok anchor ok')


def Link(location, text, is_external, href, path_ok, anchor_ok, anchor):
    # External links are neither ok nor broken (ok is None); internal links
    # are ok only when both the destination file and the anchor resolve.
    ok = None if is_external else (path_ok and anchor_ok)
    return L(location, text, is_external, href, path_ok, anchor_ok, anchor, ok)
def links_data(container, *args):
    # Build the data for the links report: every <a href> in the book's HTML,
    # classified as external/internal, with internal destinations (file and
    # fragment) checked for existence.
    anchor_map = {}
    links = []
    anchor_pat = XPath('//*[@id or @name]')
    link_pat = XPath('//h:a[@href]')
    # First pass: collect raw links and all available anchors per file
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_DOCS:
            root = container.parsed(name)
            anchor_map[name] = create_anchor_map(root, anchor_pat, name)
            for a in link_pat(root):
                href = a.get('href')
                text = description_for_anchor(a)
                location = LinkLocation(name, a.sourceline, href)
                if href:
                    base, frag = href.partition('#')[0::2]
                    if frag and not base:
                        # Fragment-only link: points into the same file
                        dest = name
                    else:
                        dest = safe_href_to_name(container, href, name)
                    links.append((base, frag, dest, location, text))
                else:
                    links.append(('', '', None, location, text))
    # Second pass: resolve each link against the collected anchors
    for base, frag, dest, location, text in links:
        if dest is None:
            # Could not be resolved to a file in the book: treat as external
            link = Link(location, text, True, base, True, True, Anchor(frag, None, None))
        else:
            if dest in anchor_map:
                loc = LinkLocation(dest, None, None)
                if frag:
                    anchor = anchor_map[dest].get(frag)
                    if anchor is None:
                        # File exists but the anchor does not
                        link = Link(location, text, False, dest, True, False, Anchor(frag, loc, None))
                    else:
                        link = Link(location, text, False, dest, True, True, Anchor(frag, *anchor))
                else:
                    link = Link(location, text, False, dest, True, True, Anchor(None, loc, None))
            else:
                # Destination file does not exist in the book
                link = Link(location, text, False, dest, False, False, Anchor(frag, None, None))
        yield link
# Record for one distinct (word, locale) pair and where it is used.
Word = namedtuple('Word', 'id word locale usage')
# Per-file word-count cache; set to a dict by gather_data() for the duration of
# a gather run and reset to None afterwards.
file_words_counts = None


def words_data(container, book_locale, *args):
    # Returns (total word count, tuple of Word records).
    count, words = get_all_words(container, book_locale, get_word_count=True, file_words_counts=file_words_counts)
    return (count, tuple(Word(i, word, locale, v) for i, ((word, locale), v) in enumerate(iteritems(words))))
# Record for one distinct codepoint used in the book's text.
Char = namedtuple('Char', 'id char codepoint usage count')


def chars_data(container, book_locale, *args):
    # Yield one Char per distinct codepoint, with its usage locations sorted
    # in spine order.
    cc = count_all_chars(container, book_locale)
    nmap = {n:i for i, (n, l) in enumerate(container.spine_names)}
    def sort_key(name):
        return nmap.get(name, len(nmap)), numeric_sort_key(name)
    for i, (codepoint, usage) in enumerate(iteritems(cc.chars)):
        yield Char(i, safe_chr(codepoint), codepoint, sorted(usage, key=sort_key), cc.counter[codepoint])
# Records used by the CSS and class-usage reports built in css_data() below.
CSSRule = namedtuple('CSSRule', 'selector location')
RuleLocation = namedtuple('RuleLocation', 'file_name line column')
MatchLocation = namedtuple('MatchLocation', 'tag sourceline')
CSSEntry = namedtuple('CSSEntry', 'rule count matched_files sort_key')
CSSFileMatch = namedtuple('CSSFileMatch', 'file_name locations sort_key')
ClassEntry = namedtuple('ClassEntry', 'cls num_of_matches matched_files sort_key')
ClassFileMatch = namedtuple('ClassFileMatch', 'file_name class_elements sort_key')
ClassElement = namedtuple('ClassElement', 'name line_number text_on_line tag matched_rules')
def css_data(container, book_locale, result_data, *args):
    # Build the data for the CSS report: for every CSS rule in the book, which
    # files/elements it matches. Also builds the class-usage report, stored in
    # result_data['classes']. Returns the list of CSSEntry objects.
    import tinycss
    from tinycss.css21 import ImportRule, RuleSet

    def css_rules(file_name, rules, sourceline=0):
        # Flatten a parsed stylesheet into CSSRule records; an @import is
        # represented by the imported sheet's name, resolved later.
        ans = []
        for rule in rules:
            if isinstance(rule, RuleSet):
                selector = rule.selector.as_css()
                ans.append(CSSRule(selector, RuleLocation(file_name, sourceline + rule.line, rule.column)))
            elif isinstance(rule, ImportRule):
                import_name = safe_href_to_name(container, rule.uri, file_name)
                if import_name and container.exists(import_name):
                    ans.append(import_name)
            elif getattr(rule, 'rules', False):
                # Nested rules, e.g. inside @media blocks
                ans.extend(css_rules(file_name, rule.rules, sourceline))
        return ans
    parser = tinycss.make_full_parser()
    importable_sheets = {}
    html_sheets = {}
    spine_names = {name for name, is_linear in container.spine_names}
    style_path, link_path = XPath('//h:style'), XPath('//h:link/@href')
    # Parse all standalone stylesheets and all inline <style> tags
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_STYLES:
            importable_sheets[name] = css_rules(name, parser.parse_stylesheet(container.raw_data(name)).rules)
        elif mt in OEB_DOCS and name in spine_names:
            html_sheets[name] = []
            for style in style_path(container.parsed(name)):
                if style.get('type', 'text/css') == 'text/css' and style.text:
                    html_sheets[name].append(
                        css_rules(name, parser.parse_stylesheet(force_unicode(style.text, 'utf-8')).rules, style.sourceline - 1))
    rule_map = defaultdict(lambda : defaultdict(list))

    def rules_in_sheet(sheet):
        # Yield all rules in a sheet, recursing into @imported sheets
        for rule in sheet:
            if isinstance(rule, CSSRule):
                yield rule
            else:  # @import rule
                isheet = importable_sheets.get(rule)
                if isheet is not None:
                    yield from rules_in_sheet(isheet)

    def sheets_for_html(name, root):
        # All stylesheets linked from an HTML file
        for href in link_path(root):
            tname = safe_href_to_name(container, href, name)
            sheet = importable_sheets.get(tname)
            if sheet is not None:
                yield sheet
    tt_cache = {}

    def tag_text(elem):
        # Short human readable markup for elem, e.g. <p class="x">
        ans = tt_cache.get(elem)
        if ans is None:
            tag = elem.tag.rpartition('}')[-1]
            if elem.attrib:
                attribs = ' '.join('{}="{}"'.format(k, prepare_string_for_xml(elem.get(k, ''), True)) for k in elem.keys())
                return f'<{tag} {attribs}>'
            ans = tt_cache[elem] = '<%s>' % tag
        # FIX: previously this function fell off the end here, returning None
        # both for cached entries and for attribute-less elements.
        return ans

    def matches_for_selector(selector, select, class_map, rule):
        lsel = selector.lower()
        try:
            matches = tuple(select(selector))
        except SelectorError:
            return ()
        seen = set()

        def get_elem_and_ancestors(elem):
            p = elem
            while p is not None:
                if p not in seen:
                    yield p
                    seen.add(p)
                p = p.getparent()
        # Credit this rule to every class on a matched element (or ancestor)
        # whose name appears in the selector text
        for e in matches:
            for elem in get_elem_and_ancestors(e):
                for cls in elem.get('class', '').split():
                    if '.' + cls.lower() in lsel:
                        class_map[cls][elem].append(rule)
        return (MatchLocation(tag_text(elem), elem.sourceline) for elem in matches)
    class_map = defaultdict(lambda : defaultdict(list))
    for name, inline_sheets in iteritems(html_sheets):
        root = container.parsed(name)
        cmap = defaultdict(lambda : defaultdict(list))
        # Seed cmap so classes with no matching rules still show up
        for elem in root.xpath('//*[@class]'):
            for cls in elem.get('class', '').split():
                cmap[cls][elem] = []
        select = Select(root, ignore_inappropriate_pseudo_classes=True)
        for sheet in chain(sheets_for_html(name, root), inline_sheets):
            for rule in rules_in_sheet(sheet):
                rule_map[rule][name].extend(matches_for_selector(rule.selector, select, cmap, rule))
        for cls, elem_map in iteritems(cmap):
            class_elements = class_map[cls][name]
            for elem, usage in iteritems(elem_map):
                class_elements.append(
                    ClassElement(name, elem.sourceline, elem.get('class'), tag_text(elem), tuple(usage)))
    # The class report goes into result_data, the CSS report is returned
    result_data['classes'] = ans = []
    for cls, name_map in iteritems(class_map):
        la = tuple(ClassFileMatch(name, tuple(class_elements), numeric_sort_key(name)) for name, class_elements in iteritems(name_map) if class_elements)
        num_of_matches = sum(sum(len(ce.matched_rules) for ce in cfm.class_elements) for cfm in la)
        ans.append(ClassEntry(cls, num_of_matches, la, numeric_sort_key(cls)))
    ans = []
    for rule, loc_map in iteritems(rule_map):
        la = tuple(CSSFileMatch(name, tuple(locations), numeric_sort_key(name)) for name, locations in iteritems(loc_map) if locations)
        count = sum(len(fm.locations) for fm in la)
        ans.append(CSSEntry(rule, count, la, numeric_sort_key(rule.selector)))
    return ans
def gather_data(container, book_locale):
    # Run all the *_data() gatherers, timing each one, and return
    # (data, timing) dicts keyed by category name.
    global file_words_counts
    timing = {}
    data = {}
    file_words_counts = {}
    for category in ('chars', 'images', 'links', 'words', 'css', 'files'):
        start = time.time()
        result = globals()[category + '_data'](container, book_locale, data)
        if isinstance(result, types.GeneratorType):
            result = tuple(result)
        data[category] = result
        timing[category] = time.time() - start
    file_words_counts = None
    return data, timing
def debug_data_gather():
    # Developer helper: run the full data gather on the book given as the last
    # command line argument, discarding the result.
    import sys
    from calibre.gui2.tweak_book import dictionaries
    from calibre.gui2.tweak_book.boss import get_container
    c = get_container(sys.argv[-1])
    gather_data(c, dictionaries.default_locale)
| 14,076 | Python | .py | 304 | 37.059211 | 153 | 0.617946 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,377 | embed.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/embed.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys
from lxml import etree
from calibre import prints
from calibre.ebooks.oeb.base import XHTML
from calibre.utils.filenames import ascii_filename
from calibre.utils.icu import lower as icu_lower
from polyglot.builtins import iteritems, itervalues, string_or_bytes
# The @font-face descriptors we care about, mapped to their CSS default values
# (font-family has no default).
props = {'font-family':None, 'font-weight':'normal', 'font-style':'normal', 'font-stretch':'normal'}
def matching_rule(font, rules):
    # Return the first rule whose family/weight/style/stretch all match the
    # given font exactly, or None. Family comparison is case-insensitive and
    # uses only the first family when a list is given.
    def primary_family(ff):
        if not isinstance(ff, string_or_bytes):
            ff = tuple(ff)[0]
        return icu_lower(ff)

    family = primary_family(font['font-family'])
    weight = font['font-weight']
    style = font['font-style']
    stretch = font['font-stretch']
    for rule in rules:
        if (rule['font-style'], rule['font-stretch'], rule['font-weight']) == (style, stretch, weight):
            if primary_family(rule['font-family']) == family:
                return rule
def format_fallback_match_report(matched_font, font_family, css_font, report):
    # Report that no exact match was found for the CSS font specification and
    # describe the fallback font that will be embedded instead.
    parts = [
        _('Could not find a font in the "%s" family exactly matching the CSS font specification,'
          ' will embed a fallback font instead. CSS font specification:') % font_family,
        '\n\n* font-weight: %s' % css_font.get('font-weight', 'normal'),
        '\n* font-style: %s' % css_font.get('font-style', 'normal'),
        '\n* font-stretch: %s' % css_font.get('font-stretch', 'normal'),
        '\n\n' + _('Matched font specification:'),
        '\n' + matched_font['path'],
        '\n\n* font-weight: %s' % matched_font.get('font-weight', 'normal').strip(),
        '\n* font-style: %s' % matched_font.get('font-style', 'normal').strip(),
        '\n* font-stretch: %s' % matched_font.get('font-stretch', 'normal').strip(),
    ]
    report(''.join(parts))
    report('')
def stretch_as_number(val):
    # Map a CSS font-stretch value (keyword or number) onto the 0-8 keyword
    # scale; anything unrecognized counts as normal (4).
    try:
        return int(val)
    except Exception:
        pass
    names = ('ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed',
             'normal', 'semi-expanded', 'expanded', 'extra-expanded',
             'ultra-expanded')
    try:
        return names.index(val)
    except Exception:
        return 4  # normal
def filter_by_stretch(fonts, val):
    # Narrow fonts to those closest in font-stretch to val, per the CSS font
    # matching algorithm: exact matches win, otherwise prefer the same side
    # (condensed vs expanded) of normal, then minimum numeric distance.
    val = stretch_as_number(val)
    numeric = [stretch_as_number(f['font-stretch']) for f in fonts]
    exact = [f for i, f in enumerate(fonts) if numeric[i] == val]
    if exact:
        return exact
    condensed_idx = [i for i in range(len(fonts)) if numeric[i] <= 4]
    expanded_idx = [i for i in range(len(fonts)) if numeric[i] > 4]
    if val <= 4:
        candidates = condensed_idx or expanded_idx
    else:
        candidates = expanded_idx or condensed_idx
    distances = {i: abs(numeric[i] - val) for i in candidates}
    closest = min(distances.values())
    return [fonts[i] for i in candidates if distances[i] == closest]
def filter_by_style(fonts, val):
    # Narrow fonts by font-style, falling back through the CSS-defined
    # preference order; unknown values behave like 'normal'. If no style
    # matches at all, return the input unchanged.
    fallback_order = {
        'normal': ('normal', 'oblique', 'italic'),
        'italic': ('italic', 'oblique', 'normal'),
        'oblique': ('oblique', 'italic', 'normal'),
    }
    preferences = fallback_order.get(val, fallback_order['normal'])
    for wanted in preferences:
        matching = [f for f in fonts if f['font-style'] == wanted]
        if matching:
            return matching
    return fonts
def weight_as_number(wt):
    # Map a CSS font-weight value to its numeric form; 'bold' is 700 and
    # anything else unrecognized is 400 (normal).
    try:
        return int(wt)
    except Exception:
        return 700 if wt == 'bold' else 400
def filter_by_weight(fonts, val):
    # Narrow fonts to those closest in font-weight to val, per the CSS font
    # matching algorithm (with its special-casing of the 400/500 weights).
    val = weight_as_number(val)
    weights = [weight_as_number(f['font-weight']) for f in fonts]
    exact = [f for i, f in enumerate(fonts) if weights[i] == val]
    if exact:
        return exact
    rmap = {w: i for i, w in enumerate(weights)}
    lighter = [i for i in range(len(fonts)) if weights[i] < val]
    heavier = [i for i in range(len(fonts)) if weights[i] > val]
    if val < 400:
        candidates = lighter or heavier
    elif val > 500:
        candidates = heavier or lighter
    elif val == 400:
        # 400 falls back to 500 first
        if 500 in rmap:
            return [fonts[rmap[500]]]
        candidates = lighter or heavier
    else:
        # 401-500 falls back to 400 first
        if 400 in rmap:
            return [fonts[rmap[400]]]
        candidates = lighter or heavier
    distances = {i: abs(weights[i] - val) for i in candidates}
    closest = min(distances.values())
    return [fonts[i] for i in candidates if distances[i] == closest]
def find_matching_font(fonts, weight='normal', style='normal', stretch='normal'):
    # See https://www.w3.org/TR/css-fonts-3/#font-style-matching
    # Narrow by stretch, then style, then weight, stopping early once a single
    # candidate remains. Unicode-range testing and bolder/lighter are not
    # implemented.
    for narrow, wanted in ((filter_by_stretch, stretch), (filter_by_style, style), (filter_by_weight, weight)):
        fonts = narrow(fonts, wanted)
        if len(fonts) == 1:
            break
    return fonts[0]
def do_embed(container, font, report):
    # Copy the font's file into the book under fonts/ and return a dict
    # describing the @font-face rule needed to reference it (including the
    # generated file's name under the 'name' key).
    from calibre.utils.fonts.scanner import font_scanner
    report('Embedding font {} from {}'.format(font['full_name'], font['path']))
    data = font_scanner.get_font_data(font)
    fname = font['full_name']
    ext = 'otf' if font['is_otf'] else 'ttf'
    # Sanitize the font name for use as a file name
    fname = ascii_filename(fname).replace(' ', '-').replace('(', '').replace(')', '')
    item = container.generate_item('fonts/%s.%s'%(fname, ext), id_prefix='font')
    name = container.href_to_name(item.get('href'), container.opf_name)
    with container.open(name, 'wb') as out:
        out.write(data)
    href = container.name_to_href(name)
    rule = {k:font.get(k, v) for k, v in iteritems(props)}
    rule['src'] = 'url(%s)' % href
    rule['name'] = name
    return rule
def embed_font(container, font, all_font_rules, report, warned):
    # Embed a font matching the given CSS font usage. If an identical
    # @font-face rule already exists in the book, reuse it; otherwise search
    # the system fonts for an exact or fallback match and embed that.
    # Returns a rule dict, or None if no font could be found. warned is a set
    # used to avoid issuing duplicate warnings.
    rule = matching_rule(font, all_font_rules)
    ff = font['font-family']
    if not isinstance(ff, string_or_bytes):
        ff = ff[0]
    if rule is None:
        from calibre.utils.fonts.scanner import NoFonts, font_scanner
        if ff in warned:
            return
        try:
            fonts = font_scanner.fonts_for_family(ff)
        except NoFonts:
            report(_('Failed to find fonts for family: %s, not embedding') % ff)
            warned.add(ff)
            return
        wt = weight_as_number(font.get('font-weight'))
        # Prefer an exact weight/style/stretch match from the system fonts
        for f in fonts:
            if f['weight'] == wt and f['font-style'] == font.get('font-style', 'normal') and f['font-stretch'] == font.get('font-stretch', 'normal'):
                return do_embed(container, f, report)
        # No exact match, use the CSS font matching algorithm to pick a fallback
        f = find_matching_font(fonts, font.get('font-weight', '400'), font.get('font-style', 'normal'), font.get('font-stretch', 'normal'))
        wkey = ('fallback-font', ff, wt, font.get('font-style'), font.get('font-stretch'))
        if wkey not in warned:
            warned.add(wkey)
            format_fallback_match_report(f, ff, font, report)
        return do_embed(container, f, report)
    else:
        # A matching @font-face rule already exists in the book, reuse it
        name = rule['src']
        href = container.name_to_href(name)
        rule = {k:ff if k == 'font-family' else rule.get(k, v) for k, v in iteritems(props)}
        rule['src'] = 'url(%s)' % href
        rule['name'] = name
        return rule
def font_key(font):
    # Key identifying a font's CSS-facing properties, used to deduplicate
    # generated @font-face rules.
    return tuple(font.get(prop) for prop in ('font-family', 'font-weight', 'font-style', 'font-stretch'))
def embed_all_fonts(container, stats, report):
    # Embed every font referenced (but not already embedded) in the book,
    # writing a fonts.css with the generated @font-face rules and linking it
    # into every modified HTML file. Returns True if anything was embedded.
    all_font_rules = tuple(itervalues(stats.all_font_rules))
    warned = set()
    rules, nrules = [], {}
    modified = set()
    for path in container.spine_items:
        name = container.abspath_to_name(path)
        fu = stats.font_usage_map.get(name, None)
        fs = stats.font_spec_map.get(name, None)
        fr = stats.font_rule_map.get(name, None)
        if None in (fs, fu, fr):
            continue
        fs = {icu_lower(x) for x in fs}
        for font in itervalues(fu):
            # Only embed fonts actually specified in this file's CSS
            if icu_lower(font['font-family']) not in fs:
                continue
            rule = matching_rule(font, fr)
            if rule is None:
                # This font was not already embedded in this HTML file, before
                # processing started
                key = font_key(font)
                rule = nrules.get(key)
                if rule is None:
                    rule = embed_font(container, font, all_font_rules, report, warned)
                    if rule is not None:
                        rules.append(rule)
                        nrules[key] = rule
                        modified.add(name)
                        stats.font_stats[rule['name']] = font['text']
            else:
                # This font was previously embedded by this code, update its stats
                stats.font_stats[rule['name']] |= font['text']
                modified.add(name)
    if not rules:
        report(_('No embeddable fonts found'))
        return False
    # Write out CSS
    rules = [';\n\t'.join('{}: {}'.format(
        k, '"%s"' % v if k == 'font-family' else v) for k, v in iteritems(rulel) if (k in props and props[k] != v and v != '400') or k == 'src')
        for rulel in rules]
    css = '\n\n'.join(['@font-face {\n\t%s\n}' % r for r in rules])
    item = container.generate_item('fonts.css', id_prefix='font_embed')
    name = container.href_to_name(item.get('href'), container.opf_name)
    with container.open(name, 'wb') as out:
        out.write(css.encode('utf-8'))
    # Add link to CSS in all files that need it
    for spine_name in modified:
        root = container.parsed(spine_name)
        try:
            head = root.xpath('//*[local-name()="head"][1]')[0]
        except IndexError:
            # No <head>, create one
            head = root.makeelement(XHTML('head'))
            root.insert(0, head)
            head.tail = '\n'
            head.text = '\n '
        href = container.name_to_href(name, spine_name)
        etree.SubElement(head, XHTML('link'), rel='stylesheet', type='text/css', href=href).tail = '\n'
        container.dirty(spine_name)
    return True
if __name__ == '__main__':
    # Command line driver: embed all referenced fonts into the book given as
    # the last argument, writing the result to <book>_subset.<ext>.
    from calibre.ebooks.oeb.polish.container import get_container
    from calibre.ebooks.oeb.polish.stats import StatsCollector
    from calibre.utils.logging import default_log
    default_log.filter_level = default_log.DEBUG
    inbook = sys.argv[-1]
    ebook = get_container(inbook, default_log)
    report = []
    stats = StatsCollector(ebook, do_embed=True)
    embed_all_fonts(ebook, stats, report.append)
    outbook, ext = inbook.rpartition('.')[0::2]
    outbook += '_subset.'+ext
    ebook.commit(outbook)
    prints('\nReport:')
    for msg in report:
        prints(msg)
    print()
    prints('Output written to:', outbook)
| 10,669 | Python | .py | 244 | 35.942623 | 149 | 0.604062 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,378 | parsing.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/parsing.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
import html5_parser
from lxml.etree import Element as LxmlElement
from calibre.ebooks.chardet import strip_encoding_declarations, xml_to_unicode
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.xml_parse import safe_xml_fromstring
try:
    from calibre_extensions.fast_html_entities import replace_all_entities
except ImportError:
    # Pure python fallback used when the fast C extension is unavailable.
    # NOTE(review): keep_xml_entities is accepted only for signature
    # compatibility with the C implementation and is ignored here — confirm
    # xml_replace_entities handles the XML entities as intended.
    def replace_all_entities(raw, keep_xml_entities: bool = False):
        from calibre import xml_replace_entities
        return xml_replace_entities(raw)
# The XHTML namespace, used to validate the root tag of parsed documents
XHTML_NS = 'http://www.w3.org/1999/xhtml'
def parse_html5(raw, decoder=None, log=None, discard_namespaces=False, line_numbers=True, linenumber_attribute=None, replace_entities=True, fix_newlines=True):
    # Parse raw markup with the html5-parser library and return the lxml root
    # element. Raises ValueError if the result is not an <html> root (in the
    # XHTML namespace unless discard_namespaces is True).
    # NOTE(review): the line_numbers parameter is accepted but unused here.
    if isinstance(raw, bytes):
        raw = xml_to_unicode(raw)[0] if decoder is None else decoder(raw)
    if replace_entities:
        raw = replace_all_entities(raw, True)
    if fix_newlines:
        raw = raw.replace('\r\n', '\n').replace('\r', '\n')
    raw = clean_xml_chars(raw)
    root = html5_parser.parse(raw, maybe_xhtml=not discard_namespaces, line_number_attr=linenumber_attribute, keep_doctype=False, sanitize_names=True)
    if (discard_namespaces and root.tag != 'html') or (
        not discard_namespaces and (root.tag != '{{{}}}{}'.format(XHTML_NS, 'html') or root.prefix)):
        raise ValueError(f'Failed to parse correctly, root has tag: {root.tag} and prefix: {root.prefix}')
    return root
def handle_private_entities(data):
    # Expand user-defined (private) DTD entities declared in the prolog before
    # the opening <html> tag, dropping the prolog but preserving the original
    # line numbering by re-inserting the same number of newlines.
    prolog = ''
    idx = data.find('<html')
    if idx == -1:
        idx = data.find('<HTML')
    if idx > -1:
        prolog = data[:idx]
    newlines_in_prolog = prolog.count('\n')
    if '<!DOCTYPE' in prolog:  # Handle user defined entities
        entities = {}
        for decl in re.finditer(r'<!ENTITY\s+(\S+)\s+([^>]+)', prolog):
            value = decl.group(2)
            if value.startswith('"') and value.endswith('"'):
                value = value[1:-1]
            entities[decl.group(1)] = value
        if entities:
            data = ('\n' * newlines_in_prolog) + data[idx:]
            pat = re.compile('&(%s);' % '|'.join(entities))
            data = pat.sub(lambda m: entities[m.group(1)], data)
    return data
def parse(raw, decoder=None, log=None, line_numbers=True, linenumber_attribute=None, replace_entities=True, force_html5_parse=False):
    # Parse markup into an lxml tree: try strict XML first, falling back to
    # the HTML 5 parser for tag soup (or going straight to it when
    # force_html5_parse is True).
    if isinstance(raw, bytes):
        raw = xml_to_unicode(raw)[0] if decoder is None else decoder(raw)
    raw = handle_private_entities(raw)
    if replace_entities:
        raw = replace_all_entities(raw, True)
    raw = raw.replace('\r\n', '\n').replace('\r', '\n')
    # Remove any preamble before the opening html tag as it can cause problems,
    # especially doctypes, preserve the original linenumbers by inserting
    # newlines at the start
    pre = raw[:2048]
    for match in re.finditer(r'<\s*html', pre, flags=re.I):
        newlines = raw.count('\n', 0, match.start())
        raw = ('\n' * newlines) + raw[match.start():]
        break
    raw = strip_encoding_declarations(raw, limit=10*1024, preserve_newlines=True)
    if force_html5_parse:
        return parse_html5(raw, log=log, line_numbers=line_numbers, linenumber_attribute=linenumber_attribute, replace_entities=False, fix_newlines=False)
    try:
        ans = safe_xml_fromstring(raw, recover=False)
        if ans.tag != '{%s}html' % XHTML_NS:
            raise ValueError('Root tag is not <html> in the XHTML namespace')
        if linenumber_attribute:
            # Record source line numbers as attributes, for editor integration
            for elem in ans.iter(LxmlElement):
                if elem.sourceline is not None:
                    elem.set(linenumber_attribute, str(elem.sourceline))
        return ans
    except Exception:
        if log is not None:
            log.exception('Failed to parse as XML, parsing as tag soup')
        return parse_html5(raw, log=log, line_numbers=line_numbers, linenumber_attribute=linenumber_attribute, replace_entities=False, fix_newlines=False)
if __name__ == '__main__':
    # Quick smoke test: parse some deliberately messy markup and dump the tree
    from lxml import etree
    root = parse_html5('\n<html><head><title>a\n</title><p b=1 c=2 a=0> \n<b>b<svg ass="wipe" viewbox="0">', discard_namespaces=False)
    print(etree.tostring(root, encoding='utf-8'))
    print()
| 4,420 | Python | .py | 86 | 43.965116 | 159 | 0.651842 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,379 | toc.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/toc.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from collections import Counter, OrderedDict
from functools import partial
from operator import itemgetter
from lxml import etree
from lxml.builder import ElementMaker
from calibre import __version__
from calibre.ebooks.oeb.base import EPUB_NS, NCX, NCX_NS, OEB_DOCS, XHTML, XHTML_NS, XML, XML_NS, XPath, serialize, uuid_id, xml2text
from calibre.ebooks.oeb.polish.errors import MalformedMarkup
from calibre.ebooks.oeb.polish.opf import get_book_language, set_guide_item
from calibre.ebooks.oeb.polish.pretty import pretty_html_tree, pretty_xml_tree
from calibre.ebooks.oeb.polish.utils import extract, guess_type
from calibre.translations.dynamic import translate
from calibre.utils.localization import canonicalize_lang, get_lang, lang_as_iso639_1
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems
from polyglot.urllib import urlparse
# Register an XPath extension function so the expressions below can do
# case-insensitive tag-name matching via calibre:lower-case().
ns = etree.FunctionNamespace('calibre_xpath_extensions')
ns.prefix = 'calibre'
ns['lower-case'] = lambda c, x: x.lower() if hasattr(x, 'lower') else x
class TOC:
    '''
    A node in a Table of Contents tree. The root node has no title/dest and
    serves only as a container for the top-level entries.
    '''

    # Optional title for the whole ToC (set from the nav doc's heading)
    toc_title = None

    def __init__(self, title=None, dest=None, frag=None):
        self.title, self.dest, self.frag = title, dest, frag
        # Filled in by verify_toc_destinations()
        self.dest_exists = self.dest_error = None
        if self.title:
            self.title = self.title.strip()
        self.parent = None
        self.children = []
        self.page_list = []

    def add(self, title, dest, frag=None):
        '''Append a new child node and return it.'''
        c = TOC(title, dest, frag)
        self.children.append(c)
        c.parent = self
        return c

    def remove(self, child):
        '''Detach child from this node.'''
        self.children.remove(child)
        child.parent = None

    def remove_from_parent(self):
        '''Remove this node from the tree, promoting its children into its place.'''
        if self.parent is None:
            return
        idx = self.parent.children.index(self)
        for child in reversed(self.children):
            child.parent = self.parent
            self.parent.children.insert(idx, child)
        self.parent.children.remove(self)
        self.parent = None

    def __iter__(self):
        yield from self.children

    def __len__(self):
        return len(self.children)

    def iterdescendants(self, level=None):
        '''Iterate over all descendants depth-first. If level is not None,
        yield (level, node) pairs instead of bare nodes.'''
        gc_level = None if level is None else level + 1
        for child in self:
            if level is None:
                yield child
            else:
                yield level, child
            yield from child.iterdescendants(level=gc_level)

    def remove_duplicates(self, only_text=True):
        '''Remove duplicate sibling entries, recursively. When only_text is
        True, entries are considered duplicates if their titles match;
        otherwise title, destination and fragment must all match.'''
        seen = set()
        remove = []
        for child in self:
            key = child.title if only_text else (child.title, child.dest, (child.frag or None))
            if key in seen:
                remove.append(child)
            else:
                seen.add(key)
                # Bug fix: propagate only_text to the recursive call instead
                # of silently reverting to the default (True)
                child.remove_duplicates(only_text)
        for child in remove:
            self.remove(child)

    @property
    def depth(self):
        """The maximum depth of the navigation tree rooted at this node."""
        try:
            return max(node.depth for node in self) + 1
        except ValueError:
            # No children
            return 1

    @property
    def last_child(self):
        return self.children[-1] if self.children else None

    def get_lines(self, lvl=0):
        '''Return an indented, human readable description of the subtree.'''
        frag = ('#'+self.frag) if self.frag else ''
        ans = [('\t'*lvl) + 'TOC: %s --> %s%s'%(self.title, self.dest, frag)]
        for child in self:
            ans.extend(child.get_lines(lvl+1))
        return ans

    def __str__(self):
        return '\n'.join(self.get_lines())

    def to_dict(self, node_counter=None):
        '''Serialize the subtree to plain dicts; if node_counter (an iterator
        of ints) is given, each node gets a unique 'id' from it.'''
        ans = {
            'title':self.title, 'dest':self.dest, 'frag':self.frag,
            'children':[c.to_dict(node_counter) for c in self.children]
        }
        if self.dest_exists is not None:
            ans['dest_exists'] = self.dest_exists
        if self.dest_error is not None:
            ans['dest_error'] = self.dest_error
        if node_counter is not None:
            ans['id'] = next(node_counter)
        return ans

    @property
    def as_dict(self):
        return self.to_dict()
def child_xpath(tag, name):
    # Direct children of tag whose local name matches name, case-insensitively.
    return tag.xpath('./*[calibre:lower-case(local-name()) = "%s"]'%name)
def add_from_navpoint(container, navpoint, parent, ncx_name):
    # Convert one NCX <navPoint> into a child of parent, extracting the label
    # text and the destination (file name + fragment) from its <content> src.
    dest = frag = text = None
    nl = child_xpath(navpoint, 'navlabel')
    if nl:
        nl = nl[0]
        text = ''
        for txt in child_xpath(nl, 'text'):
            text += etree.tostring(txt, method='text',
                    encoding='unicode', with_tail=False)
    content = child_xpath(navpoint, 'content')
    if content:
        content = content[0]
        href = content.get('src', None)
        if href:
            dest = container.href_to_name(href, base=ncx_name)
            frag = urlparse(href).fragment or None
    return parent.add(text or None, dest or None, frag or None)
def process_ncx_node(container, node, toc_parent, ncx_name):
    # Recursively convert the <navPoint> children of node into TOC nodes.
    for navpoint in node.xpath('./*[calibre:lower-case(local-name()) = "navpoint"]'):
        child = add_from_navpoint(container, navpoint, toc_parent, ncx_name)
        if child is not None:
            process_ncx_node(container, navpoint, child, ncx_name)
def parse_ncx(container, ncx_name):
    # Parse an NCX document into a TOC tree, also extracting the language,
    # uid and page list if present.
    root = container.parsed(ncx_name)
    toc_root = TOC()
    navmaps = root.xpath('//*[calibre:lower-case(local-name()) = "navmap"]')
    if navmaps:
        process_ncx_node(container, navmaps[0], toc_root, ncx_name)
    toc_root.lang = toc_root.uid = None
    for attr, val in iteritems(root.attrib):
        if attr.endswith('lang'):
            toc_root.lang = str(val)
            break
    for uid in root.xpath('//*[calibre:lower-case(local-name()) = "meta" and @name="dtb:uid"]/@content'):
        if uid:
            toc_root.uid = str(uid)
            break
    # NCX page list: physical page number -> location mappings
    for pl in root.xpath('//*[calibre:lower-case(local-name()) = "pagelist"]'):
        for pt in pl.xpath('descendant::*[calibre:lower-case(local-name()) = "pagetarget"]'):
            pagenum = pt.get('value')
            if pagenum:
                href = pt.xpath('descendant::*[calibre:lower-case(local-name()) = "content"]/@src')
                if href:
                    dest = container.href_to_name(href[0], base=ncx_name)
                    frag = urlparse(href[0]).fragment or None
                    toc_root.page_list.append({'dest': dest, 'pagenum': pagenum, 'frag': frag})
    return toc_root
def add_from_li(container, li, parent, nav_name):
    # Convert one <li> from an EPUB 3 nav document into a child of parent,
    # using the first <a> or <span> child for the text and destination.
    dest = frag = text = None
    for x in li.iterchildren(XHTML('a'), XHTML('span')):
        text = etree.tostring(x, method='text', encoding='unicode', with_tail=False).strip() or ' '.join(x.xpath('descendant-or-self::*/@title')).strip()
        href = x.get('href')
        if href:
            # A purely fragment href points into the nav document itself
            dest = nav_name if href.startswith('#') else container.href_to_name(href, base=nav_name)
            frag = urlparse(href).fragment or None
        break
    return parent.add(text or None, dest or None, frag or None)
def first_child(parent, tagname):
    # First child of parent with the given tag, or None if there is none.
    return next(parent.iterchildren(tagname), None)
def process_nav_node(container, node, toc_parent, nav_name):
    # Recursively convert the <li> children of an <ol> into TOC nodes.
    for li in node.iterchildren(XHTML('li')):
        child = add_from_li(container, li, toc_parent, nav_name)
        ol = first_child(li, XHTML('ol'))
        if child is not None and ol is not None:
            process_nav_node(container, ol, child, nav_name)
def parse_nav(container, nav_name):
    # Parse an EPUB 3 nav document into a TOC tree, using the first
    # epub:type="toc" nav found and also extracting the first page-list nav.
    root = container.parsed(nav_name)
    toc_root = TOC()
    toc_root.lang = toc_root.uid = None
    seen_toc = seen_pagelist = False
    et = '{%s}type' % EPUB_NS
    for nav in XPath('descendant::h:nav[@epub:type]')(root):
        nt = nav.get(et)
        if nt == 'toc' and not seen_toc:
            ol = first_child(nav, XHTML('ol'))
            if ol is not None:
                seen_toc = True
                process_nav_node(container, ol, toc_root, nav_name)
                # Use the nav's heading, if any, as the overall ToC title
                for h in nav.iterchildren(*map(XHTML, 'h1 h2 h3 h4 h5 h6'.split())):
                    text = etree.tostring(h, method='text', encoding='unicode', with_tail=False) or h.get('title')
                    if text:
                        toc_root.toc_title = text
                        break
        elif nt == 'page-list' and not seen_pagelist:
            ol = first_child(nav, XHTML('ol'))
            if ol is not None and not seen_pagelist:
                seen_pagelist = True
                for li in ol.iterchildren(XHTML('li')):
                    for a in li.iterchildren(XHTML('a')):
                        href = a.get('href')
                        if href:
                            text = (etree.tostring(a, method='text', encoding='unicode', with_tail=False) or a.get('title')).strip()
                            if text:
                                dest = nav_name if href.startswith('#') else container.href_to_name(href, base=nav_name)
                                frag = urlparse(href).fragment or None
                                toc_root.page_list.append({'dest': dest, 'pagenum': text, 'frag': frag})
    return toc_root
def verify_toc_destinations(container, toc):
    # Check every ToC entry's destination: the target must be an existing HTML
    # file and, if a fragment is given, the anchor must exist in that file.
    # Sets dest_exists/dest_error on each node.
    anchor_map = {}
    anchor_xpath = XPath('//*/@id|//h:a/@name')
    for item in toc.iterdescendants():
        name = item.dest
        if not name:
            item.dest_exists = False
            item.dest_error = _('No file named %s exists')%name
            continue
        try:
            root = container.parsed(name)
        except KeyError:
            item.dest_exists = False
            item.dest_error = _('No file named %s exists')%name
            continue
        if not hasattr(root, 'xpath'):
            # Parsed but not as an XML/HTML tree
            item.dest_exists = False
            item.dest_error = _('No HTML file named %s exists')%name
            continue
        if not item.frag:
            item.dest_exists = True
            continue
        # Cache each file's anchors since many entries may point into one file
        if name not in anchor_map:
            anchor_map[name] = frozenset(anchor_xpath(root))
        item.dest_exists = item.frag in anchor_map[name]
        if not item.dest_exists:
            item.dest_error = _(
                'The anchor %(a)s does not exist in file %(f)s')%dict(
                a=item.frag, f=name)
def find_existing_ncx_toc(container):
    # Locate the NCX ToC: first via the spine's toc attribute, then by
    # searching the manifest for an item of NCX mimetype.
    toc = container.opf_xpath('//opf:spine/@toc')
    if toc:
        toc = container.manifest_id_map.get(toc[0], None)
    if not toc:
        ncx = guess_type('a.ncx')
        toc = container.manifest_type_map.get(ncx, [None])[0]
    return toc or None
def find_existing_nav_toc(container):
    # The EPUB 3 nav document is the manifest item with the 'nav' property;
    # returns its name, or None if there is no such item.
    return next(iter(container.manifest_items_with_property('nav')), None)
def mark_as_nav(container, name):
    # Give name the 'nav' manifest property; only meaningful for EPUB 3+.
    if container.opf_version_parsed.major > 2:
        container.apply_unique_properties(name, 'nav')
def get_x_toc(container, find_toc, parse_toc, verify_destinations=True):
    # Generic ToC reader: locate the ToC with find_toc(), parse it with
    # parse_toc(), and optionally verify that all destinations exist.
    # Always returns a TOC (possibly empty), with toc_file_name set when a
    # ToC file was found.
    def empty_toc():
        ans = TOC()
        ans.lang = ans.uid = None
        return ans
    toc = find_toc(container)
    ans = empty_toc() if toc is None or not container.has_name(toc) else parse_toc(container, toc)
    ans.toc_file_name = toc if toc and container.has_name(toc) else None
    if verify_destinations:
        verify_toc_destinations(container, ans)
    return ans
def get_toc(container, verify_destinations=True):
    # Read the book's ToC: NCX for EPUB 2, nav document for EPUB 3 (falling
    # back to NCX if the nav document yields no entries).
    ver = container.opf_version_parsed
    if ver.major < 3:
        return get_x_toc(container, find_existing_ncx_toc, parse_ncx, verify_destinations=verify_destinations)
    else:
        ans = get_x_toc(container, find_existing_nav_toc, parse_nav, verify_destinations=verify_destinations)
        if len(ans) == 0:
            ans = get_x_toc(container, find_existing_ncx_toc, parse_ncx, verify_destinations=verify_destinations)
        return ans
def get_guide_landmarks(container):
    # Yield landmarks from the EPUB 2 <guide> section of the OPF, skipping
    # references whose target file does not exist.
    for ref in container.opf_xpath('./opf:guide/opf:reference'):
        href, title, rtype = ref.get('href'), ref.get('title'), ref.get('type')
        href, frag = href.partition('#')[::2]
        name = container.href_to_name(href, container.opf_name)
        if container.has_name(name):
            yield {'dest':name, 'frag':frag, 'title':title or '', 'type':rtype or ''}
def get_nav_landmarks(container):
    # Yield landmarks from the first epub:type="landmarks" nav in the EPUB 3
    # nav document, skipping entries whose target file does not exist.
    nav = find_existing_nav_toc(container)
    if nav and container.has_name(nav):
        root = container.parsed(nav)
        et = '{%s}type' % EPUB_NS
        for elem in root.iterdescendants(XHTML('nav')):
            if elem.get(et) == 'landmarks':
                for li in elem.iterdescendants(XHTML('li')):
                    for a in li.iterdescendants(XHTML('a')):
                        href, rtype = a.get('href'), a.get(et)
                        if href:
                            title = etree.tostring(a, method='text', encoding='unicode', with_tail=False).strip()
                            href, frag = href.partition('#')[::2]
                            name = container.href_to_name(href, nav)
                            if container.has_name(name):
                                yield {'dest':name, 'frag':frag, 'title':title or '', 'type':rtype or ''}
                # Only the first landmarks nav is used
                break
def get_landmarks(container):
    '''Return the list of landmark entries for the book, preferring the
    EPUB 3 nav document and falling back to the EPUB 2 <guide> when the
    nav yields nothing (or for EPUB 2 books).'''
    if container.opf_version_parsed.major < 3:
        return list(get_guide_landmarks(container))
    landmarks = list(get_nav_landmarks(container))
    return landmarks if landmarks else list(get_guide_landmarks(container))
def ensure_id(elem, all_ids):
    '''Ensure *elem* carries an ``id`` attribute, generating one if needed.

    Returns ``(changed, id)`` where *changed* is True only when a brand
    new id was generated (and recorded in *all_ids*).
    '''
    existing = elem.get('id')
    if existing:
        return False, existing
    # An <a name="..."> anchor can be promoted to an id without inventing one
    if elem.tag == XHTML('a'):
        anchor = elem.get('name', None)
        if anchor:
            elem.set('id', anchor)
            return False, anchor
    counter = 1
    while f'toc_{counter}' in all_ids:
        counter += 1
    new_id = f'toc_{counter}'
    elem.set('id', new_id)
    all_ids.add(new_id)
    return True, new_id
def elem_to_toc_text(elem, prefer_title=False):
    '''Derive display text for a ToC entry from *elem*: its text content
    (optionally preferring the title attribute), falling back to the title
    then alt attributes, whitespace-normalized and capped at 1000 chars,
    defaulting to "(Untitled)".'''
    text = xml2text(elem).strip()
    if prefer_title:
        text = elem.get('title', '').strip() or text
    if not text:
        text = elem.get('title', '')
    if not text:
        text = elem.get('alt', '')
    text = re.sub(r'\s+', ' ', text.strip())
    text = text[:1000].strip()
    if not text:
        text = _('(Untitled)')
    return text
def item_at_top(elem):
    '''Return True if *elem* is effectively at the top of its document,
    i.e. no rendered text or <img> precedes it inside <body>. Used to
    decide whether a ToC entry can point at the file itself rather than
    needing an explicit fragment anchor.'''
    try:
        body = XPath('//h:body')(elem.getroottree().getroot())[0]
    except (TypeError, IndexError, KeyError, AttributeError):
        return False
    tree = body.getroottree()
    path = tree.getpath(elem)
    for el in body.iterdescendants(etree.Element):
        epath = tree.getpath(el)
        if epath == path:
            break
        try:
            if el.tag.endswith('}img') or (el.text and el.text.strip()):
                return False
        except Exception:
            # Odd node types (no string tag, etc.) — was a bare except,
            # which also swallowed KeyboardInterrupt/SystemExit
            return False
        if not path.startswith(epath):
            # Only check tail of non-parent elements
            if el.tail and el.tail.strip():
                return False
    return True
def from_xpaths(container, xpaths, prefer_title=False):
    '''
    Generate a Table of Contents from a list of XPath expressions. Each
    expression in the list corresponds to a level of the generate ToC. For
    example: :code:`['//h:h1', '//h:h2', '//h:h3']` will generate a three level
    Table of Contents from the ``<h1>``, ``<h2>`` and ``<h3>`` tags.
    Levels that match nothing anywhere in the spine are dropped so the
    remaining levels stay contiguous.
    '''
    tocroot = TOC()
    xpaths = [XPath(xp) for xp in xpaths]
    # Find those levels that have no elements in all spine items
    maps = OrderedDict()
    empty_levels = {i+1 for i, xp in enumerate(xpaths)}
    for spinepath in container.spine_items:
        name = container.abspath_to_name(spinepath)
        root = container.parsed(name)
        level_item_map = maps[name] = {i+1:frozenset(xp(root)) for i, xp in enumerate(xpaths)}
        for lvl, elems in iteritems(level_item_map):
            if elems:
                empty_levels.discard(lvl)
    # Remove empty levels from all level_maps
    if empty_levels:
        for name, lmap in tuple(iteritems(maps)):
            lmap = {lvl:items for lvl, items in iteritems(lmap) if lvl not in empty_levels}
            lmap = sorted(iteritems(lmap), key=itemgetter(0))
            # Renumber the surviving levels so they are 1..n with no gaps
            lmap = {i+1:items for i, (l, items) in enumerate(lmap)}
            maps[name] = lmap
    node_level_map = {tocroot: 0}
    def parent_for_level(child_level):
        # Walk the chain of last children to find the node a new entry of
        # child_level should attach to
        limit = child_level - 1
        def process_node(node):
            child = node.last_child
            if child is None:
                return node
            lvl = node_level_map[child]
            return node if lvl > limit else child if lvl == limit else process_node(child)
        return process_node(tocroot)
    for name, level_item_map in iteritems(maps):
        root = container.parsed(name)
        item_level_map = {e:i for i, elems in iteritems(level_item_map) for e in elems}
        item_dirtied = False
        all_ids = set(root.xpath('//*/@id'))
        for item in root.iterdescendants(etree.Element):
            lvl = item_level_map.get(item, None)
            if lvl is None:
                continue
            text = elem_to_toc_text(item, prefer_title)
            parent = parent_for_level(lvl)
            if item_at_top(item):
                # At the top of the file: link to the file, no anchor needed
                dirtied, elem_id = False, None
            else:
                dirtied, elem_id = ensure_id(item, all_ids)
            item_dirtied = dirtied or item_dirtied
            toc = parent.add(text, name, elem_id)
            node_level_map[toc] = lvl
            toc.dest_exists = True
        if item_dirtied:
            container.commit_item(name, keep_parsed=True)
    return tocroot
def from_links(container):
    '''
    Generate a Table of Contents from links in the book.

    Duplicate destinations and duplicate link texts are added only once,
    and entries whose destination does not exist are pruned at the end.
    '''
    toc = TOC()
    link_path = XPath('//h:a[@href]')
    seen_titles, seen_dests = set(), set()
    for name, is_linear in container.spine_names:
        root = container.parsed(name)
        for a in link_path(root):
            href = a.get('href')
            if not href or not href.strip():
                continue
            frag = None
            if href.startswith('#'):
                dest = name
                frag = href[1:]
            else:
                href, _, frag = href.partition('#')
                dest = container.href_to_name(href, base=name)
            frag = frag or None
            if (dest, frag) in seen_dests:
                continue
            seen_dests.add((dest, frag))
            text = elem_to_toc_text(a)
            if text in seen_titles:
                continue
            seen_titles.add(text)
            toc.add(text, dest, frag=frag)
    verify_toc_destinations(container, toc)
    # Iterate over a snapshot: removing while iterating the live child
    # list skips the sibling that follows each removed entry
    for child in tuple(toc):
        if not child.dest_exists:
            toc.remove(child)
    return toc
def find_text(node):
    '''Return a short (<= ~200 char) whitespace-normalized text snippet
    from the first child of *node* that has any text, recursing into a
    child when its text is too long. Returns None if nothing suitable is
    found.'''
    LIMIT = 200
    pat = re.compile(r'\s+')
    for child in node:
        if isinstance(child, etree._Element):
            text = xml2text(child).strip()
            text = pat.sub(' ', text)
            if not text:  # idiomatic truthiness instead of len(text) < 1
                continue
            if len(text) > LIMIT:
                # Look for less text in a child of this node, recursively
                ntext = find_text(child)
                return ntext or (text[:LIMIT] + '...')
            else:
                return text
def from_files(container):
    '''
    Generate a Table of Contents from files in the book, one flat entry
    per spine item, titled with a text snippet from that file (or its
    file name). A first file named titlepage/cover becomes "Cover".
    '''
    toc = TOC()
    for i, spinepath in enumerate(container.spine_items):
        name = container.abspath_to_name(spinepath)
        root = container.parsed(name)
        body = XPath('//h:body')(root)
        if not body:
            continue
        text = find_text(body[0])
        if not text:
            # No usable text: fall back to the file's base name
            text = name.rpartition('/')[-1]
            if i == 0 and text.rpartition('.')[0].lower() in {'titlepage', 'cover'}:
                text = _('Cover')
        toc.add(text, name)
    return toc
def node_from_loc(root, locs, totals=None):
    '''Resolve a sequence of child indices *locs* into the element it
    addresses under <body>. When *totals* is given, the child count at
    each depth must match or MalformedMarkup is raised.'''
    node = root.xpath('//*[local-name()="body"]')[0]
    for depth, index in enumerate(locs):
        kids = tuple(node.iterchildren(etree.Element))
        if totals is not None and totals[depth] != len(kids):
            raise MalformedMarkup()
        node = kids[index]
    return node
def add_id(container, name, loc, totals=None):
    '''Ensure the element addressed by *loc* in the file *name* has an id
    attribute, re-parsing with the HTML 5 parser if the node counts do not
    match, and return that id.'''
    root = container.parsed(name)
    try:
        node = node_from_loc(root, loc, totals=totals)
    except MalformedMarkup:
        # The webkit HTML parser and the container parser have yielded
        # different node counts, this can happen if the file is valid XML
        # but contains constructs like nested <p> tags. So force parse it
        # with the HTML 5 parser and try again.
        raw = container.raw_data(name)
        root = container.parse_xhtml(raw, fname=name, force_html5_parse=True)
        try:
            node = node_from_loc(root, loc, totals=totals)
        except MalformedMarkup:
            raise MalformedMarkup(_('The file %s has malformed markup. Try running the Fix HTML tool'
                                    ' before editing.') % name)
        container.replace(name, root)
    if not node.get('id'):
        ensure_id(node, set(root.xpath('//*/@id')))
        container.commit_item(name, keep_parsed=True)
    return node.get('id')
def create_ncx(toc, to_href, btitle, lang, uid):
    '''Build and return an NCX document (lxml tree) for *toc*.

    :param to_href: callable mapping a canonical name to an href relative
        to the NCX file
    :param btitle: book title placed in <docTitle>
    :param lang: language code (underscores converted to hyphens)
    :param uid: value for the dtb:uid metadata entry
    '''
    lang = lang.replace('_', '-')
    ncx = etree.Element(NCX('ncx'),
        attrib={'version': '2005-1', XML('lang'): lang},
        nsmap={None: NCX_NS})
    head = etree.SubElement(ncx, NCX('head'))
    etree.SubElement(head, NCX('meta'),
        name='dtb:uid', content=str(uid))
    etree.SubElement(head, NCX('meta'),
        name='dtb:depth', content=str(toc.depth))
    generator = ''.join(['calibre (', __version__, ')'])
    etree.SubElement(head, NCX('meta'),
        name='dtb:generator', content=generator)
    etree.SubElement(head, NCX('meta'), name='dtb:totalPageCount', content='0')
    etree.SubElement(head, NCX('meta'), name='dtb:maxPageNumber', content='0')
    title = etree.SubElement(ncx, NCX('docTitle'))
    text = etree.SubElement(title, NCX('text'))
    text.text = btitle
    navmap = etree.SubElement(ncx, NCX('navMap'))
    spat = re.compile(r'\s+')
    # Counter used to assign sequential playOrder/id values across the
    # whole (recursive) navMap
    play_order = Counter()
    def process_node(xml_parent, toc_parent):
        for child in toc_parent:
            play_order['c'] += 1
            point = etree.SubElement(xml_parent, NCX('navPoint'), id='num_%d' % play_order['c'],
                    playOrder=str(play_order['c']))
            label = etree.SubElement(point, NCX('navLabel'))
            title = child.title
            if title:
                title = spat.sub(' ', title)
            etree.SubElement(label, NCX('text')).text = title
            if child.dest:
                href = to_href(child.dest)
                if child.frag:
                    href += '#'+child.frag
                etree.SubElement(point, NCX('content'), src=href)
            process_node(point, child)
    process_node(navmap, toc)
    return ncx
def commit_ncx_toc(container, toc, lang=None, uid=None):
    '''Serialize *toc* as NCX and write it into the container, generating
    the NCX manifest item and spine toc attribute if necessary.

    *lang* and *uid* default to values found in the OPF metadata (falling
    back to the UI language and a fresh uuid respectively).
    '''
    tocname = find_existing_ncx_toc(container)
    if tocname is None:
        item = container.generate_item('toc.ncx', id_prefix='toc')
        tocname = container.href_to_name(item.get('href'), base=container.opf_name)
        ncx_id = item.get('id')
        # Plain loop instead of a side-effect-only list comprehension
        for s in container.opf_xpath('//opf:spine'):
            s.set('toc', ncx_id)
    if not lang:
        lang = get_lang()
        for l in container.opf_xpath('//dc:language'):
            l = canonicalize_lang(xml2text(l).strip())
            if l:
                # (removed redundant `lang = l` that was immediately overwritten)
                lang = lang_as_iso639_1(l) or l
                break
    lang = lang_as_iso639_1(lang) or lang
    if not uid:
        uid = uuid_id()
        eid = container.opf.get('unique-identifier', None)
        if eid:
            m = container.opf_xpath('//*[@id="%s"]'%eid)
            if m:
                uid = xml2text(m[0])
    title = _('Table of Contents')
    m = container.opf_xpath('//dc:title')
    if m:
        x = xml2text(m[0]).strip()
        title = x or title
    to_href = partial(container.name_to_href, base=tocname)
    root = create_ncx(toc, to_href, title, lang, uid)
    container.replace(tocname, root)
    container.pretty_print.add(tocname)
def ensure_single_nav_of_type(root, ntype='toc'):
    '''Return the single <nav epub:type=*ntype*> element in *root*,
    creating one inside <body> if missing. Extra navs of that type are
    removed; the survivor is emptied but keeps its attributes and tail.'''
    et = '{%s}type' % EPUB_NS
    navs = [n for n in root.iterdescendants(XHTML('nav')) if n.get(et) == ntype]
    for x in navs[1:]:
        extract(x)
    if navs:
        nav = navs[0]
        tail = nav.tail
        attrib = dict(nav.attrib)
        # clear() wipes attributes and tail too, so restore them
        nav.clear()
        nav.attrib.update(attrib)
        nav.tail = tail
    else:
        nav = root.makeelement(XHTML('nav'))
        first_child(root, XHTML('body')).append(nav)
    nav.set('{%s}type' % EPUB_NS, ntype)
    return nav
def ensure_container_has_nav(container, lang=None, previous_nav=None):
    '''Ensure the container has a nav document, creating one from the
    template (or from *previous_nav*, a (name, root) pair) if needed.
    Returns ``(tocname, root)``.'''
    tocname = find_existing_nav_toc(container)
    if previous_nav is not None:
        nav_name = container.href_to_name(previous_nav[0])
        if nav_name and container.exists(nav_name):
            tocname = nav_name
            container.apply_unique_properties(tocname, 'nav')
    if tocname is None:
        item = container.generate_item('nav.xhtml', id_prefix='nav')
        item.set('properties', 'nav')
        tocname = container.href_to_name(item.get('href'), base=container.opf_name)
        if previous_nav is not None:
            root = previous_nav[1]
        else:
            root = container.parse_xhtml(P('templates/new_nav.html', data=True).decode('utf-8'))
        container.replace(tocname, root)
    else:
        root = container.parsed(tocname)
    if lang:
        lang = lang_as_iso639_1(lang) or lang
        root.set('lang', lang)
        root.set('{%s}lang' % XML_NS, lang)
    return tocname, root
def collapse_li(parent):
    '''Strip whitespace-only text from <li> elements that contain exactly
    one child, so each entry serializes on a single line.'''
    for item in parent.iterdescendants(XHTML('li')):
        if len(item) != 1:
            continue
        item.text = None
        item[0].tail = None
def create_nav_li(container, ol, entry, tocname):
    '''Append ``<li><a href=...></a></li>`` to *ol* for the landmark/page
    dict *entry* and return the <a> element for further decoration.'''
    li = ol.makeelement(XHTML('li'))
    ol.append(li)
    anchor = li.makeelement(XHTML('a'))
    li.append(anchor)
    target = container.name_to_href(entry['dest'], tocname)
    frag = entry['frag']
    if frag:
        target += '#' + frag
    anchor.set('href', target)
    return anchor
def set_landmarks(container, root, tocname, landmarks):
    '''Write *landmarks* (dicts with dest/frag/title/type) into a hidden
    <nav epub:type="landmarks"> element of the nav document *root*.'''
    nav = ensure_single_nav_of_type(root, 'landmarks')
    nav.set('hidden', '')
    ol = nav.makeelement(XHTML('ol'))
    nav.append(ol)
    for entry in landmarks:
        # Only landmarks with a type that point at existing HTML files
        if entry['type'] and container.has_name(entry['dest']) and container.mime_map[entry['dest']] in OEB_DOCS:
            a = create_nav_li(container, ol, entry, tocname)
            a.set('{%s}type' % EPUB_NS, entry['type'])
            a.text = entry['title'] or None
    pretty_xml_tree(nav)
    collapse_li(nav)
def commit_nav_toc(container, toc, lang=None, landmarks=None, previous_nav=None):
    '''Serialize *toc* (and optionally its page list) into the EPUB 3 nav
    document, creating the nav file if necessary.

    NOTE(review): the *landmarks* parameter is accepted here but landmarks
    are written by callers via create_nav()/set_landmarks — confirm against
    callers before relying on it.
    '''
    tocname, root = ensure_container_has_nav(container, lang=lang, previous_nav=previous_nav)
    nav = ensure_single_nav_of_type(root, 'toc')
    if toc.toc_title:
        nav.append(nav.makeelement(XHTML('h1')))
        nav[-1].text = toc.toc_title
    rnode = nav.makeelement(XHTML('ol'))
    nav.append(rnode)
    to_href = partial(container.name_to_href, base=tocname)
    spat = re.compile(r'\s+')
    def process_node(xml_parent, toc_parent):
        # Recursively build the nested <ol>/<li> structure mirroring the ToC
        for child in toc_parent:
            li = xml_parent.makeelement(XHTML('li'))
            xml_parent.append(li)
            title = child.title or ''
            title = spat.sub(' ', title).strip()
            # Entries without a destination become <span>, not <a>
            a = li.makeelement(XHTML('a' if child.dest else 'span'))
            a.text = title
            li.append(a)
            if child.dest:
                href = to_href(child.dest)
                if child.frag:
                    href += '#'+child.frag
                a.set('href', href)
            if len(child):
                ol = li.makeelement(XHTML('ol'))
                li.append(ol)
                process_node(ol, child)
    process_node(rnode, toc)
    pretty_xml_tree(nav)
    collapse_li(nav)
    nav.tail = '\n'
    if toc.page_list:
        # Books with print page markers also get a hidden page-list nav
        nav = ensure_single_nav_of_type(root, 'page-list')
        nav.set('hidden', '')
        ol = nav.makeelement(XHTML('ol'))
        nav.append(ol)
        for entry in toc.page_list:
            if container.has_name(entry['dest']) and container.mime_map[entry['dest']] in OEB_DOCS:
                a = create_nav_li(container, ol, entry, tocname)
                a.text = str(entry['pagenum'])
        pretty_xml_tree(nav)
        collapse_li(nav)
    container.replace(tocname, root)
def commit_toc(container, toc, lang=None, uid=None):
    '''Write *toc* back to the book: always to the NCX, and additionally
    to the EPUB 3 nav document when the OPF version is 3 or later.'''
    commit_ncx_toc(container, toc, lang=lang, uid=uid)
    is_epub3 = container.opf_version_parsed.major > 2
    if is_epub3:
        commit_nav_toc(container, toc, lang=lang)
def remove_names_from_toc(container, names):
    '''Remove all ToC entries (from both NCX and nav) whose destination is
    one of *names*. Returns the list of ToC file names that were changed.'''
    changed = []
    names = frozenset(names)
    for find_toc, parse_toc, commit_toc in (
            (find_existing_ncx_toc, parse_ncx, commit_ncx_toc),
            (find_existing_nav_toc, parse_nav, commit_nav_toc),
    ):
        toc = get_x_toc(container, find_toc, parse_toc, verify_destinations=False)
        if len(toc) > 0:
            remove = []
            for node in toc.iterdescendants():
                if node.dest in names:
                    remove.append(node)
            if remove:
                # Remove deepest-first so parents are still attached
                for node in reversed(remove):
                    node.remove_from_parent()
                commit_toc(container, toc)
                changed.append(find_toc(container))
    return changed
def find_inline_toc(container):
    '''Return the name of the spine item containing a calibre-generated
    inline ToC (identified by its well-known body id), or None.'''
    for name, linear in container.spine_names:
        if container.parsed(name).xpath('//*[local-name()="body" and @id="calibre_generated_inline_toc"]'):
            return name
def toc_to_html(toc, container, toc_name, title, lang=None):
    '''Render *toc* as a standalone XHTML document (nested <ul> lists)
    suitable for use as an inline Table of Contents page.'''
    def process_node(html_parent, toc, level=1, indent='  ', style_level=2):
        li = html_parent.makeelement(XHTML('li'))
        li.tail = '\n'+ (indent*level)
        html_parent.append(li)
        name, frag = toc.dest, toc.frag
        href = '#'
        if name:
            href = container.name_to_href(name, toc_name)
            if frag:
                href += '#' + frag
        a = li.makeelement(XHTML('a'), href=href)
        a.text = toc.title
        li.append(a)
        if len(toc) > 0:
            parent = li.makeelement(XHTML('ul'))
            parent.set('class', 'level%d' % (style_level))
            li.append(parent)
            # Whitespace tails keep the serialized output human-readable
            a.tail = '\n\n' + (indent*(level+2))
            parent.text = '\n'+(indent*(level+3))
            parent.tail = '\n\n' + (indent*(level+1))
            for child in toc:
                process_node(parent, child, level+3, style_level=style_level + 1)
            parent[-1].tail = '\n' + (indent*(level+2))
    E = ElementMaker(namespace=XHTML_NS, nsmap={None:XHTML_NS})
    html = E.html(
        E.head(
            E.title(title),
            E.style(P('templates/inline_toc_styles.css', data=True).decode('utf-8'), type='text/css'),
        ),
        E.body(
            E.h2(title),
            E.ul(),
            id="calibre_generated_inline_toc",
        )
    )
    ul = html[1][1]
    ul.set('class', 'level1')
    for child in toc:
        process_node(ul, child)
    if lang:
        html.set('lang', lang)
    pretty_html_tree(container, html)
    return html
def create_inline_toc(container, title=None):
    '''
    Create an inline (HTML) Table of Contents from an existing NCX Table of Contents.

    :param title: The title for this table of contents.
    :return: The name of the ToC file, or None if the book has no ToC.
    '''
    lang = get_book_language(container)
    default_title = 'Table of Contents'
    if lang:
        lang = lang_as_iso639_1(lang) or lang
        default_title = translate(lang, default_title)
    title = title or default_title
    toc = get_toc(container)
    if len(toc) == 0:
        return None
    toc_name = find_inline_toc(container)
    name = toc_name
    html = toc_to_html(toc, container, name, title, lang)
    raw = serialize(html, 'text/html')
    if name is None:
        # No existing inline ToC: add a new file at the start of the spine
        name, c = 'toc.xhtml', 0
        while container.has_name(name):
            c += 1
            name = 'toc%d.xhtml' % c
        container.add_file(name, raw, spine_index=0)
    else:
        with container.open(name, 'wb') as f:
            f.write(raw)
    set_guide_item(container, 'toc', title, name, frag='calibre_generated_inline_toc')
    return name
| 32,555 | Python | .py | 793 | 31.74401 | 153 | 0.585675 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,380 | upgrade.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/upgrade.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from calibre.ebooks.conversion.plugins.epub_input import ADOBE_OBFUSCATION, IDPF_OBFUSCATION
from calibre.ebooks.metadata.opf3 import XPath
from calibre.ebooks.metadata.opf_2_to_3 import upgrade_metadata
from calibre.ebooks.oeb.base import DC, EPUB_NS, OEB_DOCS, xpath
from calibre.ebooks.oeb.parse_utils import ensure_namespace_prefixes
from calibre.ebooks.oeb.polish.opf import get_book_language
from calibre.ebooks.oeb.polish.toc import commit_nav_toc, find_existing_ncx_toc, get_landmarks, get_toc
from calibre.ebooks.oeb.polish.utils import OEB_FONTS
from calibre.utils.short_uuid import uuid4
def add_properties(item, *props):
    '''Merge the given property tokens into *item*'s space-separated
    ``properties`` attribute, de-duplicating and sorting the result.'''
    current = (item.get('properties') or '').split()
    merged = sorted(set(current).union(props))
    item.set('properties', ' '.join(merged))
def fix_font_mime_types(container):
    '''Normalize the media-type of every font in the manifest to the
    container's guessed type for that file. Returns True if any manifest
    entry was touched.'''
    changed = False
    for item in container.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
        mt = item.get('media-type') or ''
        if mt.lower() in OEB_FONTS:
            name = container.href_to_name(item.get('href'), container.opf_name)
            item.set('media-type', container.guess_type(name))
            changed = True
    return changed
def migrate_obfuscated_fonts(container):
    '''Re-key the container's obfuscated fonts to the IDPF obfuscation
    algorithm, creating a package unique-identifier to derive the IDPF key
    from if the book lacks one, and update META-INF/encryption.xml.'''
    if not container.obfuscated_fonts:
        return
    name_to_elem_map = {}
    for em, cr in container.iter_encryption_entries():
        alg = em.get('Algorithm')
        if cr is None or alg not in {ADOBE_OBFUSCATION, IDPF_OBFUSCATION}:
            continue
        name = container.href_to_name(cr.get('URI'))
        name_to_elem_map[name] = em, cr
    package_id, raw_unique_identifier, idpf_key = container.read_raw_unique_identifier()
    if not idpf_key:
        # No key source: synthesize a dc:identifier and mark it unique
        if not package_id:
            package_id = uuid4()
            container.opf.set('unique-identifier', package_id)
        metadata = XPath('./opf:metadata')(container.opf)[0]
        ident = metadata.makeelement(DC('identifier'))
        ident.text = uuid4()
        metadata.append(ident)
        package_id, raw_unique_identifier, idpf_key = container.read_raw_unique_identifier()
    for name in tuple(container.obfuscated_fonts):
        try:
            em, cr = name_to_elem_map[name]
        except KeyError:
            # Font listed as obfuscated but absent from encryption.xml
            container.obfuscated_fonts.pop(name)
            continue
        em.set('Algorithm', IDPF_OBFUSCATION)
        cr.set('URI', container.name_to_href(name))
        container.obfuscated_fonts[name] = (IDPF_OBFUSCATION, idpf_key)
    container.commit_item('META-INF/encryption.xml')
def collect_properties(container):
    '''Scan every HTML spine/manifest document and set the EPUB 3 manifest
    properties (svg, scripted, mathml, switch) its content requires.'''
    for item in container.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
        mt = item.get('media-type') or ''
        if mt.lower() not in OEB_DOCS:
            continue
        name = container.href_to_name(item.get('href'), container.opf_name)
        try:
            root = container.parsed(name)
        except KeyError:
            continue
        root = ensure_namespace_prefixes(root, {'epub': EPUB_NS})
        properties = set()
        container.replace(name, root)  # Ensure entities are converted
        if xpath(root, '//svg:svg'):
            properties.add('svg')
        if xpath(root, '//h:script'):
            properties.add('scripted')
        if xpath(root, '//mathml:math'):
            properties.add('mathml')
        if xpath(root, '//epub:switch'):
            properties.add('switch')
        if properties:
            add_properties(item, *tuple(properties))
# Map from EPUB 2 <guide> reference types to the corresponding EPUB 3
# epub:type landmark vocabulary terms. An empty string value means the
# guide type has no EPUB 3 equivalent and should be dropped.
guide_epubtype_map = {
        'acknowledgements'   : 'acknowledgments',
        'other.afterword'    : 'afterword',
        'other.appendix'     : 'appendix',
        'other.backmatter'   : 'backmatter',
        'bibliography'       : 'bibliography',
        'text'               : 'bodymatter',
        'other.chapter'      : 'chapter',
        'colophon'           : 'colophon',
        'other.conclusion'   : 'conclusion',
        'other.contributors' : 'contributors',
        'copyright-page'     : 'copyright-page',
        'cover'              : 'cover',
        'dedication'         : 'dedication',
        'other.division'     : 'division',
        'epigraph'           : 'epigraph',
        'other.epilogue'     : 'epilogue',
        'other.errata'       : 'errata',
        'other.footnotes'    : 'footnotes',
        'foreword'           : 'foreword',
        'other.frontmatter'  : 'frontmatter',
        'glossary'           : 'glossary',
        'other.halftitlepage': 'halftitlepage',
        'other.imprint'      : 'imprint',
        'other.imprimatur'   : 'imprimatur',
        'index'              : 'index',
        'other.introduction' : 'introduction',
        'other.landmarks'    : 'landmarks',
        'other.loa'          : 'loa',
        'loi'                : 'loi',
        'lot'                : 'lot',
        'other.lov'          : 'lov',
        'notes'              : '',
        'other.notice'       : 'notice',
        'other.other-credits': 'other-credits',
        'other.part'         : 'part',
        'other.preamble'     : 'preamble',
        'preface'            : 'preface',
        'other.prologue'     : 'prologue',
        'other.rearnotes'    : 'rearnotes',
        'other.subchapter'   : 'subchapter',
        'title-page'         : 'titlepage',
        'toc'                : 'toc',
        'other.volume'       : 'volume',
        'other.warning'      : 'warning'
}
def create_nav(container, toc, landmarks, previous_nav=None):
    '''Create/update the EPUB 3 nav document from *toc* and *landmarks*
    (guide types are first translated to epub:type vocabulary).'''
    lang = get_book_language(container)
    if lang == 'und':
        lang = None
    if landmarks:
        for entry in landmarks:
            entry['type'] = guide_epubtype_map.get(entry['type'].lower())
            if entry['type'] == 'cover' and container.mime_map.get(entry['dest'], '').lower() in OEB_DOCS:
                # Preserve calibre's notion of the title page
                container.apply_unique_properties(entry['dest'], 'calibre:title-page')
    commit_nav_toc(container, toc, lang=lang, landmarks=landmarks, previous_nav=previous_nav)
def epub_2_to_3(container, report, previous_nav=None, remove_ncx=True):
    '''Upgrade an EPUB 2 container to EPUB 3 in place: migrate metadata,
    collect manifest properties, convert NCX/guide to nav/landmarks, fix
    font mime types and re-key obfuscated fonts.'''
    upgrade_metadata(container.opf)
    collect_properties(container)
    toc = get_toc(container)
    toc_name = find_existing_ncx_toc(container)
    if toc_name and remove_ncx:
        container.remove_item(toc_name)
        container.opf_xpath('./opf:spine')[0].attrib.pop('toc', None)
    landmarks = get_landmarks(container)
    for guide in container.opf_xpath('./opf:guide'):
        guide.getparent().remove(guide)
    create_nav(container, toc, landmarks, previous_nav)
    container.opf.set('version', '3.0')
    if fix_font_mime_types(container):
        container.refresh_mime_map()
    migrate_obfuscated_fonts(container)
    container.dirty(container.opf_name)
def upgrade_book(container, report, remove_ncx=True):
    '''Upgrade an EPUB 2 book to EPUB 3. Returns True if an upgrade was
    performed, False if the book is not an EPUB 2. Progress messages are
    passed to the *report* callable.'''
    if container.book_type != 'epub' or container.opf_version_parsed.major >= 3:
        report(_('No upgrade needed'))
        return False
    epub_2_to_3(container, report, remove_ncx=remove_ncx)
    report(_('Updated EPUB from version 2 to 3'))
    return True
if __name__ == '__main__':
    # Command line usage: upgrade the EPUB given as the last argument,
    # writing the result next to it with an "-upgraded" suffix
    from calibre.ebooks.oeb.polish.container import get_container
    from calibre.utils.logging import default_log
    default_log.filter_level = default_log.DEBUG
    inbook = sys.argv[-1]
    ebook = get_container(inbook, default_log)
    if upgrade_book(ebook, print):
        outbook = inbook.rpartition('.')[0] + '-upgraded.' + inbook.rpartition('.')[-1]
        ebook.commit(outbook)
        print('Upgraded book written to:', outbook)
| 7,460 | Python | .py | 168 | 37.565476 | 106 | 0.625636 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,381 | cover.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/cover.py | __license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import shutil
from calibre.ebooks.oeb.base import OEB_DOCS, OPF, XLINK, XPath, xml2text
from calibre.ebooks.oeb.polish.replace import get_recommended_folders, replace_links
from calibre.utils.imghdr import identify
from polyglot.builtins import iteritems
def set_azw3_cover(container, cover_path, report, options=None):
    '''Set the cover of an AZW3 book by (re)pointing the guide cover
    reference at the cover image, copying in the image file unless
    options['existing_image'] says cover_path is already in the book.'''
    existing_image = options is not None and options.get('existing_image', False)
    name = None
    found = True
    for gi in container.opf_xpath('//opf:guide/opf:reference[@href and contains(@type, "cover")]'):
        href = gi.get('href')
        name = container.href_to_name(href, container.opf_name)
        container.remove_from_xml(gi)
    if existing_image:
        name = cover_path
        found = False
    else:
        if name is None or not container.has_name(name):
            # No pre-existing cover image: create a manifest entry for one
            item = container.generate_item(name='cover.jpeg', id_prefix='cover')
            name = container.href_to_name(item.get('href'), container.opf_name)
            found = False
    href = container.name_to_href(name, container.opf_name)
    guide = container.opf_xpath('//opf:guide')[0]
    container.insert_into_xml(guide, guide.makeelement(
        OPF('reference'), href=href, type='cover'))
    if not existing_image:
        with open(cover_path, 'rb') as src, container.open(name, 'wb') as dest:
            shutil.copyfileobj(src, dest)
    container.dirty(container.opf_name)
    report(_('Cover updated') if found else _('Cover inserted'))
def get_azw3_raster_cover_name(container):
    '''Return the canonical name of the cover image referenced by the
    AZW3 guide, or None when no cover reference exists.'''
    refs = container.opf_xpath('//opf:guide/opf:reference[@href and contains(@type, "cover")]')
    for ref in refs:
        return container.href_to_name(ref.get('href'))
def mark_as_cover_azw3(container, name):
    '''Point the AZW3 guide cover reference at *name*, creating the
    reference if the guide has none.'''
    href = container.name_to_href(name, container.opf_name)
    found = False
    for item in container.opf_xpath('//opf:guide/opf:reference[@href and contains(@type, "cover")]'):
        item.set('href', href)
        found = True
    if not found:
        for guide in container.opf_xpath('//opf:guide'):
            container.insert_into_xml(guide, guide.makeelement(
                OPF('reference'), href=href, type='cover'))
    container.dirty(container.opf_name)
def get_raster_cover_name(container):
    '''Return the canonical name of the book's raster cover image, if one
    is marked, using the AZW3 guide or the EPUB OPF as appropriate.'''
    is_azw3 = container.book_type == 'azw3'
    return get_azw3_raster_cover_name(container) if is_azw3 else find_cover_image(container, strict=True)
def get_cover_page_name(container):
    '''Return the name of the HTML cover (title) page, or None. AZW3 books
    have no HTML cover page, so None is always returned for them.'''
    if container.book_type != 'azw3':
        return find_cover_page(container)
def set_cover(container, cover_path, report=None, options=None):
    '''
    Set the cover of the book to the image pointed to by cover_path.

    :param cover_path: Either the absolute path to an image file or the
        canonical name of an image in the book. When using an image in the book,
        you must also set options, see below.
    :param report: An optional callable that takes a single argument. It will
        be called with information about the tasks being processed.
    :param options: None or a dictionary that controls how the cover is set. The dictionary can have entries:
        **keep_aspect**: True or False  (Preserve aspect ratio of covers in EPUB)
        **no_svg**: True or False  (Use an SVG cover wrapper in the EPUB titlepage)
        **existing_image**: True or False  (``cover_path`` refers to an existing image in the book)
    '''
    report = report or (lambda x:x)
    if container.book_type == 'azw3':
        set_azw3_cover(container, cover_path, report, options=options)
    else:
        set_epub_cover(container, cover_path, report, options=options)
def mark_as_cover(container, name):
    '''
    Mark the specified image as the cover image.

    :raises ValueError: if *name* does not exist in the book or is not a
        raster image.
    '''
    if name not in container.mime_map:
        raise ValueError('Cannot mark %s as cover as it does not exist' % name)
    mt = container.mime_map[name]
    if not is_raster_image(mt):
        raise ValueError('Cannot mark %s as the cover image as it is not a raster image' % name)
    if container.book_type == 'azw3':
        mark_as_cover_azw3(container, name)
    else:
        mark_as_cover_epub(container, name)
###############################################################################
# The delightful EPUB cover processing
def is_raster_image(media_type):
    '''Return a truthy value iff *media_type* names one of the raster
    image MIME types usable as a cover (PNG, JPEG or GIF).'''
    raster_types = frozenset((
        'image/png', 'image/jpeg', 'image/jpg', 'image/gif'))
    return media_type and media_type.lower() in raster_types
# Guide reference types (lowercased) that various vendors/tools have used
# to identify a cover or title image
COVER_TYPES = {
    'coverimagestandard', 'other.ms-coverimage-standard',
    'other.ms-titleimage-standard', 'other.ms-titleimage',
    'other.ms-coverimage', 'other.ms-thumbimage-standard',
    'other.ms-thumbimage', 'thumbimagestandard', 'cover'}
def find_cover_image2(container, strict=False):
    '''Find the raster cover image in an EPUB 2 book: first the
    <meta name="cover"> manifest reference, then the guide "cover" entry;
    when not *strict*, fall back to the largest image among all known
    cover-ish guide types.'''
    manifest_id_map = container.manifest_id_map
    mm = container.mime_map
    for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'):
        item_id = meta.get('content')
        name = manifest_id_map.get(item_id, None)
        media_type = mm.get(name, None)
        if is_raster_image(media_type):
            return name
    # First look for a guide item with type == 'cover'
    guide_type_map = container.guide_type_map
    for ref_type, name in iteritems(guide_type_map):
        if ref_type.lower() == 'cover' and is_raster_image(mm.get(name, None)):
            return name
    if strict:
        return
    # Find the largest image from all possible guide cover items
    largest_cover = (None, 0)
    for ref_type, name in iteritems(guide_type_map):
        if ref_type.lower() in COVER_TYPES and is_raster_image(mm.get(name, None)):
            path = container.name_path_map.get(name, None)
            if path:
                sz = os.path.getsize(path)
                if sz > largest_cover[1]:
                    largest_cover = (name, sz)
    if largest_cover[0]:
        return largest_cover[0]
def find_cover_image3(container):
    '''Find the raster cover image in an EPUB 3 book: the manifest item
    with the cover-image property, falling back to the legacy EPUB 2
    <meta name="cover"> reference.'''
    for name in container.manifest_items_with_property('cover-image'):
        return name
    manifest_id_map = container.manifest_id_map
    mm = container.mime_map
    for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'):
        item_id = meta.get('content')
        name = manifest_id_map.get(item_id, None)
        media_type = mm.get(name, None)
        if is_raster_image(media_type):
            return name
def find_cover_image(container, strict=False):
    'Find a raster image marked as a cover in the OPF'
    if container.opf_version_parsed.major >= 3:
        return find_cover_image3(container)
    # EPUB 2: *strict* limits the search to explicitly marked covers
    return find_cover_image2(container, strict=strict)
def get_guides(container):
    '''Return the <guide> elements in the OPF, creating an empty one
    first if the OPF has none.'''
    guides = container.opf_xpath('//opf:guide')
    if not guides:
        container.insert_into_xml(container.opf, container.opf.makeelement(
            OPF('guide')))
        guides = container.opf_xpath('//opf:guide')
    return guides
def mark_as_cover_epub(container, name):
    '''Mark the image *name* as the EPUB cover: via <meta name="cover">
    plus a guide reference for EPUB 2, or the cover-image manifest
    property for EPUB 3. Existing cover markers are removed first.

    :raises ValueError: if *name* is not in the manifest.
    '''
    mmap = {v:k for k, v in iteritems(container.manifest_id_map)}
    if name not in mmap:
        raise ValueError('Cannot mark %s as cover as it is not in manifest' % name)
    mid = mmap[name]
    ver = container.opf_version_parsed
    # Remove all entries from the opf that identify a raster image as cover
    for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'):
        container.remove_from_xml(meta)
    for ref in container.opf_xpath('//opf:guide/opf:reference[@href and @type]'):
        if ref.get('type').lower() not in COVER_TYPES:
            continue
        rname = container.href_to_name(ref.get('href'), container.opf_name)
        mt = container.mime_map.get(rname, None)
        if is_raster_image(mt):
            container.remove_from_xml(ref)
    if ver.major < 3:
        # Add reference to image in <metadata>
        for metadata in container.opf_xpath('//opf:metadata'):
            m = metadata.makeelement(OPF('meta'), name='cover', content=mid)
            container.insert_into_xml(metadata, m)
        # If no entry for cover exists in guide, insert one that points to this
        # image
        if not container.opf_xpath('//opf:guide/opf:reference[@type="cover"]'):
            for guide in get_guides(container):
                container.insert_into_xml(guide, guide.makeelement(
                    OPF('reference'), type='cover', href=container.name_to_href(name, container.opf_name)))
    else:
        container.apply_unique_properties(name, 'cover-image')
    container.dirty(container.opf_name)
def mark_as_titlepage(container, name, move_to_start=True):
    '''
    Mark the specified HTML file as the titlepage of the EPUB.

    For EPUB 2 this is recorded as the guide "cover" reference; for EPUB 3
    as the calibre:title-page manifest property.

    :param move_to_start: If True the HTML file is moved to the start of the spine
    '''
    ver = container.opf_version_parsed
    if move_to_start:
        for item, q, linear in container.spine_iter:
            if name == q:
                break
        if not linear:
            item.set('linear', 'yes')
        if item.getparent().index(item) > 0:
            container.insert_into_xml(item.getparent(), item, 0)
    if ver.major < 3:
        # Replace any existing guide cover reference with one for *name*
        for ref in container.opf_xpath('//opf:guide/opf:reference[@type="cover"]'):
            ref.getparent().remove(ref)
        for guide in get_guides(container):
            container.insert_into_xml(guide, guide.makeelement(
                OPF('reference'), type='cover', href=container.name_to_href(name, container.opf_name)))
    else:
        container.apply_unique_properties(name, 'calibre:title-page')
    container.dirty(container.opf_name)
def find_cover_page(container):
    '''Find a document marked as a cover in the OPF: the guide "cover"
    reference for EPUB 2, the calibre:title-page property or a "cover"
    landmark for EPUB 3. Returns None if no HTML cover page is marked.'''
    ver = container.opf_version_parsed
    mm = container.mime_map
    if ver.major < 3:
        guide_type_map = container.guide_type_map
        for ref_type, name in iteritems(guide_type_map):
            if ref_type.lower() == 'cover' and mm.get(name, '').lower() in OEB_DOCS:
                return name
    else:
        for name in container.manifest_items_with_property('calibre:title-page'):
            return name
        from calibre.ebooks.oeb.polish.toc import get_landmarks
        for landmark in get_landmarks(container):
            if landmark['type'] == 'cover' and mm.get(landmark['dest'], '').lower() in OEB_DOCS:
                return landmark['dest']
def fix_conversion_titlepage_links_in_nav(container):
    '''Re-point nav entries that the conversion pipeline marked with
    data-calibre-removed-titlepage at the (possibly new) cover page.'''
    from calibre.ebooks.oeb.polish.toc import find_existing_nav_toc
    cover_page_name = find_cover_page(container)
    if not cover_page_name:
        return
    nav_page_name = find_existing_nav_toc(container)
    if not nav_page_name:
        return
    for elem in container.parsed(nav_page_name).xpath('//*[@data-calibre-removed-titlepage]'):
        elem.attrib.pop('data-calibre-removed-titlepage')
        elem.set('href', container.name_to_href(cover_page_name, nav_page_name))
    container.dirty(nav_page_name)
def find_cover_image_in_page(container, cover_page):
    '''If *cover_page* consists of nothing but a single image (plain <img>
    or SVG <image>), return that image's canonical name, else None.'''
    root = container.parsed(cover_page)
    body = XPath('//h:body')(root)
    if len(body) != 1:
        return
    body = body[0]
    images = []
    for img in XPath('descendant::h:img[@src]|descendant::svg:svg/descendant::svg:image')(body):
        href = img.get('src') or img.get(XLINK('href'))
        if href:
            name = container.href_to_name(href, base=cover_page)
            images.append(name)
    text = re.sub(r'\s+', '', xml2text(body))
    if text or len(images) > 1:
        # Document has more content than a single image
        return
    if images:
        return images[0]
def clean_opf(container):
    '''Remove all references to covers from the OPF. A generator that yields
    the names of the items that used to be marked as covers, so the caller
    can decide whether to delete them.'''
    manifest_id_map = container.manifest_id_map
    # EPUB 2 style cover marker: <meta name="cover" content="manifest-id"/>
    for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'):
        name = manifest_id_map.get(meta.get('content', None), None)
        container.remove_from_xml(meta)
        if name and name in container.name_path_map:
            yield name
    # <guide> references whose type marks them as covers
    gtm = container.guide_type_map
    for ref in container.opf_xpath('//opf:guide/opf:reference[@type]'):
        typ = ref.get('type', '')
        if typ.lower() in COVER_TYPES:
            container.remove_from_xml(ref)
            name = gtm.get(typ, None)
            if name and name in container.name_path_map:
                yield name
    ver = container.opf_version_parsed
    if ver.major > 2:
        # EPUB 3 cover-image / calibre:title-page manifest properties
        removed_names = container.apply_unique_properties(None, 'cover-image', 'calibre:title-page')[0]
        for name in removed_names:
            yield name
    container.dirty(container.opf_name)
def create_epub_cover(container, cover_path, existing_image, options=None):
    '''Create a cover in the book: a raster image plus a titlepage XHTML
    wrapper, registered in the manifest, spine and (for EPUB 2) guide.

    :param cover_path: Path to the cover image file, or a callable invoked as
        ``cover_path('write_image', dest)`` to write the image data.
    :param existing_image: Name of an image already present in the book to
        reuse as the cover, or falsy to import the image at cover_path.
    :param options: Optional dict overriding the EPUB Output defaults
        (keys: keep_aspect, no_svg, template).
    :return: (raster_cover_name, titlepage_name)
    '''
    from calibre.ebooks.conversion.config import load_defaults
    from calibre.ebooks.oeb.transforms.cover import CoverManager
    try:
        ext = cover_path.rpartition('.')[-1].lower()
    except Exception:
        # cover_path may be a callable; fall back to a generic extension
        ext = 'jpeg'
    cname, tname = 'cover.' + ext, 'titlepage.xhtml'
    recommended_folders = get_recommended_folders(container, (cname, tname))
    if existing_image:
        # Reuse the image already in the book; look up its manifest entry
        raster_cover = existing_image
        manifest_id = {v:k for k, v in iteritems(container.manifest_id_map)}[existing_image]
        raster_cover_item = container.opf_xpath('//opf:manifest/*[@id="%s"]' % manifest_id)[0]
    else:
        # Import the image data into a newly generated manifest item
        folder = recommended_folders[cname]
        if folder:
            cname = folder + '/' + cname
        raster_cover_item = container.generate_item(cname, id_prefix='cover')
        raster_cover = container.href_to_name(raster_cover_item.get('href'), container.opf_name)

        with container.open(raster_cover, 'wb') as dest:
            if callable(cover_path):
                cover_path('write_image', dest)
            else:
                with open(cover_path, 'rb') as src:
                    shutil.copyfileobj(src, dest)
    if options is None:
        # No explicit options: honor the user's EPUB Output conversion defaults
        opts = load_defaults('epub_output')
        keep_aspect = opts.get('preserve_cover_aspect_ratio', False)
        no_svg = opts.get('no_svg_cover', False)
    else:
        keep_aspect = options.get('keep_aspect', False)
        no_svg = options.get('no_svg', False)
    if no_svg:
        # %% survives the later templ % href substitution as a literal %
        style = 'style="height: 100%%"'
        templ = CoverManager.NONSVG_TEMPLATE.replace('__style__', style)
        has_svg = False
    else:
        if callable(cover_path):
            templ = (options or {}).get('template', CoverManager.SVG_TEMPLATE)
            has_svg = 'xlink:href' in templ
        else:
            # Size the SVG viewbox from the actual image dimensions
            width, height = 600, 800
            has_svg = True
            try:
                if existing_image:
                    width, height = identify(container.raw_data(existing_image, decode=False))[1:]
                else:
                    with open(cover_path, 'rb') as csrc:
                        width, height = identify(csrc)[1:]
            except:
                container.log.exception("Failed to get width and height of cover")
            ar = 'xMidYMid meet' if keep_aspect else 'none'
            templ = CoverManager.SVG_TEMPLATE.replace('__ar__', ar)
            templ = templ.replace('__viewbox__', '0 0 %d %d'%(width, height))
            templ = templ.replace('__width__', str(width))
            templ = templ.replace('__height__', str(height))
    # Generate the titlepage wrapper document pointing at the raster cover
    folder = recommended_folders[tname]
    if folder:
        tname = folder + '/' + tname
    titlepage_item = container.generate_item(tname, id_prefix='titlepage')
    titlepage = container.href_to_name(titlepage_item.get('href'),
                                       container.opf_name)
    raw = templ % container.name_to_href(raster_cover, titlepage)
    with container.open(titlepage, 'wb') as f:
        if not isinstance(raw, bytes):
            raw = raw.encode('utf-8')
        f.write(raw)
    # We have to make sure the raster cover item has id="cover" for the moron
    # that wrote the Nook firmware
    if raster_cover_item.get('id') != 'cover':
        from calibre.ebooks.oeb.base import uuid_id
        newid = uuid_id()
        # Free up the id "cover" by renaming whatever currently holds it
        for item in container.opf_xpath('//*[@id="cover"]'):
            item.set('id', newid)
        for item in container.opf_xpath('//*[@idref="cover"]'):
            item.set('idref', newid)
        raster_cover_item.set('id', 'cover')
    # Titlepage becomes the first spine item
    spine = container.opf_xpath('//opf:spine')[0]
    ref = spine.makeelement(OPF('itemref'), idref=titlepage_item.get('id'))
    container.insert_into_xml(spine, ref, index=0)
    ver = container.opf_version_parsed
    if ver.major < 3:
        # EPUB 2: mark the cover via <guide> and the <meta name="cover"> tag
        guide = container.opf_get_or_create('guide')
        container.insert_into_xml(guide, guide.makeelement(
            OPF('reference'), type='cover', title=_('Cover'),
            href=container.name_to_href(titlepage, base=container.opf_name)))
        metadata = container.opf_get_or_create('metadata')
        meta = metadata.makeelement(OPF('meta'), name='cover')
        meta.set('content', raster_cover_item.get('id'))
        container.insert_into_xml(metadata, meta)
    else:
        # EPUB 3: mark the cover via manifest properties
        container.apply_unique_properties(raster_cover, 'cover-image')
        container.apply_unique_properties(titlepage, 'calibre:title-page')
        if has_svg:
            container.add_properties(titlepage, 'svg')
    return raster_cover, titlepage
def remove_cover_image_in_page(container, page, cover_images):
    '''Delete the first <img> in page whose target is one of cover_images.'''
    for img in container.parsed(page).xpath('//*[local-name()="img" and @src]'):
        target = container.href_to_name(img.get('src'), page)
        if target not in cover_images:
            continue
        img.getparent().remove(img)
        break
def has_epub_cover(container):
    '''True iff the book has any recognisable cover: a cover image, a marked
    cover page, or a first spine item that simply wraps an image.'''
    if find_cover_image(container) or find_cover_page(container):
        return True
    spine = tuple(container.spine_items)
    if spine:
        first = container.abspath_to_name(spine[0])
        if find_cover_image_in_page(container, first) is not None:
            return True
    return False
def set_epub_cover(container, cover_path, report, options=None, image_callback=None):
    '''Set the cover of the book, removing any existing cover pages/images
    and inserting a freshly generated cover (see create_epub_cover).

    :param report: Callable used to report progress messages to the user.
    :param options: Optional dict; 'existing_image' True means cover_path is
        the name of an image already inside the book.
    :param image_callback: Called once with (old cover image, old wrapped
        image) so callers can track the replaced names.
    :return: (raster_cover_name, titlepage_name)
    '''
    existing_image = options is not None and options.get('existing_image', False)
    if existing_image:
        existing_image = cover_path
    cover_image = find_cover_image(container)
    cover_page = find_cover_page(container)
    wrapped_image = extra_cover_page = None
    updated = False
    log = container.log
    possible_removals = set(clean_opf(container))
    # Deliberate no-op reference: the names are collected (the generator must
    # be drained for its side effects) but not yet acted upon, see TODO below.
    possible_removals
    # TODO: Handle possible_removals and also iterate over links in the removed
    # pages and handle possibly removing stylesheets referred to by them.
    image_callback_called = False
    spine_items = tuple(container.spine_items)
    if cover_page is None and spine_items:
        # Check if the first item in the spine is a simple cover wrapper
        candidate = container.abspath_to_name(spine_items[0])
        if find_cover_image_in_page(container, candidate) is not None:
            cover_page = candidate
    if cover_page is not None:
        log('Found existing cover page')
        wrapped_image = find_cover_image_in_page(container, cover_page)
        if len(spine_items) > 1:
            # Look for an extra cover page
            c = container.abspath_to_name(spine_items[1])
            if c != cover_page:
                candidate = find_cover_image_in_page(container, c)
                if candidate and candidate in {wrapped_image, cover_image}:
                    log('Found an extra cover page that is a simple wrapper, removing it')
                    # This page has only a single image and that image is the
                    # cover image, remove it.
                    container.remove_item(c)
                    extra_cover_page = c
                    spine_items = spine_items[:1] + spine_items[2:]
                elif candidate is None:
                    # Remove the cover image if it is the first image in this
                    # page
                    remove_cover_image_in_page(container, c, {wrapped_image,
                                                      cover_image})
    if wrapped_image is not None:
        # The cover page is a simple wrapper around a single cover image,
        # we can remove it safely.
        log(f'Existing cover page {cover_page} is a simple wrapper, removing it')
        container.remove_item(cover_page)
        if wrapped_image != existing_image:
            if image_callback is not None and not image_callback_called:
                image_callback(cover_image, wrapped_image)
                image_callback_called = True
            container.remove_item(wrapped_image)
        updated = True
    if image_callback is not None and not image_callback_called:
        image_callback_called = True
        image_callback(cover_image, wrapped_image)
    if cover_image and cover_image != wrapped_image:
        # Remove the old cover image
        if cover_image != existing_image:
            container.remove_item(cover_image)
    # Insert the new cover
    raster_cover, titlepage = create_epub_cover(container, cover_path, existing_image, options=options)
    report(_('Cover updated') if updated else _('Cover inserted'))
    # Replace links to the old cover image/cover page
    link_sub = {s:d for s, d in iteritems({
        cover_page:titlepage, wrapped_image:raster_cover,
        cover_image:raster_cover, extra_cover_page:titlepage})
        if s is not None and s != d}
    if link_sub:
        replace_links(container, link_sub, frag_map=lambda x, y:None)
    return raster_cover, titlepage
| 21,607 | Python | .py | 455 | 38.916484 | 109 | 0.639431 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,382 | opf.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/opf.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from lxml import etree
from calibre.ebooks.oeb.polish.container import OPF_NAMESPACES
from calibre.utils.localization import canonicalize_lang
def get_book_language(container):
    '''Return the canonicalized language code from the first usable
    <dc:language> entry in the OPF, or None if there is none.'''
    for lang_elem in container.opf_xpath('//dc:language'):
        raw = lang_elem.text
        if not raw:
            continue
        # Some books stuff multiple comma separated codes into one element
        code = canonicalize_lang(raw.split(',')[0].strip())
        if code:
            return code
def set_guide_item(container, item_type, title, name, frag=None):
    '''Create, update or remove the <guide> reference of type item_type.
    Passing a falsy name removes any existing references of that type.'''
    ref_tag = '{%s}reference' % OPF_NAMESPACES['opf']
    href = None
    if name:
        href = container.name_to_href(name, container.opf_name)
        if frag:
            href += '#' + frag
    guides = container.opf_xpath('//opf:guide')
    if not guides and href:
        # No <guide> element yet; create one to hold the new reference
        g = container.opf.makeelement('{%s}guide' % OPF_NAMESPACES['opf'], nsmap={'opf':OPF_NAMESPACES['opf']})
        container.insert_into_xml(container.opf, g)
        guides = [g]
    for guide in guides:
        matches = [child for child in guide.iterchildren(etree.Element)
                   if child.tag == ref_tag and child.get('type', '').lower() == item_type.lower()]
        if not matches and href:
            r = guide.makeelement(ref_tag, type=item_type, nsmap={'opf':OPF_NAMESPACES['opf']})
            container.insert_into_xml(guide, r)
            matches.append(r)
        for m in matches:
            if href:
                m.set('title', title)
                m.set('href', href)
                m.set('type', item_type)
            else:
                container.remove_from_xml(m)
    container.dirty(container.opf_name)
| 1,698 | Python | .py | 40 | 33.875 | 111 | 0.608379 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,383 | hyphenation.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/hyphenation.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
from calibre.ebooks.oeb.base import OEB_DOCS
from polyglot.builtins import iteritems
def add_soft_hyphens(container, report=None):
    '''Insert soft hyphens into every HTML document in the book, using the
    book's language for hyphenation rules.'''
    from calibre.utils.hyphenation.hyphenate import add_soft_hyphens_to_html
    for name, media_type in iteritems(container.mime_map):
        if media_type in OEB_DOCS:
            add_soft_hyphens_to_html(container.parsed(name), container.mi.language)
            container.dirty(name)
    if report is not None:
        report(_('Soft hyphens added'))
def remove_soft_hyphens(container, report=None):
    '''Strip all soft hyphens from every HTML document in the book.'''
    from calibre.utils.hyphenation.hyphenate import remove_soft_hyphens_from_html
    for name, media_type in iteritems(container.mime_map):
        if media_type in OEB_DOCS:
            remove_soft_hyphens_from_html(container.parsed(name))
            container.dirty(name)
    if report is not None:
        report(_('Soft hyphens removed'))
| 983 | Python | .py | 22 | 38.363636 | 81 | 0.715481 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,384 | utils.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/utils.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import re
from bisect import bisect
from calibre import guess_type as _guess_type
from calibre import replace_entities
from calibre.utils.icu import upper as icu_upper
# HTML tag names that are rendered as block-level boxes by default; used to
# decide where block boundaries fall when processing markup.
BLOCK_TAG_NAMES = frozenset((
    'address', 'article', 'aside', 'blockquote', 'center', 'dir', 'fieldset',
    'isindex', 'menu', 'noframes', 'hgroup', 'noscript', 'pre', 'section',
    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'p', 'div', 'dd', 'dl', 'ul',
    'ol', 'li', 'body', 'td', 'th'))
def guess_type(x):
    '''Guess the MIME type for the file name x, falling back to a generic
    binary type when it cannot be determined.'''
    mt = _guess_type(x)[0]
    return mt or 'application/octet-stream'
# All font mimetypes seen in e-books, covering both the registered font/*
# types and the various legacy application/* aliases
OEB_FONTS = frozenset({
    'font/otf',
    'font/woff',
    'font/woff2',
    'font/ttf',
    'application/x-font-ttf',
    'application/x-font-otf',
    'application/font-sfnt',
    'application/vnd.ms-opentype',
    'application/x-font-truetype',
})
def adjust_mime_for_epub(filename='', mime='', opf_version=(2, 0)):
    '''Return the media-type to use for the given file inside an EPUB with the
    given OPF version, normalizing the many font mimetype aliases to whatever
    the target EPUB version (and epubcheck) expects.'''
    mime = mime or guess_type(filename)
    if mime == 'text/html':
        # epubcheck complains if the mimetype for text documents is set to
        # text/html in EPUB 2 books. Sigh.
        return 'application/xhtml+xml'
    if mime not in OEB_FONTS:
        return mime
    # First collapse all aliases onto the canonical font/* types
    if 'ttf' in mime or 'truetype' in mime:
        mime = 'font/ttf'
    elif 'otf' in mime or 'opentype' in mime:
        mime = 'font/otf'
    elif mime == 'application/font-sfnt':
        mime = 'font/otf' if filename.lower().endswith('.otf') else 'font/ttf'
    elif 'woff2' in mime:
        mime = 'font/woff2'
    elif 'woff' in mime:
        mime = 'font/woff'
    # Then map the canonical type to what the target OPF version wants
    version = tuple(opf_version[:2])
    if version == (3, 0):
        remap = {
            'font/ttf': 'application/vnd.ms-opentype',  # needed by the execrable epubchek
            'font/otf': 'application/vnd.ms-opentype',
            'font/woff': 'application/font-woff'}
    elif version == (3, 1):
        remap = {
            'font/ttf': 'application/font-sfnt',
            'font/otf': 'application/font-sfnt',
            'font/woff': 'application/font-woff'}
    elif version < (3, 0):
        remap = {
            'font/ttf': 'application/x-font-truetype',
            'font/otf': 'application/vnd.ms-opentype',
            'font/woff': 'application/font-woff'}
    else:
        remap = {}
    return remap.get(mime, mime)
def setup_css_parser_serialization(tab_width=2):
    '''Configure css_parser's global serializer preferences to the style
    used when polishing books.'''
    import css_parser
    prefs = css_parser.ser.prefs
    prefs.indent = ' ' * tab_width
    prefs.indentClosingBrace = False
    prefs.omitLastSemicolon = False
    prefs.formatUnknownAtRules = False  # True breaks @supports rules
def actual_case_for_name(container, name):
    '''Return name with each path component spelled with the case it actually
    has on the (possibly case-insensitive) filesystem.'''
    from calibre.utils.filenames import samefile
    if not container.exists(name):
        raise ValueError('Cannot get actual case for %s as it does not exist' % name)
    corrected = []
    for part in name.split('/'):
        partial = '/'.join(corrected + [part])
        path = container.name_to_abspath(partial)
        parent = os.path.dirname(path)
        siblings = {os.path.join(parent, entry) for entry in os.listdir(parent)}
        if part in siblings:
            corrected.append(part)
            continue
        for candidate in siblings:
            # samefile matches even when only the case differs
            if samefile(candidate, path):
                corrected.append(os.path.basename(candidate))
                break
        else:
            raise RuntimeError('Something bad happened')
    return '/'.join(corrected)
def corrected_case_for_name(container, name):
    '''Map name to an existing name in the container that differs only in
    case, correcting each path component in turn. Returns None when no such
    name exists.'''
    corrected = []
    for part in name.split('/'):
        partial = '/'.join(corrected + [part])
        if container.exists(partial):
            corrected.append(part)
            continue
        try:
            candidates = set(os.listdir(os.path.dirname(container.name_to_abspath(partial))))
        except OSError:
            return None  # one of the non-terminal components of name is a file instead of a directory
        for candidate in candidates:
            if candidate.lower() == part.lower():
                corrected.append(candidate)
                break
        else:
            return None
    return '/'.join(corrected)
class PositionFinder:
    '''Map a character offset in a string (or bytes) to a (line, column)
    pair: 1-based line number, 0-based offset within that line.'''

    def __init__(self, raw):
        # Positions of the first character of every line after the first
        pat = br'\n' if isinstance(raw, bytes) else r'\n'
        self.new_lines = tuple(m.start() + 1 for m in re.finditer(pat, raw))

    def __call__(self, pos):
        '''Return (line_number, offset_in_line) for the character at pos.'''
        # bisect gives the number of line starts <= pos, i.e. the 0-based line
        lnum = bisect(self.new_lines, pos)
        if lnum:
            offset = pos - self.new_lines[lnum - 1]
        else:
            # pos lies on the first line. Previously this case indexed
            # new_lines[-1] (the LAST line start, via negative indexing)
            # whenever the text contained any newlines, reporting the
            # distance to the end of the text instead of the real column.
            offset = pos
        return (lnum + 1, offset)
class CommentFinder:
    '''Answer whether a character offset in raw falls inside a comment,
    where comments are matched by pat (CSS block comments by default).'''

    def __init__(self, raw, pat=r'(?s)/\*.*?\*/'):
        spans = [m.span() for m in re.finditer(pat, raw)]
        self.starts = [s for s, e in spans]
        self.ends = [e for s, e in spans]

    def __call__(self, offset):
        '''True iff offset lies within one of the matched comment spans.'''
        if not self.starts:
            return False
        i = bisect(self.starts, offset) - 1
        return i >= 0 and self.starts[i] <= offset <= self.ends[i]
def link_stylesheets(container, names, sheets, remove=False, mtype='text/css'):
    '''Ensure each document in names links to all the stylesheets in sheets.

    :param remove: When True, first remove all existing stylesheet links
        from each document.
    :return: The set of document names that were changed.
    '''
    from calibre.ebooks.oeb.base import XHTML, XPath
    changed_names = set()
    snames = set(sheets)
    lp = XPath('//h:link[@href]')
    hp = XPath('//h:head')
    for name in names:
        root = container.parsed(name)
        if remove:
            for link in lp(root):
                # A missing/empty type attribute is treated as mtype
                if (link.get('type', mtype) or mtype) == mtype:
                    container.remove_from_xml(link)
                    changed_names.add(name)
                    container.dirty(name)
        # Stylesheets this document already links to
        existing = {container.href_to_name(l.get('href'), name) for l in lp(root) if (l.get('type', mtype) or mtype) == mtype}
        extra = snames - existing
        if extra:
            changed_names.add(name)
            try:
                parent = hp(root)[0]
            except (TypeError, IndexError):
                # No <head>; create one as the first child of the root
                parent = root.makeelement(XHTML('head'))
                container.insert_into_xml(root, parent, index=0)
            # Iterate sheets (not extra) to preserve the caller's ordering
            for sheet in sheets:
                if sheet in extra:
                    container.insert_into_xml(
                        parent, parent.makeelement(XHTML('link'), rel='stylesheet', type=mtype,
                                                   href=container.name_to_href(sheet, name)))
            container.dirty(name)
    return changed_names
def lead_text(top_elem, num_words=10):
    ''' Return the leading text contained in top_elem (including descendants)
    up to a maximum of num_words words. More efficient than using
    etree.tostring(method='text') as it does not have to serialize the entire
    sub-tree rooted at top_elem.'''
    ws = re.compile(r'\s+', flags=re.UNICODE)
    collected = []

    def add_words(elem, attr):
        text = getattr(elem, attr)
        if text:
            collected.extend(w for w in ws.split(text) if w)

    # Depth-first walk in document order, stopping early once enough words
    # have been seen; the tail of top_elem itself is deliberately excluded.
    pending = [(top_elem, 'text')]
    while pending and len(collected) < num_words:
        elem, attr = pending.pop()
        add_words(elem, attr)
        if attr == 'text':
            if elem is not top_elem:
                pending.append((elem, 'tail'))
            pending.extend((child, 'text') for child in reversed(list(elem.iterchildren('*'))))
    return ' '.join(collected[:num_words])
def parse_css(data, fname='<string>', is_declaration=False, decode=None, log_level=None, css_preprocessor=None):
    '''Parse CSS source with css_parser, without validation. Returns a
    CSSStyleDeclaration when is_declaration is True, otherwise a
    CSSStyleSheet. Bytes input is decoded (UTF-8 by default, or via the
    decode callable) and css_preprocessor, if given, transforms the text
    before parsing.'''
    if log_level is None:
        import logging
        log_level = logging.WARNING
    from css_parser import CSSParser, log
    from calibre.ebooks.oeb.base import _css_logger
    log.setLevel(log_level)
    log.raiseExceptions = False
    raw = data or ''
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8') if decode is None else decode(raw)
    if css_preprocessor is not None:
        raw = css_preprocessor(raw)
    parser = CSSParser(loglevel=log_level,
                       # We dont care about @import rules
                       fetcher=lambda x: (None, None), log=_css_logger)
    if is_declaration:
        return parser.parseStyle(raw, validate=False)
    return parser.parseString(raw, href=fname, validate=False)
def handle_entities(text, func):
    '''Resolve HTML entities in text, then apply func to the result.'''
    resolved = replace_entities(text)
    return func(resolved)
def apply_func_to_match_groups(match, func=icu_upper, handle_entities=handle_entities):
    '''Apply the specified function to individual groups in the match object (the result of re.search() or
    the whole match if no groups were defined. Returns the replaced string.'''
    found_groups = False
    i = 0
    # parts accumulates alternating untouched text and transformed group text;
    # pos tracks how far into match.string has been copied so far
    parts, pos = [], match.start()
    def f(text):
        return handle_entities(text, func)
    while True:
        i += 1
        try:
            start, end = match.span(i)
        except IndexError:
            # Ran out of groups
            break
        found_groups = True
        # span() is (-1, -1) for groups that did not participate in the match
        if start > -1:
            parts.append(match.string[pos:start])
            parts.append(f(match.string[start:end]))
            pos = end
    if not found_groups:
        # No groups defined: transform the entire match
        return f(match.group())
    # Copy any trailing text after the last participating group
    parts.append(match.string[pos:match.end()])
    return ''.join(parts)
def apply_func_to_html_text(match, func=icu_upper, handle_entities=handle_entities):
    ''' Apply the specified function only to text between HTML tag definitions. '''
    def transform(text):
        return handle_entities(text, func)
    # Capturing split keeps the tags themselves in the result list
    pieces = re.split(r'(<[^>]+>)', match.group())
    return ''.join(p if p.startswith('<') else transform(p) for p in pieces)
def extract(elem):
    ''' Remove an element from the tree, keeping elem.tail '''
    parent = elem.getparent()
    if parent is None:
        return
    idx = parent.index(elem)
    parent.remove(elem)
    if not elem.tail:
        return
    # Re-attach the tail text to the preceding sibling, or to the parent's
    # text when the removed element was the first child
    if idx > 0:
        prev = parent[idx - 1]
        prev.tail = (prev.tail or '') + elem.tail
    else:
        parent.text = (parent.text or '') + elem.tail
| 9,930 | Python | .py | 249 | 31.365462 | 126 | 0.592301 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,385 | create.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/create.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import sys
from lxml import etree
from calibre import CurrentDir, prepare_string_for_xml
from calibre.ebooks.metadata import authors_to_string
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ebooks.oeb.base import serialize
from calibre.ebooks.oeb.polish.container import OPF_NAMESPACES, Container, opf_to_azw3
from calibre.ebooks.oeb.polish.parsing import parse
from calibre.ebooks.oeb.polish.pretty import pretty_html_tree, pretty_xml_tree
from calibre.ebooks.oeb.polish.toc import TOC, create_ncx
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.localization import lang_as_iso639_1
from calibre.utils.logging import DevNull
from calibre.utils.resources import get_path as P
from calibre.utils.zipfile import ZIP_STORED, ZipFile
from polyglot.builtins import as_bytes
# Formats in which create_book() can produce an empty skeleton book
valid_empty_formats = {'epub', 'txt', 'docx', 'azw3', 'md'}
def create_toc(mi, opf, html_name, lang):
    '''Build a minimal NCX table of contents with a single 'Start' entry
    pointing at html_name, reusing the book uuid from the OPF when present.'''
    uuid = ''
    for elem in opf.xpath('//*[@id="uuid_id"]'):
        uuid = elem.text
    toc = TOC()
    toc.add(_('Start'), html_name)
    return create_ncx(toc, lambda x: x, mi.title, lang, uuid)
def create_book(mi, path, fmt='epub', opf_name='metadata.opf', html_name='start.xhtml', toc_name='toc.ncx'):
    ''' Create an empty book in the specified format at the specified location.

    :param mi: Metadata object supplying title/authors/language etc.
    :param path: Filesystem path at which the book file is created.
    :param fmt: One of valid_empty_formats ('epub', 'txt', 'docx', 'azw3', 'md').
    :raises ValueError: if fmt is not a supported empty-book format.
    '''
    if fmt not in valid_empty_formats:
        raise ValueError('Cannot create empty book in the %s format' % fmt)
    # Plain text: just the title (if any) as the file contents
    if fmt == 'txt':
        with open(path, 'wb') as f:
            if not mi.is_null('title'):
                f.write(as_bytes(mi.title))
        return
    # Markdown: title as a level-1 heading
    if fmt == 'md':
        with open(path, 'w', encoding='utf-8') as f:
            if not mi.is_null('title'):
                print('#', mi.title, file=f)
        return
    # DOCX: delegate to the DOCX output writer with default conversion options
    if fmt == 'docx':
        from calibre.ebooks.conversion.plumber import Plumber
        from calibre.ebooks.docx.writer.container import DOCX
        from calibre.utils.logging import default_log
        p = Plumber('a.docx', 'b.docx', default_log)
        p.setup_options()
        # Use the word default of one inch page margins
        for x in 'left right top bottom'.split():
            setattr(p.opts, 'margin_' + x, 72)
        DOCX(p.opts, default_log).write(path, mi, create_empty_document=True)
        return
    # EPUB/AZW3: build OPF + single XHTML page + NCX table of contents
    path = os.path.abspath(path)
    lang = 'und'
    opf = metadata_to_opf(mi, as_string=False)
    # Pick the first non-empty language from the generated OPF metadata
    for l in opf.xpath('//*[local-name()="language"]'):
        if l.text:
            lang = l.text
            break
    lang = lang_as_iso639_1(lang) or lang
    # Add manifest (start page + ncx) and spine entries to the OPF
    opfns = OPF_NAMESPACES['opf']
    m = opf.makeelement('{%s}manifest' % opfns)
    opf.insert(1, m)
    i = m.makeelement('{%s}item' % opfns, href=html_name, id='start')
    i.set('media-type', guess_type('a.xhtml'))
    m.append(i)
    i = m.makeelement('{%s}item' % opfns, href=toc_name, id='ncx')
    i.set('media-type', guess_type(toc_name))
    m.append(i)
    s = opf.makeelement('{%s}spine' % opfns, toc="ncx")
    opf.insert(2, s)
    i = s.makeelement('{%s}itemref' % opfns, idref='start')
    s.append(i)
    # OCF container.xml pointing at the OPF (EPUB only)
    CONTAINER = '''\
<?xml version="1.0"?>
<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
   <rootfiles>
      <rootfile full-path="{}" media-type="application/oebps-package+xml"/>
   </rootfiles>
</container>
    '''.format(prepare_string_for_xml(opf_name, True)).encode('utf-8')
    # Fill the start-page template with language, title and authors
    HTML = P('templates/new_book.html', data=True).decode('utf-8').replace(
        '_LANGUAGE_', prepare_string_for_xml(lang, True)
    ).replace(
        '_TITLE_', prepare_string_for_xml(mi.title)
    ).replace(
        '_AUTHORS_', prepare_string_for_xml(authors_to_string(mi.authors))
    ).encode('utf-8')
    h = parse(HTML)
    pretty_html_tree(None, h)
    HTML = serialize(h, 'text/html')
    ncx = etree.tostring(create_toc(mi, opf, html_name, lang), encoding='utf-8', xml_declaration=True, pretty_print=True)
    pretty_xml_tree(opf)
    opf = etree.tostring(opf, encoding='utf-8', xml_declaration=True, pretty_print=True)
    if fmt == 'azw3':
        # Write the parts to a temp dir and convert the OPF bundle to AZW3
        with TemporaryDirectory('create-azw3') as tdir, CurrentDir(tdir):
            for name, data in ((opf_name, opf), (html_name, HTML), (toc_name, ncx)):
                with open(name, 'wb') as f:
                    f.write(data)
            c = Container(os.path.dirname(os.path.abspath(opf_name)), opf_name, DevNull())
            opf_to_azw3(opf_name, path, c)
    else:
        # EPUB: mimetype entry must be first and stored uncompressed
        with ZipFile(path, 'w', compression=ZIP_STORED) as zf:
            zf.writestr('mimetype', b'application/epub+zip', compression=ZIP_STORED)
            zf.writestr('META-INF/', b'', 0o755)
            zf.writestr('META-INF/container.xml', CONTAINER)
            zf.writestr(opf_name, opf)
            zf.writestr(html_name, HTML)
            zf.writestr(toc_name, ncx)
if __name__ == '__main__':
    # Command line usage: python create.py /path/to/output.epub
    # Creates an empty test book in the format implied by the file extension.
    from calibre.ebooks.metadata.book.base import Metadata
    mi = Metadata('Test book', authors=('Kovid Goyal',))
    path = sys.argv[-1]
    ext = path.rpartition('.')[-1].lower()
    if ext not in valid_empty_formats:
        print('Unsupported format:', ext)
        raise SystemExit(1)
    create_book(mi, path, fmt=ext)
| 5,316 | Python | .py | 120 | 37.991667 | 121 | 0.648852 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,386 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 152 | Python | .py | 4 | 35.75 | 61 | 0.678322 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,387 | fonts.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/fonts.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.fonts3 import parse_font, parse_font_family, serialize_font, serialize_font_family
from calibre.ebooks.oeb.base import css_text
from calibre.ebooks.oeb.normalize_css import normalize_font
from calibre.ebooks.oeb.polish.container import OEB_DOCS, OEB_STYLES
from polyglot.builtins import iteritems
def unquote(x):
    '''Strip one level of matching single or double quotes from x, if present.'''
    if x and len(x) > 1 and x[0] == x[-1] and x[0] in ('"', "'"):
        return x[1:-1]
    return x
def font_family_data_from_declaration(style, families):
    '''Record every font family referenced by the 'font' shorthand or the
    'font-family' property of a style declaration into families, defaulting
    newly seen families to False (referenced, not defined).'''
    fams = []
    shorthand = style.getProperty('font')
    if shorthand is not None:
        parsed = normalize_font(shorthand.propertyValue, font_family_as_list=True).get('font-family', None)
        if parsed is not None:
            fams = [unquote(x) for x in parsed]
    # An explicit font-family property wins over the shorthand
    longhand = style.getProperty('font-family')
    if longhand is not None:
        fams = parse_font_family(css_text(longhand.propertyValue))
    for fam in fams:
        families[fam] = families.get(fam, False)
def font_family_data_from_sheet(sheet, families):
    '''Record font families used or defined by a stylesheet into families.
    Families defined via @font-face are mapped to True.'''
    for rule in sheet.cssRules:
        if rule.type == rule.STYLE_RULE:
            font_family_data_from_declaration(rule.style, families)
        elif rule.type == rule.FONT_FACE_RULE:
            prop = rule.style.getProperty('font-family')
            if prop is None:
                continue
            for fam in parse_font_family(css_text(prop.propertyValue)):
                families[fam] = True
def font_family_data(container):
    '''Scan every stylesheet, <style> tag and style attribute in the book and
    return a dict mapping font family name -> True when the family is defined
    by an @font-face rule, False when it is only referenced.'''
    families = {}
    for name, media_type in iteritems(container.mime_map):
        if media_type in OEB_STYLES:
            font_family_data_from_sheet(container.parsed(name), families)
        elif media_type in OEB_DOCS:
            root = container.parsed(name)
            for style_tag in root.xpath('//*[local-name() = "style"]'):
                if style_tag.text and style_tag.get('type', 'text/css').lower() == 'text/css':
                    sheet = container.parse_css(style_tag.text)
                    font_family_data_from_sheet(sheet, families)
            for style_attr in root.xpath('//*/@style'):
                if style_attr:
                    decl = container.parse_css(style_attr, is_declaration=True)
                    font_family_data_from_declaration(decl, families)
    return families
def change_font_in_declaration(style, old_name, new_name=None):
    '''Replace old_name with new_name (or drop it when new_name is None) in
    the font-family and font properties of a style declaration. Returns True
    if anything changed.'''
    changed = False

    def substituted(fams):
        # Swap old_name for new_name, dropping falsy entries (removal case)
        return list(filter(None, (new_name if f == old_name else f for f in fams)))

    prop = style.getProperty('font-family')
    if prop is not None:
        fams = parse_font_family(css_text(prop.propertyValue))
        nfams = substituted(fams)
        if fams != nfams:
            changed = True
            if nfams:
                prop.propertyValue.cssText = serialize_font_family(nfams)
            else:
                style.removeProperty(prop.name)

    prop = style.getProperty('font')
    if prop is not None:
        vals = parse_font(css_text(prop.propertyValue))
        fams = vals.get('font-family') or []
        nfams = substituted(fams)
        if fams != nfams:
            changed = True
            vals['font-family'] = nfams
            if nfams:
                prop.propertyValue.cssText = serialize_font(vals)
            else:
                style.removeProperty(prop.name)
    return changed
def remove_embedded_font(container, sheet, rule, sheet_name):
    '''Delete an @font-face rule from sheet and remove the font file it
    references (if any) from the container.'''
    src = getattr(rule.style.getProperty('src'), 'value', None)
    if src is not None and src.startswith('url('):
        src = src[4:-1]
    sheet.cssRules.remove(rule)
    if not src:
        return
    font_name = container.href_to_name(unquote(src), sheet_name)
    if container.has_name(font_name):
        container.remove_item(font_name)
def change_font_in_sheet(container, sheet, old_name, new_name, sheet_name):
    '''Apply a font rename/removal to every rule in a stylesheet. @font-face
    rules that define old_name are dropped along with their font files.
    Returns True if the sheet was modified.'''
    changed = False
    to_remove = []
    for rule in sheet.cssRules:
        if rule.type == rule.STYLE_RULE:
            changed |= change_font_in_declaration(rule.style, old_name, new_name)
        elif rule.type == rule.FONT_FACE_RULE:
            prop = rule.style.getProperty('font-family')
            if prop is not None and old_name in set(parse_font_family(css_text(prop.propertyValue))):
                changed = True
                to_remove.append(rule)
    # Remove collected rules afterwards so iteration above is not disturbed
    for rule in reversed(to_remove):
        remove_embedded_font(container, sheet, rule, sheet_name)
    return changed
def change_font(container, old_name, new_name=None):
    '''
    Change a font family from old_name to new_name. Changes all occurrences of
    the font family in stylesheets, style tags and style attributes.
    If the old_name refers to an embedded font, it is removed. You can set
    new_name to None to remove the font family instead of changing it.
    Returns True if anything was changed.
    '''
    changed = False
    for name, mt in tuple(iteritems(container.mime_map)):
        if mt in OEB_STYLES:
            # Linked stylesheet: the parsed sheet is the cached object, so
            # mutating it and marking the file dirty is enough
            sheet = container.parsed(name)
            if change_font_in_sheet(container, sheet, old_name, new_name, name):
                container.dirty(name)
                changed = True
        elif mt in OEB_DOCS:
            root = container.parsed(name)
            for style in root.xpath('//*[local-name() = "style"]'):
                if style.text and style.get('type', 'text/css').lower() == 'text/css':
                    sheet = container.parse_css(style.text)
                    if change_font_in_sheet(container, sheet, old_name, new_name, name):
                        # Write the modified CSS back into the <style> tag.
                        # Previously this freshly parsed sheet was mutated but
                        # never serialized back, so edits to embedded
                        # stylesheets were silently lost even though the font
                        # files they referenced had already been deleted.
                        style.text = css_text(sheet)
                        container.dirty(name)
                        changed = True
            for elem in root.xpath('//*[@style]'):
                style = elem.get('style', '')
                if style:
                    style = container.parse_css(style, is_declaration=True)
                    if change_font_in_declaration(style, old_name, new_name):
                        # Serialize the declaration back into the attribute,
                        # dropping the attribute when nothing is left
                        style = css_text(style).strip().rstrip(';').strip()
                        if style:
                            elem.set('style', style)
                        else:
                            del elem.attrib['style']
                        container.dirty(name)
                        changed = True
    return changed
| 6,233 | Python | .py | 137 | 34.678832 | 95 | 0.591111 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,388 | jacket.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/jacket.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.customize.ui import output_profiles
from calibre.ebooks.conversion.config import load_defaults
from calibre.ebooks.oeb.base import OPF, XPath
from calibre.ebooks.oeb.polish.cover import find_cover_page
from calibre.ebooks.oeb.transforms.jacket import referenced_images
from calibre.ebooks.oeb.transforms.jacket import render_jacket as render
def render_jacket(container, jacket):
    '''Render a metadata jacket page for the book, embedding any images the
    jacket template references into the container. Returns the page root.'''
    mi = container.mi
    prefs = load_defaults('page_setup')
    profile_name = prefs.get('output_profile', 'default')
    profiles = {p.short_name: p for p in output_profiles()}
    root = render(mi, profiles.get(profile_name, profiles['default']))
    for img, path in referenced_images(root):
        container.log('Embedding referenced image: %s into jacket' % path)
        ext = path.rpartition('.')[-1]
        jacket_item = container.generate_item('jacket_image.'+ext, id_prefix='jacket_img')
        name = container.href_to_name(jacket_item.get('href'), container.opf_name)
        with open(path, 'rb') as f:
            container.parsed_cache[name] = f.read()
        container.commit_item(name)
        # Re-point the <img> at the embedded copy
        img.set('src', container.name_to_href(name, jacket))
    return root
def is_legacy_jacket(root):
    '''Detect a jacket page produced by older versions of calibre, which used
    calibrerescale classes on its h1/h2 headings.'''
    matches = root.xpath(
        '//*[starts-with(@class,"calibrerescale") and (local-name()="h1" or local-name()="h2")]')
    return len(matches) > 0
def is_current_jacket(root):
    '''Detect a jacket page produced by current calibre, which is marked with
    a <meta name="calibre-content" content="jacket"> tag.'''
    matches = XPath('//h:meta[@name="calibre-content" and @content="jacket"]')(root)
    return len(matches) > 0
def find_existing_jacket(container):
    '''Return the name of the first spine item that is a jacket page, or None.'''
    for item in container.spine_items:
        name = container.abspath_to_name(item)
        if container.book_type == 'azw3':
            # AZW3 has no file naming convention, so every item is parsed
            if is_current_jacket(container.parsed(name)):
                return name
        elif name.rpartition('/')[-1].startswith('jacket') and name.endswith('.xhtml'):
            root = container.parsed(name)
            if is_current_jacket(root) or is_legacy_jacket(root):
                return name
def replace_jacket(container, name):
    '''Re-render the jacket page at name from the book's current metadata.'''
    container.parsed_cache[name] = render_jacket(container, name)
    container.dirty(name)
def remove_jacket(container):
    ' Remove an existing jacket, if any. Returns False if no existing jacket was found. '
    name = find_existing_jacket(container)
    if name is None:
        return False
    remove_jacket_images(container, name)
    container.remove_item(name)
    return True
def remove_jacket_images(container, name):
    # Delete every image file referenced by the jacket page `name` that
    # actually exists in the container.
    doc = container.parsed_cache[name]
    for image in doc.xpath('//*[local-name() = "img" and @src]'):
        image_name = container.href_to_name(image.get('src'), name)
        if container.has_name(image_name):
            container.remove_item(image_name)
def add_or_replace_jacket(container):
    ''' Either create a new jacket from the book's metadata or replace an
    existing jacket. Returns True if an existing jacket was replaced. '''
    name = find_existing_jacket(container)
    found = True
    if name is None:
        # No existing jacket: create a fresh manifest item for it
        jacket_item = container.generate_item('jacket.xhtml', id_prefix='jacket')
        name = container.href_to_name(jacket_item.get('href'), container.opf_name)
        found = False
    if found:
        # Drop the old jacket's embedded images before re-rendering
        remove_jacket_images(container, name)
    replace_jacket(container, name)
    if not found:
        # Insert new jacket into spine
        index = 0
        sp = container.abspath_to_name(next(container.spine_items))
        if sp == find_cover_page(container):
            # Keep the cover page first; the jacket goes right after it
            index = 1
        itemref = container.opf.makeelement(OPF('itemref'),
            idref=jacket_item.get('id'))
        container.insert_into_xml(container.opf_xpath('//opf:spine')[0], itemref,
            index=index)
    return found
| 3,960 | Python | .py | 87 | 37.862069 | 102 | 0.656282 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,389 | cascade.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/cascade.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import re
from collections import defaultdict, namedtuple
from functools import partial
from itertools import count
from operator import itemgetter
from css_parser.css import CSSRule, CSSStyleSheet, Property
from css_selectors import INAPPROPRIATE_PSEUDO_CLASSES, Select, SelectorError
from tinycss.fonts3 import parse_font_family, serialize_font_family
from calibre import as_unicode
from calibre.ebooks.css_transform_rules import all_properties
from calibre.ebooks.oeb.base import OEB_STYLES, SVG, XHTML, css_text
from calibre.ebooks.oeb.normalize_css import DEFAULTS, normalizers
from calibre.ebooks.oeb.stylizer import INHERITED, media_ok
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems, itervalues
# Process-wide cache of the parsed user-agent stylesheet (templates/html.css)
_html_css_stylesheet = None
def html_css_stylesheet(container):
    # Parse the user-agent stylesheet once and cache it; `container` is only
    # used for its CSS parser, so the cached sheet is shared by all containers.
    global _html_css_stylesheet
    if _html_css_stylesheet is None:
        data = P('templates/html.css', data=True).decode('utf-8')
        _html_css_stylesheet = container.parse_css(data, 'user-agent.css')
    return _html_css_stylesheet
def media_allowed(media):
    # An absent or empty media query places no restriction at all.
    if not media:
        return True
    text = media.mediaText
    if not text:
        return True
    return media_ok(text)
def iterrules(container, sheet_name, rules=None, media_rule_ok=media_allowed, rule_index_counter=None, rule_type=None, importing=None):
    ''' Iterate over all style rules in the specified sheet. Import and Media rules are
    automatically resolved. Yields (rule, sheet_name, rule_number).

    :param rules: List of CSSRules or a CSSStyleSheet instance or None in which case it is read from container using sheet_name
    :param sheet_name: The name of the sheet in the container (in case of inline style sheets, the name of the html file)
    :param media_rule_ok: A function to test if a @media rule is allowed
    :param rule_index_counter: A counter object, rule numbers will be calculated by incrementing the counter.
    :param rule_type: Only yield rules of this type, where type is a string type name, see css_parser.css.CSSRule for the names (
        by default all rules are yielded)
    :param importing: Set of sheet names currently being traversed, used to break @import cycles

    :return: (CSSRule object, the name of the sheet from which it comes, rule index - a monotonically increasing number)
    '''
    rule_index_counter = rule_index_counter or count()
    if importing is None:
        importing = set()
    importing.add(sheet_name)
    # Recursive invocations share the rule counter and the in-progress import set
    riter = partial(iterrules, container, rule_index_counter=rule_index_counter, media_rule_ok=media_rule_ok, rule_type=rule_type, importing=importing)
    if rules is None:
        rules = container.parsed(sheet_name)
    if rule_type is not None:
        rule_type = getattr(CSSRule, rule_type)
    for rule in rules:
        if rule.type == CSSRule.IMPORT_RULE:
            # Resolve @import by recursing into the imported sheet, if it exists
            if media_rule_ok(rule.media):
                name = container.href_to_name(rule.href, sheet_name)
                if container.has_name(name):
                    if name in importing:
                        container.log.error(f'Recursive import of {name} from {sheet_name}, ignoring')
                    else:
                        csheet = container.parsed(name)
                        if isinstance(csheet, CSSStyleSheet):
                            yield from riter(name, rules=csheet)
        elif rule.type == CSSRule.MEDIA_RULE:
            # Flatten allowed @media blocks into the main iteration
            if media_rule_ok(rule.media):
                yield from riter(sheet_name, rules=rule.cssRules)
        elif rule_type is None or rule.type == rule_type:
            num = next(rule_index_counter)
            yield rule, sheet_name, num
    importing.discard(sheet_name)
StyleDeclaration = namedtuple('StyleDeclaration', 'index declaration pseudo_element')
Specificity = namedtuple('Specificity', 'is_style num_id num_class num_elem rule_index')
def specificity(rule_index, selector, is_style=0):
    # Build our Specificity record from the selector's (a, b, c, d)
    # specificity tuple: position 0 (style attribute flag) is replaced by
    # the caller-supplied is_style marker, and the rule index acts as the
    # final tie-breaker (document order).
    spec = selector.specificity
    return Specificity(is_style, spec[1], spec[2], spec[3], rule_index)
def iterdeclaration(decl):
    # Yield every property in decl, expanding shorthand properties (margin,
    # font, ...) into their longhand equivalents via the normalizer table.
    for prop in all_properties(decl):
        normalize = normalizers.get(prop.name)
        if normalize is None:
            yield prop
            continue
        expanded = normalize(prop.name, prop.propertyValue)
        for longhand, value in iteritems(expanded):
            yield Property(longhand, value, prop.literalpriority)
class Values(tuple):
    '''
    An immutable sequence of `css_parser.css.Value` (and subclass) objects.

    Carries two extra attributes: `sheet_name`, the canonical name relative
    to which URLs for this property should be resolved, and `is_important`,
    True if the property was declared with "!important" priority.
    '''

    def __new__(cls, pv, sheet_name=None, priority=''):
        self = super().__new__(cls, pv)
        self.sheet_name = sheet_name
        self.is_important = (priority == 'important')
        return self

    @property
    def cssText(self):
        'A single string when there is exactly one value, otherwise a tuple of strings'
        if len(self) != 1:
            return tuple(css_text(v) for v in self)
        return css_text(self[0])
def normalize_style_declaration(decl, sheet_name):
    ''' Convert a CSSStyleDeclaration into a plain dict mapping (expanded)
    property names to :class:`Values` instances tagged with `sheet_name`. '''
    ans = {}
    for prop in iterdeclaration(decl):
        if prop.name == 'font-family':
            # Needed because of https://bitbucket.org/cthedot/cssutils/issues/66/incorrect-handling-of-spaces-in-font
            prop.propertyValue.cssText = serialize_font_family(parse_font_family(css_text(prop.propertyValue)))
        ans[prop.name] = Values(prop.propertyValue, sheet_name, prop.priority)
    return ans
def resolve_declarations(decls):
    # `decls` is ordered by descending specificity. For every property name
    # seen in any declaration, pick the first !important value if one
    # exists, otherwise the value from the most specific declaration.
    all_names = set()
    for sd in decls:
        all_names.update(sd.declaration)
    resolved = {}
    for prop_name in all_names:
        chosen = None
        for sd in decls:
            val = sd.declaration.get(prop_name)
            if val is None:
                continue
            if val.is_important:
                chosen = val
                break
            if chosen is None:
                chosen = val
        resolved[prop_name] = chosen
    return resolved
def resolve_pseudo_declarations(decls):
    # Group declarations by pseudo-element (before/after/...) and run the
    # normal cascade resolution within each group.
    by_pseudo = defaultdict(list)
    for sd in decls:
        by_pseudo[sd.pseudo_element].append(sd)
    return {pseudo: resolve_declarations(group) for pseudo, group in iteritems(by_pseudo)}
def resolve_styles(container, name, select=None, sheet_callback=None):
    ''' Compute the effective CSS styles for every element in the HTML file
    `name`. Returns a property resolver (see :func:`resolve_property`), a
    pseudo-element property resolver (see :func:`resolve_pseudo_property`)
    and the Select object used for selector matching. '''
    root = container.parsed(name)
    select = select or Select(root, ignore_inappropriate_pseudo_classes=True)
    style_map = defaultdict(list)
    pseudo_style_map = defaultdict(list)
    rule_index_counter = count()
    # Detects selectors targeting pseudo-classes/elements that cannot match
    # statically (e.g. :hover, ::before); they are tracked separately
    pseudo_pat = re.compile(':{1,2}(%s)' % ('|'.join(INAPPROPRIATE_PSEUDO_CLASSES)), re.I)
    def process_sheet(sheet, sheet_name):
        # Match every style rule of the sheet against the document, recording
        # the declaration and its specificity on each matched element
        if sheet_callback is not None:
            sheet_callback(sheet, sheet_name)
        for rule, sheet_name, rule_index in iterrules(container, sheet_name, rules=sheet, rule_index_counter=rule_index_counter, rule_type='STYLE_RULE'):
            for selector in rule.selectorList:
                text = selector.selectorText
                try:
                    matches = tuple(select(text))
                except SelectorError as err:
                    container.log.error(f'Ignoring CSS rule with invalid selector: {text!r} ({as_unicode(err)})')
                    continue
                m = pseudo_pat.search(text)
                style = normalize_style_declaration(rule.style, sheet_name)
                if m is None:
                    for elem in matches:
                        style_map[elem].append(StyleDeclaration(specificity(rule_index, selector), style, None))
                else:
                    for elem in matches:
                        pseudo_style_map[elem].append(StyleDeclaration(specificity(rule_index, selector), style, m.group(1)))
    # The user-agent sheet supplies the baseline styles
    process_sheet(html_css_stylesheet(container), 'user-agent.css')
    # Then process all <style> blocks and <link>ed stylesheets in document order
    for elem in root.iterdescendants(XHTML('style'), SVG('style'), XHTML('link')):
        if elem.tag.lower().endswith('style'):
            if not elem.text:
                continue
            sheet = container.parse_css(elem.text)
            sheet_name = name
        else:
            if (elem.get('type') or 'text/css').lower() not in OEB_STYLES or \
                    (elem.get('rel') or 'stylesheet').lower() != 'stylesheet' or \
                    not media_ok(elem.get('media')):
                continue
            href = elem.get('href')
            if not href:
                continue
            sheet_name = container.href_to_name(href, name)
            if not container.has_name(sheet_name):
                continue
            sheet = container.parsed(sheet_name)
            if not isinstance(sheet, CSSStyleSheet):
                continue
        process_sheet(sheet, sheet_name)
    # Inline style="" attributes trump everything (is_style flag set to 1)
    for elem in root.xpath('//*[@style]'):
        text = elem.get('style')
        if text:
            style = container.parse_css(text, is_declaration=True)
            style_map[elem].append(StyleDeclaration(Specificity(1, 0, 0, 0, 0), normalize_style_declaration(style, name), None))
    # Sort declarations on each element by descending specificity and cascade
    for l in (style_map, pseudo_style_map):
        for x in itervalues(l):
            x.sort(key=itemgetter(0), reverse=True)
    style_map = {elem:resolve_declarations(x) for elem, x in iteritems(style_map)}
    pseudo_style_map = {elem:resolve_pseudo_declarations(x) for elem, x in iteritems(pseudo_style_map)}
    return partial(resolve_property, style_map), partial(resolve_pseudo_property, style_map, pseudo_style_map), select
# Lazily built map of CSS property name -> default Values (from DEFAULTS)
_defvals = None
def defvals():
    # Build the table of default property values on first use and cache it
    global _defvals
    if _defvals is None:
        _defvals = {k:Values(Property(k, str(val)).propertyValue) for k, val in iteritems(DEFAULTS)}
    return _defvals
def resolve_property(style_map, elem, name):
    ''' Given a `style_map` previously generated by :func:`resolve_styles()` and
    a property `name`, return the effective value of that property for the
    specified element, honouring CSS inheritance and cascading. Returns a
    :class:`Values` instance, or None when the property was never set and is
    not a known property. '''
    is_inherited = name in INHERITED
    node = elem
    while node is not None:
        declarations = style_map.get(node)
        if declarations is not None:
            value = declarations.get(name)
            if value is not None:
                return value
        if not is_inherited:
            break
        node = node.getparent()
    return defvals().get(name)
def resolve_pseudo_property(
style_map, pseudo_style_map, elem, prop, name,
abort_on_missing=False, check_if_pseudo_applies=False, check_ancestors=False
):
if check_if_pseudo_applies:
q = elem
while q is not None:
val = pseudo_style_map.get(q, {}).get(prop, {}).get(name)
if val is not None:
return True
if not check_ancestors:
break
q = q.getparent()
return False
sub_map = pseudo_style_map.get(elem)
if abort_on_missing and sub_map is None:
return None
if sub_map is not None:
prop_map = sub_map.get(prop)
if abort_on_missing and prop_map is None:
return None
if prop_map is not None:
val = prop_map.get(name)
if val is not None:
return val
if name in INHERITED:
if check_ancestors:
q = elem.getparent()
while q is not None:
val = pseudo_style_map.get(q, {}).get(prop, {}).get(name)
if val is not None:
return val
if not check_ancestors:
break
q = q.getparent()
return resolve_property(style_map, elem, name)
return defvals().get(name)
| 11,523 | Python | .py | 242 | 38.190083 | 153 | 0.640873 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,390 | css.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/css.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from collections import defaultdict
from functools import partial
from operator import itemgetter
from css_parser.css import CSSRule, CSSStyleDeclaration
from css_selectors import Select, SelectorError, SelectorSyntaxError, parse
from calibre import force_unicode
from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, XHTML, css_text
from calibre.ebooks.oeb.normalize_css import normalize_filter_css, normalizers
from calibre.ebooks.oeb.polish.pretty import pretty_script_or_style, pretty_xml_tree, serialize
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import numeric_sort_key
from calibre.utils.localization import ngettext
from polyglot.builtins import iteritems, itervalues
from polyglot.functools import lru_cache
class SelectorStatus:
    # Aggregate result of scanning CSS selectors against a document:
    # any_used is set when at least one selector matched something,
    # any_unused when at least one selector matched nothing.
    any_used: bool = False
    any_unused: bool = False
def mark_used_selectors(rules, log, select):
    # Annotate every selector in `rules` with a `calibre_used` attribute
    # recording whether it matches anything in the document wrapped by
    # `select`. Previously computed results are reused. Returns a
    # SelectorStatus summarizing the scan.
    status = SelectorStatus()
    for rule in rules:
        for selector in rule.selectorList:
            if getattr(selector, 'calibre_used', False):
                status.any_used = True
                continue
            try:
                used = select.has_matches(selector.selectorText)
            except SelectorError:
                # Cannot parse/execute this selector, be safe and assume it
                # matches something
                used = True
            selector.calibre_used = used
            if used:
                status.any_used = True
            else:
                status.any_unused = True
    return status
def get_imported_sheets(name, container, sheets, recursion_level=10, sheet=None):
    # Return the set of stylesheet names (transitively) pulled in via
    # @import from `name`. Recursion depth is bounded to guard against
    # import cycles; the sheet itself is never part of the result.
    found = set()
    if not sheet:
        sheet = sheets[name]
    for rule in sheet.cssRules.rulesOfType(CSSRule.IMPORT_RULE):
        if not rule.href:
            continue
        iname = container.href_to_name(rule.href, name)
        if iname in sheets:
            found.add(iname)
    if recursion_level > 0:
        for direct_import in tuple(found):
            found |= get_imported_sheets(direct_import, container, sheets, recursion_level=recursion_level-1)
    found.discard(name)
    return found
def merge_declarations(first, second):
    # Fold all of `second`'s properties into `first`; duplicates in `first`
    # are overwritten by the incoming property.
    copy_in = first.setProperty
    for prop in second.getProperties():
        copy_in(prop)
def merge_identical_selectors(sheet):
    ''' Merge rules that have identical selectors. The declarations of later
    rules are folded into the first rule (later values win) and the redundant
    rules are removed. Returns the number of removed rules. '''
    selector_map = defaultdict(list)
    for rule in sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE):
        selector_map[rule.selectorText].append(rule)
    remove = []
    for rule_group in itervalues(selector_map):
        if len(rule_group) > 1:
            # Keep the first rule of the group, merge the rest into it
            for i in range(1, len(rule_group)):
                merge_declarations(rule_group[0].style, rule_group[i].style)
                remove.append(rule_group[i])
    for rule in remove:
        sheet.cssRules.remove(rule)
    return len(remove)
def merge_identical_properties(sheet):
    ''' Merge rules having identical properties: the selectors of later rules
    are appended to the first rule of each group (skipping duplicates) and the
    later rules are removed. Returns the number of rules that took part in a
    merge. '''
    properties_map = defaultdict(list)

    def declaration_key(declaration):
        # Canonical, order-independent key for a set of declared properties
        return tuple(sorted(
            ((prop.name, prop.propertyValue.value) for prop in declaration.getProperties()),
            key=itemgetter(0)
        ))

    for idx, rule in enumerate(sheet.cssRules):
        if rule.type == CSSRule.STYLE_RULE:
            properties_map[declaration_key(rule.style)].append((idx, rule))
    removals = []
    num_merged = 0
    for rule_group in properties_map.values():
        if len(rule_group) < 2:
            continue
        num_merged += len(rule_group)
        # Accumulate the selectors of every later rule onto the first rule
        selectors = rule_group[0][1].selectorList
        seen = {s.selectorText for s in selectors}
        rules = iter(rule_group)
        next(rules)
        for idx, rule in rules:
            removals.append(idx)
            for s in rule.selectorList:
                q = s.selectorText
                if q not in seen:
                    seen.add(q)
                    selectors.append(s)
    # Pop in reverse so earlier indices stay valid
    for idx in sorted(removals, reverse=True):
        sheet.cssRules.pop(idx)
    return num_merged
def remove_unused_selectors_and_rules(rules_container, rules, removal_stats):
    # Drop selectors previously marked unused by mark_used_selectors(). A
    # rule whose selectors are all unused is removed wholesale; otherwise
    # only the dead selectors are deleted. Counts are accumulated into
    # removal_stats under 'rules' and 'selectors'.
    status = SelectorStatus()
    for rule in rules:
        unused = [
            idx for idx, sel in enumerate(rule.selectorList)
            if not getattr(sel, 'calibre_used', True)
        ]
        if len(unused) < len(rule.selectorList):
            status.any_used = True
        if not unused:
            continue
        status.any_unused = True
        if len(unused) == len(rule.selectorList):
            rules_container.remove(rule)
            removal_stats['rules'] += 1
        else:
            removal_stats['selectors'] += len(unused)
            for idx in reversed(unused):
                del rule.selectorList[idx]
    return status
def remove_unused_css(
    container, report=None,
    remove_unused_classes=False,
    merge_rules=False,
    merge_rules_with_identical_properties=False,
    remove_unreferenced_sheets=False,
):
    '''
    Remove all unused CSS rules from the book. An unused CSS rule is one that does not match any actual content.

    :param report: An optional callable that takes a single argument. It is called with information about the operations being performed.
    :param remove_unused_classes: If True, class attributes in the HTML that do not match any CSS rules are also removed.
    :param merge_rules: If True, rules with identical selectors are merged.
    :param merge_rules_with_identical_properties: If True, rules with identical properties are merged.
    :param remove_unreferenced_sheets: If True, stylesheets that are not referenced by any content are removed
    :return: True if anything was changed, False otherwise
    '''
    report = report or (lambda x:x)
    def safe_parse(name):
        # Returns None for items that cannot be parsed as CSS
        try:
            return container.parsed(name)
        except TypeError:
            pass
    sheets = {name:safe_parse(name) for name, mt in iteritems(container.mime_map) if mt in OEB_STYLES}
    sheets = {k:v for k, v in iteritems(sheets) if v is not None}
    num_merged = num_rules_merged = 0
    # Optional pre-passes: merge rules in all standalone stylesheets
    if merge_rules:
        for name, sheet in iteritems(sheets):
            num = merge_identical_selectors(sheet)
            if num:
                container.dirty(name)
                num_merged += num
    if merge_rules_with_identical_properties:
        for name, sheet in iteritems(sheets):
            num = merge_identical_properties(sheet)
            if num:
                container.dirty(name)
                num_rules_merged += num
    import_map = {name:get_imported_sheets(name, container, sheets) for name in sheets}
    unreferenced_sheets = set(sheets)
    if remove_unused_classes:
        class_map = {name:{icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)} for name, sheet in iteritems(sheets)}
    style_rules = {name:tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE)) for name, sheet in iteritems(sheets)}
    removal_stats = {'rules': 0, 'selectors': 0}
    num_of_removed_classes = 0
    # Scan every HTML file, marking which selectors actually match content
    for name, mt in iteritems(container.mime_map):
        if mt not in OEB_DOCS:
            continue
        root = container.parsed(name)
        select = Select(root, ignore_inappropriate_pseudo_classes=True)
        used_classes = set()
        # Inline <style> blocks: merge/mark/remove their rules directly
        for style in root.xpath('//*[local-name()="style"]'):
            if style.get('type', 'text/css') == 'text/css' and style.text:
                sheet = container.parse_css(style.text)
                if merge_rules:
                    num = merge_identical_selectors(sheet)
                    if num:
                        num_merged += num
                        container.dirty(name)
                if merge_rules_with_identical_properties:
                    num = merge_identical_properties(sheet)
                    if num:
                        num_rules_merged += num
                        container.dirty(name)
                if remove_unused_classes:
                    used_classes |= {icu_lower(x) for x in classes_in_rule_list(sheet.cssRules)}
                imports = get_imported_sheets(name, container, sheets, sheet=sheet)
                for imported_sheet in imports:
                    unreferenced_sheets.discard(imported_sheet)
                    mark_used_selectors(style_rules[imported_sheet], container.log, select)
                    if remove_unused_classes:
                        used_classes |= class_map[imported_sheet]
                rules = tuple(sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE))
                if mark_used_selectors(rules, container.log, select).any_unused:
                    remove_unused_selectors_and_rules(sheet.cssRules, rules, removal_stats)
                    style.text = force_unicode(sheet.cssText, 'utf-8')
                    pretty_script_or_style(container, style)
                    container.dirty(name)
        # Linked stylesheets (and their imports) referenced from this file
        for link in root.xpath('//*[local-name()="link" and @href]'):
            sname = container.href_to_name(link.get('href'), name)
            if sname not in sheets:
                continue
            mark_used_selectors(style_rules[sname], container.log, select)
            if remove_unused_classes:
                used_classes |= class_map[sname]
            unreferenced_sheets.discard(sname)
            for iname in import_map[sname]:
                unreferenced_sheets.discard(iname)
                mark_used_selectors(style_rules[iname], container.log, select)
                if remove_unused_classes:
                    used_classes |= class_map[iname]
        # Strip class attribute tokens that no reachable CSS rule references
        if remove_unused_classes:
            for elem in root.xpath('//*[@class]'):
                original_classes, classes = elem.get('class', '').split(), []
                for x in original_classes:
                    if icu_lower(x) in used_classes:
                        classes.append(x)
                if len(classes) != len(original_classes):
                    if classes:
                        elem.set('class', ' '.join(classes))
                    else:
                        del elem.attrib['class']
                    num_of_removed_classes += len(original_classes) - len(classes)
                    container.dirty(name)
    # Now that all documents have been scanned, delete the unused selectors
    # from every referenced standalone stylesheet
    for name, sheet in iteritems(sheets):
        if name in unreferenced_sheets:
            continue
        q = remove_unused_selectors_and_rules(sheet.cssRules, style_rules[name], removal_stats)
        if q.any_unused:
            container.dirty(name)
    num_sheets_removed = 0
    if remove_unreferenced_sheets and len(unreferenced_sheets):
        num_sheets_removed += len(unreferenced_sheets)
        for uname in unreferenced_sheets:
            container.remove_item(uname)
    num_changes = num_merged + num_of_removed_classes + num_rules_merged + removal_stats['rules'] + removal_stats['selectors'] + num_sheets_removed
    if num_changes > 0:
        if removal_stats['rules']:
            report(ngettext('Removed one unused CSS style rule', 'Removed {} unused CSS style rules',
                removal_stats['rules']).format(removal_stats['rules']))
        if removal_stats['selectors']:
            report(ngettext('Removed one unused CSS selector', 'Removed {} unused CSS selectors',
                removal_stats['selectors']).format(removal_stats['selectors']))
        if num_of_removed_classes > 0:
            report(ngettext('Removed one unused class from the HTML', 'Removed {} unused classes from the HTML',
                num_of_removed_classes).format(num_of_removed_classes))
        if num_merged > 0:
            report(ngettext('Merged one CSS style rule with identical selectors', 'Merged {} CSS style rules with identical selectors',
                num_merged).format(num_merged))
        if num_rules_merged > 0:
            report(ngettext('Merged one CSS style rule with identical properties', 'Merged {} CSS style rules with identical properties',
                num_rules_merged).format(num_rules_merged))
        if num_sheets_removed:
            report(ngettext('Removed one unreferenced stylesheet', 'Removed {} unreferenced stylesheets',
                num_sheets_removed).format(num_sheets_removed))
        if not removal_stats['rules']:
            report(_('No unused CSS style rules found'))
        if not removal_stats['selectors']:
            report(_('No unused CSS selectors found'))
        if remove_unused_classes and num_of_removed_classes == 0:
            report(_('No unused class attributes found'))
        if merge_rules and num_merged == 0:
            report(_('No style rules that could be merged found'))
        if remove_unreferenced_sheets and num_sheets_removed == 0:
            report(_('No unused stylesheets found'))
    return num_changes > 0
def filter_declaration(style, properties=()):
    ''' Remove the specified properties from the CSSStyleDeclaration `style`.
    Shorthand properties that expand to one of the removed longhands are
    dropped, with their surviving longhands re-added explicitly. Returns
    True if anything was removed. '''
    changed = False
    for prop in properties:
        if style.removeProperty(prop) != '':
            changed = True
    all_props = set(style.keys())
    for prop in style.getProperties():
        n = normalizers.get(prop.name, None)
        if n is not None:
            normalized = n(prop.name, prop.propertyValue)
            removed = properties.intersection(set(normalized))
            if removed:
                changed = True
                style.removeProperty(prop.name)
                # Re-add the longhands that were not filtered and are not
                # already explicitly declared (note: prop is rebound here)
                for prop in set(normalized) - removed - all_props:
                    style.setProperty(prop, normalized[prop])
    return changed
def filter_sheet(sheet, properties=()):
    # Remove the given properties from every style rule in `sheet`. Rules
    # left with no properties at all are deleted entirely. Returns True if
    # any rule was modified.
    from css_parser.css import CSSRule
    changed = False
    emptied = []
    for rule in sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE):
        if not filter_declaration(rule.style, properties):
            continue
        changed = True
        if rule.style.length == 0:
            emptied.append(rule)
    for rule in emptied:
        sheet.cssRules.remove(rule)
    return changed
def transform_inline_styles(container, name, transform_sheet, transform_style):
    ''' Apply `transform_sheet` to every <style> block and `transform_style`
    to every style="" attribute in the HTML file `name`. Returns True if
    anything changed. '''
    root = container.parsed(name)
    changed = False
    for style in root.xpath('//*[local-name()="style"]'):
        if style.text and (style.get('type') or 'text/css').lower() == 'text/css':
            sheet = container.parse_css(style.text)
            if transform_sheet(sheet):
                changed = True
                style.text = force_unicode(sheet.cssText, 'utf-8')
                pretty_script_or_style(container, style)
    for elem in root.xpath('//*[@style]'):
        text = elem.get('style', None)
        if text:
            style = container.parse_css(text, is_declaration=True)
            if transform_style(style):
                changed = True
                # Drop the attribute entirely when no properties remain
                if style.length == 0:
                    del elem.attrib['style']
                else:
                    elem.set('style', force_unicode(style.getCssText(separator=' '), 'utf-8'))
    return changed
def transform_css(container, transform_sheet=None, transform_style=None, names=()):
    # Apply transform_sheet to every standalone stylesheet and
    # transform_inline_styles to every HTML file; `names` restricts the
    # operation to specific files. Returns True if anything changed.
    if not names:
        wanted = OEB_STYLES | OEB_DOCS
        names = [name for name, mt in iteritems(container.mime_map) if mt in wanted]
    changed = False
    for name in names:
        mt = container.mime_map[name]
        if mt in OEB_STYLES:
            if transform_sheet(container.parsed(name)):
                container.dirty(name)
                changed = True
        elif mt in OEB_DOCS:
            if transform_inline_styles(container, name, transform_sheet, transform_style):
                container.dirty(name)
                changed = True
    return changed
def filter_css(container, properties, names=()):
    '''
    Remove the specified CSS properties from all CSS rules in the book.

    :param properties: Set of properties to remove. For example: :code:`{'font-family', 'color'}`.
    :param names: The files from which to remove the properties. Defaults to all HTML and CSS files in the book.
    '''
    props = normalize_filter_css(properties)
    sheet_filter = partial(filter_sheet, properties=props)
    style_filter = partial(filter_declaration, properties=props)
    return transform_css(container, transform_sheet=sheet_filter, transform_style=style_filter, names=names)
def _classes_in_selector(selector, classes):
for attr in ('selector', 'subselector', 'parsed_tree'):
s = getattr(selector, attr, None)
if s is not None:
_classes_in_selector(s, classes)
cn = getattr(selector, 'class_name', None)
if cn is not None:
classes.add(cn)
@lru_cache(maxsize=4096)
def classes_in_selector(text):
    # Parse a selector string and return the set of class names it uses.
    # Results are memoized, since the same selector text recurs constantly
    # across rules; unparseable selectors yield the empty set.
    found = set()
    try:
        for selector in parse(text):
            _classes_in_selector(selector, found)
    except SelectorSyntaxError:
        pass
    return found
def classes_in_rule_list(css_rules):
    # Collect every class name referenced by the style rules in css_rules,
    # descending into grouping rules (@media etc.) that contain nested rules.
    found = set()
    for rule in css_rules:
        if rule.type == rule.STYLE_RULE:
            found.update(classes_in_selector(rule.selectorText))
        elif hasattr(rule, 'cssRules'):
            found.update(classes_in_rule_list(rule.cssRules))
    return found
def iter_declarations(sheet_or_rule):
    # Depth-first traversal yielding every style declaration reachable from
    # a stylesheet, a rule, or a bare CSSStyleDeclaration.
    node = sheet_or_rule
    if hasattr(node, 'cssRules'):
        for child in node.cssRules:
            yield from iter_declarations(child)
    elif hasattr(node, 'style'):
        yield node.style
    elif isinstance(node, CSSStyleDeclaration):
        yield node
def remove_property_value(prop, predicate):
    ''' Remove the Values that match the predicate from this property. If all
    values of the property would be removed, the property is removed from its
    parent instead. Note that this means the property must have a parent (a
    CSSStyleDeclaration). Returns True if any value was removed. '''
    removed_vals = list(filter(predicate, prop.propertyValue))
    if len(removed_vals) == len(prop.propertyValue):
        prop.parent.removeProperty(prop.name)
    else:
        # Rebuild the property's value text with the removed values excised
        x = css_text(prop.propertyValue)
        for v in removed_vals:
            x = x.replace(css_text(v), '').strip()
        prop.propertyValue.cssText = x
    return bool(removed_vals)
RULE_PRIORITIES = {t:i for i, t in enumerate((CSSRule.COMMENT, CSSRule.CHARSET_RULE, CSSRule.IMPORT_RULE, CSSRule.NAMESPACE_RULE))}
def sort_sheet(container, sheet_or_text):
    ''' Sort the rules in a stylesheet. Note that in the general case this can
    change the effective styles, but for most common sheets, it should be safe.
    Rules are ordered comment/charset/import/namespace first, then everything
    else, with style rules sorted by the specificity of their (re-ordered)
    selectors. '''
    sheet = container.parse_css(sheet_or_text) if isinstance(sheet_or_text, str) else sheet_or_text

    def text_sort_key(x):
        return numeric_sort_key(str(x or ''))

    def selector_sort_key(x):
        return (x.specificity, text_sort_key(x.selectorText))

    def rule_sort_key(rule):
        primary = RULE_PRIORITIES.get(rule.type, len(RULE_PRIORITIES))
        secondary = text_sort_key(getattr(rule, 'atkeyword', '') or '')
        tertiary = None
        if rule.type == CSSRule.STYLE_RULE:
            primary += 1
            # Also normalize the order of selectors within the rule itself
            selectors = sorted(rule.selectorList, key=selector_sort_key)
            tertiary = selector_sort_key(selectors[0])
            rule.selectorText = ', '.join(s.selectorText for s in selectors)
        elif rule.type == CSSRule.FONT_FACE_RULE:
            try:
                tertiary = text_sort_key(rule.style.getPropertyValue('font-family'))
            except Exception:
                pass
        # NOTE(review): tertiary may be None for some rules; two rules with
        # equal primary/secondary keys but mixed None/tuple tertiary values
        # would make the comparison raise TypeError — confirm this cannot occur
        return primary, secondary, tertiary
    sheet.cssRules.sort(key=rule_sort_key)
    return sheet
def add_stylesheet_links(container, name, text):
    ''' Parse `text` as the HTML file `name` and append a <link> tag for
    every stylesheet in the book to its <head>. Returns the serialized HTML,
    or None when the document has no <head> or the book has no stylesheets. '''
    root = container.parse_xhtml(text, name)
    head = root.xpath('//*[local-name() = "head"]')
    if not head:
        return
    head = head[0]
    sheets = tuple(container.manifest_items_of_type(lambda mt: mt in OEB_STYLES))
    if not sheets:
        return
    for sname in sheets:
        link = head.makeelement(XHTML('link'), type='text/css', rel='stylesheet', href=container.name_to_href(sname, name))
        head.append(link)
    pretty_xml_tree(head)
    return serialize(root, 'text/html')
def rename_class_in_rule_list(css_rules, old_name, new_name):
    # This regex will not match class names inside attribute value selectors
    # and it will match id selectors that contain .old_name, but it is the
    # best that can be done without a full parser for CSS selectors.
    pat = re.compile(rf'(?<=\.){re.escape(old_name)}(?:\W|$)')
    substitute = partial(pat.sub, lambda m: m.group().replace(old_name, new_name))
    changed = False
    for rule in css_rules:
        if rule.type == rule.STYLE_RULE:
            before = rule.selectorText
            after = substitute(before)
            if after != before:
                rule.selectorText = after
                changed = True
        elif hasattr(rule, 'cssRules'):
            # Recurse into grouping rules such as @media
            changed = rename_class_in_rule_list(rule.cssRules, old_name, new_name) or changed
    return changed
def rename_class_in_doc(container, root, old_name, new_name):
    ''' Rename a CSS class everywhere it occurs in the HTML document `root`:
    in class attributes and in inline <style> blocks. Returns True if
    anything changed. '''
    changed = False
    # Match old_name only when it is a whole word within the class attribute
    pat = re.compile(rf'(?:^|\W){re.escape(old_name)}(?:\W|$)')

    def repl(m):
        return m.group().replace(old_name, new_name)

    for elem in root.xpath('//*[@class]'):
        old = elem.get('class')
        if old:
            new = pat.sub(repl, old)
            if new != old:
                changed = True
                elem.set('class', new)
    for style in root.xpath('//*[local-name()="style"]'):
        if style.get('type', 'text/css') == 'text/css' and style.text:
            sheet = container.parse_css(style.text)
            if rename_class_in_rule_list(sheet.cssRules, old_name, new_name):
                changed = True
                style.text = force_unicode(sheet.cssText, 'utf-8')
    return changed
def rename_class(container, old_name, new_name):
    # Rename a CSS class across the whole book: every standalone stylesheet
    # and every HTML file (class attributes and inline <style> blocks).
    # Returns True if anything was changed.
    if not old_name or old_name == new_name:
        return False
    changed = False
    for sheet_name in container.manifest_items_of_type(lambda mt: mt in OEB_STYLES):
        sheet = container.parsed(sheet_name)
        if rename_class_in_rule_list(sheet.cssRules, old_name, new_name):
            container.dirty(sheet_name)
            changed = True
    for doc_name in container.manifest_items_of_type(lambda mt: mt in OEB_DOCS):
        if rename_class_in_doc(container, container.parsed(doc_name), old_name, new_name):
            container.dirty(doc_name)
            changed = True
    return changed
| 22,414 | Python | .py | 485 | 36.131959 | 147 | 0.624703 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,391 | stats.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/stats.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys
from functools import partial
import regex
from lxml.etree import tostring
from tinycss.fonts3 import parse_font_family
from calibre.ebooks.oeb.base import XHTML, css_text
from calibre.ebooks.oeb.polish.cascade import iterdeclaration, iterrules, resolve_styles
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import ord_string, safe_chr
from calibre.utils.icu import upper as icu_upper
from polyglot.builtins import iteritems, itervalues
def normalize_font_properties(font):
    # Canonicalize the font-weight/style/stretch entries of `font` in place
    # (mutates and also returns the same dict). Weights are normalized to
    # one of the numeric strings '100'...'900'; unrecognized style/stretch
    # values fall back to 'normal'.
    weight = font.get('font-weight', None)
    if not weight and weight != 0:
        weight = 'normal'
    weight = {'normal': '400', 'bold': '700'}.get(str(weight), str(weight))
    if weight not in {'100', '200', '300', '400', '500', '600', '700', '800', '900'}:
        weight = '400'
    font['font-weight'] = weight

    style = font.get('font-style', None)
    font['font-style'] = style if style in ('normal', 'italic', 'oblique') else 'normal'

    stretch = font.get('font-stretch', None)
    allowed_stretch = {
        'normal', 'ultra-condensed', 'extra-condensed', 'condensed',
        'semi-condensed', 'semi-expanded', 'expanded',
        'extra-expanded', 'ultra-expanded',
    }
    font['font-stretch'] = stretch if stretch in allowed_stretch else 'normal'
    return font
widths = {x:i for i, x in enumerate(('ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'
))}
def get_matching_rules(rules, font):
    ''' Return the @font-face rules from `rules` that best match the font
    description `font` (a dict as produced by get_font_dict()), following
    the CSS font matching algorithm: filter by family, then stretch, then
    style, then weight. '''
    matches = []
    # Filter on family
    for rule in reversed(rules):
        ff = frozenset(icu_lower(x) for x in font.get('font-family', []))
        if ff.intersection(rule['font-family']):
            matches.append(rule)
    if not matches:
        return []
    # Filter on font stretch: prefer the nearest width, biased towards
    # narrower widths for condensed fonts and wider for expanded ones
    width = widths[font.get('font-stretch', 'normal')]
    min_dist = min(abs(width-y['width']) for y in matches)
    nearest = [x for x in matches if abs(width-x['width']) == min_dist]
    if width <= 4:
        lmatches = [f for f in nearest if f['width'] <= width]
    else:
        lmatches = [f for f in nearest if f['width'] >= width]
    matches = (lmatches or nearest)
    # Filter on font-style, using the CSS preference order for the desired style
    fs = font.get('font-style', 'normal')
    order = {
        'oblique':['oblique', 'italic', 'normal'],
        'normal':['normal', 'oblique', 'italic']
    }.get(fs, ['italic', 'oblique', 'normal'])
    for q in order:
        m = [f for f in matches if f.get('font-style', 'normal') == q]
        if m:
            matches = m
            break
    # Filter on font weight. Build the fallback order per the CSS algorithm:
    # 400 prefers 500 then lighter then heavier, 500 prefers 400 then lighter
    # then heavier; lighter desired weights prefer lighter fallbacks first,
    # heavier desired weights prefer heavier fallbacks first.
    # Note: the ascending ranges were previously written as
    # range(fw+100, 100, 1000) (stop/step transposed), which is always empty,
    # so heavier fallback weights were never tried. Fixed here.
    fw = int(font.get('font-weight', '400'))
    if fw == 400:
        q = [400, 500, 300, 200, 100, 600, 700, 800, 900]
    elif fw == 500:
        q = [500, 400, 300, 200, 100, 600, 700, 800, 900]
    elif fw < 400:
        q = [fw] + list(range(fw-100, -100, -100)) + list(range(fw+100,
            1000, 100))
    else:
        q = [fw] + list(range(fw+100, 1000, 100)) + list(range(fw-100,
            -100, -100))
    for wt in q:
        m = [f for f in matches if f['weight'] == wt]
        if m:
            return m
    return []
def get_css_text(elem, resolve_pseudo_property, which='before'):
    '''Return the generated text content for the :before/:after pseudo-element
    of *elem*, or the empty string when there is none.

    Only simple double-quoted string values of the CSS "content" property are
    recognized; anything else (none, counters, url()) yields ''.
    '''
    content = resolve_pseudo_property(elem, which, 'content')[0].value
    is_quoted_string = bool(content) and len(content) > 2 and content.startswith('"') and content.endswith('"')
    return content[1:-1] if is_quoted_string else ''
caps_variants = {'smallcaps', 'small-caps', 'all-small-caps', 'petite-caps', 'all-petite-caps', 'unicase'}
def get_element_text(elem, resolve_property, resolve_pseudo_property, capitalize_pat, for_pseudo=None):
    '''Collect the text rendered directly by *elem* (plus :before/:after
    generated content) for font subsetting statistics.

    NOTE: the return value is a character-coverage sample, not readable text.
    In several places below an upper-cased copy is *appended* to the string
    (not substituted) — that is deliberate: for small-caps and capitalize the
    renderer may need both the original and the upper-case glyphs, and the
    caller only cares about the set of characters used.
    '''
    ans = []
    before = get_css_text(elem, resolve_pseudo_property)
    if before:
        ans.append(before)
    if for_pseudo is not None:
        # For :first-letter/:first-line the whole subtree text is relevant
        ans.append(tostring(elem, method='text', encoding='unicode', with_tail=False))
    else:
        # Only direct text and child tails: each child element is visited
        # separately by the caller, so its own text is counted there with the
        # font actually applied to it.
        if elem.text:
            ans.append(elem.text)
        for child in elem.iterchildren():
            t = getattr(child, 'tail', '')
            if t:
                ans.append(t)
    after = get_css_text(elem, resolve_pseudo_property, 'after')
    if after:
        ans.append(after)
    ans = ''.join(ans)
    if for_pseudo is not None:
        tt = resolve_pseudo_property(elem, for_pseudo, 'text-transform')[0].value
        fv = resolve_pseudo_property(elem, for_pseudo, 'font-variant')[0].value
    else:
        tt = resolve_property(elem, 'text-transform')[0].value
        fv = resolve_property(elem, 'font-variant')[0].value
    if fv in caps_variants:
        # Small-caps: append (not replace) so both cases are covered
        ans += icu_upper(ans)
    if tt != 'none':
        if tt == 'uppercase':
            ans = icu_upper(ans)
        elif tt == 'lowercase':
            ans = icu_lower(ans)
        elif tt == 'capitalize':
            # Append the upper-cased first letter-like character as well
            m = capitalize_pat.search(ans)
            if m is not None:
                ans += icu_upper(m.group())
    return ans
def get_font_dict(elem, resolve_property, pseudo=None):
    '''Return a normalized description of the font used by *elem* (or by its
    *pseudo* element): font-family as a tuple of names, and font-weight,
    font-style and font-stretch as strings.

    :param resolve_property: style lookup callable; called as
        ``(elem, prop)`` normally and ``(elem, pseudo, prop)`` when a pseudo
        element name is supplied.
    '''
    def resolve(prop):
        if pseudo is None:
            return resolve_property(elem, prop)
        return resolve_property(elem, pseudo, prop)
    font = {'font-family': tuple(v.value for v in resolve('font-family'))}
    for prop in ('font-weight', 'font-style', 'font-stretch'):
        font[prop] = str(resolve(prop)[0].value)
    normalize_font_properties(font)
    return font
# Generic CSS family keywords (plus 'inherit') — never actual font files, so
# never candidates for embedding or subsetting.
bad_fonts = {'serif', 'sans-serif', 'monospace', 'cursive', 'fantasy', 'sansserif', 'inherit'}
# Code points never counted in the subset character statistics
exclude_chars = frozenset(ord_string('\n\r\t'))
# Elements whose text content is not rendered, so not scanned for font usage
skip_tags = {XHTML(x) for x in 'script style title meta link'.split()}
# The font properties that identify a unique face in the usage map
font_keys = {'font-weight', 'font-style', 'font-stretch', 'font-family'}
def prepare_font_rule(cssdict):
    '''Normalize a parsed @font-face rule dict in place for fast matching:
    keep only the first family (as a frozenset) and pre-compute the integer
    'weight' and ordinal 'width' keys used by get_matching_rules().'''
    first_family = cssdict['font-family'][:1]
    cssdict['font-family'] = frozenset(first_family)
    cssdict['weight'] = int(cssdict['font-weight'])
    cssdict['width'] = widths[cssdict['font-stretch']]
class StatsCollector:
    '''Scan every spine document of a container and record, per embedded font
    file, the set of characters actually rendered with that font.

    After construction:
      * font_stats maps font file name -> set of characters used from it
      * font_usage_map / font_spec_map (filled when do_embed=True) describe
        which font faces each spine item requests, for the embedding code
      * font_rule_map / all_font_rules hold the parsed @font-face rules
    '''
    # Class-level cache for the two compiled regexes, shared by all instances
    first_letter_pat = capitalize_pat = None
    def __init__(self, container, do_embed=False):
        if self.first_letter_pat is None:
            # leading punctuation followed by the first letter/number
            StatsCollector.first_letter_pat = self.first_letter_pat = regex.compile(
                r'^[\p{P}]*[\p{L}\p{N}]', regex.VERSION1 | regex.UNICODE)
            StatsCollector.capitalize_pat = self.capitalize_pat = regex.compile(
                r'[\p{L}\p{N}]', regex.VERSION1 | regex.UNICODE)
        self.collect_font_stats(container, do_embed)
    def collect_font_face_rules(self, container, processed, spine_name, sheet, sheet_name):
        '''Sheet callback: parse the @font-face rules of *sheet* (cached per
        sheet in *processed*) and append them to font_rule_map[spine_name].'''
        if sheet_name in processed:
            sheet_rules = processed[sheet_name]
        else:
            sheet_rules = []
            # Inline <style> blocks share the spine item's name; don't cache
            # those under the spine name or they would shadow each other
            if sheet_name != spine_name:
                processed[sheet_name] = sheet_rules
            for rule, base_name, rule_index in iterrules(container, sheet_name, rules=sheet, rule_type='FONT_FACE_RULE'):
                cssdict = {}
                for prop in iterdeclaration(rule.style):
                    if prop.name == 'font-family':
                        cssdict['font-family'] = [icu_lower(x) for x in parse_font_family(css_text(prop.propertyValue))]
                    elif prop.name.startswith('font-'):
                        cssdict[prop.name] = prop.propertyValue[0].value
                    elif prop.name == 'src':
                        # Use the first src entry that resolves to a file
                        # actually present in the book
                        for val in prop.propertyValue:
                            x = val.value
                            fname = container.href_to_name(x, sheet_name)
                            if container.has_name(fname):
                                cssdict['src'] = fname
                                break
                        else:
                            container.log.warn('The @font-face rule refers to a font file that does not exist in the book: %s' % css_text(prop.propertyValue))
                if 'src' not in cssdict:
                    continue
                ff = cssdict.get('font-family')
                # Ignore rules that only name generic families
                if not ff or ff[0] in bad_fonts:
                    continue
                normalize_font_properties(cssdict)
                prepare_font_rule(cssdict)
                sheet_rules.append(cssdict)
        self.font_rule_map[spine_name].extend(sheet_rules)
    def get_element_font_usage(self, elem, resolve_property, resolve_pseudo_property, font_face_rules, do_embed, font_usage_map, font_spec):
        '''Record the characters of *elem* against the font rules that match
        its computed style, including :first-letter and :first-line styles.'''
        text = get_element_text(elem, resolve_property, resolve_pseudo_property, self.capitalize_pat)
        if not text:
            return
        def update_usage_for_embed(font, chars):
            # Only needed when collecting data for font embedding
            if not do_embed:
                return
            ff = [icu_lower(x) for x in font.get('font-family', ())]
            if ff and ff[0] not in bad_fonts:
                # Key the usage map on the identifying font properties, with
                # the family collapsed to the first name
                key = frozenset(((k, ff[0] if k == 'font-family' else v) for k, v in iteritems(font) if k in font_keys))
                val = font_usage_map.get(key)
                if val is None:
                    val = font_usage_map[key] = {'text': set()}
                for k in font_keys:
                    val[k] = font[k][0] if k == 'font-family' else font[k]
                val['text'] |= chars
            for ff in font.get('font-family', ()):
                if ff and icu_lower(ff) not in bad_fonts:
                    font_spec.add(ff)
        font = get_font_dict(elem, resolve_property)
        chars = frozenset(ord_string(text)) - exclude_chars
        update_usage_for_embed(font, chars)
        for rule in get_matching_rules(font_face_rules, font):
            self.font_stats[rule['src']] |= chars
        # :first-letter may use a different font for the leading character
        if resolve_pseudo_property(elem, 'first-letter', 'font-family', check_if_pseudo_applies=True):
            font = get_font_dict(elem, resolve_pseudo_property, pseudo='first-letter')
            text = get_element_text(elem, resolve_property, resolve_pseudo_property, self.capitalize_pat, for_pseudo='first-letter')
            m = self.first_letter_pat.search(text.lstrip())
            if m is not None:
                chars = frozenset(ord_string(m.group())) - exclude_chars
                update_usage_for_embed(font, chars)
                for rule in get_matching_rules(font_face_rules, font):
                    self.font_stats[rule['src']] |= chars
        # :first-line — the exact line break is unknowable statically, so all
        # of the element's text is conservatively counted for that font too
        if resolve_pseudo_property(elem, 'first-line', 'font-family', check_if_pseudo_applies=True, check_ancestors=True):
            font = get_font_dict(elem, partial(resolve_pseudo_property, check_ancestors=True), pseudo='first-line')
            text = get_element_text(elem, resolve_property, resolve_pseudo_property, self.capitalize_pat, for_pseudo='first-line')
            chars = frozenset(ord_string(text)) - exclude_chars
            update_usage_for_embed(font, chars)
            for rule in get_matching_rules(font_face_rules, font):
                self.font_stats[rule['src']] |= chars
    def get_font_usage(self, container, spine_name, resolve_property, resolve_pseudo_property, font_face_rules, do_embed):
        '''Walk every rendered element of the spine item and accumulate its
        font usage.'''
        root = container.parsed(spine_name)
        for body in root.iterchildren(XHTML('body')):
            for elem in body.iter('*'):
                if elem.tag not in skip_tags:
                    self.get_element_font_usage(
                        elem, resolve_property, resolve_pseudo_property, font_face_rules, do_embed,
                        self.font_usage_map[spine_name], self.font_spec_map[spine_name])
    def collect_font_stats(self, container, do_embed=False):
        '''Drive the collection over all spine items and convert the gathered
        code points into character sets.'''
        self.font_stats = {}
        self.font_usage_map = {}
        self.font_spec_map = {}
        self.font_rule_map = {}
        self.all_font_rules = {}
        processed_sheets = {}
        for name, is_linear in container.spine_names:
            self.font_rule_map[name] = font_face_rules = []
            # resolve_styles invokes collect_font_face_rules for every sheet
            # the spine item references, filling font_face_rules
            resolve_property, resolve_pseudo_property, select = resolve_styles(container, name, sheet_callback=partial(
                self.collect_font_face_rules, container, processed_sheets, name))
            for rule in font_face_rules:
                self.all_font_rules[rule['src']] = rule
                if rule['src'] not in self.font_stats:
                    self.font_stats[rule['src']] = set()
            self.font_usage_map[name] = {}
            self.font_spec_map[name] = set()
            self.get_font_usage(container, name, resolve_property, resolve_pseudo_property, font_face_rules, do_embed)
        # Convert accumulated code points to characters for the consumers
        self.font_stats = {k:{safe_chr(x) for x in v} for k, v in iteritems(self.font_stats)}
        for fum in itervalues(self.font_usage_map):
            for v in itervalues(fum):
                v['text'] = {safe_chr(x) for x in v['text']}
if __name__ == '__main__':
    # Debug helper: print the font statistics for the book given on the
    # command line.
    from calibre.ebooks.oeb.polish.container import get_container
    from calibre.utils.logging import default_log
    default_log.filter_level = default_log.DEBUG
    ebook = get_container(sys.argv[-1], default_log)
    from pprint import pprint
    pprint(StatsCollector(ebook, do_embed=True).font_stats)
| 13,034 | Python | .py | 267 | 38.632959 | 158 | 0.59052 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,392 | subset.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/subset.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import sys
from io import BytesIO
from calibre import as_unicode, prints
from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, XPath, css_text
from calibre.ebooks.oeb.polish.utils import OEB_FONTS
from calibre.utils.fonts.subset import subset
from calibre.utils.fonts.utils import get_font_names
from polyglot.builtins import iteritems
def remove_font_face_rules(container, sheet, remove_names, base):
    '''Delete every @font-face rule in *sheet* whose src resolves to one of
    *remove_names*.

    :param base: the name the sheet's URLs are resolved relative to.
    :return: True iff at least one rule was deleted.
    '''
    deleted_any = False
    # Iterate over a snapshot since rules are deleted during the walk
    for rule in tuple(sheet.cssRules):
        if rule.type != rule.FONT_FACE_RULE:
            continue
        try:
            uri = rule.style.getProperty('src').propertyValue[0].uri
        except (IndexError, KeyError, AttributeError, TypeError, ValueError):
            # Malformed or missing src declaration — nothing to match
            continue
        if container.href_to_name(uri, base) in remove_names:
            sheet.deleteRule(rule)
            deleted_any = True
    return deleted_any
def iter_subsettable_fonts(container):
    '''Yield (name, media_type) for every file in the container that looks
    like a subsettable font: either its media type is a known font type or
    its extension is otf/ttf.'''
    for name, media_type in iteritems(container.mime_map):
        ext = name.rpartition('.')[-1].lower()
        if media_type in OEB_FONTS or ext in {'otf', 'ttf'}:
            yield name, media_type
def subset_all_fonts(container, font_stats, report):
    '''Subset all embedded fonts in the container to the characters recorded
    in *font_stats* (as produced by StatsCollector). Fonts with no recorded
    usage are removed outright, along with the @font-face rules that
    referenced them.

    :param font_stats: mapping of font file name -> set of used characters
    :param report: callable receiving progress messages
    :return: True iff the container was modified
    '''
    remove = set()
    total_old = total_new = 0
    changed = False
    for name, mt in iter_subsettable_fonts(container):
        chars = font_stats.get(name, set())
        with container.open(name, 'rb') as f:
            f.seek(0, os.SEEK_END)
            font_size = f.tell()
        if not chars:
            # No text uses this font; drop it entirely (links cleaned below)
            remove.add(name)
            report(_('Removed unused font: %s')%name)
            continue
        with container.open(name, 'r+b') as f:
            raw = f.read()
            try:
                font_name = get_font_names(raw)[-1]
            except Exception as e:
                # Unparseable font data; leave the file alone
                report(
                    'Corrupted font: %s, ignoring. Error: %s'%(
                        name, as_unicode(e)))
                continue
            warnings = []
            report('Subsetting font: %s'%(font_name or name))
            font_type = os.path.splitext(name)[1][1:].lower()
            output = BytesIO()
            try:
                warnings = subset(BytesIO(raw), output, font_type, chars)
            except Exception as e:
                report(
                    'Unsupported font: %s, ignoring. Error: %s'%(
                        name, as_unicode(e)))
                continue
            nraw = output.getvalue()
            # Only fonts that were actually processed count toward the totals
            total_old += font_size
            for w in warnings:
                report(w)
            olen = len(raw)
            nlen = len(nraw)
            total_new += len(nraw)
            if nlen == olen:
                report(_('The font %s was already subset')%font_name)
            else:
                report(_('Decreased the font {0} to {1} of its original size').format(
                    font_name, ('%.1f%%' % (nlen/olen * 100))))
                changed = True
            f.seek(0), f.truncate(), f.write(nraw)
    for name in remove:
        container.remove_item(name)
        changed = True
    if remove:
        # Clean up @font-face rules pointing at the removed fonts, both in
        # standalone stylesheets and in inline <style> tags
        for name, mt in iteritems(container.mime_map):
            if mt in OEB_STYLES:
                sheet = container.parsed(name)
                if remove_font_face_rules(container, sheet, remove, name):
                    container.dirty(name)
            elif mt in OEB_DOCS:
                for style in XPath('//h:style')(container.parsed(name)):
                    if style.get('type', 'text/css') == 'text/css' and style.text:
                        sheet = container.parse_css(style.text, name)
                        if remove_font_face_rules(container, sheet, remove, name):
                            style.text = css_text(sheet)
                            container.dirty(name)
    if total_old > 0:
        report(_('Reduced total font size to %.1f%% of original')%(
            total_new/total_old*100))
    else:
        report(_('No embedded fonts found'))
    return changed
if __name__ == '__main__':
    # Debug helper: subset the fonts of the book given on the command line
    # and write the result next to it as <name>_subset.<ext>.
    from calibre.ebooks.oeb.polish.container import get_container
    from calibre.ebooks.oeb.polish.stats import StatsCollector
    from calibre.utils.logging import default_log
    default_log.filter_level = default_log.DEBUG
    inbook = sys.argv[-1]
    ebook = get_container(inbook, default_log)
    report = []
    stats = StatsCollector(ebook).font_stats
    subset_all_fonts(ebook, stats, report.append)
    outbook, ext = inbook.rpartition('.')[0::2]
    outbook += '_subset.'+ext
    ebook.commit(outbook)
    prints('\nReport:')
    for msg in report:
        prints(msg)
    print()
    prints('Output written to:', outbook)
| 4,751 | Python | .py | 118 | 29.915254 | 86 | 0.574058 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,393 | main.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/main.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
import sys
import time
from collections import namedtuple
from functools import partial
from calibre.ebooks.oeb.polish.container import get_container
from calibre.ebooks.oeb.polish.cover import set_cover
from calibre.ebooks.oeb.polish.css import remove_unused_css
from calibre.ebooks.oeb.polish.download import download_external_resources, get_external_resources, replace_resources
from calibre.ebooks.oeb.polish.embed import embed_all_fonts
from calibre.ebooks.oeb.polish.hyphenation import add_soft_hyphens, remove_soft_hyphens
from calibre.ebooks.oeb.polish.images import compress_images
from calibre.ebooks.oeb.polish.jacket import add_or_replace_jacket, find_existing_jacket, remove_jacket, replace_jacket
from calibre.ebooks.oeb.polish.replace import smarten_punctuation
from calibre.ebooks.oeb.polish.stats import StatsCollector
from calibre.ebooks.oeb.polish.subset import iter_subsettable_fonts, subset_all_fonts
from calibre.ebooks.oeb.polish.upgrade import upgrade_book
from calibre.utils.localization import ngettext
from calibre.utils.logging import Log
from polyglot.builtins import iteritems
# Default values for every polish action; namedtuple Options objects are
# built from these keys throughout this module.
ALL_OPTS = {
    'embed': False,
    'subset': False,
    'opf': None,
    'cover': None,
    'jacket': False,
    'remove_jacket':False,
    'smarten_punctuation':False,
    'remove_unused_css':False,
    'compress_images': False,
    'upgrade_book': False,
    'add_soft_hyphens': False,
    'remove_soft_hyphens': False,
    'download_external_resources': False,
}
# Fine-tuning knobs for individual actions, overridable via tweak_polish()
CUSTOMIZATION = {
    'remove_unused_classes': False,
    'merge_identical_selectors': False,
    'merge_rules_with_identical_properties': False,
    'remove_unreferenced_sheets': True,
    'remove_ncx': True,
}
# Book formats that polishing can operate on
SUPPORTED = {'EPUB', 'AZW3'}
# Help {{{
# Rich-text help for each polish action, shown in the GUI; CLI_HELP below
# derives plain-text versions of these for --help output.
HELP = {'about': _(
'''\
<p><i>Polishing books</i> is all about putting the shine of perfection onto
your carefully crafted e-books.</p>
<p>Polishing tries to minimize the changes to the internal code of your e-book.
Unlike conversion, it <i>does not</i> flatten CSS, rename files, change font
sizes, adjust margins, etc. Every action performs only the minimum set of
changes needed for the desired effect.</p>
<p>You should use this tool as the last step in your e-book creation process.</p>
{0}
<p>Note that polishing only works on files in the %s formats.</p>\
''')%_(' or ').join(sorted('<b>%s</b>'%x for x in SUPPORTED)),
'embed': _('''\
<p>Embed all fonts that are referenced in the document and are not already embedded.
This will scan your computer for the fonts, and if they are found, they will be
embedded into the document.</p>
<p>Please ensure that you have the proper license for embedding the fonts used in this document.</p>
'''),
'subset': _('''\
<p>Subsetting fonts means reducing an embedded font to contain
only the characters used from that font in the book. This
greatly reduces the size of the font files (halving the font
file sizes is common).</p>
<p>For example, if the book uses a specific font for headers,
then subsetting will reduce that font to contain only the
characters present in the actual headers in the book. Or if the
book embeds the bold and italic versions of a font, but bold
and italic text is relatively rare, or absent altogether, then
the bold and italic fonts can either be reduced to only a few
characters or completely removed.</p>
<p>The only downside to subsetting fonts is that if, at a later
date you decide to add more text to your books, the newly added
text might not be covered by the subset font.</p>
'''),
'jacket': _('''\
<p>Insert a "book jacket" page at the start of the book that contains
all the book metadata such as title, tags, authors, series, comments,
etc. Any previous book jacket will be replaced.</p>'''),
'remove_jacket': _('''\
<p>Remove a previous inserted book jacket page.</p>
'''),
'smarten_punctuation': _('''\
<p>Convert plain text dashes, ellipsis, quotes, multiple hyphens, etc. into their
typographically correct equivalents.</p>
<p>Note that the algorithm can sometimes generate incorrect results, especially
when single quotes at the start of contractions are involved.</p>
'''),
'remove_unused_css': _('''\
<p>Remove all unused CSS rules from stylesheets and <style> tags. Some books
created from production templates can have a large number of extra CSS rules
that don't match any actual content. These extra rules can slow down readers
that need to parse them all.</p>
'''),
'compress_images': _('''\
<p>Losslessly compress images in the book, to reduce the filesize, without
affecting image quality.</p>
'''),
'upgrade_book': _('''\
<p>Upgrade the internal structures of the book, if possible. For instance,
upgrades EPUB 2 books to EPUB 3 books.</p>
'''),
'add_soft_hyphens': _('''\
<p>Add soft hyphens to all words in the book. This allows the book to be rendered
better when the text is justified, in readers that do not support hyphenation.</p>
'''),
'remove_soft_hyphens': _('''\
<p>Remove soft hyphens from all text in the book.</p>
'''),
'download_external_resources': _('''\
<p>Download external resources such as images, stylesheets, etc. that point to URLs instead of files in the book.
All such resources will be downloaded and added to the book so that the book no longer references any external resources.
</p>
'''),
}
def hfix(name, raw):
    '''Convert a rich HELP string into plain text for command-line help:
    paragraphs become single lines separated by one newline, and the HTML
    entities used in the source are decoded.

    The 'about' entry is special-cased: its {0} placeholder (GUI-only
    content) is dropped and the text is otherwise left alone.
    '''
    if name == 'about':
        return raw.format('')
    raw = raw.replace('\n\n', '__XX__')
    raw = raw.replace('\n', ' ')
    raw = raw.replace('__XX__', '\n')
    # BUG FIX: these were corrupted into the no-ops replace('<', '<') and
    # replace('>', '>'); the intent is to decode the HTML entities.
    raw = raw.replace('&lt;', '<').replace('&gt;', '>')
    return raw
# Plain-text versions of HELP for --help output: HTML tags stripped, then
# normalized by hfix()
CLI_HELP = {x:hfix(x, re.sub('<.*?>', '', y)) for x, y in iteritems(HELP)}
# }}}
def update_metadata(ebook, new_opf):
    '''Replace the metadata in the book's OPF with the metadata read from the
    OPF file at path *new_opf*. The cover is deliberately not touched here
    (cover changes are handled separately by set_cover()).'''
    from calibre.ebooks.metadata.opf import get_metadata, set_metadata
    with ebook.open(ebook.opf_name, 'r+b') as stream, open(new_opf, 'rb') as ns:
        mi = get_metadata(ns)[0]
        # Strip cover information so set_metadata does not alter the cover
        mi.cover, mi.cover_data = None, (None, None)
        opfbytes = set_metadata(stream, mi, apply_null=True, update_timestamp=True)[0]
        stream.seek(0)
        stream.truncate()
        stream.write(opfbytes)
def download_resources(ebook, report) -> bool:
    '''Download all external (URL-based) resources referenced by the book and
    rewrite the links to point at the downloaded local copies.

    :param report: callable receiving progress messages
    :return: True iff any links in the book were rewritten
    '''
    url_to_referrer_map = get_external_resources(ebook)
    if not url_to_referrer_map:
        report(_('No external resources found in book'))
        return False
    n = len(url_to_referrer_map)
    report(ngettext('Downloading one external resource', 'Downloading {} external resources', n).format(n))
    replacements, failures = download_external_resources(ebook, url_to_referrer_map)
    if failures:
        if replacements:
            report(_('Failed to download some resources, see details below:'))
        else:
            report(_('Failed to download all resources, see details below:'))
        report([f'{url}\n\t{err}\n' for url, err in iteritems(failures)])
    else:
        report(_('Successfully downloaded all resources'))
    # Only a successful replace actually changes the book
    return bool(replacements and replace_resources(ebook, url_to_referrer_map, replacements))
def polish_one(ebook, opts, report, customization=None):
    '''Run the polish actions selected in *opts* against an open container.

    :param ebook: an open Container object
    :param opts: namedtuple whose fields are the keys of ALL_OPTS
    :param report: callable receiving progress messages
    :param customization: optional dict overriding CUSTOMIZATION
    :return: True iff the book was modified
    '''
    def rt(x):
        return report('\n### ' + x)
    jacket = None
    changed = False
    customization = customization or CUSTOMIZATION.copy()
    has_subsettable_fonts = False
    for x in iter_subsettable_fonts(ebook):
        has_subsettable_fonts = True
        break
    # Font statistics are expensive; collect them only when embedding or
    # subsetting will actually run. `stats` is referenced below only on the
    # same conditions, so it is always defined when used.
    if (opts.subset and has_subsettable_fonts) or opts.embed:
        stats = StatsCollector(ebook, do_embed=opts.embed)
    if opts.opf:
        changed = True
        rt(_('Updating metadata'))
        update_metadata(ebook, opts.opf)
        jacket = find_existing_jacket(ebook)
        if jacket is not None:
            # Keep an existing jacket in sync with the new metadata
            replace_jacket(ebook, jacket)
            report(_('Updated metadata jacket'))
        report(_('Metadata updated\n'))
    if opts.cover:
        changed = True
        rt(_('Setting cover'))
        set_cover(ebook, opts.cover, report)
        report('')
    if opts.jacket:
        changed = True
        rt(_('Inserting metadata jacket'))
        if jacket is None:
            if add_or_replace_jacket(ebook):
                report(_('Existing metadata jacket replaced'))
            else:
                report(_('Metadata jacket inserted'))
        else:
            report(_('Existing metadata jacket replaced'))
        report('')
    if opts.remove_jacket:
        rt(_('Removing metadata jacket'))
        if remove_jacket(ebook):
            report(_('Metadata jacket removed'))
            changed = True
        else:
            report(_('No metadata jacket found'))
        report('')
    if opts.smarten_punctuation:
        rt(_('Smartening punctuation'))
        if smarten_punctuation(ebook, report):
            changed = True
        report('')
    if opts.embed:
        rt(_('Embedding referenced fonts'))
        if embed_all_fonts(ebook, stats, report):
            changed = True
            # Newly embedded fonts can now be subset
            has_subsettable_fonts = True
        report('')
    if opts.subset:
        if has_subsettable_fonts:
            rt(_('Subsetting embedded fonts'))
            if subset_all_fonts(ebook, stats.font_stats, report):
                changed = True
        else:
            rt(_('No embedded fonts to subset'))
        report('')
    if opts.remove_unused_css:
        rt(_('Removing unused CSS rules'))
        if remove_unused_css(
            ebook, report,
            remove_unused_classes=customization['remove_unused_classes'],
            merge_rules=customization['merge_identical_selectors'],
            merge_rules_with_identical_properties=customization['merge_rules_with_identical_properties'],
            remove_unreferenced_sheets=customization['remove_unreferenced_sheets']
        ):
            changed = True
        report('')
    if opts.compress_images:
        rt(_('Losslessly compressing images'))
        if compress_images(ebook, report)[0]:
            changed = True
        report('')
    if opts.upgrade_book:
        rt(_('Upgrading book, if possible'))
        if upgrade_book(ebook, report, remove_ncx=customization['remove_ncx']):
            changed = True
        report('')
    # Add/remove soft hyphens are mutually exclusive; removal wins
    if opts.remove_soft_hyphens:
        rt(_('Removing soft hyphens'))
        remove_soft_hyphens(ebook, report)
        changed = True
    elif opts.add_soft_hyphens:
        rt(_('Adding soft hyphens'))
        add_soft_hyphens(ebook, report)
        changed = True
    if opts.download_external_resources:
        rt(_('Downloading external resources'))
        try:
            download_resources(ebook, report)
        except Exception:
            # Downloading is best-effort; report the failure and continue
            import traceback
            report(_('Failed to download resources with error:'))
            report(traceback.format_exc())
        report('')
    return changed
def polish(file_map, opts, log, report):
    '''Polish every book in *file_map* ({input_path: output_path}), writing
    each polished book to its mapped output path and timing the whole run.'''
    start = time.time()
    for inbook, outbook in iteritems(file_map):
        fmt = inbook.rpartition('.')[-1].upper()
        report(_('## Polishing: %s')%(fmt))
        container = get_container(inbook, log)
        polish_one(container, opts, report)
        container.commit(outbook)
        report('-'*70)
    elapsed = time.time() - start
    report(_('Polishing took: %.1f seconds')%(elapsed))
REPORT = '{0} REPORT {0}'.format('-'*30)
def gui_polish(data):
    '''Entry point used by the GUI: *data* is a dict of polish options plus
    the keys 'files', 'metadata' and 'do_cover'. The books are polished in
    place and the accumulated report is returned as a single string.'''
    files = data.pop('files')
    # Only honour the opf/cover values when the corresponding checkbox was set
    if not data.pop('metadata'):
        data.pop('opf')
    if not data.pop('do_cover'):
        data.pop('cover', None)
    # In-place polishing: every input maps to itself
    file_map = {x:x for x in files}
    opts = ALL_OPTS.copy()
    opts.update(data)
    O = namedtuple('Options', ' '.join(ALL_OPTS))
    opts = O(**opts)
    log = Log(level=Log.DEBUG)
    report = []
    polish(file_map, opts, log, report.append)
    log('')
    log(REPORT)
    for msg in report:
        log(msg)
    return '\n\n'.join(report)
def tweak_polish(container, actions, customization=None):
    '''Run polish actions on an already-open container (used by the e-book
    editor). *actions* maps option names from ALL_OPTS to values.

    :return: tuple of (list of report messages, True iff book changed)
    '''
    merged = dict(ALL_OPTS, **actions)
    Options = namedtuple('Options', ' '.join(ALL_OPTS))
    opts = Options(**merged)
    report = []
    changed = polish_one(container, opts, report.append, customization=customization)
    return report, changed
def option_parser():
    '''Build the command line option parser for ebook-polish, with one switch
    per polish action, using the plain-text help from CLI_HELP.'''
    from calibre.utils.config import OptionParser
    USAGE = _('%prog [options] input_file [output_file]\n\n') + re.sub(
        r'<.*?>', '', CLI_HELP['about'])
    parser = OptionParser(usage=USAGE)
    a = parser.add_option
    # o() adds a boolean flag (default False, set to True when present)
    o = partial(a, default=False, action='store_true')
    o('--embed-fonts', '-e', dest='embed', help=CLI_HELP['embed'])
    o('--subset-fonts', '-f', dest='subset', help=CLI_HELP['subset'])
    a('--cover', '-c', help=_(
        'Path to a cover image. Changes the cover specified in the e-book. '
        'If no cover is present, or the cover is not properly identified, inserts a new cover.'))
    a('--opf', '-o', help=_(
        'Path to an OPF file. The metadata in the book is updated from the OPF file.'))
    o('--jacket', '-j', help=CLI_HELP['jacket'])
    o('--remove-jacket', help=CLI_HELP['remove_jacket'])
    o('--smarten-punctuation', '-p', help=CLI_HELP['smarten_punctuation'])
    o('--remove-unused-css', '-u', help=CLI_HELP['remove_unused_css'])
    o('--compress-images', '-i', help=CLI_HELP['compress_images'])
    o('--add-soft-hyphens', '-H', help=CLI_HELP['add_soft_hyphens'])
    o('--remove-soft-hyphens', help=CLI_HELP['remove_soft_hyphens'])
    o('--upgrade-book', '-U', help=CLI_HELP['upgrade_book'])
    o('--download-external-resources', '-d', help=CLI_HELP['download_external_resources'])
    o('--verbose', help=_('Produce more verbose output, useful for debugging.'))
    return parser
def main(args=None):
    '''Command line entry point: parse options, validate arguments and polish
    a single book, writing to <name>_polished.<ext> when no explicit output
    path is given.'''
    parser = option_parser()
    opts, args = parser.parse_args(args or sys.argv[1:])
    log = Log(level=Log.DEBUG if opts.verbose else Log.INFO)
    if not args:
        parser.print_help()
        log.error(_('You must provide the input file to polish'))
        raise SystemExit(1)
    if len(args) > 2:
        parser.print_help()
        log.error(_('Unknown extra arguments'))
        raise SystemExit(1)
    if len(args) == 1:
        inbook = args[0]
        base, ext = inbook.rpartition('.')[0::2]
        outbook = base + '_polished.' + ext
    else:
        inbook, outbook = args
    # Fill the Options namedtuple from the parsed command line flags
    popts = ALL_OPTS.copy()
    for k, v in iteritems(popts):
        popts[k] = getattr(opts, k, None)
    O = namedtuple('Options', ' '.join(popts))
    popts = O(**popts)
    report = []
    # Refuse to run when no action flag was supplied
    if not tuple(filter(None, (getattr(popts, name) for name in ALL_OPTS))):
        parser.print_help()
        log.error(_('You must specify at least one action to perform'))
        raise SystemExit(1)
    polish({inbook:outbook}, popts, log, report.append)
    log('')
    log(REPORT)
    for msg in report:
        log(msg)
    log('Output written to:', outbook)
if __name__ == '__main__':
    main()
| 14,929 | Python | .py | 364 | 35.192308 | 121 | 0.66085 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,394 | replace.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/replace.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import codecs
import os
import posixpath
import shutil
from collections import Counter, defaultdict
from functools import partial
from calibre import sanitize_file_name
from calibre.ebooks.chardet import strip_encoding_declarations
from calibre.ebooks.oeb.base import css_text
from calibre.ebooks.oeb.polish.css import iter_declarations, remove_property_value
from calibre.ebooks.oeb.polish.utils import extract
from polyglot.builtins import iteritems, itervalues
from polyglot.urllib import urlparse, urlunparse
class LinkReplacer:
    '''Callable URL rewriter: maps links according to a {old_name: new_name}
    rename map and rewrites fragments via *frag_map*. Tracks whether any URL
    was actually changed in the ``replaced`` attribute.'''
    def __init__(self, base, container, link_map, frag_map):
        self.base = base
        self.container = container
        self.link_map = link_map
        self.frag_map = frag_map
        self.replaced = False
    def __call__(self, url):
        # Pure fragment link: only the anchor can change
        if url and url.startswith('#'):
            frag = url[1:]
            new_frag = self.frag_map(self.base, frag)
            if not new_frag or new_frag == frag:
                return url
            self.replaced = True
            return '#' + new_frag
        name = self.container.href_to_name(url, self.base)
        if not name:
            # External or unresolvable URL: leave untouched
            return url
        new_name = self.link_map.get(name, None)
        if not new_name:
            return url
        href = self.container.name_to_href(new_name, self.base)
        fragment = urlparse(url).fragment
        if fragment:
            new_frag = self.frag_map(name, fragment)
            if new_frag:
                href += '#%s'%new_frag
        if href != url:
            self.replaced = True
        return href
class IdReplacer:
    '''Callable URL rewriter that updates fragments pointing at renamed
    anchor ids, using a {file_name: {old_id: new_id}} map. Sets ``replaced``
    when any URL was changed.'''
    def __init__(self, base, container, id_map):
        self.base, self.container, self.replaced = base, container, False
        self.id_map = id_map
    def __call__(self, url):
        # Same-file fragment: look up the anchor in this file's id map
        if url and url.startswith('#'):
            old_id = url[1:]
            new_id = self.id_map.get(self.base, {}).get(old_id)
            if new_id is None or new_id == old_id:
                return url
            self.replaced = True
            return '#' + new_id
        name = self.container.href_to_name(url, self.base)
        if not name:
            return url
        id_map = self.id_map.get(name)
        if id_map is None:
            return url
        purl = urlparse(url)
        new_frag = id_map.get(purl.fragment)
        if new_frag is None:
            return url
        href = urlunparse(purl._replace(fragment=new_frag))
        if href != url:
            self.replaced = True
        return href
class LinkRebaser:
    '''Callable URL rewriter used when a single file is renamed from
    *old_name* to *new_name*: re-resolves every link relative to the new
    location and redirects self-references. Sets ``replaced`` when any URL
    was changed.'''
    def __init__(self, container, old_name, new_name):
        self.old_name, self.new_name = old_name, new_name
        self.container = container
        self.replaced = False
    def __call__(self, url):
        # Fragment-only links are relative to the file itself: no change
        if url and url.startswith('#'):
            return url
        frag = urlparse(url).fragment
        name = self.container.href_to_name(url, self.old_name)
        if not name:
            return url
        if name == self.old_name:
            # Self-reference: point at the file's new name
            name = self.new_name
        href = self.container.name_to_href(name, self.new_name)
        if frag:
            href += '#' + frag
        if href != url:
            self.replaced = True
        return href
def replace_links(container, link_map, frag_map=lambda name, frag:frag, replace_in_opf=False):
    '''
    Replace links to files in the container. Will iterate over all files in the container and change the specified links in them.
    :param link_map: A mapping of old canonical name to new canonical name. For example: :code:`{'images/old.png': 'images/new.png'}`
    :param frag_map: A callable that takes two arguments ``(name, anchor)`` and
        returns a new anchor. This is useful if you need to change the anchors in
        HTML files. By default, it does nothing.
    :param replace_in_opf: If False, links are not replaced in the OPF file.
    '''
    for name, media_type in iteritems(container.mime_map):
        # Skip the OPF unless explicitly requested (the container keeps its
        # manifest consistent itself during renames)
        if name == container.opf_name and not replace_in_opf:
            continue
        repl = LinkReplacer(name, container, link_map, frag_map)
        container.replace_links(name, repl)
def replace_ids(container, id_map):
    '''
    Replace all links in the container that pointed to the changed ids.
    :param id_map: A mapping of {name:id_map} where each id_map is a mapping of {old_id:new_id}
    :return: True iff at least one link was changed
    '''
    changed = False
    for name, media_type in iteritems(container.mime_map):
        repl = IdReplacer(name, container, id_map)
        container.replace_links(name, repl)
        if name == container.opf_name:
            # idref attributes in the OPF (e.g. the spine) are not URLs, so
            # they are not seen by replace_links and must be updated here
            imap = id_map.get(name, {})
            for item in container.opf_xpath('//*[@idref]'):
                old_id = item.get('idref')
                if old_id is not None:
                    new_id = imap.get(old_id)
                    if new_id is not None:
                        item.set('idref', new_id)
        if repl.replaced:
            changed = True
    return changed
def smarten_punctuation(container, report):
    '''Convert plain ASCII punctuation in every spine document into its
    typographically correct equivalent, rewriting each changed file as
    BOM-prefixed UTF-8.

    :param report: callable receiving progress messages
    :return: True iff any file was changed
    '''
    from calibre.ebooks.conversion.preprocess import smarten_punctuation
    smartened = False
    for path in container.spine_items:
        name = container.abspath_to_name(path)
        changed = False
        with container.open(name, 'r+b') as f:
            html = container.decode(f.read())
            newhtml = smarten_punctuation(html, container.log)
            if newhtml != html:
                changed = True
                report(_('Smartened punctuation in: %s')%name)
                # Declarations would now lie: the file is rewritten as UTF-8
                newhtml = strip_encoding_declarations(newhtml)
                f.seek(0)
                f.truncate()
                f.write(codecs.BOM_UTF8 + newhtml.encode('utf-8'))
        if changed:
            # Add an encoding declaration (it will be added automatically when
            # serialized)
            root = container.parsed(name)
            for m in root.xpath('descendant::*[local-name()="meta" and @http-equiv]'):
                m.getparent().remove(m)
            container.dirty(name)
            smartened = True
    if not smartened:
        report(_('No punctuation that could be smartened found'))
    return smartened
def rename_files(container, file_map):
    '''
    Rename files in the container, automatically updating all links to them.
    :param file_map: A mapping of old canonical name to new canonical name, for
        example: :code:`{'text/chapter1.html': 'chapter1.html'}`.
    :raises ValueError: on circular renames, clashes with existing files or
        duplicate destinations.
    '''
    overlap = set(file_map).intersection(set(itervalues(file_map)))
    if overlap:
        raise ValueError('Circular rename detected. The files %s are both rename targets and destinations' % ', '.join(overlap))
    for name, dest in iteritems(file_map):
        if container.exists(dest):
            if name != dest and name.lower() == dest.lower():
                # A case change on an OS with a case insensitive file-system.
                continue
            raise ValueError('Cannot rename {0} to {1} as {1} already exists'.format(name, dest))
    if len(tuple(itervalues(file_map))) != len(set(itervalues(file_map))):
        raise ValueError('Cannot rename, the set of destination files contains duplicates')
    link_map = {}
    for current_name, new_name in iteritems(file_map):
        container.rename(current_name, new_name)
        if new_name != container.opf_name:  # OPF is handled by the container
            link_map[current_name] = new_name
    replace_links(container, link_map, replace_in_opf=True)
def replace_file(container, name, path, basename, force_mt=None):
    '''Replace the file *name* in the container with the contents of the file
    at *path*, renaming it to (a sanitized, collision-free version of)
    *basename* in the same folder and updating all links and the manifest
    media-type.

    :param force_mt: media type to record; guessed from the name if None
    '''
    dirname, base = name.rpartition('/')[0::2]
    nname = sanitize_file_name(basename)
    if dirname:
        nname = dirname + '/' + nname
    with open(path, 'rb') as src:
        if name != nname:
            # Avoid clobbering an existing file: append _1, _2, ... until free
            count = 0
            b, e = nname.rpartition('.')[0::2]
            while container.exists(nname):
                count += 1
                nname = b + ('_%d.%s' % (count, e))
            rename_files(container, {name:nname})
            mt = force_mt or container.guess_type(nname)
            container.mime_map[nname] = mt
            # Keep the manifest's media-type in sync with the new name
            for itemid, q in iteritems(container.manifest_id_map):
                if q == nname:
                    for item in container.opf_xpath('//opf:manifest/opf:item[@href and @id="%s"]' % itemid):
                        item.set('media-type', mt)
            container.dirty(container.opf_name)
        with container.open(nname, 'wb') as dest:
            shutil.copyfileobj(src, dest)
def mt_to_category(container, mt):
    '''Map a media type to one of the coarse file categories used when
    recommending/rationalizing folders (text/style/font/opf/toc/...).'''
    from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES
    from calibre.ebooks.oeb.polish.utils import OEB_FONTS, guess_type
    if mt in OEB_DOCS:
        return 'text'
    if mt in OEB_STYLES:
        return 'style'
    if mt in OEB_FONTS:
        return 'font'
    if mt == guess_type('a.opf'):
        return 'opf'
    if mt == guess_type('a.ncx'):
        return 'toc'
    # Fall back to the major part of the media type, e.g. image/png -> image
    return mt.partition('/')[0]
def get_recommended_folders(container, names):
    ''' Return the folders that are recommended for the given filenames. The
    recommendation is based on where the majority of files of the same type are
    located in the container. If no files of a particular type are present, the
    recommended folder is assumed to be the folder containing the OPF file. '''
    from calibre.ebooks.oeb.polish.utils import guess_type
    counts = defaultdict(Counter)
    # Count how many files of each category live in each folder
    for name, mt in iteritems(container.mime_map):
        folder = name.rpartition('/')[0] if '/' in name else ''
        counts[mt_to_category(container, mt)][folder] += 1
    # Fix: counts is a defaultdict, so counts['opf'] can never raise KeyError —
    # the old ``try: ... except KeyError`` was dead code. With no OPF present the
    # subscript on most_common(1) raised an uncaught IndexError and the lookup
    # also inserted a spurious empty Counter into counts, which would then crash
    # the recommendations comprehension below. Use .get() so neither happens.
    opf_counter = counts.get('opf')
    opf_folder = opf_counter.most_common(1)[0][0] if opf_counter else ''
    # For each category, recommend the folder holding the majority of its files
    recommendations = {category:counter.most_common(1)[0][0] for category, counter in iteritems(counts)}
    return {n:recommendations.get(mt_to_category(container, guess_type(os.path.basename(n))), opf_folder) for n in names}
def normalize_case(container, val):
    '''Return *val* with each path component's case adjusted to match an
    existing directory entry that differs only in case, when one exists.'''
    def entries(dirpath):
        # Tolerate missing/unreadable directories
        try:
            return os.listdir(dirpath)
        except OSError:
            return ()
    components = val.split('/')
    fixed = []
    for idx in range(len(components)):
        comp = components[idx]
        partial = '/'.join(components[:idx + 1])
        abspath = container.name_to_abspath(partial)
        lowered = comp.lower()
        matches = [e for e in entries(os.path.dirname(abspath)) if e != comp and e.lower() == lowered]
        fixed.append(matches[0] if matches else comp)
    return '/'.join(fixed)
def rationalize_folders(container, folder_type_map):
    '''Compute a rename map that moves every file into the folder chosen for
    its category in *folder_type_map* (category -> folder). META-INF/ entries
    are never moved. Returns {current_name: new_name} for files that move.'''
    all_names = set(container.mime_map)
    new_names = set()
    name_map = {}
    # Normalize target folder case to match what is actually on disk
    for key in tuple(folder_type_map):
        val = folder_type_map[key]
        folder_type_map[key] = normalize_case(container, val)
    for name in all_names:
        if name.startswith('META-INF/'):
            continue
        category = mt_to_category(container, container.mime_map[name])
        folder = folder_type_map.get(category, None)
        if folder is not None:
            bn = posixpath.basename(name)
            new_name = posixpath.join(folder, bn)
            if new_name != name:
                # Avoid clashes with both existing names and already planned moves
                c = 0
                while new_name in all_names or new_name in new_names:
                    c += 1
                    n, ext = bn.rpartition('.')[0::2]
                    new_name = posixpath.join(folder, '%s_%d.%s' % (n, c, ext))
                name_map[name] = new_name
                new_names.add(new_name)
    return name_map
def remove_links_in_sheet(href_to_name, sheet, predicate):
    '''Remove @import rules and url() property values from a parsed stylesheet
    for which *predicate*(name, href, None) is True. Returns True if the sheet
    was modified.'''
    doomed = []
    changed = False
    for idx, rule in enumerate(sheet):
        if rule.type == rule.IMPORT_RULE:
            name = href_to_name(rule.href)
            if predicate(name, rule.href, None):
                doomed.append(idx)
    # Delete from the end so earlier indices stay valid
    for idx in reversed(doomed):
        sheet.deleteRule(idx)
        changed = True
    for dec in iter_declarations(sheet):
        changed = remove_links_in_declaration(href_to_name, dec, predicate) or changed
    return changed
def remove_links_in_declaration(href_to_name, style, predicate):
    '''Remove url() values from a CSS declaration block for which
    *predicate*(name, uri, None) is True. Returns True if modified.'''
    def is_doomed_value(v):
        if v.type != v.URI:
            return False
        return predicate(href_to_name(v.uri), v.uri, None)
    changed = False
    for prop in tuple(style.getProperties(all=True)):
        changed = remove_property_value(prop, is_doomed_value) or changed
    return changed
def remove_links_to(container, predicate):
    ''' predicate must be a function that takes the arguments (name, href,
    fragment=None) and returns True iff the link should be removed '''
    from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, XHTML, XPath, iterlinks
    stylepath = XPath('//h:style')
    styleattrpath = XPath('//*[@style]')
    changed = set()
    for name, mt in iteritems(container.mime_map):
        removed = False
        if mt in OEB_DOCS:
            root = container.parsed(name)
            # 1) Links in HTML attributes/text (CSS handled separately below)
            for el, attr, href, pos in iterlinks(root, find_links_in_css=False):
                hname = container.href_to_name(href, name)
                frag = href.partition('#')[-1]
                if predicate(hname, href, frag):
                    if attr is None:
                        # attr is None when the link is in element text content
                        el.text = None
                    else:
                        if el.tag == XHTML('link') or el.tag == XHTML('img'):
                            # These elements are meaningless without their link
                            extract(el)
                        else:
                            del el.attrib[attr]
                    removed = True
            # 2) Links inside <style> elements
            for tag in stylepath(root):
                if tag.text and (tag.get('type') or 'text/css').lower() == 'text/css':
                    sheet = container.parse_css(tag.text)
                    if remove_links_in_sheet(partial(container.href_to_name, base=name), sheet, predicate):
                        tag.text = css_text(sheet)
                        removed = True
            # 3) Links inside style="" attributes
            for tag in styleattrpath(root):
                style = tag.get('style')
                if style:
                    style = container.parse_css(style, is_declaration=True)
                    if remove_links_in_declaration(partial(container.href_to_name, base=name), style, predicate):
                        removed = True
                        tag.set('style', css_text(style))
        elif mt in OEB_STYLES:
            removed = remove_links_in_sheet(partial(container.href_to_name, base=name), container.parsed(name), predicate)
        if removed:
            changed.add(name)
    for i in changed:
        container.dirty(i)
    return changed
def get_spine_order_for_all_files(container):
    '''Map every name reachable from the spine to a (spine_position, link_order)
    pair. Spine items themselves get link_order -1; other resources get the
    index of the first link to them in the first spine item linking to them.'''
    linear, non_linear = [], []
    for name, is_linear in container.spine_names:
        (linear if is_linear else non_linear).append(name)
    ordered_names = linear + non_linear
    in_spine = frozenset(ordered_names)
    order_map = {}
    for spine_pos, name in enumerate(ordered_names):
        order_map.setdefault(name, (spine_pos, -1))
        for link_order, href in enumerate(container.iterlinks(name, get_line_numbers=False)):
            target = container.href_to_name(href, name)
            if target not in in_spine:
                # setdefault keeps the earliest occurrence
                order_map.setdefault(target, (spine_pos, link_order))
    return order_map
| 15,346 | Python | .py | 352 | 33.954545 | 133 | 0.604016 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,395 | pretty.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/pretty.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import textwrap
# from lxml.etree import Element
from calibre import force_unicode
from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, SVG, XHTML, XPNSMAP, barename, serialize
from calibre.ebooks.oeb.polish.container import OPF_NAMESPACES
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.utils.icu import sort_key
from polyglot.builtins import iteritems
def isspace(x):
    # True if x consists only of tab, LF, FF, CR and space characters.
    # Deliberately narrower than str.isspace(): e.g. NBSP is significant text.
    return not x.strip('\t\n\f\r ')
def pretty_xml_tree(elem, level=0, indent='  '):
    ''' XML beautifier, assumes that elements that have children do not have
    textual content. Also assumes that there is no text immediately after
    closing tags. These are true for opf/ncx and container.xml files. If either
    of the assumptions are violated, there should be no data loss, but pretty
    printing won't produce optimal results.'''
    def ws_only(text):
        # same character set as the module level isspace(), inlined so this
        # function is self contained
        return not text.strip('\t\n\f\r ')
    if (not elem.text and len(elem) > 0) or (elem.text and ws_only(elem.text)):
        elem.text = '\n' + (indent * (level + 1))
    last = len(elem) - 1
    for pos, child in enumerate(elem):
        pretty_xml_tree(child, level=level + 1, indent=indent)
        if not child.tail or ws_only(child.tail):
            # The last child's tail indents the parent's closing tag
            depth = level if pos == last else level + 1
            child.tail = '\n' + (indent * depth)
def pretty_opf(root):
    '''Normalize the ordering of an OPF tree in place: dc: metadata first
    (title, then creator, then the rest), and manifest items grouped by type.'''
    # Put all dc: tags first starting with title and author. Preserve order for
    # the rest.
    def dckey(x):
        return {'title':0, 'creator':1}.get(barename(x.tag), 2)
    for metadata in root.xpath('//opf:metadata', namespaces=OPF_NAMESPACES):
        dc_tags = metadata.xpath('./*[namespace-uri()="%s"]' % OPF_NAMESPACES['dc'])
        dc_tags.sort(key=dckey)
        # Insert in reverse so the final document order matches dc_tags order
        for x in reversed(dc_tags):
            metadata.insert(0, x)
    # Group items in the manifest
    spine_ids = root.xpath('//opf:spine/opf:itemref/@idref', namespaces=OPF_NAMESPACES)
    spine_ids = {x:i for i, x in enumerate(spine_ids)}
    def manifest_key(x):
        # Sort key: (category, within-category order). Text documents are
        # ordered by spine position, everything else alphabetically by href.
        # The secondary key is an int for cat 0 and a sort_key for the rest;
        # the two types are never compared with each other because the primary
        # category differs.
        mt = x.get('media-type', '')
        href = x.get('href', '')
        ext = href.rpartition('.')[-1].lower()
        cat = 1000
        if mt in OEB_DOCS:
            cat = 0
        elif mt == guess_type('a.ncx'):
            cat = 1
        elif mt in OEB_STYLES:
            cat = 2
        elif mt.startswith('image/'):
            cat = 3
        elif ext in {'otf', 'ttf', 'woff', 'woff2'}:
            cat = 4
        elif mt.startswith('audio/'):
            cat = 5
        elif mt.startswith('video/'):
            cat = 6
        if cat == 0:
            i = spine_ids.get(x.get('id', None), 1000000000)
        else:
            i = sort_key(href)
        return (cat, i)
    for manifest in root.xpath('//opf:manifest', namespaces=OPF_NAMESPACES):
        try:
            children = sorted(manifest, key=manifest_key)
        except AttributeError:
            # Comment nodes lack .get(); dont sort since that would mess up the comments
            continue
        for x in reversed(children):
            manifest.insert(0, x)
# Fully qualified (namespaced) svg tag, treated as a block during prettying
SVG_TAG = SVG('svg')
# XHTML tags that are laid out as blocks and should be surrounded by blank
# lines when pretty printing HTML
BLOCK_TAGS = frozenset(map(XHTML, (
    'address', 'article', 'aside', 'audio', 'blockquote', 'body', 'canvas', 'col', 'colgroup', 'dd',
    'div', 'dl', 'dt', 'fieldset', 'figcaption', 'figure', 'footer', 'form',
    'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'li',
    'noscript', 'ol', 'output', 'p', 'pre', 'script', 'section', 'style', 'table', 'tbody', 'td',
    'tfoot', 'th', 'thead', 'tr', 'ul', 'video', 'img'))) | {SVG_TAG}
def isblock(x):
    '''True if x is rendered as a block: comments/PIs (callable tag), tagless
    nodes, and any tag in BLOCK_TAGS.'''
    return callable(x.tag) or not x.tag or x.tag in BLOCK_TAGS
def has_only_blocks(x):
    '''True if x contains only block children with no significant text between
    them. A childless named tag is never considered a block container.'''
    if hasattr(x.tag, 'split') and len(x) == 0:
        # Tag with no children
        return False
    if x.text and not isspace(x.text):
        return False
    return all(
        isblock(child) and not (child.tail and not isspace(child.tail))
        for child in x)
def indent_for_tag(x):
    '''Return the whitespace indentation preceding tag x on its line, or ''.'''
    prev = x.getprevious()
    preceding = x.getparent().text if prev is None else prev.tail
    if not preceding:
        return ''
    candidate = preceding.rpartition('\n')[-1]
    return candidate if isspace(candidate) else ''
def set_indent(elem, attr, indent):
    '''Ensure elem.<attr> (text or tail) ends with a line containing exactly
    *indent*, so the following content is indented correctly.'''
    current = getattr(elem, attr)
    if not current:
        current = indent
    else:
        lines = current.splitlines()
        if isspace(lines[-1]):
            # Replace the trailing whitespace-only line
            lines[-1] = indent
        else:
            lines.append(indent)
        current = '\n'.join(lines)
    setattr(elem, attr, current)
def pretty_block(parent, level=1, indent='  '):
    ''' Surround block tags with blank lines and recurse into child block tags
    that contain only other block tags '''
    if not parent.text or isspace(parent.text):
        parent.text = ''
    # Table row/cell tags get single newlines, everything else blank lines
    nn = '\n' if hasattr(parent.tag, 'strip') and barename(parent.tag) in {'tr', 'td', 'th'} else '\n\n'
    parent.text = parent.text + nn + (indent * level)
    for i, child in enumerate(parent):
        if isblock(child) and has_only_blocks(child):
            pretty_block(child, level=level+1, indent=indent)
        elif child.tag == SVG_TAG:
            # SVG islands are prettied as plain XML
            pretty_xml_tree(child, level=level, indent=indent)
        l = level
        if i == len(parent) - 1:
            # Last child's tail indents the parent's closing tag
            l -= 1
        if not child.tail or isspace(child.tail):
            child.tail = ''
        child.tail = child.tail + nn + (indent * l)
def pretty_script_or_style(container, child):
    '''Re-indent the textual content of a <script> or <style> element so it
    lines up with the tag's own indentation. <style> content is additionally
    pretty printed as CSS.'''
    if child.text:
        indent = indent_for_tag(child)
        if child.tag.endswith('style'):
            child.text = force_unicode(pretty_css(container, '', child.text), 'utf-8')
        child.text = textwrap.dedent(child.text)
        # Prefix every non-empty line with the tag's indentation
        child.text = '\n' + '\n'.join([(indent + x) if x else '' for x in child.text.splitlines()])
        set_indent(child, 'text', indent)
def pretty_html_tree(container, root):
    '''Pretty print an XHTML tree in place: head as XML, body as nested blocks,
    and script/style contents re-indented. container may be None, in which
    case script/style contents are left alone.'''
    root.text = '\n\n'
    for child in root:
        child.tail = '\n\n'
        if hasattr(child.tag, 'endswith') and child.tag.endswith('}head'):
            pretty_xml_tree(child)
    for body in root.findall('h:body', namespaces=XPNSMAP):
        pretty_block(body)
        # Special case the handling of a body that contains a single block tag
        # with all content. In this case we prettify the containing block tag
        # even if it has non block children.
        if (len(body) == 1 and not callable(body[0].tag) and isblock(body[0]) and not has_only_blocks(
            body[0]) and barename(body[0].tag) not in (
                'pre', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6') and len(body[0]) > 0):
            pretty_block(body[0], level=2)
    if container is not None:
        # Handle <script> and <style> tags
        for child in root.xpath('//*[local-name()="script" or local-name()="style"]'):
            pretty_script_or_style(container, child)
def fix_html(container, raw):
    ' Fix any parsing errors in the HTML represented as a string in raw. Fixing is done using the HTML5 parsing algorithm. '
    tree = container.parse_xhtml(raw)
    return serialize(tree, 'text/html')
def pretty_html(container, name, raw):
    ' Pretty print the HTML represented as a string in raw '
    tree = container.parse_xhtml(raw)
    pretty_html_tree(container, tree)
    return serialize(tree, 'text/html')
def pretty_css(container, name, raw):
    ' Pretty print the CSS represented as a string in raw '
    return serialize(container.parse_css(raw), 'text/css')
def pretty_xml(container, name, raw):
    ' Pretty print the XML represented as a string in raw. If ``name`` is the name of the OPF, extra OPF-specific prettying is performed. '
    tree = container.parse_xml(raw)
    if name == container.opf_name:
        pretty_opf(tree)
    pretty_xml_tree(tree)
    return serialize(tree, 'text/xml')
def fix_all_html(container):
    ' Fix any parsing errors in all HTML files in the container. Fixing is done using the HTML5 parsing algorithm. '
    for name, mt in iteritems(container.mime_map):
        if mt not in OEB_DOCS:
            continue
        # Parsing re-builds the tree with the HTML5 algorithm; marking the
        # file dirty causes the fixed tree to be serialized back out
        container.parsed(name)
        container.dirty(name)
def pretty_all(container):
    ' Pretty print all HTML/CSS/XML files in the container '
    xml_types = {guess_type('a.ncx'), guess_type('a.xml'), guess_type('a.svg')}
    for name, mt in iteritems(container.mime_map):
        dirtied = True
        if mt in OEB_DOCS:
            pretty_html_tree(container, container.parsed(name))
        elif mt in OEB_STYLES:
            # Parsing normalizes the sheet; serialization pretty prints it
            container.parsed(name)
        elif name == container.opf_name:
            root = container.parsed(name)
            pretty_opf(root)
            pretty_xml_tree(root)
        elif mt in xml_types:
            pretty_xml_tree(container.parsed(name))
        else:
            dirtied = False
        if dirtied:
            container.dirty(name)
| 8,905 | Python | .py | 206 | 35.592233 | 139 | 0.61518 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,396 | tts.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tts.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
import io
import json
import os
import sys
from collections import defaultdict
from contextlib import suppress
from functools import partial
from typing import NamedTuple
from lxml.etree import ElementBase as Element
from lxml.etree import tostring as _tostring
from calibre.ebooks.html_transform_rules import unwrap_tag
from calibre.ebooks.oeb.base import EPUB, EPUB_NS, SMIL_NS, barename
from calibre.ebooks.oeb.polish.container import OEB_DOCS, seconds_to_timestamp
from calibre.ebooks.oeb.polish.errors import UnsupportedContainerType
from calibre.ebooks.oeb.polish.upgrade import upgrade_book
from calibre.spell.break_iterator import split_into_sentences_for_tts_embed
from calibre.utils.localization import canonicalize_lang, get_lang
class Sentence(NamedTuple):
    # A single sentence to be converted to speech.
    # elem_id: id of the wrapper element marking this sentence in the HTML
    # text: the sentence text; lang: canonicalized language code
    # voice: voice name, '' meaning the default voice
    elem_id: str
    text: str
    lang: str
    voice: str
def tostring(x) -> str:
    '''Serialize the lxml node *x* as a unicode string.'''
    return _tostring(x, encoding='unicode')
def lang_for_elem(elem, parent_lang):
    '''Language for elem: the first of its lang/xml_lang/xml:lang attributes,
    canonicalized; falls back to parent_lang when absent or unrecognized.'''
    declared = elem.get('lang') or elem.get('xml_lang') or elem.get('{http://www.w3.org/XML/1998/namespace}lang')
    return canonicalize_lang(declared) or parent_lang
def has_text(elem):
    '''True if elem has significant direct text: either leading text or the
    tail of any direct child. Text inside descendants does not count.'''
    if elem.text and elem.text.strip():
        return True
    return any(child.tail and child.tail.strip() for child in elem)
class Chunk(NamedTuple):
    # A contiguous run of text inside a paragraph-like element, used to map
    # sentence offsets in the joined text back to DOM nodes.
    # child is None for the element's own leading text; otherwise the child
    # whose text (is_tail=False) or tail (is_tail=True) this chunk is.
    # start_at is the offset of this chunk in the joined text.
    child: Element | None
    text: str
    start_at: int
    is_tail: bool = False
# Inline tags that do not interrupt a sentence: their text is considered part
# of the surrounding text when searching for sentence boundaries
continued_tag_names = frozenset({
    'a', 'span', 'em', 'strong', 'b', 'i', 'u', 'code', 'sub', 'sup', 'cite', 'q', 'kbd'
})
# Tags whose content is never spoken
ignored_tag_names = frozenset({
    'img', 'object', 'script', 'style', 'head', 'title', 'form', 'input', 'br', 'hr', 'map', 'textarea', 'svg', 'math', 'rp', 'rt', 'rtc',
})
# Prefix for ids of generated sentence wrapper elements
id_prefix = 'cttsw-'
# Attribute used to annotate elements with TTS settings
data_name = 'data-calibre-tts'
# Sentinel voice meaning "do not speak this element"
skip_name = '__skip__'
def unmark_sentences_in_html(root):
    '''Undo mark_sentences_in_html(): strip generated sentence ids and dissolve
    wrapper spans that carry no other attributes.'''
    for marked in root.xpath(f'//*[starts-with(@id, "{id_prefix}")]'):
        marked.attrib.pop('id')
        if not marked.attrib and marked.tag and marked.tag.endswith('span'):
            unwrap_tag(marked)
def mark_sentences_in_html(root, lang: str = '', voice: str = '') -> list[Sentence]:
    '''Split the text of the HTML tree *root* into sentences and wrap every
    sentence in an element carrying a generated id (a new <span> where no
    existing element fits). Returns the sentences in document order. The DOM
    is modified in place; simple inline elements straddling a sentence
    boundary are cloned so each part can be wrapped separately.'''
    root_lang = canonicalize_lang(lang_for_elem(root, canonicalize_lang(lang or get_lang())) or 'en')
    root_voice = voice
    # Never clash with pre-existing ids when generating wrapper ids
    seen_ids = set(root.xpath('//*/@id'))
    id_counter = 1
    ans = []
    # original element -> clones made of it; used for empty-clone cleanup below
    clones_map = defaultdict(list)
    class Parent:
        # Tracks one paragraph-like element: its joined text (self.texts is a
        # list of Chunks), inherited lang/voice, and the machinery to wrap a
        # [start, start+length) span of that joined text in an id-carrying
        # element (wrap_sentence and its helpers).
        def __init__(self, elem, tag_name, parent_lang, parent_voice, child_lang=''):
            self.elem = elem
            self.tag_name = tag_name
            self.lang = child_lang or lang_for_elem(elem, parent_lang)
            self.parent_lang = parent_lang
            self.parent_voice = parent_voice
            q = elem.get(data_name, '')
            self.voice = parent_voice
            # The data attribute is either a JSON object or a bare voice name
            if q.startswith('{'): # }
                with suppress(Exception):
                    q = json.loads(q)
                    self.voice = q.get('voice') or parent_voice
            else:
                self.voice = q or parent_voice
            self.pos = 0
            self.texts = []
            if elem.text and elem.text.strip():
                self.texts.append(Chunk(None, elem.text, self.pos))
                self.pos += len(elem.text)
            self.children = tuple(elem.iterchildren())
            self.has_tail = bool((elem.tail or '').strip())
        def add_simple_child(self, elem):
            # A continued inline child: its text joins the parent's text run
            if text := elem.text:
                self.texts.append(Chunk(elem, text, self.pos))
                self.pos += len(text)
        def add_tail(self, elem, text):
            self.texts.append(Chunk(elem, text, self.pos, is_tail=True))
            self.pos += len(text)
        def commit(self) -> None:
            # Split the accumulated text into sentences and wrap each one.
            # Also wraps sentences found in this element's own tail, which
            # belong to the parent's lang/voice.
            if self.texts:
                text = ''.join(c.text for c in self.texts)
                self.pos = 0
                for start, length in split_into_sentences_for_tts_embed(text, self.lang):
                    stext = text[start:start+length]
                    if stext.strip() and self.voice != '__skip__':
                        elem_id = self.wrap_sentence(start, length)
                        ans.append(Sentence(elem_id, stext, self.lang, self.voice))
            if self.has_tail:
                p = self.elem.getparent()
                spans = []
                before = after = None
                for start, length in split_into_sentences_for_tts_embed(self.elem.tail, self.parent_lang):
                    end = start + length
                    text = self.elem.tail[start:end]
                    if not text.strip() or self.parent_voice == '__skip__':
                        continue
                    if before is None:
                        before = self.elem.tail[:start]
                    span = self.make_wrapper(text, p)
                    spans.append(span)
                    ans.append(Sentence(span.get('id'), text, self.parent_lang, self.parent_voice))
                    after = self.elem.tail[end:]
                self.elem.tail = before
                if after and spans:
                    spans[-1].tail = after
                idx = p.index(self.elem)
                p[idx+1:idx+1] = spans
        def make_into_wrapper(self, elem: Element) -> str:
            # Assign elem the next free generated id and return it
            nonlocal id_counter
            while True:
                q = f'{id_prefix}{id_counter}'
                if q not in seen_ids:
                    elem.set('id', q)
                    seen_ids.add(q)
                    return q
                id_counter += 1
        def make_wrapper(self, text: str | None, elem: Element | None = None) -> Element:
            # Create a new span (in elem's namespace) carrying a generated id
            if elem is None:
                elem = self.elem
            ns, sep, _ = elem.tag.partition('}')
            ans = elem.makeelement(ns + sep + 'span')
            ans.text = text
            self.make_into_wrapper(ans)
            return ans
        def replace_reference_to_child(self, elem: Element, replacement: Element) -> None:
            # Fix up pending Chunks that still point at a node we just re-parented
            for i in range(self.pos + 1, len(self.texts)):
                if self.texts[i].child is elem:
                    self.texts[i] = self.texts[i]._replace(child=replacement)
                else:
                    break
        def wrap_contents(self, first_child: Element | None, last_child: Element) -> Element:
            # Move the children from first_child (or the leading text when
            # None) through last_child into a new wrapper span
            w = self.make_wrapper(self.elem.text if first_child is None else None)
            in_range = False
            for c in self.elem.iterchildren('*'):
                if not in_range and (first_child is None or first_child is c):
                    in_range = True
                    pos = self.elem.index(c)
                    self.elem.insert(pos, w)
                    w.append(c)
                    first_child = c
                if in_range:
                    if c is last_child:
                        if last_child is not first_child:
                            w.append(c)
                        break
                    else:
                        w.append(c)
            self.replace_reference_to_child(last_child, w)
            return w
        def clone_simple_element(self, elem: Element) -> Element:
            # Insert a copy of elem right after it (without id/name, which must
            # stay unique) so a sentence boundary can fall inside elem
            ans = elem.makeelement(elem.tag)
            ans.attrib.update(elem.attrib)
            ans.attrib.pop('id', None)
            ans.attrib.pop('name', None)
            ans.text, ans.tail = elem.text, elem.tail
            p = elem.getparent()
            idx = p.index(elem)
            p.insert(idx + 1, ans)
            self.replace_reference_to_child(elem, ans)
            clones_map[elem].append(ans)
            return ans
        def wrap_sentence(self, start: int, length: int) -> str:
            # Wrap the joined-text span [start, start+length) in an element and
            # return its id. Cases are enumerated by where the span starts and
            # ends: in the parent's leading text, in a child's text, or in a
            # child's tail.
            end = start + length
            start_chunk = end_chunk = -1
            start_offset = end_offset = 0
            for i in range(self.pos, len(self.texts)):
                c = self.texts[i]
                if c.start_at <= start:
                    start_chunk = i
                    start_offset = start - c.start_at
                if end <= c.start_at + len(c.text):
                    end_chunk = i
                    self.pos = i
                    end_offset = end - c.start_at
                    break
            else:
                self.pos = end_chunk = len(self.texts) - 1
                end_offset = len(self.texts[-1].text)
            assert start_chunk > -1
            s, e = self.texts[start_chunk], self.texts[end_chunk]
            if s.child is None:  # start in leading text of parent element
                if e is s:  # end also in leading text of parent element
                    before, sentence, after = s.text[:start_offset], s.text[start_offset:end_offset], s.text[end_offset:]
                    self.elem.text = before
                    w = self.make_wrapper(sentence)
                    self.elem.insert(0, w)
                    w.tail = after
                    if after:
                        self.texts[self.pos] = Chunk(w, after, end, is_tail=True)
                    else:
                        self.pos += 1
                    return w.get('id')
                if e.is_tail:  # ending in the tail of a child
                    before_start, after_start = s.text[:start_offset], s.text[start_offset:]
                    included, after = e.text[:end_offset], e.text[end_offset:]
                    e.child.tail = included
                    self.elem.text = after_start
                    w = self.wrap_contents(None, e.child)
                    w.tail = after
                    self.elem.text = before_start
                    if after:
                        self.texts[self.pos] = Chunk(w, after, end, is_tail=True)
                    else:
                        self.pos += 1
                    return w.get('id')
                # ending inside a child
                before_start, after_start = s.text[:start_offset], s.text[start_offset:]
                included, after = e.text[:end_offset], e.text[end_offset:]
                e.child.text = included
                c = self.clone_simple_element(e.child)
                c.text = after
                e.child.tail = None
                self.elem.text = after_start
                w = self.wrap_contents(None, e.child)
                self.elem.text = before_start
                if after:
                    self.texts[self.pos] = Chunk(c, c.text, end)
                else:
                    self.pos += 1
                return w.get('id')
            # starting in a child text or tail
            if s.is_tail:
                if e.is_tail:
                    if s is e:  # end in tail of same element
                        before, sentence, after = s.text[:start_offset], s.text[start_offset:end_offset], s.text[end_offset:]
                        s.child.tail = before
                        w = self.make_wrapper(sentence)
                        w.tail = after
                        idx = self.elem.index(s.child)
                        self.elem.insert(idx + 1, w)
                        if after:
                            self.texts[self.pos] = Chunk(w, after, end, is_tail=True)
                        else:
                            self.pos += 1
                        return w.get('id')
                    s.child.tail, after_start = s.text[:start_offset], s.text[start_offset:]
                    e.child.tail, after_end = e.text[:end_offset], e.text[end_offset:]
                    idx = self.elem.index(s.child)
                    w = self.wrap_contents(self.elem[idx+1], e.child)
                    w.text, w.tail = after_start, after_end
                    if after_end:
                        self.texts[self.pos] = Chunk(w, after_end, end, is_tail=True)
                    else:
                        self.pos += 1
                    return w.get('id')
                # end inside some subsequent simple element
                s.child.tail, after_start = s.text[:start_offset], s.text[start_offset:]
                e.child.text, after_end = e.text[:end_offset], e.text[end_offset:]
                c = self.clone_simple_element(e.child)
                c.text = after_end
                e.child.tail = None
                w = self.wrap_contents(self.elem[self.elem.index(s.child) + 1], e.child)
                w.text = after_start
                if after_end:
                    self.texts[self.pos] = Chunk(c, after_end, end)
                else:
                    self.pos += 1
                return w.get('id')
            # start is in the text of a simple child
            if s.child is e.child:
                if e.is_tail:  # ending in tail of element we start in
                    before_start, after_start = s.text[:start_offset], s.text[start_offset:]
                    c = self.clone_simple_element(s.child)
                    s.child.text, s.child.tail = before_start, None
                    before_end, after_end = e.text[:end_offset], e.text[end_offset:]
                    c.text, c.tail = after_start, before_end
                    w = self.wrap_contents(c, c)
                    w.tail = after_end
                    if after_end:
                        self.texts[self.pos] = Chunk(w, after_end, end, is_tail=True)
                    else:
                        self.pos += 1
                    return w.get('id')
                # start and end in text of element
                before, sentence, after = s.text[:start_offset], s.text[start_offset:end_offset], s.text[end_offset:]
                c = self.clone_simple_element(s.child)
                s.child.text, s.child.tail = before, None
                c.text, c.tail = sentence, None
                c2 = self.clone_simple_element(c)
                c2.text = after
                self.make_into_wrapper(c)
                if after:
                    self.texts[self.pos] = Chunk(c2, after, end)
                else:
                    self.pos += 1
                return c.get('id')
            # end is in a subsequent simple child or tail of one
            s.child.text, after_start = s.text[:start_offset], s.text[start_offset:]
            c = self.clone_simple_element(s.child)
            c.text, s.child.tail = after_start, None
            if e.is_tail:
                e.child.tail, after_end = e.text[:end_offset], e.text[end_offset:]
                w = self.wrap_contents(c, e.child)
                w.tail = after_end
                if after_end:
                    self.texts[self.pos] = Chunk(w, after_end, end, is_tail=True)
                else:
                    self.pos += 1
                return w.get('id')
            # end is in text of subsequent simple child
            e.child.text, after_end = e.text[:end_offset], e.text[end_offset:]
            c2 = self.clone_simple_element(e.child)
            c2.text, e.child.tail = after_end, None
            w = self.wrap_contents(c, e.child)
            if after_end:
                self.texts[self.pos] = Chunk(c2, after_end, end)
            else:
                self.pos += 1
            return w.get('id')
    # Depth-first traversal: simple inline children are merged into the
    # parent's text run, everything else becomes a Parent of its own
    stack_of_parents = [Parent(elem, 'body', root_lang, root_voice) for elem in root.iterchildren('*') if barename(elem.tag).lower() == 'body']
    while stack_of_parents:
        p = stack_of_parents.pop()
        simple_allowed = True
        children_to_process = []
        for child in p.children:
            child_voice = child.get(data_name, '')
            child_lang = lang_for_elem(child, p.lang)
            child_tag_name = barename(child.tag).lower() if isinstance(child.tag, str) else ''
            if simple_allowed and child_lang == p.lang and child_voice == p.voice and child_tag_name in continued_tag_names and len(child) == 0:
                p.add_simple_child(child)
            elif child_tag_name not in ignored_tag_names:
                # Once a complex child is seen, later inline children cannot be
                # merged into the same text run
                simple_allowed = False
                children_to_process.append(Parent(child, child_tag_name, p.lang, p.voice, child_lang=child_lang))
            if simple_allowed and (text := child.tail):
                p.add_tail(child, text)
        p.commit()
        stack_of_parents.extend(reversed(children_to_process))
    # Drop clones (and their sources) that ended up completely empty
    for src_elem, clones in clones_map.items():
        for clone in clones + [src_elem]:
            if not clone.text and not clone.tail and not clone.get('id') and not clone.get('name'):
                if (p := clone.getparent()) is not None:
                    p.remove(clone)
    return ans
class PerFileData:
    '''Per spine-file working state for embed_tts().'''
    def __init__(self, name: str):
        self.name = name
        # Parsed lxml tree, set during the "Processing HTML" stage
        self.root = None
        # All sentences found in this file, in document order
        self.sentences: list[Sentence] = []
        # (lang, voice) -> the sentences spoken with that combination
        self.key_map: dict[tuple[str, str], list[Sentence]] = defaultdict(list)
        # Container names of the generated audio and SMIL files
        self.audio_file_name = self.smil_file_name = ''
class ReportProgress:
    '''Default progress reporter for embed_tts(): prints stage headers and
    per-item progress to stdout. The return value (always False) means
    "do not abort".'''
    def __init__(self):
        self.current_stage = ''
    def __call__(self, stage: str, item: str, count: int, total: int) -> bool:
        if stage == self.current_stage:
            # Same stage: overwrite the progress line in place
            frac = count / total
            print(f'\r{frac:4.0%} {item}', end='')
            return False
        # New stage: remember it and print its name on a fresh line
        self.current_stage = stage
        print()
        print(self.current_stage)
        return False
def make_par(container, seq, html_href, audio_href, elem_id, pos, duration) -> None:
    '''Append a SMIL <par> to *seq* pairing the HTML element *elem_id* with the
    audio clip [pos, pos+duration) seconds in *audio_href*.
    NOTE(review): the container parameter is unused here; kept for signature
    symmetry with the other helpers.'''
    seq.set(EPUB('textref'), html_href)
    par = seq.makeelement('par')
    par.tail = seq.text
    # par is appended after this set() call, so ids are par-1, par-2, ...
    par.set('id', f'par-{len(seq) + 1}')
    seq.append(par)
    # Indent the par's children one level deeper than the seq's children
    par.text = seq.text + '    '
    text = par.makeelement('text')
    text.set('src', f'{html_href}#{elem_id}')
    text.tail = par.text
    par.append(text)
    audio = par.makeelement('audio')
    audio.tail = par.tail
    par.append(audio)
    audio.set('src', audio_href)
    audio.set('clipBegin', seconds_to_timestamp(pos))
    audio.set('clipEnd', seconds_to_timestamp(pos + duration))
def remove_embedded_tts(container):
    '''Remove all embedded TTS artefacts: sentence markers in the HTML, the
    media-overlay manifest attributes, the SMIL files and the audio files they
    reference, and the stored media overlay durations.'''
    manifest_items = container.manifest_items
    id_map = {item.get('id'): item for item in manifest_items}
    # Clear all stored media:duration metadata
    container.set_media_overlay_durations({})
    media_files = set()
    for item in manifest_items:
        smil_id = item.attrib.pop('media-overlay', '')
        href = item.get('href')
        if href and smil_id:
            # Un-mark the HTML file this overlay was attached to
            name = container.href_to_name(href, container.opf_name)
            root = container.parsed(name)
            unmark_sentences_in_html(root)
            container.dirty(name)
            smil_item = id_map.get(smil_id)
            if smil_item is not None:
                smil_href = smil_item.get('href')
                if smil_href:
                    smil_name = container.href_to_name(smil_item.get('href'), container.opf_name)
                    media_files.add(smil_name)
                    # Also collect every audio file the SMIL references
                    smil_root = container.parsed(smil_name)
                    for ahref in smil_root.xpath('//*[local-name() = "audio"]/@src'):
                        aname = container.href_to_name(ahref, smil_name)
                        media_files.add(aname)
                    container.remove_from_xml(smil_item)
    for aname in media_files:
        container.remove_item(aname)
    container.dirty(container.opf_name)
def embed_tts(container, report_progress=None, callback_to_download_voices=None):
    '''Generate speech audio for every spine HTML file and embed it as EPUB 3
    Media Overlays (SMIL + m4a). Returns True on success, False when aborted
    via report_progress/callback. Raises UnsupportedContainerType for non-EPUB.'''
    report_progress = report_progress or ReportProgress()
    if container.book_type != 'epub':
        raise UnsupportedContainerType(_('Only the EPUB format has support for embedding speech overlay audio'))
    if container.opf_version_parsed[0] < 3:
        # Media overlays need EPUB 3
        if report_progress(_('Updating book internals'), '', 0, 0):
            return False
        upgrade_book(container, print)
    # Start from a clean slate: drop any previously embedded TTS
    remove_embedded_tts(container)
    from calibre.gui2.tts.piper import HIGH_QUALITY_SAMPLE_RATE, PiperEmbedded
    from calibre_extensions.ffmpeg import transcode_single_audio_stream, wav_header_for_pcm_data
    piper = PiperEmbedded()
    language = container.mi.language
    name_map = {}
    for name, is_linear in container.spine_names:
        if container.mime_map.get(name) in OEB_DOCS:
            name_map[name] = PerFileData(name)
    # Stage 1: mark sentences in every HTML file
    stage = _('Processing HTML')
    if report_progress(stage, '', 0, len(name_map)):
        return False
    all_voices = set()
    total_num_sentences = 0
    files_with_no_sentences = set()
    for i, (name, pfd) in enumerate(name_map.items()):
        pfd.root = container.parsed(name)
        pfd.sentences = mark_sentences_in_html(pfd.root, lang=language)
        if not pfd.sentences:
            files_with_no_sentences.add(name)
        else:
            total_num_sentences += len(pfd.sentences)
            for s in pfd.sentences:
                key = s.lang, s.voice
                pfd.key_map[key].append(s)
                all_voices.add(key)
        container.dirty(name)
        if report_progress(stage, name, i+1, len(name_map)):
            return False
    for rname in files_with_no_sentences:
        name_map.pop(rname)
    # Make sure every (lang, voice) combination is available locally
    if callback_to_download_voices is None:
        piper.ensure_voices_downloaded(iter(all_voices))
    else:
        if not callback_to_download_voices(partial(piper.ensure_voices_downloaded, iter(all_voices))):
            return False
    # Stage 2: synthesize audio and build one SMIL + m4a per file
    stage = _('Converting text to speech')
    if report_progress(stage, '', 0, total_num_sentences):
        return False
    snum = 0
    size_of_audio_data = 0
    mmap = {container.href_to_name(item.get('href'), container.opf_name):item for item in container.manifest_items}
    for name, pfd in name_map.items():
        duration_map = {}
        audio_map: dict[Sentence, tuple[bytes, float]] = {}
        for (lang, voice), sentences in pfd.key_map.items():
            texts = tuple(s.text for s in sentences)
            for i, (audio_data, duration) in enumerate(piper.text_to_raw_audio_data(texts, lang, voice, sample_rate=HIGH_QUALITY_SAMPLE_RATE)):
                s = sentences[i]
                audio_map[s] = audio_data, duration
                size_of_audio_data += len(audio_data)
                snum += 1
                if report_progress(stage, _('Sentence: {0} of {1}').format(snum, total_num_sentences), snum, total_num_sentences):
                    return False
        # Concatenate the raw PCM in document order into a single WAV stream
        wav = io.BytesIO()
        wav.write(wav_header_for_pcm_data(size_of_audio_data, HIGH_QUALITY_SAMPLE_RATE))
        durations = []
        file_duration = 0
        for i, s in enumerate(pfd.sentences):
            audio_data, duration = audio_map[s]
            if duration > 0:
                wav.write(audio_data)
                durations.append((s.elem_id, file_duration, duration))
                file_duration += duration
        if not file_duration:
            continue
        afitem = container.generate_item(name + '.m4a', id_prefix='tts-')
        pfd.audio_file_name = container.href_to_name(afitem.get('href'), container.opf_name)
        smilitem = container.generate_item(name + '.smil', id_prefix='smil-')
        pfd.smil_file_name = container.href_to_name(smilitem.get('href'), container.opf_name)
        with container.open(pfd.smil_file_name, 'w') as sf:
            sf.write(f'''\
<smil xmlns="{SMIL_NS}" xmlns:epub="{EPUB_NS}" version="3.0">
    <body>
        <seq id="generated-by-calibre">
        X
        </seq>
    </body>
</smil>''')
        smil_root = container.parsed(pfd.smil_file_name)
        seq = smil_root[0][0]
        # The X placeholder fixes the indentation text; truncate up to it
        seq.text = seq.text[:seq.text.find('X')]
        audio_href = container.name_to_href(pfd.audio_file_name, pfd.smil_file_name)
        html_href = container.name_to_href(pfd.name, pfd.smil_file_name)
        for elem_id, clip_start, duration in durations:
            make_par(container, seq, html_href, audio_href, elem_id, clip_start, duration)
        if len(seq):
            seq[-1].tail = seq.text[:-2]
        wav.seek(0)
        with container.open(pfd.audio_file_name, 'wb') as m4a:
            transcode_single_audio_stream(wav, m4a)
        container.pretty_print.add(pfd.smil_file_name)
        container.dirty(pfd.smil_file_name)
        container.serialize_item(pfd.smil_file_name)
        html_item = mmap[name]
        html_item.set('media-overlay', smilitem.get('id'))
        duration_map[smilitem.get('id')] = file_duration
        # NOTE(review): duration_map is rebuilt per spine file and stored here
        # every iteration; if set_media_overlay_durations replaces (rather than
        # merges) the stored durations, only the last file's entry survives —
        # verify against the Container API.
        container.set_media_overlay_durations(duration_map)
    return True
def develop():
    '''Manual test driver: embed TTS into the book given on the command line
    and write the result next to it with a -tts suffix.'''
    from calibre.ebooks.oeb.polish.container import get_container
    path = sys.argv[-1]
    container = get_container(path, tweak_mode=True)
    embed_tts(container)
    base, ext = os.path.splitext(path)
    outpath = base + '-tts' + ext
    container.commit(outpath)
    print('Output saved to:', outpath)
if __name__ == '__main__':
develop()
| 24,511 | Python | .py | 526 | 33.81749 | 144 | 0.549219 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,397 | links.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/links.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from collections import defaultdict
from threading import Thread
from calibre import browser
from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, XHTML_MIME, urlunquote
from calibre.ebooks.oeb.polish.check.base import ERROR, INFO, WARN, BaseError
from calibre.ebooks.oeb.polish.cover import get_raster_cover_name
from calibre.ebooks.oeb.polish.parsing import parse_html5
from calibre.ebooks.oeb.polish.replace import remove_links_to
from calibre.ebooks.oeb.polish.utils import OEB_FONTS, actual_case_for_name, corrected_case_for_name, guess_type
from polyglot.builtins import iteritems, itervalues
from polyglot.queue import Empty, Queue
from polyglot.urllib import urlparse
class BadLink(BaseError):
    '''Base check error for a link whose target resource is missing or unusable.'''
    HELP = _('The resource pointed to by this link does not exist. You should'
             ' either fix, or remove the link.')
    level = WARN
class InvalidCharInLink(BadLink):
    '''A link containing a character (``:``) that is not allowed in Windows filenames.'''
    HELP = _('Windows computers do not allow the : character in filenames. For maximum'
             ' compatibility it is best to not use these in filenames/links to files.')
class MalformedURL(BadLink):
    '''A link URL that urlparse() could not parse at all.'''
    HELP = _('This URL could not be parsed.')
    level = ERROR
class CaseMismatch(BadLink):
    '''The case of a link differs from the case of the file it points to.

    The auto-fix rewrites the link so its case matches the actual file.'''

    def __init__(self, href, corrected_name, name, lnum, col):
        BadLink.__init__(self, _('The linked to resource {0} does not exist').format(href), name, line=lnum, col=col)
        self.HELP = _('The case of the link {0} and the case of the actual file it points to {1}'
                      ' do not agree. You should change either the case of the link or rename the file.').format(
                          href, corrected_name)
        self.INDIVIDUAL_FIX = _('Change the case of the link to match the actual file')
        self.corrected_name = corrected_name
        self.href = href

    def __call__(self, container):
        # Build the replacement href, preserving any fragment from the original.
        fragment = urlparse(self.href).fragment
        new_href = container.name_to_href(self.corrected_name, self.name)
        if fragment:
            new_href += '#' + fragment
        old_href = self.href

        class _Replacer:
            # Tracks whether any link was actually rewritten
            replaced = False

            def __call__(self, url):
                if url == old_href:
                    self.replaced = True
                    return new_href
                return url

        fixer = _Replacer()
        container.replace_links(self.name, fixer)
        return fixer.replaced
class BadDestinationType(BaseError):
    '''A link whose target exists but is not an HTML text document.'''
    level = WARN
    def __init__(self, link_source, link_dest, link_elem):
        BaseError.__init__(self, _('Link points to a file that is not a text document'), link_source, line=link_elem.sourceline)
        self.HELP = _('The link "{0}" points to a file <i>{1}</i> that is not a text (HTML) document.'
                      ' Many e-book readers will be unable to follow such a link. You should'
                      ' either remove the link or change it to point to a text document.'
                      ' For example, if it points to an image, you can create small wrapper'
                      ' document that contains the image and change the link to point to that.').format(
                          link_elem.get('href'), link_dest)
        # Kept so callers can identify/remove the offending href later
        self.bad_href = link_elem.get('href')
class BadDestinationFragment(BaseError):
    '''A link whose fragment (#anchor) does not exist in the target file.'''
    level = WARN
    def __init__(self, link_source, link_dest, link_elem, fragment):
        BaseError.__init__(self, _('Link points to a location not present in the target file'), link_source, line=link_elem.sourceline)
        self.bad_href = link_elem.get('href')
        self.HELP = _('The link "{0}" points to a location <i>{1}</i> in the file {2} that does not exist.'
                      ' You should either remove the location so that the link points to the top of the file,'
                      ' or change the link to point to the correct location.').format(
                          self.bad_href, fragment, link_dest)
class FileLink(BadLink):
    '''A link using the file:// URL scheme.'''
    HELP = _('This link uses the file:// URL scheme. This does not work with many e-book readers.'
             ' Remove the file:// prefix and make sure the link points to a file inside the book.')
class LocalLink(BadLink):
    '''A link pointing to a file on the local filesystem, outside the book.'''
    HELP = _('This link points to a file outside the book. It will not work if the'
             ' book is read on any computer other than the one it was created on.'
             ' Either fix or remove the link.')
class EmptyLink(BadLink):
    '''A link with an empty href.'''
    HELP = _('This link is empty. This is almost always a mistake. Either fill in the link destination or remove the link tag.')
class UnreferencedResource(BadLink):
    '''A resource (stylesheet/image/font/media) not reachable from any spine document.'''
    HELP = _('This file is included in the book but not referred to by any document in the spine.'
             ' This means that the file will not be viewable on most e-book readers. You should '
             ' probably remove this file from the book or add a link to it somewhere.')
    def __init__(self, name):
        BadLink.__init__(self, _(
            'The file %s is not referenced') % name, name)
class UnreferencedDoc(UnreferencedResource):
    '''An HTML content document that is not listed in the book spine.'''
    HELP = _('This file is not in the book spine. All content documents must be in the spine.'
             ' You should probably add it to the spine.')
    INDIVIDUAL_FIX = _('Append this file to the spine')

    def __call__(self, container):
        # Auto-fix: ensure the file is in the manifest, then append a
        # matching itemref to the end of the spine.
        from calibre.ebooks.oeb.base import OPF
        name_to_id = {v: k for k, v in iteritems(container.manifest_id_map)}
        try:
            manifest_id = name_to_id[self.name]
        except KeyError:
            manifest_id = container.add_name_to_manifest(self.name)
        spine = container.opf_xpath('//opf:spine')[0]
        itemref = spine.makeelement(OPF('itemref'), idref=manifest_id)
        container.insert_into_xml(spine, itemref)
        container.dirty(container.opf_name)
        return True
class Unmanifested(BadLink):
    '''A file present in the book but missing from the OPF manifest.'''
    HELP = _('This file is not listed in the book manifest. While not strictly necessary'
             ' it is good practice to list all files in the manifest. Either list this'
             ' file in the manifest or remove it from the book if it is an unnecessary file.')

    def __init__(self, name, unreferenced=None):
        BadLink.__init__(self, _(
            'The file %s is not listed in the manifest') % name, name)
        self.file_action = None
        if unreferenced is not None:
            # Unreferenced files are best removed outright; referenced ones
            # should instead be added to the manifest.
            if unreferenced:
                self.INDIVIDUAL_FIX = _('Remove %s from the book') % name
                self.file_action = 'remove'
            else:
                self.INDIVIDUAL_FIX = _('Add %s to the manifest') % name
                self.file_action = 'add'

    def __call__(self, container):
        if self.file_action == 'remove':
            container.remove_item(self.name)
        else:
            manifested = set(itervalues(container.manifest_id_map))
            if self.name not in manifested:
                container.add_name_to_manifest(self.name)
        return True
class DanglingLink(BadLink):
    '''A link to a book-internal resource that does not exist.'''
    def __init__(self, text, target_name, name, lnum, col):
        BadLink.__init__(self, text, name, lnum, col)
        self.INDIVIDUAL_FIX = _('Remove all references to %s from the HTML and CSS in the book') % target_name
        self.target_name = target_name
    def __call__(self, container):
        # Auto-fix: strip every link pointing at the missing target; returns
        # True if anything was actually changed.
        return bool(remove_links_to(container, lambda name, *a: name == self.target_name))
class Bookmarks(BadLink):
    '''Informational: the calibre viewer bookmarks file is present in the book.'''
    HELP = _(
        'This file stores the bookmarks and last opened information from'
        ' the calibre E-book viewer. You can remove it if you do not'
        ' need that information, or don\'t want to share it with'
        ' other people you send this book to.')
    INDIVIDUAL_FIX = _('Remove this file')
    level = INFO
    def __init__(self, name):
        BadLink.__init__(self, _(
            'The bookmarks file used by the calibre E-book viewer is present'), name)
    def __call__(self, container):
        # Auto-fix: simply delete the bookmarks file
        container.remove_item(self.name)
        return True
class MimetypeMismatch(BaseError):
    '''The OPF manifest declares a media type for a file that does not match
    the type implied by the file's extension.'''
    level = WARN
    def __init__(self, container, name, opf_mt, ext_mt):
        # opf_mt: type declared in the OPF; ext_mt: type guessed from the extension
        self.opf_mt, self.ext_mt = opf_mt, ext_mt
        self.file_name = name
        BaseError.__init__(self, _('The file %s has a MIME type that does not match its extension') % name, container.opf_name)
        ext = name.rpartition('.')[-1]
        self.HELP = _('The file {0} has its MIME type specified as {1} in the OPF file.'
                      ' The recommended MIME type for files with the extension "{2}" is {3}.'
                      ' You should change either the file extension or the MIME type in the OPF.').format(
                          name, opf_mt, ext, ext_mt)
        if opf_mt in OEB_DOCS and name in {n for n, l in container.spine_names}:
            # A spine document declared as HTML: prefer renaming the file to .xhtml
            self.INDIVIDUAL_FIX = _('Change the file extension to .xhtml')
            self.change_ext_to = 'xhtml'
        else:
            self.INDIVIDUAL_FIX = _('Change the MIME type for this file in the OPF to %s') % ext_mt
            self.change_ext_to = None
    def __call__(self, container):
        changed = False
        if self.change_ext_to is not None:
            # Rename the file, appending a counter while the target name is
            # taken. NOTE(review): the counter is appended with no separator,
            # producing names like base1.xhtml — confirm this is intended.
            from calibre.ebooks.oeb.polish.replace import rename_files
            new_name = self.file_name.rpartition('.')[0] + '.' + self.change_ext_to
            c = 0
            while container.has_name(new_name):
                c += 1
                new_name = self.file_name.rpartition('.')[0] + ('%d.' % c) + self.change_ext_to
            rename_files(container, {self.file_name:new_name})
            changed = True
        else:
            # Fix the media-type attribute on the matching manifest item(s)
            for item in container.opf_xpath('//opf:manifest/opf:item[@href and @media-type="%s"]' % self.opf_mt):
                name = container.href_to_name(item.get('href'), container.opf_name)
                if name == self.file_name:
                    changed = True
                    item.set('media-type', self.ext_mt)
                    container.mime_map[name] = self.ext_mt
        if changed:
            container.dirty(container.opf_name)
        return changed
def check_mimetypes(container):
    '''Return a list of MimetypeMismatch errors for files whose declared
    media type disagrees with the type guessed from their extension.'''
    errors = []
    for name, declared in iteritems(container.mime_map):
        expected = container.guess_type(name)
        if declared == expected:
            continue
        # Adobe page maps are legitimately .xml files with a special type
        if declared == 'application/oebps-page-map+xml' and name.lower().endswith('.xml'):
            continue
        errors.append(MimetypeMismatch(container, name, declared, expected))
    return errors
def check_link_destination(container, dest_map, name, href, a, errors):
    '''Validate a single link from the file ``name``: the target must be an
    HTML document and any fragment must name an existing anchor. Appends
    errors to ``errors``; ``dest_map`` caches the anchors of each target.'''
    # Resolve the link target; a bare fragment points back at this file.
    if href.startswith('#'):
        tname = name
    else:
        try:
            tname = container.href_to_name(href, name)
        except ValueError:
            # Absolute links to files on another drive in windows cause this
            tname = None
    if not tname or tname not in container.mime_map:
        return
    if container.mime_map[tname] not in OEB_DOCS:
        errors.append(BadDestinationType(name, tname, a))
        return
    root = container.parsed(tname)
    if not hasattr(root, 'xpath'):
        # Target could not be parsed into an XML tree
        errors.append(BadDestinationType(name, tname, a))
        return
    # Cache the set of anchorable ids/names in the target document
    if tname not in dest_map:
        dest_map[tname] = set(root.xpath('//*/@id|//*/@name'))
    purl = urlparse(href)
    if purl.fragment and purl.fragment not in dest_map[tname]:
        errors.append(BadDestinationFragment(name, tname, a, purl.fragment))
def check_link_destinations(container):
    '''Check the destinations of links in HTML, OPF guide and NCX files,
    returning a list of errors for links that point at non-text documents
    or missing anchors.'''
    errors = []
    dest_map = {}  # shared anchor cache, filled in by check_link_destination
    opf_type = guess_type('a.opf')
    ncx_type = guess_type('a.ncx')
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_DOCS:
            for elem in container.parsed(name).xpath('//*[local-name()="a" and @href]'):
                check_link_destination(container, dest_map, name, elem.get('href'), elem, errors)
        elif mt == opf_type:
            for elem in container.opf_xpath('//opf:reference[@href]'):
                # AZW3 cover references legitimately point at images
                if container.book_type == 'azw3' and elem.get('type') in {'cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'}:
                    continue
                check_link_destination(container, dest_map, name, elem.get('href'), elem, errors)
        elif mt == ncx_type:
            for elem in container.parsed(name).xpath('//*[local-name() = "content" and @src]'):
                check_link_destination(container, dest_map, name, elem.get('src'), elem, errors)
    return errors
def check_links(container):
    '''Check all internal links in the book, and find files that are
    unreferenced from the spine or missing from the manifest.

    :param container: The book container
    :return: A list of error objects (BadLink subclasses, Unmanifested, ...)
    '''
    # Maps each file to the set of book-internal names it links to
    links_map = defaultdict(set)
    xml_types = {guess_type('a.opf'), guess_type('a.ncx')}
    errors = []
    a = errors.append
    def fl(x):
        # Format a link for display: repr() with any u'' prefix stripped
        x = repr(x)
        if x.startswith('u'):
            x = x[1:]
        return x
    # Pass 1: examine every link in HTML/CSS/OPF/NCX files
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_DOCS or mt in OEB_STYLES or mt in xml_types:
            for href, lnum, col in container.iterlinks(name):
                if not href:
                    a(EmptyLink(_('The link is empty'), name, lnum, col))
                try:
                    tname = container.href_to_name(href, name)
                except ValueError:
                    tname = None  # Absolute paths to files on another drive in windows cause this
                if tname is not None:
                    if container.exists(tname):
                        if tname in container.mime_map:
                            links_map[name].add(tname)
                        else:
                            # Filesystem says the file exists, but it is not in
                            # the mime_map, so either there is a case mismatch
                            # or the link is a directory
                            apath = container.name_to_abspath(tname)
                            if os.path.isdir(apath):
                                a(BadLink(_('The linked resource %s is a folder') % fl(href), name, lnum, col))
                            else:
                                a(CaseMismatch(href, actual_case_for_name(container, tname), name, lnum, col))
                    else:
                        cname = corrected_case_for_name(container, tname)
                        if cname is not None:
                            a(CaseMismatch(href, cname, name, lnum, col))
                        else:
                            a(DanglingLink(_('The linked resource %s does not exist') % fl(href), tname, name, lnum, col))
                else:
                    # Not a book-internal target: classify the external URL
                    try:
                        purl = urlparse(href)
                    except ValueError:
                        a(MalformedURL(_('The URL {} could not be parsed').format(href), name, lnum, col))
                    else:
                        if purl.scheme == 'file':
                            a(FileLink(_('The link %s is a file:// URL') % fl(href), name, lnum, col))
                        elif purl.path and purl.path.startswith('/') and purl.scheme in {'', 'file'}:
                            a(LocalLink(_('The link %s points to a file outside the book') % fl(href), name, lnum, col))
                        elif purl.path and purl.scheme in {'', 'file'} and ':' in urlunquote(purl.path):
                            a(InvalidCharInLink(
                                _('The link %s contains a : character, this will cause errors on Windows computers') % fl(href), name, lnum, col))
    # Pass 2: compute the set of resources reachable from the spine
    spine_docs = {name for name, linear in container.spine_names}
    spine_styles = {tname for name in spine_docs for tname in links_map[name] if container.mime_map.get(tname, None) in OEB_STYLES}
    num = -1
    while len(spine_styles) > num:
        # Handle import rules in stylesheets
        num = len(spine_styles)
        spine_styles |= {tname for name in spine_styles for tname in links_map[name] if container.mime_map.get(tname, None) in OEB_STYLES}
    seen = set(OEB_DOCS) | set(OEB_STYLES)
    spine_resources = {tname for name in spine_docs | spine_styles for tname in links_map[name] if container.mime_map[tname] not in seen}
    unreferenced = set()
    cover_name = container.guide_type_map.get('cover', None)
    nav_items = frozenset(container.manifest_items_with_property('nav'))
    # Pass 3: report anything not reachable (covers and nav docs are exempt)
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_STYLES and name not in spine_styles:
            a(UnreferencedResource(name))
        elif mt in OEB_DOCS and name not in spine_docs and name not in nav_items:
            a(UnreferencedDoc(name))
        elif (mt in OEB_FONTS or mt.partition('/')[0] in {'image', 'audio', 'video'}) and name not in spine_resources and name != cover_name:
            if mt.partition('/')[0] == 'image' and name == get_raster_cover_name(container):
                continue
            a(UnreferencedResource(name))
        else:
            continue
        # Every branch that reported an error also records the name here,
        # so Unmanifested below can offer removal as the fix
        unreferenced.add(name)
    # Pass 4: files missing from the manifest, plus the viewer bookmarks file
    manifest_names = set(itervalues(container.manifest_id_map))
    for name in container.mime_map:
        if name not in manifest_names and not container.ok_to_be_unmanifested(name):
            a(Unmanifested(name, unreferenced=name in unreferenced))
        if name == 'META-INF/calibre_bookmarks.txt':
            a(Bookmarks(name))
    return errors
def get_html_ids(raw_data):
    '''Return the set of anchor targets (id attributes anywhere in the body,
    plus name attributes on <a> tags) present in the given HTML source.'''
    root = parse_html5(raw_data, discard_namespaces=True, line_numbers=False, fix_newlines=False)
    anchors = set()
    for body in root.xpath('//body'):
        anchors.update(body.xpath('descendant-or-self::*/@id'))
        anchors.update(body.xpath('descendant::a/@name'))
    return anchors
def check_external_links(container, progress_callback=(lambda num, total:None), check_anchors=True):
    '''Check every http/https link in the book by fetching it over the network.

    :param progress_callback: Called as callback(num_done, total) as work proceeds
    :param check_anchors: If True, also verify that #fragments exist in fetched HTML
    :return: A list of (locations, exception, url) tuples for links that failed
    '''
    progress_callback(0, 0)
    # Maps each external URL to every (file, href, line, col) that uses it
    external_links = defaultdict(list)
    for name, mt in iteritems(container.mime_map):
        if mt in OEB_DOCS or mt in OEB_STYLES:
            for href, lnum, col in container.iterlinks(name):
                purl = urlparse(href)
                if purl.scheme in ('http', 'https'):
                    external_links[href].append((name, href, lnum, col))
    if not external_links:
        return []
    items = Queue()
    ans = []
    for el in iteritems(external_links):
        items.put(el)
    progress_callback(0, len(external_links))
    done = []
    # Cache of fetched HTML anchor sets, keyed by URL (shared across worker
    # threads; NOTE(review): accessed without a lock — relies on GIL-atomic
    # dict operations, confirm acceptable).
    downloaded_html_ids = {}
    def check_links():
        # Worker: drain the queue, fetching each URL once. Note this inner
        # function intentionally shadows the module-level check_links().
        br = browser(honor_time=False, verify_ssl_certificates=False)
        while True:
            try:
                full_href, locations = items.get_nowait()
            except Empty:
                return
            href, frag = full_href.partition('#')[::2]
            try:
                res = br.open(href, timeout=10)
            except Exception as e:
                ans.append((locations, e, full_href))
            else:
                if frag and check_anchors:
                    ct = res.info().get('Content-Type')
                    if ct and ct.split(';')[0].lower() in {'text/html', XHTML_MIME}:
                        ids = downloaded_html_ids.get(href)
                        if ids is None:
                            try:
                                ids = downloaded_html_ids[href] = get_html_ids(res.read())
                            except Exception:
                                # Unparseable page: treat as having no anchors
                                ids = downloaded_html_ids[href] = frozenset()
                        if frag not in ids:
                            ans.append((locations, ValueError(f'HTML anchor {frag} not found on the page'), full_href))
                res.close()
            finally:
                # Progress is reported per-URL regardless of outcome
                done.append(None)
                progress_callback(len(done), len(external_links))
    workers = [Thread(name="CheckLinks", target=check_links) for i in range(min(10, len(external_links)))]
    for w in workers:
        w.daemon = True
        w.start()
    for w in workers:
        w.join()
    return ans
| 19,865 | Python | .py | 378 | 40.71164 | 146 | 0.591207 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,398 | images.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/images.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from io import BytesIO
from PIL import Image
from calibre import as_unicode
from calibre.ebooks.oeb.polish.check.base import WARN, BaseError
from calibre.ebooks.oeb.polish.check.parsing import EmptyFile
from polyglot.builtins import error_message
class InvalidImage(BaseError):
    '''An image file that could not be loaded, typically corrupted data.'''
    HELP = _('An invalid image is an image that could not be loaded, typically because'
             ' it is corrupted. You should replace it with a good image or remove it.')
    def __init__(self, msg, *args, **kwargs):
        BaseError.__init__(self, 'Invalid image: ' + msg, *args, **kwargs)
class CMYKImage(BaseError):
    '''An image stored in the CMYK colorspace, which ADE-based readers cannot display.'''
    HELP = _('Reader devices based on Adobe Digital Editions cannot display images whose'
             ' colors are specified in the CMYK colorspace. You should convert this image'
             ' to the RGB colorspace, for maximum compatibility.')
    INDIVIDUAL_FIX = _('Convert image to RGB automatically')
    level = WARN
    def __call__(self, container):
        # Auto-fix: round-trip the image through Qt and re-encode it.
        # NOTE(review): relies on QImage/pixmap_to_data producing RGB output
        # — confirm.
        from qt.core import QImage
        from calibre.gui2 import pixmap_to_data
        ext = container.mime_map[self.name].split('/')[-1].upper()
        if ext == 'JPG':
            ext = 'JPEG'
        if ext not in ('PNG', 'JPEG', 'GIF'):
            # Formats Qt cannot re-encode here are left untouched
            return False
        with container.open(self.name, 'r+b') as f:
            raw = f.read()
            i = QImage()
            i.loadFromData(raw)
            if i.isNull():
                return False
            raw = pixmap_to_data(i, format=ext, quality=95)
            # Overwrite the file in place with the re-encoded data
            f.seek(0)
            f.truncate()
            f.write(raw)
        return True
def check_raster_images(name, mt, raw):
    '''Validate a raster image: it must be non-empty, loadable by Pillow,
    and not use the CMYK colorspace. Returns a list of errors (possibly empty).'''
    if not raw:
        return [EmptyFile(name)]
    problems = []
    try:
        img = Image.open(BytesIO(raw))
    except Exception as e:
        problems.append(InvalidImage(as_unicode(error_message(e)), name))
        return problems
    if img.mode == 'CMYK':
        problems.append(CMYKImage(_('Image is in the CMYK colorspace'), name))
    return problems
| 2,091 | Python | .py | 51 | 33.176471 | 90 | 0.629263 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,399 | parsing.py | kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/parsing.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from lxml.etree import XMLSyntaxError
from calibre import human_readable, prepare_string_for_xml
from calibre.ebooks.chardet import find_declared_encoding, replace_encoding_declarations
from calibre.ebooks.html_entities import html5_entities
from calibre.ebooks.oeb.base import OEB_DOCS, URL_SAFE, XHTML, XHTML_NS, urlquote
from calibre.ebooks.oeb.polish.check.base import INFO, WARN, BaseError
from calibre.ebooks.oeb.polish.pretty import pretty_script_or_style as fix_style_tag
from calibre.ebooks.oeb.polish.utils import PositionFinder, guess_type
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import error_message, iteritems
# NOTE(review): the name HTML_ENTITTIES contains a historical typo; kept as-is
# since other code may import it from this module.
HTML_ENTITTIES = frozenset(html5_entities)
# The five entities predefined by XML itself
XML_ENTITIES = {'lt', 'gt', 'amp', 'apos', 'quot'}
ALL_ENTITIES = HTML_ENTITTIES | XML_ENTITIES
# Bare reference to the imported name; presumably keeps the re-export from
# being flagged as an unused import — TODO confirm.
fix_style_tag
# Matches HTML5 named entities that are not one of the XML predefined five
replace_pat = re.compile('&(%s);' % '|'.join(re.escape(x) for x in sorted(HTML_ENTITTIES - XML_ENTITIES)))
# Extracts the first line number from lxml "tag mismatch" error messages
mismatch_pat = re.compile(r'tag mismatch:.+?line (\d+).+?line \d+')
class EmptyFile(BaseError):
    '''A zero-length file in the book.'''
    HELP = _('This file is empty, it contains nothing, you should probably remove it.')
    INDIVIDUAL_FIX = _('Remove this file')
    def __init__(self, name):
        BaseError.__init__(self, _('The file %s is empty') % name, name)
    def __call__(self, container):
        # Auto-fix: delete the empty file from the book
        container.remove_item(self.name)
        return True
class DecodeError(BaseError):
    '''The file's bytes could not be decoded as text.'''
    is_parsing_error = True
    HELP = _('A decoding errors means that the contents of the file could not'
             ' be interpreted as text. This usually happens if the file has'
             ' an incorrect character encoding declaration or if the file is actually'
             ' a binary file, like an image or font that is mislabelled with'
             ' an incorrect media type in the OPF.')
    def __init__(self, name):
        BaseError.__init__(self, _('Parsing of %s failed, could not decode') % name, name)
class XMLParseError(BaseError):
    '''An XML syntax error reported by the parser.'''
    is_parsing_error = True
    HELP = _('A parsing error in an XML file means that the XML syntax in the file is incorrect.'
             ' Such a file will most probably not open in an e-book reader. These errors can '
             ' usually be fixed automatically, however, automatic fixing can sometimes '
             ' "do the wrong thing".')
    def __init__(self, msg, *args, **kwargs):
        msg = msg or ''
        BaseError.__init__(self, 'Parsing failed: ' + msg, *args, **kwargs)
        # lxml "tag mismatch" messages reference two lines (opening and
        # closing tag); expose both as locations for the editor.
        m = mismatch_pat.search(msg)
        if m is not None:
            self.has_multiple_locations = True
            self.all_locations = [(self.name, int(m.group(1)), None), (self.name, self.line, self.col)]
class HTMLParseError(XMLParseError):
    '''A syntax error in an HTML (as opposed to generic XML) file.'''
    HELP = _('A parsing error in an HTML file means that the HTML syntax is incorrect.'
             ' Most readers will automatically ignore such errors, but they may result in '
             ' incorrect display of content. These errors can usually be fixed automatically,'
             ' however, automatic fixing can sometimes "do the wrong thing".')
class PrivateEntities(XMLParseError):
    '''The file declares private entities in an inline DTD, which are unsupported.'''
    HELP = _('This HTML file uses private entities.'
             ' These are not supported. You can try running "Fix HTML" from the Tools menu,'
             ' which will try to automatically resolve the private entities.')
class NamedEntities(BaseError):
    '''HTML5 named entities (beyond the five XML-predefined ones) are present.'''
    level = WARN
    INDIVIDUAL_FIX = _('Replace all named entities with their character equivalents in this book')
    HELP = _('Named entities are often only incompletely supported by various book reading software.'
             ' Therefore, it is best to not use them, replacing them with the actual characters they'
             ' represent. This can be done automatically.')
    def __init__(self, name):
        BaseError.__init__(self, _('Named entities present'), name)
    def __call__(self, container):
        # Auto-fix: replace named entities in every XML/HTML file of the
        # book, not just the file this error was reported against.
        changed = False
        from calibre.ebooks.oeb.polish.check.main import XML_TYPES
        check_types = XML_TYPES | OEB_DOCS
        for name, mt in iteritems(container.mime_map):
            if mt in check_types:
                raw = container.raw_data(name)
                nraw = replace_pat.sub(lambda m:html5_entities[m.group(1)], raw)
                if raw != nraw:
                    changed = True
                    with container.open(name, 'wb') as f:
                        f.write(nraw.encode('utf-8'))
        return changed
def make_filename_safe(name):
    '''Return ``name`` with each path component reduced to URL-safe ASCII,
    replacing any remaining unsafe character with an underscore.'''
    from calibre.utils.filenames import ascii_filename

    def sanitize(component):
        return ''.join(ch if ch in URL_SAFE else '_' for ch in component)

    parts = name.split('/')
    return '/'.join(sanitize(ascii_filename(p)) for p in parts)
class EscapedName(BaseError):
    '''A filename containing characters that require URL-escaping.'''
    level = WARN
    def __init__(self, name):
        BaseError.__init__(self, _('Filename contains unsafe characters'), name)
        qname = urlquote(name)
        # sname: the proposed safe replacement name
        self.sname = make_filename_safe(name)
        self.HELP = _(
            'The filename {0} contains unsafe characters, that must be escaped, like'
            ' this {1}. This can cause problems with some e-book readers. To be'
            ' absolutely safe, use only the English alphabet [a-z], the numbers [0-9],'
            ' underscores and hyphens in your file names. While many other characters'
            ' are allowed, they may cause problems with some software.').format(name, qname)
        self.INDIVIDUAL_FIX = _(
            'Rename the file {0} to {1}').format(name, self.sname)
    def __call__(self, container):
        # Auto-fix: rename, appending a counter if the safe name is taken
        from calibre.ebooks.oeb.polish.replace import rename_files
        all_names = set(container.name_path_map)
        bn, ext = self.sname.rpartition('.')[0::2]
        c = 0
        while self.sname in all_names:
            c += 1
            self.sname = '%s_%d.%s' % (bn, c, ext)
        rename_files(container, {self.name:self.sname})
        return True
class TooLarge(BaseError):
    '''Informational: an HTML file exceeds the recommended maximum size.'''
    level = INFO
    # Size threshold in bytes above which the warning is issued
    MAX_SIZE = 260 *1024
    HELP = _('This HTML file is larger than %s. Too large HTML files can cause performance problems'
             ' on some e-book readers. Consider splitting this file into smaller sections.') % human_readable(MAX_SIZE)
    def __init__(self, name):
        BaseError.__init__(self, _('File too large'), name)
class BadEntity(BaseError):
    '''An unrecognized or malformed entity reference.'''
    HELP = _('This is an invalid (unrecognized) entity. Replace it with whatever'
             ' text it is supposed to have represented.')
    def __init__(self, ent, name, lnum, col):
        BaseError.__init__(self, _('Invalid entity: %s') % ent, name, lnum, col)
class BadNamespace(BaseError):
    '''The root element of an HTML file has a missing or wrong XHTML namespace.'''
    INDIVIDUAL_FIX = _(
        'Run fix HTML on this file, which will automatically insert the correct namespace')
    def __init__(self, name, namespace):
        BaseError.__init__(self, _('Invalid or missing namespace'), name)
        self.HELP = prepare_string_for_xml(_(
            'This file has {0}. Its namespace must be {1}. Set the namespace by defining the xmlns'
            ' attribute on the <html> element, like this <html xmlns="{1}">').format(
                (_('incorrect namespace %s') % namespace) if namespace else _('no namespace'),
                XHTML_NS))
    def __call__(self, container):
        # Auto-fix: re-parse and mark dirty so the file is re-serialized.
        # NOTE(review): relies on container.parsed() normalizing the
        # namespace during parsing — confirm.
        container.parsed(self.name)
        container.dirty(self.name)
        return True
class NonUTF8(BaseError):
    '''The file declares a text encoding other than UTF-8.'''
    level = WARN
    INDIVIDUAL_FIX = _("Change this file's encoding to UTF-8")

    def __init__(self, name, enc):
        BaseError.__init__(self, _('Non UTF-8 encoding declaration'), name)
        self.HELP = _('This file has its encoding declared as %s. Some'
                      ' reader software cannot handle non-UTF8 encoded files.'
                      ' You should change the encoding to UTF-8.') % enc

    def __call__(self, container):
        # Auto-fix: rewrite the encoding declaration(s) to UTF-8 and save
        # the file re-encoded as UTF-8. Returns True if a change was made.
        raw = container.raw_data(self.name)
        if isinstance(raw, str):
            raw, changed = replace_encoding_declarations(raw)
            if changed:
                # Use a context manager so the file handle is closed: the
                # original code leaked the handle returned by open().
                with container.open(self.name, 'wb') as f:
                    f.write(raw.encode('utf-8'))
                return True
class EntitityProcessor:
    '''Callable used as an re.sub() replacement function that blanks out
    entity references in raw bytes while recording which were valid named
    entities and which were invalid. (The class name contains a historical
    typo; kept because it is part of this module's interface.)'''
    def __init__(self, mt):
        # HTML documents may use the full HTML5 named-entity set; plain XML
        # only the five predefined XML entities.
        self.entities = ALL_ENTITIES if mt in OEB_DOCS else XML_ENTITIES
        # Byte offsets of recognized named entities
        self.ok_named_entities = []
        # (byte offset, matched bytes) pairs for invalid entities
        self.bad_entities = []
    def __call__(self, m):
        val = m.group(1).decode('ascii')
        if val in XML_ENTITIES:
            # Leave XML entities alone
            return m.group()
        if val.startswith('#'):
            # Numeric entity: validate that the number parses
            nval = val[1:]
            try:
                if nval.startswith('x'):
                    int(nval[1:], 16)
                else:
                    int(nval, 10)
            except ValueError:
                # Invalid numerical entity
                self.bad_entities.append((m.start(), m.group()))
                return b' ' * len(m.group())
            return m.group()
        if val in self.entities:
            # Known named entity, report it
            self.ok_named_entities.append(m.start())
        else:
            self.bad_entities.append((m.start(), m.group()))
        # Non-XML named entities (valid or not) are replaced by spaces of the
        # same length, so later byte offsets used for error positions stay valid.
        return b' ' * len(m.group())
def check_html_size(name, mt, raw):
    '''Return a TooLarge error in a list if the raw HTML exceeds the
    recommended maximum size, otherwise an empty list.'''
    if len(raw) > TooLarge.MAX_SIZE:
        return [TooLarge(name)]
    return []
# Matches numeric (&#123; / &#x7f;) and named (&name;) entity references
entity_pat = re.compile(br'&(#{0,1}[a-zA-Z0-9]{1,8});')
def check_encoding_declarations(name, container):
    '''Return a NonUTF8 error in a list if the file declares a character
    encoding other than UTF-8, otherwise an empty list.'''
    declared = find_declared_encoding(container.raw_data(name))
    if declared is None or declared.lower() == 'utf-8':
        return []
    return [NonUTF8(name, declared)]
def check_for_private_entities(name, raw):
    '''Return True if ``raw`` (the bytes of an XML/HTML file) contains an
    inline DTD subset that declares entities, False otherwise.

    The original implementation returned True or (implicitly) None; this
    version always returns an explicit bool, which is backward compatible
    since callers only truth-test the result.

    :param name: Book-internal name of the file (unused, kept for interface
        symmetry with the other check_* helpers in this module)
    :param raw: Raw bytes of the file
    '''
    # An internal DTD subset looks like <!DOCTYPE x [ ... <!ENTITY ... ]>;
    # DOTALL lets the lazy wildcards span multiple lines.
    return re.search(br'<!DOCTYPE\s+.+?<!ENTITY\s+.+?]>', raw, flags=re.DOTALL) is not None
def check_xml_parsing(name, mt, raw):
    '''Parse ``raw`` as XML after neutralizing entities and return a list of
    parsing-related errors. HTML media types produce HTMLParseError, other
    XML produces XMLParseError.'''
    if not raw:
        return [EmptyFile(name)]
    if check_for_private_entities(name, raw):
        return [PrivateEntities(_('Private entities found'), name)]
    # Normalize line endings so reported line numbers are consistent
    raw = raw.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
    # Get rid of entities as named entities trip up the XML parser
    eproc = EntitityProcessor(mt)
    eraw = entity_pat.sub(eproc, raw)
    errcls = HTMLParseError if mt in OEB_DOCS else XMLParseError
    errors = []
    if eproc.ok_named_entities:
        errors.append(NamedEntities(name))
    if eproc.bad_entities:
        # Convert the recorded byte offsets into line/column positions
        position = PositionFinder(raw)
        for offset, ent in eproc.bad_entities:
            lnum, col = position(offset)
            errors.append(BadEntity(ent, name, lnum, col))
    try:
        root = safe_xml_fromstring(eraw, recover=False)
    except UnicodeDecodeError:
        return errors + [DecodeError(name)]
    except XMLSyntaxError as err:
        try:
            line, col = err.position
        except Exception:
            line = col = None
        return errors + [errcls(error_message(err), name, line, col)]
    except Exception as err:
        return errors + [errcls(error_message(err), name)]
    if mt in OEB_DOCS:
        # HTML documents must use the XHTML namespace on their root element
        if root.nsmap.get(root.prefix, None) != XHTML_NS:
            errors.append(BadNamespace(name, root.nsmap.get(root.prefix, None)))
    return errors
# Patterns that extract (line, column) pairs from error-message text.
# NOTE(review): not referenced anywhere in this module's visible code;
# possibly imported by other modules — confirm before removing.
pos_pats = (re.compile(r'\[(\d+):(\d+)'), re.compile(r'(\d+), (\d+)\)'))
class DuplicateId(BaseError):
    '''The same id attribute value appears on more than one element in a file.'''
    has_multiple_locations = True
    INDIVIDUAL_FIX = _(
        'Remove the duplicate ids from all but the first element')
    def __init__(self, name, eid, locs):
        BaseError.__init__(self, _('Duplicate id: %s') % eid, name)
        self.HELP = _(
            'The id {0} is present on more than one element in {1}. This is'
            ' not allowed. Remove the id from all but one of the elements').format(eid, name)
        # One location per occurrence, sorted by source line
        self.all_locations = [(name, lnum, None) for lnum in sorted(locs)]
        self.duplicate_id = eid
    def __call__(self, container):
        # Auto-fix: keep the id on the first element, drop it everywhere else
        elems = [e for e in container.parsed(self.name).xpath('//*[@id]') if e.get('id') == self.duplicate_id]
        for e in elems[1:]:
            e.attrib.pop('id')
        container.dirty(self.name)
        return True
class InvalidId(BaseError):
    '''An id attribute value that does not match the valid_id pattern.'''
    level = WARN
    INDIVIDUAL_FIX = _(
        'Replace this id with a randomly generated valid id')
    def __init__(self, name, line, eid):
        BaseError.__init__(self, _('Invalid id: %s') % eid, name, line)
        self.HELP = _(
            'The id {0} is not a valid id. IDs must start with a letter ([A-Za-z]) and may be'
            ' followed by any number of letters, digits ([0-9]), hyphens ("-"), underscores ("_")'
            ', colons (":"), and periods ("."). This is to ensure maximum compatibility'
            ' with a wide range of devices.').format(eid)
        self.invalid_id = eid
    def __call__(self, container):
        # Auto-fix: replace the invalid id with a fresh UUID-based id and
        # update all links that referenced the old id.
        # NOTE(review): every element carrying the invalid id receives the
        # SAME new id, so a duplicated invalid id stays duplicated — confirm
        # this is intended. container.dirty() is also called even when no
        # element matched.
        from calibre.ebooks.oeb.base import uuid_id
        from calibre.ebooks.oeb.polish.replace import replace_ids
        newid = uuid_id()
        changed = False
        elems = (e for e in container.parsed(self.name).xpath('//*[@id]') if e.get('id') == self.invalid_id)
        for e in elems:
            e.set('id', newid)
            changed = True
        container.dirty(self.name)
        if changed:
            replace_ids(container, {self.name:{self.invalid_id:newid}})
        return changed
class BareTextInBody(BaseError):
    '''Text present directly inside <body> rather than inside a child tag.'''
    INDIVIDUAL_FIX = _('Wrap the bare text in a p tag')
    HELP = _('You cannot have bare text inside the body tag. The text must be placed inside some other tag, such as p or div')
    has_multiple_locations = True
    def __init__(self, name, lines):
        BaseError.__init__(self, _('Bare text in body tag'), name)
        self.all_locations = [(name, l, None) for l in sorted(lines)]
    def __call__(self, container):
        # Auto-fix: wrap each run of bare text in a new <p>, adjusting the
        # surrounding text/tail whitespace to keep the markup indented.
        root = container.parsed(self.name)
        for body in root.xpath('//*[local-name() = "body"]'):
            children = tuple(body.iterchildren('*'))
            # Bare text before the first child element
            if body.text and body.text.strip():
                p = body.makeelement(XHTML('p'))
                p.text, body.text = body.text.strip(), '\n '
                p.tail = '\n'
                if children:
                    p.tail += ' '
                body.insert(0, p)
            # Bare text in the tail of each child element
            for child in children:
                if child.tail and child.tail.strip():
                    p = body.makeelement(XHTML('p'))
                    p.text, child.tail = child.tail.strip(), '\n '
                    p.tail = '\n'
                    body.insert(body.index(child) + 1, p)
                    if child is not children[-1]:
                        p.tail += ' '
        container.dirty(self.name)
        return True
def check_filenames(container):
    '''Return EscapedName errors for every renameable file whose name
    contains characters that require URL-escaping.'''
    checkable = set(container.name_path_map) - container.names_that_must_not_be_changed
    return [EscapedName(name) for name in checkable if urlquote(name) != name]
# A valid id: a letter followed by any number of letters, digits, _, :, . or -
valid_id = re.compile(r'^[a-zA-Z][a-zA-Z0-9_:.-]*$')
def check_ids(container):
    '''Check id attributes in HTML/OPF/NCX files, reporting ids that are
    invalid and ids that occur more than once within the same file.'''
    errors = []
    id_bearing_types = set(OEB_DOCS) | {guess_type('a.opf'), guess_type('a.ncx')}
    for name, mt in iteritems(container.mime_map):
        if mt not in id_bearing_types:
            continue
        root = container.parsed(name)
        first_seen = {}   # id -> source line of its first occurrence
        duplicates = {}   # id -> source lines of every occurrence
        for elem in root.xpath('//*[@id]'):
            eid = elem.get('id')
            if eid in first_seen:
                duplicates.setdefault(eid, [first_seen[eid]]).append(elem.sourceline)
            else:
                first_seen[eid] = elem.sourceline
            if eid and valid_id.match(eid) is None:
                errors.append(InvalidId(name, elem.sourceline, eid))
        errors.extend(DuplicateId(name, eid, locs) for eid, locs in iteritems(duplicates))
    return errors
def check_markup(container):
    '''Check every HTML file for bare text placed directly inside <body>,
    returning one BareTextInBody error per offending file.'''
    errors = []
    for name, mt in iteritems(container.mime_map):
        if mt not in OEB_DOCS:
            continue
        offending_lines = []
        root = container.parsed(name)
        for body in root.xpath('//*[local-name()="body"]'):
            if body.text and body.text.strip():
                offending_lines.append(body.sourceline)
            offending_lines.extend(
                child.sourceline for child in body.iterchildren('*')
                if child.tail and child.tail.strip())
        if offending_lines:
            errors.append(BareTextInBody(name, offending_lines))
    return errors
| 16,405 | Python | .py | 341 | 38.665689 | 126 | 0.607557 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |