Dataset columns:

| column           | dtype  | values               |
|------------------|--------|----------------------|
| id               | int32  | 0 to 252k            |
| repo             | string | lengths 7 to 55      |
| path             | string | lengths 4 to 127     |
| func_name        | string | lengths 1 to 88      |
| original_string  | string | lengths 75 to 19.8k  |
| language         | string | 1 distinct value     |
| code             | string | lengths 75 to 19.8k  |
| code_tokens      | list   |                      |
| docstring        | string | lengths 3 to 17.3k   |
| docstring_tokens | list   |                      |
| sha              | string | lengths 40 to 40     |
| url              | string | lengths 87 to 242    |
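Records with this schema are easiest to inspect programmatically. Below is a minimal sketch, assuming the rows shown here are available as a JSON-Lines export and that the Hugging Face `datasets` library is installed; the filename `fontbakery_functions.jsonl` is a placeholder, not part of the original data.

```python
# Minimal sketch: read rows that follow the column schema above.
# "fontbakery_functions.jsonl" is a hypothetical local export of this table.
from datasets import load_dataset

ds = load_dataset("json", data_files="fontbakery_functions.jsonl", split="train")

row = ds[0]
print(row["repo"], row["path"], row["func_name"])  # provenance of the function
print(row["docstring"])   # one-line summary of the check
print(row["code"])        # full function source (same text as original_string)
print(row["url"])         # GitHub permalink pinned to the commit in the sha column
```

The `code_tokens` and `docstring_tokens` columns carry pre-tokenized forms of `code` and `docstring`, as can be seen in the rows below.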
9,800
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_license
original_string:
def com_google_fonts_check_metadata_license(family_metadata):
    """METADATA.pb license is "APACHE2", "UFL" or "OFL"?"""
    licenses = ["APACHE2", "OFL", "UFL"]
    if family_metadata.license in licenses:
        yield PASS, ("Font license is declared"
                     " in METADATA.pb as \"{}\"").format(family_metadata.license)
    else:
        yield FAIL, ("METADATA.pb license field (\"{}\")"
                     " must be one of the following:"
                     " {}").format(family_metadata.license, licenses)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_license", "(", "family_metadata", ")", ":", "licenses", "=", "[", "\"APACHE2\"", ",", "\"OFL\"", ",", "\"UFL\"", "]", "if", "family_metadata", ".", "license", "in", "licenses", ":", "yield", "PASS", ",", "(", "\"Font license is declared\"", "\" in METADATA.pb as \\\"{}\\\"\"", ")", ".", "format", "(", "family_metadata", ".", "license", ")", "else", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb license field (\\\"{}\\\")\"", "\" must be one of the following:\"", "\" {}\"", ")", ".", "format", "(", "family_metadata", ".", "license", ",", "licenses", ")" ]
METADATA.pb license is "APACHE2", "UFL" or "OFL"?
[ "METADATA", ".", "pb", "license", "is", "APACHE2", "UFL", "or", "OFL", "?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1448-L1458
9,801
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_menu_and_latin
original_string:
def com_google_fonts_check_metadata_menu_and_latin(family_metadata):
    """METADATA.pb should contain at least "menu" and "latin" subsets."""
    missing = []
    for s in ["menu", "latin"]:
        if s not in list(family_metadata.subsets):
            missing.append(s)
    if missing != []:
        yield FAIL, ("Subsets \"menu\" and \"latin\" are mandatory,"
                     " but METADATA.pb is missing"
                     " \"{}\"").format(" and ".join(missing))
    else:
        yield PASS, "METADATA.pb contains \"menu\" and \"latin\" subsets."
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_menu_and_latin", "(", "family_metadata", ")", ":", "missing", "=", "[", "]", "for", "s", "in", "[", "\"menu\"", ",", "\"latin\"", "]", ":", "if", "s", "not", "in", "list", "(", "family_metadata", ".", "subsets", ")", ":", "missing", ".", "append", "(", "s", ")", "if", "missing", "!=", "[", "]", ":", "yield", "FAIL", ",", "(", "\"Subsets \\\"menu\\\" and \\\"latin\\\" are mandatory,\"", "\" but METADATA.pb is missing\"", "\" \\\"{}\\\"\"", ")", ".", "format", "(", "\" and \"", ".", "join", "(", "missing", ")", ")", "else", ":", "yield", "PASS", ",", "\"METADATA.pb contains \\\"menu\\\" and \\\"latin\\\" subsets.\"" ]
METADATA.pb should contain at least "menu" and "latin" subsets.
[ "METADATA", ".", "pb", "should", "contain", "at", "least", "menu", "and", "latin", "subsets", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1465-L1477
9,802
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_subsets_order
original_string:
def com_google_fonts_check_metadata_subsets_order(family_metadata):
    """METADATA.pb subsets should be alphabetically ordered."""
    expected = list(sorted(family_metadata.subsets))
    if list(family_metadata.subsets) != expected:
        yield FAIL, ("METADATA.pb subsets are not sorted "
                     "in alphabetical order: Got ['{}']"
                     " and expected ['{}']").format("', '".join(family_metadata.subsets),
                                                    "', '".join(expected))
    else:
        yield PASS, "METADATA.pb subsets are sorted in alphabetical order."
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_subsets_order", "(", "family_metadata", ")", ":", "expected", "=", "list", "(", "sorted", "(", "family_metadata", ".", "subsets", ")", ")", "if", "list", "(", "family_metadata", ".", "subsets", ")", "!=", "expected", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb subsets are not sorted \"", "\"in alphabetical order: Got ['{}']\"", "\" and expected ['{}']\"", ")", ".", "format", "(", "\"', '\"", ".", "join", "(", "family_metadata", ".", "subsets", ")", ",", "\"', '\"", ".", "join", "(", "expected", ")", ")", "else", ":", "yield", "PASS", ",", "\"METADATA.pb subsets are sorted in alphabetical order.\"" ]
METADATA.pb subsets should be alphabetically ordered.
[ "METADATA", ".", "pb", "subsets", "should", "be", "alphabetically", "ordered", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1484-L1494
9,803
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_familyname
original_string:
def com_google_fonts_check_metadata_familyname(family_metadata):
    """Check that METADATA.pb family values are all the same."""
    name = ""
    fail = False
    for f in family_metadata.fonts:
        if name and f.name != name:
            fail = True
        name = f.name
    if fail:
        yield FAIL, ("METADATA.pb: Family name is not the same"
                     " in all metadata \"fonts\" items.")
    else:
        yield PASS, ("METADATA.pb: Family name is the same"
                     " in all metadata \"fonts\" items.")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_familyname", "(", "family_metadata", ")", ":", "name", "=", "\"\"", "fail", "=", "False", "for", "f", "in", "family_metadata", ".", "fonts", ":", "if", "name", "and", "f", ".", "name", "!=", "name", ":", "fail", "=", "True", "name", "=", "f", ".", "name", "if", "fail", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb: Family name is not the same\"", "\" in all metadata \\\"fonts\\\" items.\"", ")", "else", ":", "yield", "PASS", ",", "(", "\"METADATA.pb: Family name is the same\"", "\" in all metadata \\\"fonts\\\" items.\"", ")" ]
Check that METADATA.pb family values are all the same.
[ "Check", "that", "METADATA", ".", "pb", "family", "values", "are", "all", "the", "same", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1520-L1533
9,804
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_nameid_family_name
original_string:
def com_google_fonts_check_metadata_nameid_family_name(ttFont, font_metadata):
    """Checks METADATA.pb font.name field matches
    family name declared on the name table.
    """
    from fontbakery.utils import get_name_entry_strings
    familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
    if not familynames:
        familynames = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
    if len(familynames) == 0:
        yield FAIL, Message("missing",
                            ("This font lacks a FONT_FAMILY_NAME entry"
                             " (nameID={}) in the name"
                             " table.").format(NameID.FONT_FAMILY_NAME))
    else:
        if font_metadata.name not in familynames:
            yield FAIL, Message("mismatch",
                                ("Unmatched family name in font:"
                                 " TTF has \"{}\" while METADATA.pb"
                                 " has \"{}\"").format(familynames[0],
                                                       font_metadata.name))
        else:
            yield PASS, ("Family name \"{}\" is identical"
                         " in METADATA.pb and on the"
                         " TTF file.").format(font_metadata.name)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_nameid_family_name", "(", "ttFont", ",", "font_metadata", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "familynames", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "TYPOGRAPHIC_FAMILY_NAME", ")", "if", "not", "familynames", ":", "familynames", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", "if", "len", "(", "familynames", ")", "==", "0", ":", "yield", "FAIL", ",", "Message", "(", "\"missing\"", ",", "(", "\"This font lacks a FONT_FAMILY_NAME entry\"", "\" (nameID={}) in the name\"", "\" table.\"", ")", ".", "format", "(", "NameID", ".", "FONT_FAMILY_NAME", ")", ")", "else", ":", "if", "font_metadata", ".", "name", "not", "in", "familynames", ":", "yield", "FAIL", ",", "Message", "(", "\"mismatch\"", ",", "(", "\"Unmatched family name in font:\"", "\" TTF has \\\"{}\\\" while METADATA.pb\"", "\" has \\\"{}\\\"\"", ")", ".", "format", "(", "familynames", "[", "0", "]", ",", "font_metadata", ".", "name", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"Family name \\\"{}\\\" is identical\"", "\" in METADATA.pb and on the\"", "\" TTF file.\"", ")", ".", "format", "(", "font_metadata", ".", "name", ")" ]
Checks METADATA.pb font.name field matches family name declared on the name table.
[ "Checks", "METADATA", ".", "pb", "font", ".", "name", "field", "matches", "family", "name", "declared", "on", "the", "name", "table", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1593-L1617
9,805
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_nameid_post_script_name
original_string:
def com_google_fonts_check_metadata_nameid_post_script_name(ttFont, font_metadata):
    """Checks METADATA.pb font.post_script_name matches
    postscript name declared on the name table.
    """
    failed = False
    from fontbakery.utils import get_name_entry_strings
    postscript_names = get_name_entry_strings(ttFont, NameID.POSTSCRIPT_NAME)
    if len(postscript_names) == 0:
        failed = True
        yield FAIL, Message("missing",
                            ("This font lacks a POSTSCRIPT_NAME"
                             " entry (nameID={}) in the "
                             "name table.").format(NameID.POSTSCRIPT_NAME))
    else:
        for psname in postscript_names:
            if psname != font_metadata.post_script_name:
                failed = True
                yield FAIL, Message("mismatch",
                                    ("Unmatched postscript name in font:"
                                     " TTF has \"{}\" while METADATA.pb"
                                     " has \"{}\"."
                                     "").format(psname,
                                                font_metadata.post_script_name))
    if not failed:
        yield PASS, ("Postscript name \"{}\" is identical"
                     " in METADATA.pb and on the"
                     " TTF file.").format(font_metadata.post_script_name)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_nameid_post_script_name", "(", "ttFont", ",", "font_metadata", ")", ":", "failed", "=", "False", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "postscript_names", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "POSTSCRIPT_NAME", ")", "if", "len", "(", "postscript_names", ")", "==", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"missing\"", ",", "(", "\"This font lacks a POSTSCRIPT_NAME\"", "\" entry (nameID={}) in the \"", "\"name table.\"", ")", ".", "format", "(", "NameID", ".", "POSTSCRIPT_NAME", ")", ")", "else", ":", "for", "psname", "in", "postscript_names", ":", "if", "psname", "!=", "font_metadata", ".", "post_script_name", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"mismatch\"", ",", "(", "\"Unmatched postscript name in font:\"", "\" TTF has \\\"{}\\\" while METADATA.pb\"", "\" has \\\"{}\\\".\"", "\"\"", ")", ".", "format", "(", "psname", ",", "font_metadata", ".", "post_script_name", ")", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"Postscript name \\\"{}\\\" is identical\"", "\" in METADATA.pb and on the\"", "\" TTF file.\"", ")", ".", "format", "(", "font_metadata", ".", "post_script_name", ")" ]
Checks METADATA.pb font.post_script_name matches postscript name declared on the name table.
[ "Checks", "METADATA", ".", "pb", "font", ".", "post_script_name", "matches", "postscript", "name", "declared", "on", "the", "name", "table", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1623-L1650
9,806
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_nameid_full_name
original_string:
def com_google_fonts_check_metadata_nameid_full_name(ttFont, font_metadata):
    """METADATA.pb font.full_name value matches
    fullname declared on the name table?
    """
    from fontbakery.utils import get_name_entry_strings
    full_fontnames = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
    if len(full_fontnames) == 0:
        yield FAIL, Message("lacks-entry",
                            ("This font lacks a FULL_FONT_NAME"
                             " entry (nameID={}) in the"
                             " name table.").format(NameID.FULL_FONT_NAME))
    else:
        for full_fontname in full_fontnames:
            if full_fontname != font_metadata.full_name:
                yield FAIL, Message("mismatch",
                                    ("Unmatched fullname in font:"
                                     " TTF has \"{}\" while METADATA.pb"
                                     " has \"{}\".").format(full_fontname,
                                                            font_metadata.full_name))
            else:
                yield PASS, ("Font fullname \"{}\" is identical"
                             " in METADATA.pb and on the"
                             " TTF file.").format(full_fontname)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_nameid_full_name", "(", "ttFont", ",", "font_metadata", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "full_fontnames", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FULL_FONT_NAME", ")", "if", "len", "(", "full_fontnames", ")", "==", "0", ":", "yield", "FAIL", ",", "Message", "(", "\"lacks-entry\"", ",", "(", "\"This font lacks a FULL_FONT_NAME\"", "\" entry (nameID={}) in the\"", "\" name table.\"", ")", ".", "format", "(", "NameID", ".", "FULL_FONT_NAME", ")", ")", "else", ":", "for", "full_fontname", "in", "full_fontnames", ":", "if", "full_fontname", "!=", "font_metadata", ".", "full_name", ":", "yield", "FAIL", ",", "Message", "(", "\"mismatch\"", ",", "(", "\"Unmatched fullname in font:\"", "\" TTF has \\\"{}\\\" while METADATA.pb\"", "\" has \\\"{}\\\".\"", ")", ".", "format", "(", "full_fontname", ",", "font_metadata", ".", "full_name", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"Font fullname \\\"{}\\\" is identical\"", "\" in METADATA.pb and on the\"", "\" TTF file.\"", ")", ".", "format", "(", "full_fontname", ")" ]
METADATA.pb font.full_name value matches fullname declared on the name table?
[ "METADATA", ".", "pb", "font", ".", "full_name", "value", "matches", "fullname", "declared", "on", "the", "name", "table?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1657-L1680
9,807
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_nameid_font_name
original_string:
def com_google_fonts_check_metadata_nameid_font_name(ttFont, style, font_metadata):
    """METADATA.pb font.name value should be same as
    the family name declared on the name table.
    """
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import RIBBI_STYLE_NAMES
    if style in RIBBI_STYLE_NAMES:
        font_familynames = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
        nameid = NameID.FONT_FAMILY_NAME
    else:
        font_familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
        nameid = NameID.TYPOGRAPHIC_FAMILY_NAME
    if len(font_familynames) == 0:
        yield FAIL, Message("lacks-entry",
                            (f"This font lacks a {NameID(nameid).name} entry"
                             f" (nameID={nameid}) in the name table."))
    else:
        for font_familyname in font_familynames:
            if font_familyname != font_metadata.name:
                yield FAIL, Message("mismatch",
                                    ("Unmatched familyname in font:"
                                     " TTF has \"{}\" while METADATA.pb has"
                                     " name=\"{}\".").format(font_familyname,
                                                             font_metadata.name))
            else:
                yield PASS, ("OK: Family name \"{}\" is identical"
                             " in METADATA.pb and on the"
                             " TTF file.").format(font_metadata.name)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_nameid_font_name", "(", "ttFont", ",", "style", ",", "font_metadata", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "from", "fontbakery", ".", "constants", "import", "RIBBI_STYLE_NAMES", "if", "style", "in", "RIBBI_STYLE_NAMES", ":", "font_familynames", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", "nameid", "=", "NameID", ".", "FONT_FAMILY_NAME", "else", ":", "font_familynames", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "TYPOGRAPHIC_FAMILY_NAME", ")", "nameid", "=", "NameID", ".", "TYPOGRAPHIC_FAMILY_NAME", "if", "len", "(", "font_familynames", ")", "==", "0", ":", "yield", "FAIL", ",", "Message", "(", "\"lacks-entry\"", ",", "(", "f\"This font lacks a {NameID(nameid).name} entry\"", "f\" (nameID={nameid}) in the name table.\"", ")", ")", "else", ":", "for", "font_familyname", "in", "font_familynames", ":", "if", "font_familyname", "!=", "font_metadata", ".", "name", ":", "yield", "FAIL", ",", "Message", "(", "\"mismatch\"", ",", "(", "\"Unmatched familyname in font:\"", "\" TTF has \\\"{}\\\" while METADATA.pb has\"", "\" name=\\\"{}\\\".\"", ")", ".", "format", "(", "font_familyname", ",", "font_metadata", ".", "name", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"OK: Family name \\\"{}\\\" is identical\"", "\" in METADATA.pb and on the\"", "\" TTF file.\"", ")", ".", "format", "(", "font_metadata", ".", "name", ")" ]
METADATA.pb font.name value should be same as the family name declared on the name table.
[ "METADATA", ".", "pb", "font", ".", "name", "value", "should", "be", "same", "as", "the", "family", "name", "declared", "on", "the", "name", "table", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1687-L1716
9,808
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_match_fullname_postscript
original_string:
def com_google_fonts_check_metadata_match_fullname_postscript(font_metadata):
    """METADATA.pb font.full_name and font.post_script_name
    fields have equivalent values ?
    """
    import re
    regex = re.compile(r"\W")
    post_script_name = regex.sub("", font_metadata.post_script_name)
    fullname = regex.sub("", font_metadata.full_name)
    if fullname != post_script_name:
        yield FAIL, ("METADATA.pb font full_name=\"{}\""
                     " does not match post_script_name ="
                     " \"{}\"").format(font_metadata.full_name,
                                       font_metadata.post_script_name)
    else:
        yield PASS, ("METADATA.pb font fields \"full_name\" and"
                     " \"post_script_name\" have equivalent values.")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_match_fullname_postscript", "(", "font_metadata", ")", ":", "import", "re", "regex", "=", "re", ".", "compile", "(", "r\"\\W\"", ")", "post_script_name", "=", "regex", ".", "sub", "(", "\"\"", ",", "font_metadata", ".", "post_script_name", ")", "fullname", "=", "regex", ".", "sub", "(", "\"\"", ",", "font_metadata", ".", "full_name", ")", "if", "fullname", "!=", "post_script_name", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb font full_name=\\\"{}\\\"\"", "\" does not match post_script_name =\"", "\" \\\"{}\\\"\"", ")", ".", "format", "(", "font_metadata", ".", "full_name", ",", "font_metadata", ".", "post_script_name", ")", "else", ":", "yield", "PASS", ",", "(", "\"METADATA.pb font fields \\\"full_name\\\" and\"", "\" \\\"post_script_name\\\" have equivalent values.\"", ")" ]
METADATA.pb font.full_name and font.post_script_name fields have equivalent values ?
[ "METADATA", ".", "pb", "font", ".", "full_name", "and", "font", ".", "post_script_name", "fields", "have", "equivalent", "values", "?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1723-L1738
9,809
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_match_filename_postscript
original_string:
def com_google_fonts_check_metadata_match_filename_postscript(font_metadata):
    """METADATA.pb font.filename and font.post_script_name
    fields have equivalent values?
    """
    post_script_name = font_metadata.post_script_name
    filename = os.path.splitext(font_metadata.filename)[0]
    if filename != post_script_name:
        yield FAIL, ("METADATA.pb font filename=\"{}\" does not match"
                     " post_script_name=\"{}\"."
                     "").format(font_metadata.filename,
                                font_metadata.post_script_name)
    else:
        yield PASS, ("METADATA.pb font fields \"filename\" and"
                     " \"post_script_name\" have equivalent values.")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_match_filename_postscript", "(", "font_metadata", ")", ":", "post_script_name", "=", "font_metadata", ".", "post_script_name", "filename", "=", "os", ".", "path", ".", "splitext", "(", "font_metadata", ".", "filename", ")", "[", "0", "]", "if", "filename", "!=", "post_script_name", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb font filename=\\\"{}\\\" does not match\"", "\" post_script_name=\\\"{}\\\".\"", "\"\"", ")", ".", "format", "(", "font_metadata", ".", "filename", ",", "font_metadata", ".", "post_script_name", ")", "else", ":", "yield", "PASS", ",", "(", "\"METADATA.pb font fields \\\"filename\\\" and\"", "\" \\\"post_script_name\\\" have equivalent values.\"", ")" ]
METADATA.pb font.filename and font.post_script_name fields have equivalent values?
[ "METADATA", ".", "pb", "font", ".", "filename", "and", "font", ".", "post_script_name", "fields", "have", "equivalent", "values?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1748-L1762
9,810
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_valid_name_values
original_string:
def com_google_fonts_check_metadata_valid_name_values(style,
                                                      font_metadata,
                                                      font_familynames,
                                                      typographic_familynames):
    """METADATA.pb font.name field contains font name in right format?"""
    from fontbakery.constants import RIBBI_STYLE_NAMES
    if style in RIBBI_STYLE_NAMES:
        familynames = font_familynames
    else:
        familynames = typographic_familynames
    failed = False
    for font_familyname in familynames:
        if font_familyname not in font_metadata.name:
            failed = True
            yield FAIL, ("METADATA.pb font.name field (\"{}\")"
                         " does not match correct font name format (\"{}\")."
                         "").format(font_metadata.name, font_familyname)
    if not failed:
        yield PASS, ("METADATA.pb font.name field contains"
                     " font name in right format.")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_valid_name_values", "(", "style", ",", "font_metadata", ",", "font_familynames", ",", "typographic_familynames", ")", ":", "from", "fontbakery", ".", "constants", "import", "RIBBI_STYLE_NAMES", "if", "style", "in", "RIBBI_STYLE_NAMES", ":", "familynames", "=", "font_familynames", "else", ":", "familynames", "=", "typographic_familynames", "failed", "=", "False", "for", "font_familyname", "in", "familynames", ":", "if", "font_familyname", "not", "in", "font_metadata", ".", "name", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"METADATA.pb font.name field (\\\"{}\\\")\"", "\" does not match correct font name format (\\\"{}\\\").\"", "\"\"", ")", ".", "format", "(", "font_metadata", ".", "name", ",", "font_familyname", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"METADATA.pb font.name field contains\"", "\" font name in right format.\"", ")" ]
METADATA.pb font.name field contains font name in right format?
[ "METADATA", ".", "pb", "font", ".", "name", "field", "contains", "font", "name", "in", "right", "format?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1792-L1813
9,811
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_valid_full_name_values
original_string:
def com_google_fonts_check_metadata_valid_full_name_values(style,
                                                           font_metadata,
                                                           font_familynames,
                                                           typographic_familynames):
    """METADATA.pb font.full_name field contains font name in right format?"""
    from fontbakery.constants import RIBBI_STYLE_NAMES
    if style in RIBBI_STYLE_NAMES:
        familynames = font_familynames
        if familynames == []:
            yield SKIP, "No FONT_FAMILYNAME"
    else:
        familynames = typographic_familynames
        if familynames == []:
            yield SKIP, "No TYPOGRAPHIC_FAMILYNAME"
    for font_familyname in familynames:
        if font_familyname in font_metadata.full_name:
            yield PASS, ("METADATA.pb font.full_name field contains"
                         " font name in right format."
                         " ('{}' in '{}')").format(font_familyname,
                                                   font_metadata.full_name)
        else:
            yield FAIL, ("METADATA.pb font.full_name field (\"{}\")"
                         " does not match correct font name format (\"{}\")."
                         "").format(font_metadata.full_name, font_familyname)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_valid_full_name_values", "(", "style", ",", "font_metadata", ",", "font_familynames", ",", "typographic_familynames", ")", ":", "from", "fontbakery", ".", "constants", "import", "RIBBI_STYLE_NAMES", "if", "style", "in", "RIBBI_STYLE_NAMES", ":", "familynames", "=", "font_familynames", "if", "familynames", "==", "[", "]", ":", "yield", "SKIP", ",", "\"No FONT_FAMILYNAME\"", "else", ":", "familynames", "=", "typographic_familynames", "if", "familynames", "==", "[", "]", ":", "yield", "SKIP", ",", "\"No TYPOGRAPHIC_FAMILYNAME\"", "for", "font_familyname", "in", "familynames", ":", "if", "font_familyname", "in", "font_metadata", ".", "full_name", ":", "yield", "PASS", ",", "(", "\"METADATA.pb font.full_name field contains\"", "\" font name in right format.\"", "\" ('{}' in '{}')\"", ")", ".", "format", "(", "font_familyname", ",", "font_metadata", ".", "full_name", ")", "else", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb font.full_name field (\\\"{}\\\")\"", "\" does not match correct font name format (\\\"{}\\\").\"", "\"\"", ")", ".", "format", "(", "font_metadata", ".", "full_name", ",", "font_familyname", ")" ]
METADATA.pb font.full_name field contains font name in right format?
[ "METADATA", ".", "pb", "font", ".", "full_name", "field", "contains", "font", "name", "in", "right", "format?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1821-L1846
9,812
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_valid_filename_values
original_string:
def com_google_fonts_check_metadata_valid_filename_values(font, family_metadata):
    """METADATA.pb font.filename field contains font name in right format?"""
    expected = os.path.basename(font)
    failed = True
    for font_metadata in family_metadata.fonts:
        if font_metadata.filename == expected:
            failed = False
            yield PASS, ("METADATA.pb filename field contains"
                         " font name in right format.")
            break
    if failed:
        yield FAIL, ("None of the METADATA.pb filename fields match"
                     f" correct font name format (\"{expected}\").")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_valid_filename_values", "(", "font", ",", "family_metadata", ")", ":", "expected", "=", "os", ".", "path", ".", "basename", "(", "font", ")", "failed", "=", "True", "for", "font_metadata", "in", "family_metadata", ".", "fonts", ":", "if", "font_metadata", ".", "filename", "==", "expected", ":", "failed", "=", "False", "yield", "PASS", ",", "(", "\"METADATA.pb filename field contains\"", "\" font name in right format.\"", ")", "break", "if", "failed", ":", "yield", "FAIL", ",", "(", "\"None of the METADATA.pb filename fields match\"", "f\" correct font name format (\\\"{expected}\\\").\"", ")" ]
METADATA.pb font.filename field contains font name in right format?
[ "METADATA", ".", "pb", "font", ".", "filename", "field", "contains", "font", "name", "in", "right", "format?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1855-L1868
9,813
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_valid_post_script_name_values
original_string:
def com_google_fonts_check_metadata_valid_post_script_name_values(font_metadata,
                                                                   font_familynames):
    """METADATA.pb font.post_script_name field
    contains font name in right format?
    """
    for font_familyname in font_familynames:
        psname = "".join(str(font_familyname).split())
        if psname in "".join(font_metadata.post_script_name.split("-")):
            yield PASS, ("METADATA.pb postScriptName field"
                         " contains font name in right format.")
        else:
            yield FAIL, ("METADATA.pb postScriptName (\"{}\")"
                         " does not match correct font name format (\"{}\")."
                         "").format(font_metadata.post_script_name, font_familyname)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_valid_post_script_name_values", "(", "font_metadata", ",", "font_familynames", ")", ":", "for", "font_familyname", "in", "font_familynames", ":", "psname", "=", "\"\"", ".", "join", "(", "str", "(", "font_familyname", ")", ".", "split", "(", ")", ")", "if", "psname", "in", "\"\"", ".", "join", "(", "font_metadata", ".", "post_script_name", ".", "split", "(", "\"-\"", ")", ")", ":", "yield", "PASS", ",", "(", "\"METADATA.pb postScriptName field\"", "\" contains font name in right format.\"", ")", "else", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb postScriptName (\\\"{}\\\")\"", "\" does not match correct font name format (\\\"{}\\\").\"", "\"\"", ")", ".", "format", "(", "font_metadata", ".", "post_script_name", ",", "font_familyname", ")" ]
METADATA.pb font.post_script_name field contains font name in right format?
[ "METADATA", ".", "pb", "font", ".", "post_script_name", "field", "contains", "font", "name", "in", "right", "format?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1876-L1890
9,814
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_valid_copyright
original_string:
def com_google_fonts_check_metadata_valid_copyright(font_metadata):
    """Copyright notices match canonical pattern in METADATA.pb"""
    import re
    string = font_metadata.copyright
    does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)',
                           string)
    if does_match:
        yield PASS, "METADATA.pb copyright string is good"
    else:
        yield FAIL, ("METADATA.pb: Copyright notices should match"
                     " a pattern similar to:"
                     " 'Copyright 2017 The Familyname"
                     " Project Authors (git url)'\n"
                     "But instead we have got:"
                     " '{}'").format(string)
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_valid_copyright", "(", "font_metadata", ")", ":", "import", "re", "string", "=", "font_metadata", ".", "copyright", "does_match", "=", "re", ".", "search", "(", "r'Copyright [0-9]{4} The .* Project Authors \\([^\\@]*\\)'", ",", "string", ")", "if", "does_match", ":", "yield", "PASS", ",", "\"METADATA.pb copyright string is good\"", "else", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb: Copyright notices should match\"", "\" a pattern similar to:\"", "\" 'Copyright 2017 The Familyname\"", "\" Project Authors (git url)'\\n\"", "\"But instead we have got:\"", "\" '{}'\"", ")", ".", "format", "(", "string", ")" ]
Copyright notices match canonical pattern in METADATA.pb
[ "Copyright", "notices", "match", "canonical", "pattern", "in", "METADATA", ".", "pb" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1897-L1911
9,815
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_font_copyright
original_string:
def com_google_fonts_check_font_copyright(ttFont):
    """Copyright notices match canonical pattern in fonts"""
    import re
    from fontbakery.utils import get_name_entry_strings
    failed = False
    for string in get_name_entry_strings(ttFont, NameID.COPYRIGHT_NOTICE):
        does_match = re.search(r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)',
                               string)
        if does_match:
            yield PASS, ("Name Table entry: Copyright field '{}'"
                         " matches canonical pattern.").format(string)
        else:
            failed = True
            yield FAIL, ("Name Table entry: Copyright notices should match"
                         " a pattern similar to:"
                         " 'Copyright 2017 The Familyname"
                         " Project Authors (git url)'\n"
                         "But instead we have got:"
                         " '{}'").format(string)
    if not failed:
        yield PASS, "Name table copyright entries are good"
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_font_copyright", "(", "ttFont", ")", ":", "import", "re", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "failed", "=", "False", "for", "string", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "COPYRIGHT_NOTICE", ")", ":", "does_match", "=", "re", ".", "search", "(", "r'Copyright [0-9]{4} The .* Project Authors \\([^\\@]*\\)'", ",", "string", ")", "if", "does_match", ":", "yield", "PASS", ",", "(", "\"Name Table entry: Copyright field '{}'\"", "\" matches canonical pattern.\"", ")", ".", "format", "(", "string", ")", "else", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Name Table entry: Copyright notices should match\"", "\" a pattern similar to:\"", "\" 'Copyright 2017 The Familyname\"", "\" Project Authors (git url)'\\n\"", "\"But instead we have got:\"", "\" '{}'\"", ")", ".", "format", "(", "string", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"Name table copyright entries are good\"" ]
Copyright notices match canonical pattern in fonts
[ "Copyright", "notices", "match", "canonical", "pattern", "in", "fonts" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L1917-L1938
9,816
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_italic_style
original_string:
def com_google_fonts_check_metadata_italic_style(ttFont, font_metadata):
    """METADATA.pb font.style "italic" matches font internals?"""
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import MacStyle
    if font_metadata.style != "italic":
        yield SKIP, "This check only applies to italic fonts."
    else:
        font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
        if len(font_fullname) == 0:
            yield SKIP, "Font lacks fullname entries in name table."
            # this fail scenario was already checked above
            # (passing those previous checks is a prerequisite for this one)
            # FIXME: Could we pack this into a condition ?
        else:
            # FIXME: here we only check the first name entry.
            # Should we iterate over them all ? Or should we check
            # if they're all the same?
            font_fullname = font_fullname[0]
            if not bool(ttFont["head"].macStyle & MacStyle.ITALIC):
                yield FAIL, Message("bad-macstyle",
                                    "METADATA.pb style has been set to italic"
                                    " but font macStyle is improperly set.")
            elif not font_fullname.split("-")[-1].endswith("Italic"):
                yield FAIL, Message("bad-fullfont-name",
                                    ("Font macStyle Italic bit is set"
                                     " but nameID {} (\"{}\") is not ended with"
                                     " \"Italic\"").format(NameID.FULL_FONT_NAME,
                                                           font_fullname))
            else:
                yield PASS, ("OK: METADATA.pb font.style \"italic\""
                             " matches font internals.")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_italic_style", "(", "ttFont", ",", "font_metadata", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "from", "fontbakery", ".", "constants", "import", "MacStyle", "if", "font_metadata", ".", "style", "!=", "\"italic\"", ":", "yield", "SKIP", ",", "\"This check only applies to italic fonts.\"", "else", ":", "font_fullname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FULL_FONT_NAME", ")", "if", "len", "(", "font_fullname", ")", "==", "0", ":", "yield", "SKIP", ",", "\"Font lacks fullname entries in name table.\"", "# this fail scenario was already checked above", "# (passing those previous checks is a prerequisite for this one)", "# FIXME: Could we pack this into a condition ?", "else", ":", "# FIXME: here we only check the first name entry.", "# Should we iterate over them all ? Or should we check", "# if they're all the same?", "font_fullname", "=", "font_fullname", "[", "0", "]", "if", "not", "bool", "(", "ttFont", "[", "\"head\"", "]", ".", "macStyle", "&", "MacStyle", ".", "ITALIC", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"bad-macstyle\"", ",", "\"METADATA.pb style has been set to italic\"", "\" but font macStyle is improperly set.\"", ")", "elif", "not", "font_fullname", ".", "split", "(", "\"-\"", ")", "[", "-", "1", "]", ".", "endswith", "(", "\"Italic\"", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"bad-fullfont-name\"", ",", "(", "\"Font macStyle Italic bit is set\"", "\" but nameID {} (\\\"{}\\\") is not ended with\"", "\" \\\"Italic\\\"\"", ")", ".", "format", "(", "NameID", ".", "FULL_FONT_NAME", ",", "font_fullname", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"OK: METADATA.pb font.style \\\"italic\\\"\"", "\" matches font internals.\"", ")" ]
METADATA.pb font.style "italic" matches font internals?
[ "METADATA", ".", "pb", "font", ".", "style", "italic", "matches", "font", "internals?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2029-L2061
9,817
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_normal_style
original_string:
def com_google_fonts_check_metadata_normal_style(ttFont, font_metadata):
    """METADATA.pb font.style "normal" matches font internals?"""
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import MacStyle
    if font_metadata.style != "normal":
        yield SKIP, "This check only applies to normal fonts."
        # FIXME: declare a common condition called "normal_style"
    else:
        font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
        font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
        if len(font_familyname) == 0 or len(font_fullname) == 0:
            yield SKIP, ("Font lacks familyname and/or"
                         " fullname entries in name table.")
            # FIXME: This is the same SKIP condition as in check/metadata/italic_style
            # so we definitely need to address them with a common condition!
        else:
            font_familyname = font_familyname[0]
            font_fullname = font_fullname[0]
            if bool(ttFont["head"].macStyle & MacStyle.ITALIC):
                yield FAIL, Message("bad-macstyle",
                                    ("METADATA.pb style has been set to normal"
                                     " but font macStyle is improperly set."))
            elif font_familyname.split("-")[-1].endswith('Italic'):
                yield FAIL, Message("familyname-italic",
                                    ("Font macStyle indicates a non-Italic font, but"
                                     " nameID {} (FONT_FAMILY_NAME: \"{}\") ends with"
                                     " \"Italic\".").format(NameID.FONT_FAMILY_NAME,
                                                            font_familyname))
            elif font_fullname.split("-")[-1].endswith("Italic"):
                yield FAIL, Message("fullfont-italic",
                                    ("Font macStyle indicates a non-Italic font but"
                                     " nameID {} (FULL_FONT_NAME: \"{}\") ends with"
                                     " \"Italic\".").format(NameID.FULL_FONT_NAME,
                                                            font_fullname))
            else:
                yield PASS, ("METADATA.pb font.style \"normal\""
                             " matches font internals.")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_normal_style", "(", "ttFont", ",", "font_metadata", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "from", "fontbakery", ".", "constants", "import", "MacStyle", "if", "font_metadata", ".", "style", "!=", "\"normal\"", ":", "yield", "SKIP", ",", "\"This check only applies to normal fonts.\"", "# FIXME: declare a common condition called \"normal_style\"", "else", ":", "font_familyname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", "font_fullname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FULL_FONT_NAME", ")", "if", "len", "(", "font_familyname", ")", "==", "0", "or", "len", "(", "font_fullname", ")", "==", "0", ":", "yield", "SKIP", ",", "(", "\"Font lacks familyname and/or\"", "\" fullname entries in name table.\"", ")", "# FIXME: This is the same SKIP condition as in check/metadata/italic_style", "# so we definitely need to address them with a common condition!", "else", ":", "font_familyname", "=", "font_familyname", "[", "0", "]", "font_fullname", "=", "font_fullname", "[", "0", "]", "if", "bool", "(", "ttFont", "[", "\"head\"", "]", ".", "macStyle", "&", "MacStyle", ".", "ITALIC", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"bad-macstyle\"", ",", "(", "\"METADATA.pb style has been set to normal\"", "\" but font macStyle is improperly set.\"", ")", ")", "elif", "font_familyname", ".", "split", "(", "\"-\"", ")", "[", "-", "1", "]", ".", "endswith", "(", "'Italic'", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"familyname-italic\"", ",", "(", "\"Font macStyle indicates a non-Italic font, but\"", "\" nameID {} (FONT_FAMILY_NAME: \\\"{}\\\") ends with\"", "\" \\\"Italic\\\".\"", ")", ".", "format", "(", "NameID", ".", "FONT_FAMILY_NAME", ",", "font_familyname", ")", ")", "elif", "font_fullname", ".", "split", "(", "\"-\"", ")", "[", "-", "1", "]", ".", "endswith", "(", "\"Italic\"", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"fullfont-italic\"", ",", "(", "\"Font macStyle indicates a non-Italic font but\"", "\" nameID {} (FULL_FONT_NAME: \\\"{}\\\") ends with\"", "\" \\\"Italic\\\".\"", ")", ".", "format", "(", "NameID", ".", "FULL_FONT_NAME", ",", "font_fullname", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"METADATA.pb font.style \\\"normal\\\"\"", "\" matches font internals.\"", ")" ]
METADATA.pb font.style "normal" matches font internals?
[ "METADATA", ".", "pb", "font", ".", "style", "normal", "matches", "font", "internals?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2068-L2106
9,818
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_nameid_family_and_full_names
original_string:
def com_google_fonts_check_metadata_nameid_family_and_full_names(ttFont, font_metadata):
    """METADATA.pb font.name and font.full_name fields match
    the values declared on the name table?
    """
    from fontbakery.utils import get_name_entry_strings
    font_familynames = get_name_entry_strings(ttFont, NameID.TYPOGRAPHIC_FAMILY_NAME)
    if font_familynames:
        font_familyname = font_familynames[0]
    else:
        font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)[0]
    font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)[0]
    # FIXME: common condition/name-id check as in the two previous checks.
    if font_fullname != font_metadata.full_name:
        yield FAIL, Message("fullname-mismatch",
                            ("METADATA.pb: Fullname (\"{}\")"
                             " does not match name table"
                             " entry \"{}\" !").format(font_metadata.full_name,
                                                       font_fullname))
    elif font_familyname != font_metadata.name:
        yield FAIL, Message("familyname-mismatch",
                            ("METADATA.pb Family name \"{}\")"
                             " does not match name table"
                             " entry \"{}\" !").format(font_metadata.name,
                                                       font_familyname))
    else:
        yield PASS, ("METADATA.pb familyname and fullName fields"
                     " match corresponding name table entries.")
language: python
code: same text as original_string above
[ "def", "com_google_fonts_check_metadata_nameid_family_and_full_names", "(", "ttFont", ",", "font_metadata", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "font_familynames", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "TYPOGRAPHIC_FAMILY_NAME", ")", "if", "font_familynames", ":", "font_familyname", "=", "font_familynames", "[", "0", "]", "else", ":", "font_familyname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", "[", "0", "]", "font_fullname", "=", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FULL_FONT_NAME", ")", "[", "0", "]", "# FIXME: common condition/name-id check as in the two previous checks.", "if", "font_fullname", "!=", "font_metadata", ".", "full_name", ":", "yield", "FAIL", ",", "Message", "(", "\"fullname-mismatch\"", ",", "(", "\"METADATA.pb: Fullname (\\\"{}\\\")\"", "\" does not match name table\"", "\" entry \\\"{}\\\" !\"", ")", ".", "format", "(", "font_metadata", ".", "full_name", ",", "font_fullname", ")", ")", "elif", "font_familyname", "!=", "font_metadata", ".", "name", ":", "yield", "FAIL", ",", "Message", "(", "\"familyname-mismatch\"", ",", "(", "\"METADATA.pb Family name \\\"{}\\\")\"", "\" does not match name table\"", "\" entry \\\"{}\\\" !\"", ")", ".", "format", "(", "font_metadata", ".", "name", ",", "font_familyname", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"METADATA.pb familyname and fullName fields\"", "\" match corresponding name table entries.\"", ")" ]
METADATA.pb font.name and font.full_name fields match the values declared on the name table?
[ "METADATA", ".", "pb", "font", ".", "name", "and", "font", ".", "full_name", "fields", "match", "the", "values", "declared", "on", "the", "name", "table?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2113-L2141
9,819
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_match_weight_postscript
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata): """METADATA.pb weight matches postScriptName.""" WEIGHTS = { "Thin": 100, "ThinItalic": 100, "ExtraLight": 200, "ExtraLightItalic": 200, "Light": 300, "LightItalic": 300, "Regular": 400, "Italic": 400, "Medium": 500, "MediumItalic": 500, "SemiBold": 600, "SemiBoldItalic": 600, "Bold": 700, "BoldItalic": 700, "ExtraBold": 800, "ExtraBoldItalic": 800, "Black": 900, "BlackItalic": 900 } pair = [] for k, weight in WEIGHTS.items(): if weight == font_metadata.weight: pair.append((k, weight)) if not pair: yield FAIL, ("METADATA.pb: Font weight value ({})" " is invalid.").format(font_metadata.weight) elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or font_metadata.post_script_name.endswith('-' + pair[1][0])): yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")" " and weight value ({}). The name must be" " ended with \"{}\" or \"{}\"." "").format(font_metadata.post_script_name, pair[0][1], pair[0][0], pair[1][0]) else: yield PASS, "Weight value matches postScriptName."
python
def com_google_fonts_check_metadata_match_weight_postscript(font_metadata): """METADATA.pb weight matches postScriptName.""" WEIGHTS = { "Thin": 100, "ThinItalic": 100, "ExtraLight": 200, "ExtraLightItalic": 200, "Light": 300, "LightItalic": 300, "Regular": 400, "Italic": 400, "Medium": 500, "MediumItalic": 500, "SemiBold": 600, "SemiBoldItalic": 600, "Bold": 700, "BoldItalic": 700, "ExtraBold": 800, "ExtraBoldItalic": 800, "Black": 900, "BlackItalic": 900 } pair = [] for k, weight in WEIGHTS.items(): if weight == font_metadata.weight: pair.append((k, weight)) if not pair: yield FAIL, ("METADATA.pb: Font weight value ({})" " is invalid.").format(font_metadata.weight) elif not (font_metadata.post_script_name.endswith('-' + pair[0][0]) or font_metadata.post_script_name.endswith('-' + pair[1][0])): yield FAIL, ("METADATA.pb: Mismatch between postScriptName (\"{}\")" " and weight value ({}). The name must be" " ended with \"{}\" or \"{}\"." "").format(font_metadata.post_script_name, pair[0][1], pair[0][0], pair[1][0]) else: yield PASS, "Weight value matches postScriptName."
[ "def", "com_google_fonts_check_metadata_match_weight_postscript", "(", "font_metadata", ")", ":", "WEIGHTS", "=", "{", "\"Thin\"", ":", "100", ",", "\"ThinItalic\"", ":", "100", ",", "\"ExtraLight\"", ":", "200", ",", "\"ExtraLightItalic\"", ":", "200", ",", "\"Light\"", ":", "300", ",", "\"LightItalic\"", ":", "300", ",", "\"Regular\"", ":", "400", ",", "\"Italic\"", ":", "400", ",", "\"Medium\"", ":", "500", ",", "\"MediumItalic\"", ":", "500", ",", "\"SemiBold\"", ":", "600", ",", "\"SemiBoldItalic\"", ":", "600", ",", "\"Bold\"", ":", "700", ",", "\"BoldItalic\"", ":", "700", ",", "\"ExtraBold\"", ":", "800", ",", "\"ExtraBoldItalic\"", ":", "800", ",", "\"Black\"", ":", "900", ",", "\"BlackItalic\"", ":", "900", "}", "pair", "=", "[", "]", "for", "k", ",", "weight", "in", "WEIGHTS", ".", "items", "(", ")", ":", "if", "weight", "==", "font_metadata", ".", "weight", ":", "pair", ".", "append", "(", "(", "k", ",", "weight", ")", ")", "if", "not", "pair", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb: Font weight value ({})\"", "\" is invalid.\"", ")", ".", "format", "(", "font_metadata", ".", "weight", ")", "elif", "not", "(", "font_metadata", ".", "post_script_name", ".", "endswith", "(", "'-'", "+", "pair", "[", "0", "]", "[", "0", "]", ")", "or", "font_metadata", ".", "post_script_name", ".", "endswith", "(", "'-'", "+", "pair", "[", "1", "]", "[", "0", "]", ")", ")", ":", "yield", "FAIL", ",", "(", "\"METADATA.pb: Mismatch between postScriptName (\\\"{}\\\")\"", "\" and weight value ({}). The name must be\"", "\" ended with \\\"{}\\\" or \\\"{}\\\".\"", "\"\"", ")", ".", "format", "(", "font_metadata", ".", "post_script_name", ",", "pair", "[", "0", "]", "[", "1", "]", ",", "pair", "[", "0", "]", "[", "0", "]", ",", "pair", "[", "1", "]", "[", "0", "]", ")", "else", ":", "yield", "PASS", ",", "\"Weight value matches postScriptName.\"" ]
METADATA.pb weight matches postScriptName.
[ "METADATA", ".", "pb", "weight", "matches", "postScriptName", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2259-L2299
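The pairing logic in the check above reduces to a name-suffix lookup keyed by weight. A minimal standalone sketch of that logic, outside the fontbakery check framework, with only a subset of the weight names shown for brevity:

```python
# Subset of the WEIGHTS mapping above, inverted to weight -> style names.
WEIGHT_NAMES = {
    100: ("Thin", "ThinItalic"),
    400: ("Regular", "Italic"),
    700: ("Bold", "BoldItalic"),
}

def weight_matches_postscript_name(post_script_name, weight):
    """True if the postScriptName ends with a style name matching `weight`."""
    names = WEIGHT_NAMES.get(weight)
    if names is None:
        return False  # weight value outside the recognized 100..900 set
    # The leading dash matters: it stops "Foo-SemiBold" from matching "Bold".
    return any(post_script_name.endswith("-" + name) for name in names)

print(weight_matches_postscript_name("Foo-Bold", 700))      # True
print(weight_matches_postscript_name("Foo-SemiBold", 700))  # False
```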
9,820
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_unitsperem_strict
def com_google_fonts_check_unitsperem_strict(ttFont):
  """ Stricter unitsPerEm criteria for Google Fonts. """
  upm_height = ttFont["head"].unitsPerEm
  ACCEPTABLE = [16, 32, 64, 128, 256, 500, 512, 1000, 1024, 2000, 2048]
  if upm_height not in ACCEPTABLE:
    yield FAIL, (f"Font em size (unitsPerEm) is {upm_height}."
                 " If possible, please consider using 1000"
                 " or even 2000 (which is ideal for"
                 " Variable Fonts)."
                 " The acceptable values for unitsPerEm,"
                 f" though, are: {ACCEPTABLE}.")
  elif upm_height != 2000:
    yield WARN, (f"Even though unitsPerEm ({upm_height}) in"
                 " this font is reasonable, it is strongly"
                 " advised to consider changing it to 2000,"
                 " since it will likely improve the quality of"
                 " Variable Fonts by avoiding excessive"
                 " rounding of coordinates on interpolations.")
  else:
    yield PASS, "Font em size is good (unitsPerEm = 2000)."
python
def com_google_fonts_check_unitsperem_strict(ttFont):
  """ Stricter unitsPerEm criteria for Google Fonts. """
  upm_height = ttFont["head"].unitsPerEm
  ACCEPTABLE = [16, 32, 64, 128, 256, 500, 512, 1000, 1024, 2000, 2048]
  if upm_height not in ACCEPTABLE:
    yield FAIL, (f"Font em size (unitsPerEm) is {upm_height}."
                 " If possible, please consider using 1000"
                 " or even 2000 (which is ideal for"
                 " Variable Fonts)."
                 " The acceptable values for unitsPerEm,"
                 f" though, are: {ACCEPTABLE}.")
  elif upm_height != 2000:
    yield WARN, (f"Even though unitsPerEm ({upm_height}) in"
                 " this font is reasonable, it is strongly"
                 " advised to consider changing it to 2000,"
                 " since it will likely improve the quality of"
                 " Variable Fonts by avoiding excessive"
                 " rounding of coordinates on interpolations.")
  else:
    yield PASS, "Font em size is good (unitsPerEm = 2000)."
[ "def", "com_google_fonts_check_unitsperem_strict", "(", "ttFont", ")", ":", "upm_height", "=", "ttFont", "[", "\"head\"", "]", ".", "unitsPerEm", "ACCEPTABLE", "=", "[", "16", ",", "32", ",", "64", ",", "128", ",", "256", ",", "500", ",", "512", ",", "1000", ",", "1024", ",", "2000", ",", "2048", "]", "if", "upm_height", "not", "in", "ACCEPTABLE", ":", "yield", "FAIL", ",", "(", "f\"Font em size (unitsPerEm) is {upm_height}.\"", "\" If possible, please consider using 1000\"", "\" or even 2000 (which is ideal for\"", "\" Variable Fonts).\"", "\" The acceptable values for unitsPerEm,\"", "f\" though, are: {ACCEPTABLE}.\"", ")", "elif", "upm_height", "!=", "2000", ":", "yield", "WARN", ",", "(", "f\"Even though unitsPerEm ({upm_height}) in\"", "\" this font is reasonable. It is strongly\"", "\" advised to consider changing it to 2000,\"", "\" since it will likely improve the quality of\"", "\" Variable Fonts by avoiding excessive\"", "\" rounding of coordinates on interpolations.\"", ")", "else", ":", "yield", "PASS", ",", "\"Font em size is good (unitsPerEm = 2000).\"" ]
Stricter unitsPerEm criteria for Google Fonts.
[ "Stricter", "unitsPerEm", "criteria", "for", "Google", "Fonts", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2367-L2387
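A tiny sketch of the same three-way decision, with hypothetical em sizes:

```python
ACCEPTABLE = [16, 32, 64, 128, 256, 500, 512, 1000, 1024, 2000, 2048]

def classify_upm(upm):
    if upm not in ACCEPTABLE:
        return "FAIL"   # em size outside the accepted list
    if upm != 2000:
        return "WARN"   # acceptable, but 2000 is preferred for variable fonts
    return "PASS"

print(classify_upm(1000))  # WARN
print(classify_upm(2000))  # PASS
print(classify_upm(750))   # FAIL
```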
9,821
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
remote_styles
def remote_styles(family_metadata): """Get a dictionary of TTFont objects of all font files of a given family as currently hosted at Google Fonts. """ def download_family_from_Google_Fonts(family_name): """Return a zipfile containing a font family hosted on fonts.google.com""" from zipfile import ZipFile from fontbakery.utils import download_file url_prefix = 'https://fonts.google.com/download?family=' url = '{}{}'.format(url_prefix, family_name.replace(' ', '+')) return ZipFile(download_file(url)) def fonts_from_zip(zipfile): '''return a list of fontTools TTFonts''' from fontTools.ttLib import TTFont from io import BytesIO fonts = [] for file_name in zipfile.namelist(): if file_name.lower().endswith(".ttf"): file_obj = BytesIO(zipfile.open(file_name).read()) fonts.append([file_name, TTFont(file_obj)]) return fonts if (not listed_on_gfonts_api(family_metadata) or not family_metadata): return None remote_fonts_zip = download_family_from_Google_Fonts(family_metadata.name) rstyles = {} for remote_filename, remote_font in fonts_from_zip(remote_fonts_zip): remote_style = os.path.splitext(remote_filename)[0] if '-' in remote_style: remote_style = remote_style.split('-')[1] rstyles[remote_style] = remote_font return rstyles
python
def remote_styles(family_metadata): """Get a dictionary of TTFont objects of all font files of a given family as currently hosted at Google Fonts. """ def download_family_from_Google_Fonts(family_name): """Return a zipfile containing a font family hosted on fonts.google.com""" from zipfile import ZipFile from fontbakery.utils import download_file url_prefix = 'https://fonts.google.com/download?family=' url = '{}{}'.format(url_prefix, family_name.replace(' ', '+')) return ZipFile(download_file(url)) def fonts_from_zip(zipfile): '''return a list of fontTools TTFonts''' from fontTools.ttLib import TTFont from io import BytesIO fonts = [] for file_name in zipfile.namelist(): if file_name.lower().endswith(".ttf"): file_obj = BytesIO(zipfile.open(file_name).read()) fonts.append([file_name, TTFont(file_obj)]) return fonts if (not listed_on_gfonts_api(family_metadata) or not family_metadata): return None remote_fonts_zip = download_family_from_Google_Fonts(family_metadata.name) rstyles = {} for remote_filename, remote_font in fonts_from_zip(remote_fonts_zip): remote_style = os.path.splitext(remote_filename)[0] if '-' in remote_style: remote_style = remote_style.split('-')[1] rstyles[remote_style] = remote_font return rstyles
[ "def", "remote_styles", "(", "family_metadata", ")", ":", "def", "download_family_from_Google_Fonts", "(", "family_name", ")", ":", "\"\"\"Return a zipfile containing a font family hosted on fonts.google.com\"\"\"", "from", "zipfile", "import", "ZipFile", "from", "fontbakery", ".", "utils", "import", "download_file", "url_prefix", "=", "'https://fonts.google.com/download?family='", "url", "=", "'{}{}'", ".", "format", "(", "url_prefix", ",", "family_name", ".", "replace", "(", "' '", ",", "'+'", ")", ")", "return", "ZipFile", "(", "download_file", "(", "url", ")", ")", "def", "fonts_from_zip", "(", "zipfile", ")", ":", "'''return a list of fontTools TTFonts'''", "from", "fontTools", ".", "ttLib", "import", "TTFont", "from", "io", "import", "BytesIO", "fonts", "=", "[", "]", "for", "file_name", "in", "zipfile", ".", "namelist", "(", ")", ":", "if", "file_name", ".", "lower", "(", ")", ".", "endswith", "(", "\".ttf\"", ")", ":", "file_obj", "=", "BytesIO", "(", "zipfile", ".", "open", "(", "file_name", ")", ".", "read", "(", ")", ")", "fonts", ".", "append", "(", "[", "file_name", ",", "TTFont", "(", "file_obj", ")", "]", ")", "return", "fonts", "if", "(", "not", "listed_on_gfonts_api", "(", "family_metadata", ")", "or", "not", "family_metadata", ")", ":", "return", "None", "remote_fonts_zip", "=", "download_family_from_Google_Fonts", "(", "family_metadata", ".", "name", ")", "rstyles", "=", "{", "}", "for", "remote_filename", ",", "remote_font", "in", "fonts_from_zip", "(", "remote_fonts_zip", ")", ":", "remote_style", "=", "os", ".", "path", ".", "splitext", "(", "remote_filename", ")", "[", "0", "]", "if", "'-'", "in", "remote_style", ":", "remote_style", "=", "remote_style", ".", "split", "(", "'-'", ")", "[", "1", "]", "rstyles", "[", "remote_style", "]", "=", "remote_font", "return", "rstyles" ]
Get a dictionary of TTFont objects of all font files of a given family as currently hosted at Google Fonts.
[ "Get", "a", "dictionary", "of", "TTFont", "objects", "of", "all", "font", "files", "of", "a", "given", "family", "as", "currently", "hosted", "at", "Google", "Fonts", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2391-L2428
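A small sketch of how the style keys in `rstyles` are derived from the file names inside the downloaded zip; the file names here are hypothetical:

```python
import os

for remote_filename in ["Lobster-Regular.ttf", "Lobster-BoldItalic.ttf", "Lobster.ttf"]:
    remote_style = os.path.splitext(remote_filename)[0]
    if "-" in remote_style:
        remote_style = remote_style.split("-")[1]
    print(remote_filename, "->", remote_style)

# Lobster-Regular.ttf    -> Regular
# Lobster-BoldItalic.ttf -> BoldItalic
# Lobster.ttf            -> Lobster   (no dash, so the bare name is used as the key)
```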
9,822
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
github_gfonts_ttFont
def github_gfonts_ttFont(ttFont, license): """Get a TTFont object of a font downloaded from Google Fonts git repository. """ if not license: return from fontbakery.utils import download_file from fontTools.ttLib import TTFont from urllib.request import HTTPError LICENSE_DIRECTORY = { "OFL.txt": "ofl", "UFL.txt": "ufl", "LICENSE.txt": "apache" } filename = os.path.basename(ttFont.reader.file.name) fontname = filename.split('-')[0].lower() url = ("https://github.com/google/fonts/raw/master" "/{}/{}/{}").format(LICENSE_DIRECTORY[license], fontname, filename) try: fontfile = download_file(url) return TTFont(fontfile) except HTTPError: return None
python
def github_gfonts_ttFont(ttFont, license): """Get a TTFont object of a font downloaded from Google Fonts git repository. """ if not license: return from fontbakery.utils import download_file from fontTools.ttLib import TTFont from urllib.request import HTTPError LICENSE_DIRECTORY = { "OFL.txt": "ofl", "UFL.txt": "ufl", "LICENSE.txt": "apache" } filename = os.path.basename(ttFont.reader.file.name) fontname = filename.split('-')[0].lower() url = ("https://github.com/google/fonts/raw/master" "/{}/{}/{}").format(LICENSE_DIRECTORY[license], fontname, filename) try: fontfile = download_file(url) return TTFont(fontfile) except HTTPError: return None
[ "def", "github_gfonts_ttFont", "(", "ttFont", ",", "license", ")", ":", "if", "not", "license", ":", "return", "from", "fontbakery", ".", "utils", "import", "download_file", "from", "fontTools", ".", "ttLib", "import", "TTFont", "from", "urllib", ".", "request", "import", "HTTPError", "LICENSE_DIRECTORY", "=", "{", "\"OFL.txt\"", ":", "\"ofl\"", ",", "\"UFL.txt\"", ":", "\"ufl\"", ",", "\"LICENSE.txt\"", ":", "\"apache\"", "}", "filename", "=", "os", ".", "path", ".", "basename", "(", "ttFont", ".", "reader", ".", "file", ".", "name", ")", "fontname", "=", "filename", ".", "split", "(", "'-'", ")", "[", "0", "]", ".", "lower", "(", ")", "url", "=", "(", "\"https://github.com/google/fonts/raw/master\"", "\"/{}/{}/{}\"", ")", ".", "format", "(", "LICENSE_DIRECTORY", "[", "license", "]", ",", "fontname", ",", "filename", ")", "try", ":", "fontfile", "=", "download_file", "(", "url", ")", "return", "TTFont", "(", "fontfile", ")", "except", "HTTPError", ":", "return", "None" ]
Get a TTFont object of a font downloaded from Google Fonts git repository.
[ "Get", "a", "TTFont", "object", "of", "a", "font", "downloaded", "from", "Google", "Fonts", "git", "repository", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2442-L2467
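The condition above mostly rewrites the local file name into a raw-file URL on the google/fonts repository. A sketch of that URL construction with hypothetical inputs:

```python
LICENSE_DIRECTORY = {"OFL.txt": "ofl", "UFL.txt": "ufl", "LICENSE.txt": "apache"}

license_filename = "OFL.txt"            # hypothetical license file
font_filename = "MavenPro-Regular.ttf"  # hypothetical font file

fontname = font_filename.split('-')[0].lower()
url = ("https://github.com/google/fonts/raw/master"
       "/{}/{}/{}").format(LICENSE_DIRECTORY[license_filename], fontname, font_filename)
print(url)
# https://github.com/google/fonts/raw/master/ofl/mavenpro/MavenPro-Regular.ttf
```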
9,823
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_version_bump
def com_google_fonts_check_version_bump(ttFont, api_gfonts_ttFont, github_gfonts_ttFont): """Version number has increased since previous release on Google Fonts?""" v_number = ttFont["head"].fontRevision api_gfonts_v_number = api_gfonts_ttFont["head"].fontRevision github_gfonts_v_number = github_gfonts_ttFont["head"].fontRevision failed = False if v_number == api_gfonts_v_number: failed = True yield FAIL, ("Version number {} is equal to" " version on Google Fonts.").format(v_number) if v_number < api_gfonts_v_number: failed = True yield FAIL, ("Version number {} is less than" " version on Google Fonts ({})." "").format(v_number, api_gfonts_v_number) if v_number == github_gfonts_v_number: failed = True yield FAIL, ("Version number {} is equal to" " version on Google Fonts GitHub repo." "").format(v_number) if v_number < github_gfonts_v_number: failed = True yield FAIL, ("Version number {} is less than" " version on Google Fonts GitHub repo ({})." "").format(v_number, github_gfonts_v_number) if not failed: yield PASS, ("Version number {} is greater than" " version on Google Fonts GitHub ({})" " and production servers ({})." "").format(v_number, github_gfonts_v_number, api_gfonts_v_number)
python
def com_google_fonts_check_version_bump(ttFont, api_gfonts_ttFont, github_gfonts_ttFont): """Version number has increased since previous release on Google Fonts?""" v_number = ttFont["head"].fontRevision api_gfonts_v_number = api_gfonts_ttFont["head"].fontRevision github_gfonts_v_number = github_gfonts_ttFont["head"].fontRevision failed = False if v_number == api_gfonts_v_number: failed = True yield FAIL, ("Version number {} is equal to" " version on Google Fonts.").format(v_number) if v_number < api_gfonts_v_number: failed = True yield FAIL, ("Version number {} is less than" " version on Google Fonts ({})." "").format(v_number, api_gfonts_v_number) if v_number == github_gfonts_v_number: failed = True yield FAIL, ("Version number {} is equal to" " version on Google Fonts GitHub repo." "").format(v_number) if v_number < github_gfonts_v_number: failed = True yield FAIL, ("Version number {} is less than" " version on Google Fonts GitHub repo ({})." "").format(v_number, github_gfonts_v_number) if not failed: yield PASS, ("Version number {} is greater than" " version on Google Fonts GitHub ({})" " and production servers ({})." "").format(v_number, github_gfonts_v_number, api_gfonts_v_number)
[ "def", "com_google_fonts_check_version_bump", "(", "ttFont", ",", "api_gfonts_ttFont", ",", "github_gfonts_ttFont", ")", ":", "v_number", "=", "ttFont", "[", "\"head\"", "]", ".", "fontRevision", "api_gfonts_v_number", "=", "api_gfonts_ttFont", "[", "\"head\"", "]", ".", "fontRevision", "github_gfonts_v_number", "=", "github_gfonts_ttFont", "[", "\"head\"", "]", ".", "fontRevision", "failed", "=", "False", "if", "v_number", "==", "api_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is equal to\"", "\" version on Google Fonts.\"", ")", ".", "format", "(", "v_number", ")", "if", "v_number", "<", "api_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is less than\"", "\" version on Google Fonts ({}).\"", "\"\"", ")", ".", "format", "(", "v_number", ",", "api_gfonts_v_number", ")", "if", "v_number", "==", "github_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is equal to\"", "\" version on Google Fonts GitHub repo.\"", "\"\"", ")", ".", "format", "(", "v_number", ")", "if", "v_number", "<", "github_gfonts_v_number", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Version number {} is less than\"", "\" version on Google Fonts GitHub repo ({}).\"", "\"\"", ")", ".", "format", "(", "v_number", ",", "github_gfonts_v_number", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"Version number {} is greater than\"", "\" version on Google Fonts GitHub ({})\"", "\" and production servers ({}).\"", "\"\"", ")", ".", "format", "(", "v_number", ",", "github_gfonts_v_number", ",", "api_gfonts_v_number", ")" ]
Version number has increased since previous release on Google Fonts?
[ "Version", "number", "has", "increased", "since", "previous", "release", "on", "Google", "Fonts?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2475-L2515
9,824
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_production_glyphs_similarity
def com_google_fonts_check_production_glyphs_similarity(ttFont, api_gfonts_ttFont): """Glyphs are similiar to Google Fonts version?""" def glyphs_surface_area(ttFont): """Calculate the surface area of a glyph's ink""" from fontTools.pens.areaPen import AreaPen glyphs = {} glyph_set = ttFont.getGlyphSet() area_pen = AreaPen(glyph_set) for glyph in glyph_set.keys(): glyph_set[glyph].draw(area_pen) area = area_pen.value area_pen.value = 0 glyphs[glyph] = area return glyphs bad_glyphs = [] these_glyphs = glyphs_surface_area(ttFont) gfonts_glyphs = glyphs_surface_area(api_gfonts_ttFont) shared_glyphs = set(these_glyphs) & set(gfonts_glyphs) this_upm = ttFont['head'].unitsPerEm gfonts_upm = api_gfonts_ttFont['head'].unitsPerEm for glyph in shared_glyphs: # Normalize area difference against comparison's upm this_glyph_area = (these_glyphs[glyph] / this_upm) * gfonts_upm gfont_glyph_area = (gfonts_glyphs[glyph] / gfonts_upm) * this_upm if abs(this_glyph_area - gfont_glyph_area) > 7000: bad_glyphs.append(glyph) if bad_glyphs: yield WARN, ("Following glyphs differ greatly from" " Google Fonts version: [{}]").format(", ".join(bad_glyphs)) else: yield PASS, ("Glyphs are similar in" " comparison to the Google Fonts version.")
python
def com_google_fonts_check_production_glyphs_similarity(ttFont, api_gfonts_ttFont): """Glyphs are similiar to Google Fonts version?""" def glyphs_surface_area(ttFont): """Calculate the surface area of a glyph's ink""" from fontTools.pens.areaPen import AreaPen glyphs = {} glyph_set = ttFont.getGlyphSet() area_pen = AreaPen(glyph_set) for glyph in glyph_set.keys(): glyph_set[glyph].draw(area_pen) area = area_pen.value area_pen.value = 0 glyphs[glyph] = area return glyphs bad_glyphs = [] these_glyphs = glyphs_surface_area(ttFont) gfonts_glyphs = glyphs_surface_area(api_gfonts_ttFont) shared_glyphs = set(these_glyphs) & set(gfonts_glyphs) this_upm = ttFont['head'].unitsPerEm gfonts_upm = api_gfonts_ttFont['head'].unitsPerEm for glyph in shared_glyphs: # Normalize area difference against comparison's upm this_glyph_area = (these_glyphs[glyph] / this_upm) * gfonts_upm gfont_glyph_area = (gfonts_glyphs[glyph] / gfonts_upm) * this_upm if abs(this_glyph_area - gfont_glyph_area) > 7000: bad_glyphs.append(glyph) if bad_glyphs: yield WARN, ("Following glyphs differ greatly from" " Google Fonts version: [{}]").format(", ".join(bad_glyphs)) else: yield PASS, ("Glyphs are similar in" " comparison to the Google Fonts version.")
[ "def", "com_google_fonts_check_production_glyphs_similarity", "(", "ttFont", ",", "api_gfonts_ttFont", ")", ":", "def", "glyphs_surface_area", "(", "ttFont", ")", ":", "\"\"\"Calculate the surface area of a glyph's ink\"\"\"", "from", "fontTools", ".", "pens", ".", "areaPen", "import", "AreaPen", "glyphs", "=", "{", "}", "glyph_set", "=", "ttFont", ".", "getGlyphSet", "(", ")", "area_pen", "=", "AreaPen", "(", "glyph_set", ")", "for", "glyph", "in", "glyph_set", ".", "keys", "(", ")", ":", "glyph_set", "[", "glyph", "]", ".", "draw", "(", "area_pen", ")", "area", "=", "area_pen", ".", "value", "area_pen", ".", "value", "=", "0", "glyphs", "[", "glyph", "]", "=", "area", "return", "glyphs", "bad_glyphs", "=", "[", "]", "these_glyphs", "=", "glyphs_surface_area", "(", "ttFont", ")", "gfonts_glyphs", "=", "glyphs_surface_area", "(", "api_gfonts_ttFont", ")", "shared_glyphs", "=", "set", "(", "these_glyphs", ")", "&", "set", "(", "gfonts_glyphs", ")", "this_upm", "=", "ttFont", "[", "'head'", "]", ".", "unitsPerEm", "gfonts_upm", "=", "api_gfonts_ttFont", "[", "'head'", "]", ".", "unitsPerEm", "for", "glyph", "in", "shared_glyphs", ":", "# Normalize area difference against comparison's upm", "this_glyph_area", "=", "(", "these_glyphs", "[", "glyph", "]", "/", "this_upm", ")", "*", "gfonts_upm", "gfont_glyph_area", "=", "(", "gfonts_glyphs", "[", "glyph", "]", "/", "gfonts_upm", ")", "*", "this_upm", "if", "abs", "(", "this_glyph_area", "-", "gfont_glyph_area", ")", ">", "7000", ":", "bad_glyphs", ".", "append", "(", "glyph", ")", "if", "bad_glyphs", ":", "yield", "WARN", ",", "(", "\"Following glyphs differ greatly from\"", "\" Google Fonts version: [{}]\"", ")", ".", "format", "(", "\", \"", ".", "join", "(", "bad_glyphs", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"Glyphs are similar in\"", "\" comparison to the Google Fonts version.\"", ")" ]
Glyphs are similar to Google Fonts version?
[ "Glyphs", "are", "similar", "to", "Google", "Fonts", "version?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2522-L2562
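A worked example of the normalization used in the comparison above; the areas and em sizes are made-up values:

```python
this_upm, gfonts_upm = 1000, 2048
this_area, gfonts_area = 52_000, 200_000  # raw AreaPen results in font units

# Each area is rescaled by the *other* font's unitsPerEm before comparing.
this_norm = (this_area / this_upm) * gfonts_upm       # 106496.0
gfonts_norm = (gfonts_area / gfonts_upm) * this_upm   # 97656.25

print(abs(this_norm - gfonts_norm) > 7000)  # True -> this glyph would be flagged
```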
9,825
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_italic_angle
def com_google_fonts_check_italic_angle(ttFont, style): """Checking post.italicAngle value.""" failed = False value = ttFont["post"].italicAngle # Checking that italicAngle <= 0 if value > 0: failed = True yield FAIL, Message("positive", ("The value of post.italicAngle is positive, which" " is likely a mistake and should become negative," " from {} to {}.").format(value, -value)) # Checking that italicAngle is less than 20° (not good) or 30° (bad) # Also note we invert the value to check it in a clear way if abs(value) > 30: failed = True yield FAIL, Message("over -30 degrees", ("The value of post.italicAngle ({}) is very" " high (over -30°!) and should be" " confirmed.").format(value)) elif abs(value) > 20: failed = True yield WARN, Message("over -20 degrees", ("The value of post.italicAngle ({}) seems very" " high (over -20°!) and should be" " confirmed.").format(value)) # Checking if italicAngle matches font style: if "Italic" in style: if ttFont['post'].italicAngle == 0: failed = True yield FAIL, Message("zero-italic", ("Font is italic, so post.italicAngle" " should be non-zero.")) else: if ttFont["post"].italicAngle != 0: failed = True yield FAIL, Message("non-zero-normal", ("Font is not italic, so post.italicAngle" " should be equal to zero.")) if not failed: yield PASS, ("Value of post.italicAngle is {}" " with style='{}'.").format(value, style)
python
def com_google_fonts_check_italic_angle(ttFont, style): """Checking post.italicAngle value.""" failed = False value = ttFont["post"].italicAngle # Checking that italicAngle <= 0 if value > 0: failed = True yield FAIL, Message("positive", ("The value of post.italicAngle is positive, which" " is likely a mistake and should become negative," " from {} to {}.").format(value, -value)) # Checking that italicAngle is less than 20° (not good) or 30° (bad) # Also note we invert the value to check it in a clear way if abs(value) > 30: failed = True yield FAIL, Message("over -30 degrees", ("The value of post.italicAngle ({}) is very" " high (over -30°!) and should be" " confirmed.").format(value)) elif abs(value) > 20: failed = True yield WARN, Message("over -20 degrees", ("The value of post.italicAngle ({}) seems very" " high (over -20°!) and should be" " confirmed.").format(value)) # Checking if italicAngle matches font style: if "Italic" in style: if ttFont['post'].italicAngle == 0: failed = True yield FAIL, Message("zero-italic", ("Font is italic, so post.italicAngle" " should be non-zero.")) else: if ttFont["post"].italicAngle != 0: failed = True yield FAIL, Message("non-zero-normal", ("Font is not italic, so post.italicAngle" " should be equal to zero.")) if not failed: yield PASS, ("Value of post.italicAngle is {}" " with style='{}'.").format(value, style)
[ "def", "com_google_fonts_check_italic_angle", "(", "ttFont", ",", "style", ")", ":", "failed", "=", "False", "value", "=", "ttFont", "[", "\"post\"", "]", ".", "italicAngle", "# Checking that italicAngle <= 0", "if", "value", ">", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"positive\"", ",", "(", "\"The value of post.italicAngle is positive, which\"", "\" is likely a mistake and should become negative,\"", "\" from {} to {}.\"", ")", ".", "format", "(", "value", ",", "-", "value", ")", ")", "# Checking that italicAngle is less than 20° (not good) or 30° (bad)", "# Also note we invert the value to check it in a clear way", "if", "abs", "(", "value", ")", ">", "30", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"over -30 degrees\"", ",", "(", "\"The value of post.italicAngle ({}) is very\"", "\" high (over -30°!) and should be\"", "\" confirmed.\"", ")", ".", "format", "(", "value", ")", ")", "elif", "abs", "(", "value", ")", ">", "20", ":", "failed", "=", "True", "yield", "WARN", ",", "Message", "(", "\"over -20 degrees\"", ",", "(", "\"The value of post.italicAngle ({}) seems very\"", "\" high (over -20°!) and should be\"", "\" confirmed.\"", ")", ".", "format", "(", "value", ")", ")", "# Checking if italicAngle matches font style:", "if", "\"Italic\"", "in", "style", ":", "if", "ttFont", "[", "'post'", "]", ".", "italicAngle", "==", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"zero-italic\"", ",", "(", "\"Font is italic, so post.italicAngle\"", "\" should be non-zero.\"", ")", ")", "else", ":", "if", "ttFont", "[", "\"post\"", "]", ".", "italicAngle", "!=", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "Message", "(", "\"non-zero-normal\"", ",", "(", "\"Font is not italic, so post.italicAngle\"", "\" should be equal to zero.\"", ")", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"Value of post.italicAngle is {}\"", "\" with style='{}'.\"", ")", ".", "format", "(", "value", ",", "style", ")" ]
Checking post.italicAngle value.
[ "Checking", "post", ".", "italicAngle", "value", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2610-L2655
9,826
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_mac_style
def com_google_fonts_check_mac_style(ttFont, style): """Checking head.macStyle value.""" from fontbakery.utils import check_bit_entry from fontbakery.constants import MacStyle # Checking macStyle ITALIC bit: expected = "Italic" in style yield check_bit_entry(ttFont, "head", "macStyle", expected, bitmask=MacStyle.ITALIC, bitname="ITALIC") # Checking macStyle BOLD bit: expected = style in ["Bold", "BoldItalic"] yield check_bit_entry(ttFont, "head", "macStyle", expected, bitmask=MacStyle.BOLD, bitname="BOLD")
python
def com_google_fonts_check_mac_style(ttFont, style): """Checking head.macStyle value.""" from fontbakery.utils import check_bit_entry from fontbakery.constants import MacStyle # Checking macStyle ITALIC bit: expected = "Italic" in style yield check_bit_entry(ttFont, "head", "macStyle", expected, bitmask=MacStyle.ITALIC, bitname="ITALIC") # Checking macStyle BOLD bit: expected = style in ["Bold", "BoldItalic"] yield check_bit_entry(ttFont, "head", "macStyle", expected, bitmask=MacStyle.BOLD, bitname="BOLD")
[ "def", "com_google_fonts_check_mac_style", "(", "ttFont", ",", "style", ")", ":", "from", "fontbakery", ".", "utils", "import", "check_bit_entry", "from", "fontbakery", ".", "constants", "import", "MacStyle", "# Checking macStyle ITALIC bit:", "expected", "=", "\"Italic\"", "in", "style", "yield", "check_bit_entry", "(", "ttFont", ",", "\"head\"", ",", "\"macStyle\"", ",", "expected", ",", "bitmask", "=", "MacStyle", ".", "ITALIC", ",", "bitname", "=", "\"ITALIC\"", ")", "# Checking macStyle BOLD bit:", "expected", "=", "style", "in", "[", "\"Bold\"", ",", "\"BoldItalic\"", "]", "yield", "check_bit_entry", "(", "ttFont", ",", "\"head\"", ",", "\"macStyle\"", ",", "expected", ",", "bitmask", "=", "MacStyle", ".", "BOLD", ",", "bitname", "=", "\"BOLD\"", ")" ]
Checking head.macStyle value.
[ "Checking", "head", ".", "macStyle", "value", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2668-L2685
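What the two `check_bit_entry` calls boil down to is a pair of bit tests on `head.macStyle` (bit 0 is Bold, bit 1 is Italic in the OpenType spec). A minimal sketch with a hypothetical font file:

```python
from fontTools.ttLib import TTFont

BOLD, ITALIC = 0x01, 0x02  # head.macStyle bit 0 and bit 1

ttFont = TTFont("MyFamily-BoldItalic.ttf")  # hypothetical path
style = "BoldItalic"                        # style as parsed from the file name

mac_style = ttFont["head"].macStyle
print(bool(mac_style & ITALIC) == ("Italic" in style))              # expect True
print(bool(mac_style & BOLD) == (style in ("Bold", "BoldItalic")))  # expect True
```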
9,827
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_contour_count
def com_google_fonts_check_contour_count(ttFont): """Check if each glyph has the recommended amount of contours. This check is useful to assure glyphs aren't incorrectly constructed. The desired_glyph_data module contains the 'recommended' countour count for encoded glyphs. The contour counts are derived from fonts which were chosen for their quality and unique design decisions for particular glyphs. In the future, additional glyph data can be included. A good addition would be the 'recommended' anchor counts for each glyph. """ from fontbakery.glyphdata import desired_glyph_data as glyph_data from fontbakery.utils import (get_font_glyph_data, pretty_print_list) # rearrange data structure: desired_glyph_data = {} for glyph in glyph_data: desired_glyph_data[glyph['unicode']] = glyph bad_glyphs = [] desired_glyph_contours = {f: desired_glyph_data[f]['contours'] for f in desired_glyph_data} font_glyph_data = get_font_glyph_data(ttFont) if font_glyph_data is None: yield FAIL, "This font lacks cmap data." else: font_glyph_contours = {f['unicode']: list(f['contours'])[0] for f in font_glyph_data} shared_glyphs = set(desired_glyph_contours) & set(font_glyph_contours) for glyph in shared_glyphs: if font_glyph_contours[glyph] not in desired_glyph_contours[glyph]: bad_glyphs.append([glyph, font_glyph_contours[glyph], desired_glyph_contours[glyph]]) if len(bad_glyphs) > 0: cmap = ttFont['cmap'].getcmap(PlatformID.WINDOWS, WindowsEncodingID.UNICODE_BMP).cmap bad_glyphs_name = [("Glyph name: {}\t" "Contours detected: {}\t" "Expected: {}").format(cmap[name], count, pretty_print_list(expected, shorten=None, glue="or")) for name, count, expected in bad_glyphs] yield WARN, (("This check inspects the glyph outlines and detects the" " total number of contours in each of them. The expected" " values are infered from the typical ammounts of" " contours observed in a large collection of reference" " font families. The divergences listed below may simply" " indicate a significantly different design on some of" " your glyphs. On the other hand, some of these may flag" " actual bugs in the font such as glyphs mapped to an" " incorrect codepoint. Please consider reviewing" " the design and codepoint assignment of these to make" " sure they are correct.\n" "\n" "The following glyphs do not have the recommended" " number of contours:\n" "\n{}").format('\n'.join(bad_glyphs_name))) else: yield PASS, "All glyphs have the recommended amount of contours"
python
def com_google_fonts_check_contour_count(ttFont): """Check if each glyph has the recommended amount of contours. This check is useful to assure glyphs aren't incorrectly constructed. The desired_glyph_data module contains the 'recommended' countour count for encoded glyphs. The contour counts are derived from fonts which were chosen for their quality and unique design decisions for particular glyphs. In the future, additional glyph data can be included. A good addition would be the 'recommended' anchor counts for each glyph. """ from fontbakery.glyphdata import desired_glyph_data as glyph_data from fontbakery.utils import (get_font_glyph_data, pretty_print_list) # rearrange data structure: desired_glyph_data = {} for glyph in glyph_data: desired_glyph_data[glyph['unicode']] = glyph bad_glyphs = [] desired_glyph_contours = {f: desired_glyph_data[f]['contours'] for f in desired_glyph_data} font_glyph_data = get_font_glyph_data(ttFont) if font_glyph_data is None: yield FAIL, "This font lacks cmap data." else: font_glyph_contours = {f['unicode']: list(f['contours'])[0] for f in font_glyph_data} shared_glyphs = set(desired_glyph_contours) & set(font_glyph_contours) for glyph in shared_glyphs: if font_glyph_contours[glyph] not in desired_glyph_contours[glyph]: bad_glyphs.append([glyph, font_glyph_contours[glyph], desired_glyph_contours[glyph]]) if len(bad_glyphs) > 0: cmap = ttFont['cmap'].getcmap(PlatformID.WINDOWS, WindowsEncodingID.UNICODE_BMP).cmap bad_glyphs_name = [("Glyph name: {}\t" "Contours detected: {}\t" "Expected: {}").format(cmap[name], count, pretty_print_list(expected, shorten=None, glue="or")) for name, count, expected in bad_glyphs] yield WARN, (("This check inspects the glyph outlines and detects the" " total number of contours in each of them. The expected" " values are infered from the typical ammounts of" " contours observed in a large collection of reference" " font families. The divergences listed below may simply" " indicate a significantly different design on some of" " your glyphs. On the other hand, some of these may flag" " actual bugs in the font such as glyphs mapped to an" " incorrect codepoint. Please consider reviewing" " the design and codepoint assignment of these to make" " sure they are correct.\n" "\n" "The following glyphs do not have the recommended" " number of contours:\n" "\n{}").format('\n'.join(bad_glyphs_name))) else: yield PASS, "All glyphs have the recommended amount of contours"
[ "def", "com_google_fonts_check_contour_count", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "glyphdata", "import", "desired_glyph_data", "as", "glyph_data", "from", "fontbakery", ".", "utils", "import", "(", "get_font_glyph_data", ",", "pretty_print_list", ")", "# rearrange data structure:", "desired_glyph_data", "=", "{", "}", "for", "glyph", "in", "glyph_data", ":", "desired_glyph_data", "[", "glyph", "[", "'unicode'", "]", "]", "=", "glyph", "bad_glyphs", "=", "[", "]", "desired_glyph_contours", "=", "{", "f", ":", "desired_glyph_data", "[", "f", "]", "[", "'contours'", "]", "for", "f", "in", "desired_glyph_data", "}", "font_glyph_data", "=", "get_font_glyph_data", "(", "ttFont", ")", "if", "font_glyph_data", "is", "None", ":", "yield", "FAIL", ",", "\"This font lacks cmap data.\"", "else", ":", "font_glyph_contours", "=", "{", "f", "[", "'unicode'", "]", ":", "list", "(", "f", "[", "'contours'", "]", ")", "[", "0", "]", "for", "f", "in", "font_glyph_data", "}", "shared_glyphs", "=", "set", "(", "desired_glyph_contours", ")", "&", "set", "(", "font_glyph_contours", ")", "for", "glyph", "in", "shared_glyphs", ":", "if", "font_glyph_contours", "[", "glyph", "]", "not", "in", "desired_glyph_contours", "[", "glyph", "]", ":", "bad_glyphs", ".", "append", "(", "[", "glyph", ",", "font_glyph_contours", "[", "glyph", "]", ",", "desired_glyph_contours", "[", "glyph", "]", "]", ")", "if", "len", "(", "bad_glyphs", ")", ">", "0", ":", "cmap", "=", "ttFont", "[", "'cmap'", "]", ".", "getcmap", "(", "PlatformID", ".", "WINDOWS", ",", "WindowsEncodingID", ".", "UNICODE_BMP", ")", ".", "cmap", "bad_glyphs_name", "=", "[", "(", "\"Glyph name: {}\\t\"", "\"Contours detected: {}\\t\"", "\"Expected: {}\"", ")", ".", "format", "(", "cmap", "[", "name", "]", ",", "count", ",", "pretty_print_list", "(", "expected", ",", "shorten", "=", "None", ",", "glue", "=", "\"or\"", ")", ")", "for", "name", ",", "count", ",", "expected", "in", "bad_glyphs", "]", "yield", "WARN", ",", "(", "(", "\"This check inspects the glyph outlines and detects the\"", "\" total number of contours in each of them. The expected\"", "\" values are infered from the typical ammounts of\"", "\" contours observed in a large collection of reference\"", "\" font families. The divergences listed below may simply\"", "\" indicate a significantly different design on some of\"", "\" your glyphs. On the other hand, some of these may flag\"", "\" actual bugs in the font such as glyphs mapped to an\"", "\" incorrect codepoint. Please consider reviewing\"", "\" the design and codepoint assignment of these to make\"", "\" sure they are correct.\\n\"", "\"\\n\"", "\"The following glyphs do not have the recommended\"", "\" number of contours:\\n\"", "\"\\n{}\"", ")", ".", "format", "(", "'\\n'", ".", "join", "(", "bad_glyphs_name", ")", ")", ")", "else", ":", "yield", "PASS", ",", "\"All glyphs have the recommended amount of contours\"" ]
Check if each glyph has the recommended amount of contours. This check is useful to ensure glyphs aren't incorrectly constructed. The desired_glyph_data module contains the 'recommended' contour count for encoded glyphs. The contour counts are derived from fonts which were chosen for their quality and unique design decisions for particular glyphs. In the future, additional glyph data can be included. A good addition would be the 'recommended' anchor counts for each glyph.
[ "Check", "if", "each", "glyph", "has", "the", "recommended", "amount", "of", "contours", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2706-L2772
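A stripped-down sketch of the core comparison: count the contours of a few encoded glyphs and flag any count outside an expected set. The expected values below are illustrative, not fontbakery's bundled glyph data, and a TrueType-flavoured font is assumed:

```python
from fontTools.ttLib import TTFont

EXPECTED_CONTOURS = {0x0041: {2}, 0x0042: {3}, 0x006F: {2}}  # 'A', 'B', 'o' (sample values)

ttFont = TTFont("MyFamily-Regular.ttf")  # hypothetical font file
cmap = ttFont.getBestCmap()

for codepoint, expected in EXPECTED_CONTOURS.items():
    glyph_name = cmap.get(codepoint)
    if glyph_name is None:
        continue  # codepoint not encoded in this font
    count = ttFont["glyf"][glyph_name].numberOfContours  # -1 means composite glyph
    if count not in expected:
        print(f"U+{codepoint:04X} {glyph_name}: {count} contours,"
              f" expected {sorted(expected)}")
```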
9,828
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_metadata_nameid_copyright
def com_google_fonts_check_metadata_nameid_copyright(ttFont, font_metadata): """Copyright field for this font on METADATA.pb matches all copyright notice entries on the name table ?""" failed = False for nameRecord in ttFont['name'].names: string = nameRecord.string.decode(nameRecord.getEncoding()) if nameRecord.nameID == NameID.COPYRIGHT_NOTICE and\ string != font_metadata.copyright: failed = True yield FAIL, ("Copyright field for this font on METADATA.pb ('{}')" " differs from a copyright notice entry" " on the name table:" " '{}'").format(font_metadata.copyright, string) if not failed: yield PASS, ("Copyright field for this font on METADATA.pb matches" " copyright notice entries on the name table.")
python
def com_google_fonts_check_metadata_nameid_copyright(ttFont, font_metadata): """Copyright field for this font on METADATA.pb matches all copyright notice entries on the name table ?""" failed = False for nameRecord in ttFont['name'].names: string = nameRecord.string.decode(nameRecord.getEncoding()) if nameRecord.nameID == NameID.COPYRIGHT_NOTICE and\ string != font_metadata.copyright: failed = True yield FAIL, ("Copyright field for this font on METADATA.pb ('{}')" " differs from a copyright notice entry" " on the name table:" " '{}'").format(font_metadata.copyright, string) if not failed: yield PASS, ("Copyright field for this font on METADATA.pb matches" " copyright notice entries on the name table.")
[ "def", "com_google_fonts_check_metadata_nameid_copyright", "(", "ttFont", ",", "font_metadata", ")", ":", "failed", "=", "False", "for", "nameRecord", "in", "ttFont", "[", "'name'", "]", ".", "names", ":", "string", "=", "nameRecord", ".", "string", ".", "decode", "(", "nameRecord", ".", "getEncoding", "(", ")", ")", "if", "nameRecord", ".", "nameID", "==", "NameID", ".", "COPYRIGHT_NOTICE", "and", "string", "!=", "font_metadata", ".", "copyright", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Copyright field for this font on METADATA.pb ('{}')\"", "\" differs from a copyright notice entry\"", "\" on the name table:\"", "\" '{}'\"", ")", ".", "format", "(", "font_metadata", ".", "copyright", ",", "string", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"Copyright field for this font on METADATA.pb matches\"", "\" copyright notice entries on the name table.\"", ")" ]
Copyright field for this font on METADATA.pb matches all copyright notice entries on the name table?
[ "Copyright", "field", "for", "this", "font", "on", "METADATA", ".", "pb", "matches", "all", "copyright", "notice", "entries", "on", "the", "name", "table", "?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2800-L2816
9,829
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_name_mandatory_entries
def com_google_fonts_check_name_mandatory_entries(ttFont, style): """Font has all mandatory 'name' table entries ?""" from fontbakery.utils import get_name_entry_strings from fontbakery.constants import RIBBI_STYLE_NAMES required_nameIDs = [NameID.FONT_FAMILY_NAME, NameID.FONT_SUBFAMILY_NAME, NameID.FULL_FONT_NAME, NameID.POSTSCRIPT_NAME] if style not in RIBBI_STYLE_NAMES: required_nameIDs += [NameID.TYPOGRAPHIC_FAMILY_NAME, NameID.TYPOGRAPHIC_SUBFAMILY_NAME] failed = False # The font must have at least these name IDs: for nameId in required_nameIDs: if len(get_name_entry_strings(ttFont, nameId)) == 0: failed = True yield FAIL, (f"Font lacks entry with nameId={nameId}" f" ({NameID(nameId).name})") if not failed: yield PASS, "Font contains values for all mandatory name table entries."
python
def com_google_fonts_check_name_mandatory_entries(ttFont, style): """Font has all mandatory 'name' table entries ?""" from fontbakery.utils import get_name_entry_strings from fontbakery.constants import RIBBI_STYLE_NAMES required_nameIDs = [NameID.FONT_FAMILY_NAME, NameID.FONT_SUBFAMILY_NAME, NameID.FULL_FONT_NAME, NameID.POSTSCRIPT_NAME] if style not in RIBBI_STYLE_NAMES: required_nameIDs += [NameID.TYPOGRAPHIC_FAMILY_NAME, NameID.TYPOGRAPHIC_SUBFAMILY_NAME] failed = False # The font must have at least these name IDs: for nameId in required_nameIDs: if len(get_name_entry_strings(ttFont, nameId)) == 0: failed = True yield FAIL, (f"Font lacks entry with nameId={nameId}" f" ({NameID(nameId).name})") if not failed: yield PASS, "Font contains values for all mandatory name table entries."
[ "def", "com_google_fonts_check_name_mandatory_entries", "(", "ttFont", ",", "style", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entry_strings", "from", "fontbakery", ".", "constants", "import", "RIBBI_STYLE_NAMES", "required_nameIDs", "=", "[", "NameID", ".", "FONT_FAMILY_NAME", ",", "NameID", ".", "FONT_SUBFAMILY_NAME", ",", "NameID", ".", "FULL_FONT_NAME", ",", "NameID", ".", "POSTSCRIPT_NAME", "]", "if", "style", "not", "in", "RIBBI_STYLE_NAMES", ":", "required_nameIDs", "+=", "[", "NameID", ".", "TYPOGRAPHIC_FAMILY_NAME", ",", "NameID", ".", "TYPOGRAPHIC_SUBFAMILY_NAME", "]", "failed", "=", "False", "# The font must have at least these name IDs:", "for", "nameId", "in", "required_nameIDs", ":", "if", "len", "(", "get_name_entry_strings", "(", "ttFont", ",", "nameId", ")", ")", "==", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "f\"Font lacks entry with nameId={nameId}\"", "f\" ({NameID(nameId).name})\"", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"Font contains values for all mandatory name table entries.\"" ]
Font has all mandatory 'name' table entries?
[ "Font", "has", "all", "mandatory", "name", "table", "entries", "?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L2868-L2888
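A sketch of the same presence test done directly with fontTools; the nameID numbers follow the OpenType spec, and the file name is hypothetical (a non-RIBBI style, so IDs 16 and 17 are included):

```python
from fontTools.ttLib import TTFont

REQUIRED_NAME_IDS = {
    1: "FONT_FAMILY_NAME",
    2: "FONT_SUBFAMILY_NAME",
    4: "FULL_FONT_NAME",
    6: "POSTSCRIPT_NAME",
    16: "TYPOGRAPHIC_FAMILY_NAME",
    17: "TYPOGRAPHIC_SUBFAMILY_NAME",
}

ttFont = TTFont("MyFamily-Medium.ttf")  # hypothetical non-RIBBI style
present = {record.nameID for record in ttFont["name"].names}

for name_id, label in REQUIRED_NAME_IDS.items():
    status = "present" if name_id in present else "MISSING"
    print(f"nameID {name_id:2d} ({label}): {status}")
```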
9,830
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_name_copyright_length
def com_google_fonts_check_name_copyright_length(ttFont): """ Length of copyright notice must not exceed 500 characters. """ from fontbakery.utils import get_name_entries failed = False for notice in get_name_entries(ttFont, NameID.COPYRIGHT_NOTICE): notice_str = notice.string.decode(notice.getEncoding()) if len(notice_str) > 500: failed = True yield FAIL, ("The length of the following copyright notice ({})" " exceeds 500 chars: '{}'" "").format(len(notice_str), notice_str) if not failed: yield PASS, ("All copyright notice name entries on the" " 'name' table are shorter than 500 characters.")
python
def com_google_fonts_check_name_copyright_length(ttFont): """ Length of copyright notice must not exceed 500 characters. """ from fontbakery.utils import get_name_entries failed = False for notice in get_name_entries(ttFont, NameID.COPYRIGHT_NOTICE): notice_str = notice.string.decode(notice.getEncoding()) if len(notice_str) > 500: failed = True yield FAIL, ("The length of the following copyright notice ({})" " exceeds 500 chars: '{}'" "").format(len(notice_str), notice_str) if not failed: yield PASS, ("All copyright notice name entries on the" " 'name' table are shorter than 500 characters.")
[ "def", "com_google_fonts_check_name_copyright_length", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_name_entries", "failed", "=", "False", "for", "notice", "in", "get_name_entries", "(", "ttFont", ",", "NameID", ".", "COPYRIGHT_NOTICE", ")", ":", "notice_str", "=", "notice", ".", "string", ".", "decode", "(", "notice", ".", "getEncoding", "(", ")", ")", "if", "len", "(", "notice_str", ")", ">", "500", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"The length of the following copyright notice ({})\"", "\" exceeds 500 chars: '{}'\"", "\"\"", ")", ".", "format", "(", "len", "(", "notice_str", ")", ",", "notice_str", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "(", "\"All copyright notice name entries on the\"", "\" 'name' table are shorter than 500 characters.\"", ")" ]
Length of copyright notice must not exceed 500 characters.
[ "Length", "of", "copyright", "notice", "must", "not", "exceed", "500", "characters", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3181-L3196
9,831
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_fontv
def com_google_fonts_check_fontv(ttFont): """ Check for font-v versioning """ from fontv.libfv import FontVersion fv = FontVersion(ttFont) if fv.version and (fv.is_development or fv.is_release): yield PASS, "Font version string looks GREAT!" else: yield INFO, ("Version string is: \"{}\"\n" "The version string must ideally include a git commit hash" " and either a 'dev' or a 'release' suffix such as in the" " example below:\n" "\"Version 1.3; git-0d08353-release\"" "").format(fv.get_name_id5_version_string())
python
def com_google_fonts_check_fontv(ttFont): """ Check for font-v versioning """ from fontv.libfv import FontVersion fv = FontVersion(ttFont) if fv.version and (fv.is_development or fv.is_release): yield PASS, "Font version string looks GREAT!" else: yield INFO, ("Version string is: \"{}\"\n" "The version string must ideally include a git commit hash" " and either a 'dev' or a 'release' suffix such as in the" " example below:\n" "\"Version 1.3; git-0d08353-release\"" "").format(fv.get_name_id5_version_string())
[ "def", "com_google_fonts_check_fontv", "(", "ttFont", ")", ":", "from", "fontv", ".", "libfv", "import", "FontVersion", "fv", "=", "FontVersion", "(", "ttFont", ")", "if", "fv", ".", "version", "and", "(", "fv", ".", "is_development", "or", "fv", ".", "is_release", ")", ":", "yield", "PASS", ",", "\"Font version string looks GREAT!\"", "else", ":", "yield", "INFO", ",", "(", "\"Version string is: \\\"{}\\\"\\n\"", "\"The version string must ideally include a git commit hash\"", "\" and either a 'dev' or a 'release' suffix such as in the\"", "\" example below:\\n\"", "\"\\\"Version 1.3; git-0d08353-release\\\"\"", "\"\"", ")", ".", "format", "(", "fv", ".", "get_name_id5_version_string", "(", ")", ")" ]
Check for font-v versioning
[ "Check", "for", "font", "-", "v", "versioning" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3239-L3252
9,832
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_negative_advance_width
def com_google_fonts_check_negative_advance_width(ttFont): """ Check that advance widths cannot be inferred as negative. """ failed = False for glyphName in ttFont["glyf"].glyphs: coords = ttFont["glyf"][glyphName].coordinates rightX = coords[-3][0] leftX = coords[-4][0] advwidth = rightX - leftX if advwidth < 0: failed = True yield FAIL, ("glyph '{}' has bad coordinates on the glyf table," " which may lead to the advance width to be" " interpreted as a negative" " value ({}).").format(glyphName, advwidth) if not failed: yield PASS, "The x-coordinates of all glyphs look good."
python
def com_google_fonts_check_negative_advance_width(ttFont): """ Check that advance widths cannot be inferred as negative. """ failed = False for glyphName in ttFont["glyf"].glyphs: coords = ttFont["glyf"][glyphName].coordinates rightX = coords[-3][0] leftX = coords[-4][0] advwidth = rightX - leftX if advwidth < 0: failed = True yield FAIL, ("glyph '{}' has bad coordinates on the glyf table," " which may lead to the advance width to be" " interpreted as a negative" " value ({}).").format(glyphName, advwidth) if not failed: yield PASS, "The x-coordinates of all glyphs look good."
[ "def", "com_google_fonts_check_negative_advance_width", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "glyphName", "in", "ttFont", "[", "\"glyf\"", "]", ".", "glyphs", ":", "coords", "=", "ttFont", "[", "\"glyf\"", "]", "[", "glyphName", "]", ".", "coordinates", "rightX", "=", "coords", "[", "-", "3", "]", "[", "0", "]", "leftX", "=", "coords", "[", "-", "4", "]", "[", "0", "]", "advwidth", "=", "rightX", "-", "leftX", "if", "advwidth", "<", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"glyph '{}' has bad coordinates on the glyf table,\"", "\" which may lead to the advance width to be\"", "\" interpreted as a negative\"", "\" value ({}).\"", ")", ".", "format", "(", "glyphName", ",", "advwidth", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"The x-coordinates of all glyphs look good.\"" ]
Check that advance widths cannot be inferred as negative.
[ "Check", "that", "advance", "widths", "cannot", "be", "inferred", "as", "negative", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3284-L3300
9,833
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_varfont_generate_static
def com_google_fonts_check_varfont_generate_static(ttFont):
  """ Check a static ttf can be generated from a variable font. """
  import tempfile
  from fontTools.varLib import mutator

  try:
    loc = {k.axisTag: float((k.maxValue + k.minValue) / 2)
           for k in ttFont['fvar'].axes}
    with tempfile.TemporaryFile() as instance:
      font = mutator.instantiateVariableFont(ttFont, loc)
      font.save(instance)
      yield PASS, ("fontTools.varLib.mutator generated a static font "
                   "instance")
  except Exception as e:
    yield FAIL, ("fontTools.varLib.mutator failed to generate a static font "
                 "instance\n{}".format(repr(e)))
python
def com_google_fonts_check_varfont_generate_static(ttFont):
  """ Check a static ttf can be generated from a variable font. """
  import tempfile
  from fontTools.varLib import mutator

  try:
    loc = {k.axisTag: float((k.maxValue + k.minValue) / 2)
           for k in ttFont['fvar'].axes}
    with tempfile.TemporaryFile() as instance:
      font = mutator.instantiateVariableFont(ttFont, loc)
      font.save(instance)
      yield PASS, ("fontTools.varLib.mutator generated a static font "
                   "instance")
  except Exception as e:
    yield FAIL, ("fontTools.varLib.mutator failed to generate a static font "
                 "instance\n{}".format(repr(e)))
[ "def", "com_google_fonts_check_varfont_generate_static", "(", "ttFont", ")", ":", "import", "tempfile", "from", "fontTools", ".", "varLib", "import", "mutator", "try", ":", "loc", "=", "{", "k", ".", "axisTag", ":", "float", "(", "(", "k", ".", "maxValue", "+", "k", ".", "minValue", ")", "/", "2", ")", "for", "k", "in", "ttFont", "[", "'fvar'", "]", ".", "axes", "}", "with", "tempfile", ".", "TemporaryFile", "(", ")", "as", "instance", ":", "font", "=", "mutator", ".", "instantiateVariableFont", "(", "ttFont", ",", "loc", ")", "font", ".", "save", "(", "instance", ")", "yield", "PASS", ",", "(", "\"fontTools.varLib.mutator generated a static font \"", "\"instance\"", ")", "except", "Exception", "as", "e", ":", "yield", "FAIL", ",", "(", "\"fontTools.varLib.mutator failed to generated a static font \"", "\"instance\\n{}\"", ".", "format", "(", "repr", "(", "e", ")", ")", ")" ]
Check a static ttf can be generated from a variable font.
[ "Check", "a", "static", "ttf", "can", "be", "generated", "from", "a", "variable", "font", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3326-L3341
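A related standalone sketch, assuming fontTools is installed and "MyVF.ttf" is a hypothetical variable font: instead of instancing at each axis midpoint as the check does, it pins the weight axis and writes the result to disk (output path also hypothetical):

from fontTools.ttLib import TTFont
from fontTools.varLib import mutator

vf = TTFont("MyVF.ttf")                                    # hypothetical variable font
static = mutator.instantiateVariableFont(vf, {"wght": 400})
static.save("MyVF-static.ttf")                             # hypothetical output path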
9,834
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_smart_dropout
def com_google_fonts_check_smart_dropout(ttFont): """Font enables smart dropout control in "prep" table instructions? B8 01 FF PUSHW 0x01FF 85 SCANCTRL (unconditionally turn on dropout control mode) B0 04 PUSHB 0x04 8D SCANTYPE (enable smart dropout control) Smart dropout control means activating rules 1, 2 and 5: Rule 1: If a pixel's center falls within the glyph outline, that pixel is turned on. Rule 2: If a contour falls exactly on a pixel's center, that pixel is turned on. Rule 5: If a scan line between two adjacent pixel centers (either vertical or horizontal) is intersected by both an on-Transition contour and an off-Transition contour and neither of the pixels was already turned on by rules 1 and 2, turn on the pixel which is closer to the midpoint between the on-Transition contour and off-Transition contour. This is "Smart" dropout control. """ INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d" if ("prep" in ttFont and INSTRUCTIONS in ttFont["prep"].program.getBytecode()): yield PASS, ("'prep' table contains instructions" " enabling smart dropout control.") else: yield FAIL, ("'prep' table does not contain TrueType " " instructions enabling smart dropout control." " To fix, export the font with autohinting enabled," " or run ttfautohint on the font, or run the " " `gftools fix-nonhinting` script.")
python
def com_google_fonts_check_smart_dropout(ttFont): """Font enables smart dropout control in "prep" table instructions? B8 01 FF PUSHW 0x01FF 85 SCANCTRL (unconditionally turn on dropout control mode) B0 04 PUSHB 0x04 8D SCANTYPE (enable smart dropout control) Smart dropout control means activating rules 1, 2 and 5: Rule 1: If a pixel's center falls within the glyph outline, that pixel is turned on. Rule 2: If a contour falls exactly on a pixel's center, that pixel is turned on. Rule 5: If a scan line between two adjacent pixel centers (either vertical or horizontal) is intersected by both an on-Transition contour and an off-Transition contour and neither of the pixels was already turned on by rules 1 and 2, turn on the pixel which is closer to the midpoint between the on-Transition contour and off-Transition contour. This is "Smart" dropout control. """ INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d" if ("prep" in ttFont and INSTRUCTIONS in ttFont["prep"].program.getBytecode()): yield PASS, ("'prep' table contains instructions" " enabling smart dropout control.") else: yield FAIL, ("'prep' table does not contain TrueType " " instructions enabling smart dropout control." " To fix, export the font with autohinting enabled," " or run ttfautohint on the font, or run the " " `gftools fix-nonhinting` script.")
[ "def", "com_google_fonts_check_smart_dropout", "(", "ttFont", ")", ":", "INSTRUCTIONS", "=", "b\"\\xb8\\x01\\xff\\x85\\xb0\\x04\\x8d\"", "if", "(", "\"prep\"", "in", "ttFont", "and", "INSTRUCTIONS", "in", "ttFont", "[", "\"prep\"", "]", ".", "program", ".", "getBytecode", "(", ")", ")", ":", "yield", "PASS", ",", "(", "\"'prep' table contains instructions\"", "\" enabling smart dropout control.\"", ")", "else", ":", "yield", "FAIL", ",", "(", "\"'prep' table does not contain TrueType \"", "\" instructions enabling smart dropout control.\"", "\" To fix, export the font with autohinting enabled,\"", "\" or run ttfautohint on the font, or run the \"", "\" `gftools fix-nonhinting` script.\"", ")" ]
Font enables smart dropout control in "prep" table instructions? B8 01 FF PUSHW 0x01FF 85 SCANCTRL (unconditionally turn on dropout control mode) B0 04 PUSHB 0x04 8D SCANTYPE (enable smart dropout control) Smart dropout control means activating rules 1, 2 and 5: Rule 1: If a pixel's center falls within the glyph outline, that pixel is turned on. Rule 2: If a contour falls exactly on a pixel's center, that pixel is turned on. Rule 5: If a scan line between two adjacent pixel centers (either vertical or horizontal) is intersected by both an on-Transition contour and an off-Transition contour and neither of the pixels was already turned on by rules 1 and 2, turn on the pixel which is closer to the midpoint between the on-Transition contour and off-Transition contour. This is "Smart" dropout control.
[ "Font", "enables", "smart", "dropout", "control", "in", "prep", "table", "instructions?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3417-L3450
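A minimal sketch of inspecting the same prep bytecode by hand with fontTools (font path is hypothetical):

from fontTools.ttLib import TTFont

ttFont = TTFont("MyFont-Regular.ttf")  # hypothetical path
if "prep" in ttFont:
    bytecode = ttFont["prep"].program.getBytecode()
    # True when the PUSHW/SCANCTRL/PUSHB/SCANTYPE sequence from the check is present
    print(b"\xb8\x01\xff\x85\xb0\x04\x8d" in bytecode)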
9,835
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_aat
def com_google_fonts_check_aat(ttFont): """Are there unwanted Apple tables?""" UNWANTED_TABLES = { 'EBSC', 'Zaph', 'acnt', 'ankr', 'bdat', 'bhed', 'bloc', 'bmap', 'bsln', 'fdsc', 'feat', 'fond', 'gcid', 'just', 'kerx', 'lcar', 'ltag', 'mort', 'morx', 'opbd', 'prop', 'trak', 'xref' } unwanted_tables_found = [] for table in ttFont.keys(): if table in UNWANTED_TABLES: unwanted_tables_found.append(table) if len(unwanted_tables_found) > 0: yield FAIL, ("Unwanted AAT tables were found" " in the font and should be removed, either by" " fonttools/ttx or by editing them using the tool" " they were built with:" " {}").format(", ".join(unwanted_tables_found)) else: yield PASS, "There are no unwanted AAT tables."
python
def com_google_fonts_check_aat(ttFont): """Are there unwanted Apple tables?""" UNWANTED_TABLES = { 'EBSC', 'Zaph', 'acnt', 'ankr', 'bdat', 'bhed', 'bloc', 'bmap', 'bsln', 'fdsc', 'feat', 'fond', 'gcid', 'just', 'kerx', 'lcar', 'ltag', 'mort', 'morx', 'opbd', 'prop', 'trak', 'xref' } unwanted_tables_found = [] for table in ttFont.keys(): if table in UNWANTED_TABLES: unwanted_tables_found.append(table) if len(unwanted_tables_found) > 0: yield FAIL, ("Unwanted AAT tables were found" " in the font and should be removed, either by" " fonttools/ttx or by editing them using the tool" " they were built with:" " {}").format(", ".join(unwanted_tables_found)) else: yield PASS, "There are no unwanted AAT tables."
[ "def", "com_google_fonts_check_aat", "(", "ttFont", ")", ":", "UNWANTED_TABLES", "=", "{", "'EBSC'", ",", "'Zaph'", ",", "'acnt'", ",", "'ankr'", ",", "'bdat'", ",", "'bhed'", ",", "'bloc'", ",", "'bmap'", ",", "'bsln'", ",", "'fdsc'", ",", "'feat'", ",", "'fond'", ",", "'gcid'", ",", "'just'", ",", "'kerx'", ",", "'lcar'", ",", "'ltag'", ",", "'mort'", ",", "'morx'", ",", "'opbd'", ",", "'prop'", ",", "'trak'", ",", "'xref'", "}", "unwanted_tables_found", "=", "[", "]", "for", "table", "in", "ttFont", ".", "keys", "(", ")", ":", "if", "table", "in", "UNWANTED_TABLES", ":", "unwanted_tables_found", ".", "append", "(", "table", ")", "if", "len", "(", "unwanted_tables_found", ")", ">", "0", ":", "yield", "FAIL", ",", "(", "\"Unwanted AAT tables were found\"", "\" in the font and should be removed, either by\"", "\" fonttools/ttx or by editing them using the tool\"", "\" they built with:\"", "\" {}\"", ")", ".", "format", "(", "\", \"", ".", "join", "(", "unwanted_tables_found", ")", ")", "else", ":", "yield", "PASS", ",", "\"There are no unwanted AAT tables.\"" ]
Are there unwanted Apple tables?
[ "Are", "there", "unwanted", "Apple", "tables?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3477-L3497
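If such tables are present, one common fix is to drop them with fontTools and re-save; a sketch with hypothetical paths and a few example tags taken from the list above:

from fontTools.ttLib import TTFont

ttFont = TTFont("MyFont-Regular.ttf")      # hypothetical path
for tag in ("morx", "kerx", "trak"):       # example tags from UNWANTED_TABLES
    if tag in ttFont:
        del ttFont[tag]
ttFont.save("MyFont-Regular.clean.ttf")    # hypothetical output path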
9,836
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_fvar_name_entries
def com_google_fonts_check_fvar_name_entries(ttFont): """All name entries referenced by fvar instances exist on the name table?""" failed = False for instance in ttFont["fvar"].instances: entries = [entry for entry in ttFont["name"].names if entry.nameID == instance.subfamilyNameID] if len(entries) == 0: failed = True yield FAIL, (f"Named instance with coordinates {instance.coordinates}" f" lacks an entry on the name table (nameID={instance.subfamilyNameID}).") if not failed: yield PASS, "OK"
python
def com_google_fonts_check_fvar_name_entries(ttFont): """All name entries referenced by fvar instances exist on the name table?""" failed = False for instance in ttFont["fvar"].instances: entries = [entry for entry in ttFont["name"].names if entry.nameID == instance.subfamilyNameID] if len(entries) == 0: failed = True yield FAIL, (f"Named instance with coordinates {instance.coordinates}" f" lacks an entry on the name table (nameID={instance.subfamilyNameID}).") if not failed: yield PASS, "OK"
[ "def", "com_google_fonts_check_fvar_name_entries", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "instance", "in", "ttFont", "[", "\"fvar\"", "]", ".", "instances", ":", "entries", "=", "[", "entry", "for", "entry", "in", "ttFont", "[", "\"name\"", "]", ".", "names", "if", "entry", ".", "nameID", "==", "instance", ".", "subfamilyNameID", "]", "if", "len", "(", "entries", ")", "==", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "f\"Named instance with coordinates {instance.coordinates}\"", "f\" lacks an entry on the name table (nameID={instance.subfamilyNameID}).\"", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"OK\"" ]
All name entries referenced by fvar instances exist on the name table?
[ "All", "name", "entries", "referenced", "by", "fvar", "instances", "exist", "on", "the", "name", "table?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3509-L3522
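A quick sketch of listing each named instance's subfamilyNameID together with the resolved name-table string, using fontTools' getDebugName lookup (font path is hypothetical):

from fontTools.ttLib import TTFont

ttFont = TTFont("MyVF.ttf")  # hypothetical variable font
for instance in ttFont["fvar"].instances:
    name = ttFont["name"].getDebugName(instance.subfamilyNameID)  # None if the entry is missing
    print(instance.coordinates, instance.subfamilyNameID, name)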
9,837
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_varfont_weight_instances
def com_google_fonts_check_varfont_weight_instances(ttFont): """Variable font weight coordinates must be multiples of 100.""" failed = False for instance in ttFont["fvar"].instances: if 'wght' in instance.coordinates and instance.coordinates['wght'] % 100 != 0: failed = True yield FAIL, ("Found a variable font instance with" f" 'wght'={instance.coordinates['wght']}." " This should instead be a multiple of 100.") if not failed: yield PASS, "OK"
python
def com_google_fonts_check_varfont_weight_instances(ttFont): """Variable font weight coordinates must be multiples of 100.""" failed = False for instance in ttFont["fvar"].instances: if 'wght' in instance.coordinates and instance.coordinates['wght'] % 100 != 0: failed = True yield FAIL, ("Found a variable font instance with" f" 'wght'={instance.coordinates['wght']}." " This should instead be a multiple of 100.") if not failed: yield PASS, "OK"
[ "def", "com_google_fonts_check_varfont_weight_instances", "(", "ttFont", ")", ":", "failed", "=", "False", "for", "instance", "in", "ttFont", "[", "\"fvar\"", "]", ".", "instances", ":", "if", "'wght'", "in", "instance", ".", "coordinates", "and", "instance", ".", "coordinates", "[", "'wght'", "]", "%", "100", "!=", "0", ":", "failed", "=", "True", "yield", "FAIL", ",", "(", "\"Found an variable font instance with\"", "f\" 'wght'={instance.coordinates['wght']}.\"", "\" This should instead be a multiple of 100.\"", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"OK\"" ]
Variable font weight coordinates must be multiples of 100.
[ "Variable", "font", "weight", "coordinates", "must", "be", "multiples", "of", "100", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3549-L3561
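The same condition can be probed directly outside the check; a sketch with a hypothetical font path:

from fontTools.ttLib import TTFont

ttFont = TTFont("MyVF.ttf")  # hypothetical variable font
bad = [i.coordinates["wght"] for i in ttFont["fvar"].instances
       if "wght" in i.coordinates and i.coordinates["wght"] % 100 != 0]
print(bad)  # expected to be an empty list for a compliant font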
9,838
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_family_tnum_horizontal_metrics
def com_google_fonts_check_family_tnum_horizontal_metrics(fonts): """All tabular figures must have the same width across the RIBBI-family.""" from fontbakery.constants import RIBBI_STYLE_NAMES from fontTools.ttLib import TTFont RIBBI_ttFonts = [TTFont(f) for f in fonts if style(f) in RIBBI_STYLE_NAMES] tnum_widths = {} for ttFont in RIBBI_ttFonts: glyphs = ttFont.getGlyphSet() tnum_glyphs = [(glyph_id, glyphs[glyph_id]) for glyph_id in glyphs.keys() if glyph_id.endswith(".tnum")] for glyph_id, glyph in tnum_glyphs: if glyph.width not in tnum_widths: tnum_widths[glyph.width] = [glyph_id] else: tnum_widths[glyph.width].append(glyph_id) if len(tnum_widths.keys()) > 1: max_num = 0 most_common_width = None for width, glyphs in tnum_widths.items(): if len(glyphs) > max_num: max_num = len(glyphs) most_common_width = width del tnum_widths[most_common_width] yield FAIL, (f"The most common tabular glyph width is {most_common_width}." " But there are other tabular glyphs with different widths" f" such as the following ones:\n\t{tnum_widths}.") else: yield PASS, "OK"
python
def com_google_fonts_check_family_tnum_horizontal_metrics(fonts): """All tabular figures must have the same width across the RIBBI-family.""" from fontbakery.constants import RIBBI_STYLE_NAMES from fontTools.ttLib import TTFont RIBBI_ttFonts = [TTFont(f) for f in fonts if style(f) in RIBBI_STYLE_NAMES] tnum_widths = {} for ttFont in RIBBI_ttFonts: glyphs = ttFont.getGlyphSet() tnum_glyphs = [(glyph_id, glyphs[glyph_id]) for glyph_id in glyphs.keys() if glyph_id.endswith(".tnum")] for glyph_id, glyph in tnum_glyphs: if glyph.width not in tnum_widths: tnum_widths[glyph.width] = [glyph_id] else: tnum_widths[glyph.width].append(glyph_id) if len(tnum_widths.keys()) > 1: max_num = 0 most_common_width = None for width, glyphs in tnum_widths.items(): if len(glyphs) > max_num: max_num = len(glyphs) most_common_width = width del tnum_widths[most_common_width] yield FAIL, (f"The most common tabular glyph width is {most_common_width}." " But there are other tabular glyphs with different widths" f" such as the following ones:\n\t{tnum_widths}.") else: yield PASS, "OK"
[ "def", "com_google_fonts_check_family_tnum_horizontal_metrics", "(", "fonts", ")", ":", "from", "fontbakery", ".", "constants", "import", "RIBBI_STYLE_NAMES", "from", "fontTools", ".", "ttLib", "import", "TTFont", "RIBBI_ttFonts", "=", "[", "TTFont", "(", "f", ")", "for", "f", "in", "fonts", "if", "style", "(", "f", ")", "in", "RIBBI_STYLE_NAMES", "]", "tnum_widths", "=", "{", "}", "for", "ttFont", "in", "RIBBI_ttFonts", ":", "glyphs", "=", "ttFont", ".", "getGlyphSet", "(", ")", "tnum_glyphs", "=", "[", "(", "glyph_id", ",", "glyphs", "[", "glyph_id", "]", ")", "for", "glyph_id", "in", "glyphs", ".", "keys", "(", ")", "if", "glyph_id", ".", "endswith", "(", "\".tnum\"", ")", "]", "for", "glyph_id", ",", "glyph", "in", "tnum_glyphs", ":", "if", "glyph", ".", "width", "not", "in", "tnum_widths", ":", "tnum_widths", "[", "glyph", ".", "width", "]", "=", "[", "glyph_id", "]", "else", ":", "tnum_widths", "[", "glyph", ".", "width", "]", ".", "append", "(", "glyph_id", ")", "if", "len", "(", "tnum_widths", ".", "keys", "(", ")", ")", ">", "1", ":", "max_num", "=", "0", "most_common_width", "=", "None", "for", "width", ",", "glyphs", "in", "tnum_widths", ".", "items", "(", ")", ":", "if", "len", "(", "glyphs", ")", ">", "max_num", ":", "max_num", "=", "len", "(", "glyphs", ")", "most_common_width", "=", "width", "del", "tnum_widths", "[", "most_common_width", "]", "yield", "FAIL", ",", "(", "f\"The most common tabular glyph width is {most_common_width}.\"", "\" But there are other tabular glyphs with different widths\"", "f\" such as the following ones:\\n\\t{tnum_widths}.\"", ")", "else", ":", "yield", "PASS", ",", "\"OK\"" ]
All tabular figures must have the same width across the RIBBI-family.
[ "All", "tabular", "figures", "must", "have", "the", "same", "width", "across", "the", "RIBBI", "-", "family", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3579-L3611
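For a single font, the width-collection step boils down to this sketch (hypothetical path; glyph names ending in ".tnum" are assumed, as in the check above):

from fontTools.ttLib import TTFont

glyphs = TTFont("MyFont-Regular.ttf").getGlyphSet()  # hypothetical path
widths = {name: glyphs[name].width
          for name in glyphs.keys() if name.endswith(".tnum")}
print(widths)  # the check expects all of these widths to be identical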
9,839
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_ligature_carets
def com_google_fonts_check_ligature_carets(ttFont, ligature_glyphs): """Are there caret positions declared for every ligature?""" if ligature_glyphs == -1: yield FAIL, Message("malformed", "Failed to lookup ligatures." " This font file seems to be malformed." " For more info, read:" " https://github.com" "/googlefonts/fontbakery/issues/1596") elif "GDEF" not in ttFont: yield WARN, Message("GDEF-missing", ("GDEF table is missing, but it is mandatory" " to declare it on fonts that provide ligature" " glyphs because the caret (text cursor)" " positioning for each ligature must be" " provided in this table.")) else: lig_caret_list = ttFont["GDEF"].table.LigCaretList if lig_caret_list is None: missing = set(ligature_glyphs) else: missing = set(ligature_glyphs) - set(lig_caret_list.Coverage.glyphs) if lig_caret_list is None or lig_caret_list.LigGlyphCount == 0: yield WARN, Message("lacks-caret-pos", ("This font lacks caret position values for" " ligature glyphs on its GDEF table.")) elif missing: missing = "\n\t- ".join(missing) yield WARN, Message("incomplete-caret-pos-data", ("This font lacks caret positioning" " values for these ligature glyphs:" f"\n\t- {missing}\n\n ")) else: yield PASS, "Looks good!"
python
def com_google_fonts_check_ligature_carets(ttFont, ligature_glyphs): """Are there caret positions declared for every ligature?""" if ligature_glyphs == -1: yield FAIL, Message("malformed", "Failed to lookup ligatures." " This font file seems to be malformed." " For more info, read:" " https://github.com" "/googlefonts/fontbakery/issues/1596") elif "GDEF" not in ttFont: yield WARN, Message("GDEF-missing", ("GDEF table is missing, but it is mandatory" " to declare it on fonts that provide ligature" " glyphs because the caret (text cursor)" " positioning for each ligature must be" " provided in this table.")) else: lig_caret_list = ttFont["GDEF"].table.LigCaretList if lig_caret_list is None: missing = set(ligature_glyphs) else: missing = set(ligature_glyphs) - set(lig_caret_list.Coverage.glyphs) if lig_caret_list is None or lig_caret_list.LigGlyphCount == 0: yield WARN, Message("lacks-caret-pos", ("This font lacks caret position values for" " ligature glyphs on its GDEF table.")) elif missing: missing = "\n\t- ".join(missing) yield WARN, Message("incomplete-caret-pos-data", ("This font lacks caret positioning" " values for these ligature glyphs:" f"\n\t- {missing}\n\n ")) else: yield PASS, "Looks good!"
[ "def", "com_google_fonts_check_ligature_carets", "(", "ttFont", ",", "ligature_glyphs", ")", ":", "if", "ligature_glyphs", "==", "-", "1", ":", "yield", "FAIL", ",", "Message", "(", "\"malformed\"", ",", "\"Failed to lookup ligatures.\"", "\" This font file seems to be malformed.\"", "\" For more info, read:\"", "\" https://github.com\"", "\"/googlefonts/fontbakery/issues/1596\"", ")", "elif", "\"GDEF\"", "not", "in", "ttFont", ":", "yield", "WARN", ",", "Message", "(", "\"GDEF-missing\"", ",", "(", "\"GDEF table is missing, but it is mandatory\"", "\" to declare it on fonts that provide ligature\"", "\" glyphs because the caret (text cursor)\"", "\" positioning for each ligature must be\"", "\" provided in this table.\"", ")", ")", "else", ":", "lig_caret_list", "=", "ttFont", "[", "\"GDEF\"", "]", ".", "table", ".", "LigCaretList", "if", "lig_caret_list", "is", "None", ":", "missing", "=", "set", "(", "ligature_glyphs", ")", "else", ":", "missing", "=", "set", "(", "ligature_glyphs", ")", "-", "set", "(", "lig_caret_list", ".", "Coverage", ".", "glyphs", ")", "if", "lig_caret_list", "is", "None", "or", "lig_caret_list", ".", "LigGlyphCount", "==", "0", ":", "yield", "WARN", ",", "Message", "(", "\"lacks-caret-pos\"", ",", "(", "\"This font lacks caret position values for\"", "\" ligature glyphs on its GDEF table.\"", ")", ")", "elif", "missing", ":", "missing", "=", "\"\\n\\t- \"", ".", "join", "(", "missing", ")", "yield", "WARN", ",", "Message", "(", "\"incomplete-caret-pos-data\"", ",", "(", "\"This font lacks caret positioning\"", "\" values for these ligature glyphs:\"", "f\"\\n\\t- {missing}\\n\\n \"", ")", ")", "else", ":", "yield", "PASS", ",", "\"Looks good!\"" ]
Are there caret positions declared for every ligature?
[ "Are", "there", "caret", "positions", "declared", "for", "every", "ligature?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3660-L3693
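A sketch of reading the same GDEF data directly with fontTools (font path is hypothetical):

from fontTools.ttLib import TTFont

ttFont = TTFont("MyFont-Regular.ttf")  # hypothetical path
if "GDEF" in ttFont:
    lig_caret_list = ttFont["GDEF"].table.LigCaretList
    if lig_caret_list is not None:
        # number of ligature glyphs with caret data, and which glyphs they are
        print(lig_caret_list.LigGlyphCount, lig_caret_list.Coverage.glyphs)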
9,840
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_kerning_for_non_ligated_sequences
def com_google_fonts_check_kerning_for_non_ligated_sequences(ttFont, ligatures, has_kerning_info): """Is there kerning info for non-ligated sequences?""" def look_for_nonligated_kern_info(table): for pairpos in table.SubTable: for i, glyph in enumerate(pairpos.Coverage.glyphs): if not hasattr(pairpos, 'PairSet'): continue for pairvalue in pairpos.PairSet[i].PairValueRecord: kern_pair = (glyph, pairvalue.SecondGlyph) if kern_pair in ligature_pairs: ligature_pairs.remove(kern_pair) def ligatures_str(pairs): result = [f"\t- {first} + {second}" for first, second in pairs] return "\n".join(result) if ligatures == -1: yield FAIL, Message("malformed", "Failed to lookup ligatures." " This font file seems to be malformed." " For more info, read:" " https://github.com" "/googlefonts/fontbakery/issues/1596") else: ligature_pairs = [] for first, comp in ligatures.items(): for components in comp: while components: pair = (first, components[0]) if pair not in ligature_pairs: ligature_pairs.append(pair) first = components[0] components.pop(0) for record in ttFont["GSUB"].table.FeatureList.FeatureRecord: if record.FeatureTag == 'kern': for index in record.Feature.LookupListIndex: lookup = ttFont["GSUB"].table.LookupList.Lookup[index] look_for_nonligated_kern_info(lookup) if ligature_pairs: yield WARN, Message("lacks-kern-info", ("GPOS table lacks kerning info for the following" " non-ligated sequences:\n" "{}\n\n ").format(ligatures_str(ligature_pairs))) else: yield PASS, ("GPOS table provides kerning info for " "all non-ligated sequences.")
python
def com_google_fonts_check_kerning_for_non_ligated_sequences(ttFont, ligatures, has_kerning_info): """Is there kerning info for non-ligated sequences?""" def look_for_nonligated_kern_info(table): for pairpos in table.SubTable: for i, glyph in enumerate(pairpos.Coverage.glyphs): if not hasattr(pairpos, 'PairSet'): continue for pairvalue in pairpos.PairSet[i].PairValueRecord: kern_pair = (glyph, pairvalue.SecondGlyph) if kern_pair in ligature_pairs: ligature_pairs.remove(kern_pair) def ligatures_str(pairs): result = [f"\t- {first} + {second}" for first, second in pairs] return "\n".join(result) if ligatures == -1: yield FAIL, Message("malformed", "Failed to lookup ligatures." " This font file seems to be malformed." " For more info, read:" " https://github.com" "/googlefonts/fontbakery/issues/1596") else: ligature_pairs = [] for first, comp in ligatures.items(): for components in comp: while components: pair = (first, components[0]) if pair not in ligature_pairs: ligature_pairs.append(pair) first = components[0] components.pop(0) for record in ttFont["GSUB"].table.FeatureList.FeatureRecord: if record.FeatureTag == 'kern': for index in record.Feature.LookupListIndex: lookup = ttFont["GSUB"].table.LookupList.Lookup[index] look_for_nonligated_kern_info(lookup) if ligature_pairs: yield WARN, Message("lacks-kern-info", ("GPOS table lacks kerning info for the following" " non-ligated sequences:\n" "{}\n\n ").format(ligatures_str(ligature_pairs))) else: yield PASS, ("GPOS table provides kerning info for " "all non-ligated sequences.")
[ "def", "com_google_fonts_check_kerning_for_non_ligated_sequences", "(", "ttFont", ",", "ligatures", ",", "has_kerning_info", ")", ":", "def", "look_for_nonligated_kern_info", "(", "table", ")", ":", "for", "pairpos", "in", "table", ".", "SubTable", ":", "for", "i", ",", "glyph", "in", "enumerate", "(", "pairpos", ".", "Coverage", ".", "glyphs", ")", ":", "if", "not", "hasattr", "(", "pairpos", ",", "'PairSet'", ")", ":", "continue", "for", "pairvalue", "in", "pairpos", ".", "PairSet", "[", "i", "]", ".", "PairValueRecord", ":", "kern_pair", "=", "(", "glyph", ",", "pairvalue", ".", "SecondGlyph", ")", "if", "kern_pair", "in", "ligature_pairs", ":", "ligature_pairs", ".", "remove", "(", "kern_pair", ")", "def", "ligatures_str", "(", "pairs", ")", ":", "result", "=", "[", "f\"\\t- {first} + {second}\"", "for", "first", ",", "second", "in", "pairs", "]", "return", "\"\\n\"", ".", "join", "(", "result", ")", "if", "ligatures", "==", "-", "1", ":", "yield", "FAIL", ",", "Message", "(", "\"malformed\"", ",", "\"Failed to lookup ligatures.\"", "\" This font file seems to be malformed.\"", "\" For more info, read:\"", "\" https://github.com\"", "\"/googlefonts/fontbakery/issues/1596\"", ")", "else", ":", "ligature_pairs", "=", "[", "]", "for", "first", ",", "comp", "in", "ligatures", ".", "items", "(", ")", ":", "for", "components", "in", "comp", ":", "while", "components", ":", "pair", "=", "(", "first", ",", "components", "[", "0", "]", ")", "if", "pair", "not", "in", "ligature_pairs", ":", "ligature_pairs", ".", "append", "(", "pair", ")", "first", "=", "components", "[", "0", "]", "components", ".", "pop", "(", "0", ")", "for", "record", "in", "ttFont", "[", "\"GSUB\"", "]", ".", "table", ".", "FeatureList", ".", "FeatureRecord", ":", "if", "record", ".", "FeatureTag", "==", "'kern'", ":", "for", "index", "in", "record", ".", "Feature", ".", "LookupListIndex", ":", "lookup", "=", "ttFont", "[", "\"GSUB\"", "]", ".", "table", ".", "LookupList", ".", "Lookup", "[", "index", "]", "look_for_nonligated_kern_info", "(", "lookup", ")", "if", "ligature_pairs", ":", "yield", "WARN", ",", "Message", "(", "\"lacks-kern-info\"", ",", "(", "\"GPOS table lacks kerning info for the following\"", "\" non-ligated sequences:\\n\"", "\"{}\\n\\n \"", ")", ".", "format", "(", "ligatures_str", "(", "ligature_pairs", ")", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"GPOS table provides kerning info for \"", "\"all non-ligated sequences.\"", ")" ]
Is there kerning info for non-ligated sequences?
[ "Is", "there", "kerning", "info", "for", "non", "-", "ligated", "sequences?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3708-L3755
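A sketch of listing which feature tags a font's GPOS table exposes, which is a typical first step when auditing kerning coverage by hand (font path is hypothetical):

from fontTools.ttLib import TTFont

ttFont = TTFont("MyFont-Regular.ttf")  # hypothetical path
if "GPOS" in ttFont:
    tags = {rec.FeatureTag
            for rec in ttFont["GPOS"].table.FeatureList.FeatureRecord}
    print(sorted(tags))  # a 'kern' entry indicates kerning lookups are present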
9,841
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_name_family_and_style_max_length
def com_google_fonts_check_name_family_and_style_max_length(ttFont): """Combined length of family and style must not exceed 27 characters.""" from fontbakery.utils import (get_name_entries, get_name_entry_strings) failed = False for familyname in get_name_entries(ttFont, NameID.FONT_FAMILY_NAME): # we'll only match family/style name entries with the same platform ID: plat = familyname.platformID familyname_str = familyname.string.decode(familyname.getEncoding()) for stylename_str in get_name_entry_strings(ttFont, NameID.FONT_SUBFAMILY_NAME, platformID=plat): if len(familyname_str + stylename_str) > 27: failed = True yield WARN, ("The combined length of family and style" " exceeds 27 chars in the following '{}' entries:" " FONT_FAMILY_NAME = '{}' / SUBFAMILY_NAME = '{}'" "").format(PlatformID(plat).name, familyname_str, stylename_str) yield WARN, ("Please take a look at the conversation at" " https://github.com/googlefonts/fontbakery/issues/2179" " in order to understand the reasoning behind these" " name table records max-length criteria.") if not failed: yield PASS, "All name entries are good."
python
def com_google_fonts_check_name_family_and_style_max_length(ttFont): """Combined length of family and style must not exceed 27 characters.""" from fontbakery.utils import (get_name_entries, get_name_entry_strings) failed = False for familyname in get_name_entries(ttFont, NameID.FONT_FAMILY_NAME): # we'll only match family/style name entries with the same platform ID: plat = familyname.platformID familyname_str = familyname.string.decode(familyname.getEncoding()) for stylename_str in get_name_entry_strings(ttFont, NameID.FONT_SUBFAMILY_NAME, platformID=plat): if len(familyname_str + stylename_str) > 27: failed = True yield WARN, ("The combined length of family and style" " exceeds 27 chars in the following '{}' entries:" " FONT_FAMILY_NAME = '{}' / SUBFAMILY_NAME = '{}'" "").format(PlatformID(plat).name, familyname_str, stylename_str) yield WARN, ("Please take a look at the conversation at" " https://github.com/googlefonts/fontbakery/issues/2179" " in order to understand the reasoning behind these" " name table records max-length criteria.") if not failed: yield PASS, "All name entries are good."
[ "def", "com_google_fonts_check_name_family_and_style_max_length", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "utils", "import", "(", "get_name_entries", ",", "get_name_entry_strings", ")", "failed", "=", "False", "for", "familyname", "in", "get_name_entries", "(", "ttFont", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", ":", "# we'll only match family/style name entries with the same platform ID:", "plat", "=", "familyname", ".", "platformID", "familyname_str", "=", "familyname", ".", "string", ".", "decode", "(", "familyname", ".", "getEncoding", "(", ")", ")", "for", "stylename_str", "in", "get_name_entry_strings", "(", "ttFont", ",", "NameID", ".", "FONT_SUBFAMILY_NAME", ",", "platformID", "=", "plat", ")", ":", "if", "len", "(", "familyname_str", "+", "stylename_str", ")", ">", "27", ":", "failed", "=", "True", "yield", "WARN", ",", "(", "\"The combined length of family and style\"", "\" exceeds 27 chars in the following '{}' entries:\"", "\" FONT_FAMILY_NAME = '{}' / SUBFAMILY_NAME = '{}'\"", "\"\"", ")", ".", "format", "(", "PlatformID", "(", "plat", ")", ".", "name", ",", "familyname_str", ",", "stylename_str", ")", "yield", "WARN", ",", "(", "\"Please take a look at the conversation at\"", "\" https://github.com/googlefonts/fontbakery/issues/2179\"", "\" in order to understand the reasoning behing these\"", "\" name table records max-length criteria.\"", ")", "if", "not", "failed", ":", "yield", "PASS", ",", "\"All name entries are good.\"" ]
Combined length of family and style must not exceed 27 characters.
[ "Combined", "length", "of", "family", "and", "style", "must", "not", "exceed", "27", "characters", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3781-L3807
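The 27-character budget can be probed directly against the name table; a sketch using a hypothetical path (nameIDs 1 and 2 are Family and Subfamily):

from fontTools.ttLib import TTFont

name = TTFont("MyFont-Regular.ttf")["name"]  # hypothetical path
family = name.getDebugName(1) or ""
subfamily = name.getDebugName(2) or ""
print(len(family + subfamily), family, subfamily)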
9,842
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_family_control_chars
def com_google_fonts_check_family_control_chars(ttFonts): """Does font file include unacceptable control character glyphs?""" # list of unacceptable control character glyph names # definition includes the entire control character Unicode block except: # - .null (U+0000) # - CR (U+000D) unacceptable_cc_list = [ "uni0001", "uni0002", "uni0003", "uni0004", "uni0005", "uni0006", "uni0007", "uni0008", "uni0009", "uni000A", "uni000B", "uni000C", "uni000E", "uni000F", "uni0010", "uni0011", "uni0012", "uni0013", "uni0014", "uni0015", "uni0016", "uni0017", "uni0018", "uni0019", "uni001A", "uni001B", "uni001C", "uni001D", "uni001E", "uni001F" ] # a dict with key:value of font path that failed check : list of unacceptable glyph names failed_font_dict = {} for ttFont in ttFonts: font_failed = False unacceptable_glyphs_in_set = [] # a list of unacceptable glyph names identified glyph_name_set = set(ttFont["glyf"].glyphs.keys()) fontname = ttFont.reader.file.name for unacceptable_glyph_name in unacceptable_cc_list: if unacceptable_glyph_name in glyph_name_set: font_failed = True unacceptable_glyphs_in_set.append(unacceptable_glyph_name) if font_failed: failed_font_dict[fontname] = unacceptable_glyphs_in_set if len(failed_font_dict) > 0: unacceptable_cc_report_string = "The following unacceptable control characters were identified:\n" for fnt in failed_font_dict.keys(): unacceptable_cc_report_string += " {}: {}\n".format( fnt, ", ".join(failed_font_dict[fnt]) ) yield FAIL, ("{}".format(unacceptable_cc_report_string)) else: yield PASS, ("Unacceptable control characters were not identified.")
python
def com_google_fonts_check_family_control_chars(ttFonts): """Does font file include unacceptable control character glyphs?""" # list of unacceptable control character glyph names # definition includes the entire control character Unicode block except: # - .null (U+0000) # - CR (U+000D) unacceptable_cc_list = [ "uni0001", "uni0002", "uni0003", "uni0004", "uni0005", "uni0006", "uni0007", "uni0008", "uni0009", "uni000A", "uni000B", "uni000C", "uni000E", "uni000F", "uni0010", "uni0011", "uni0012", "uni0013", "uni0014", "uni0015", "uni0016", "uni0017", "uni0018", "uni0019", "uni001A", "uni001B", "uni001C", "uni001D", "uni001E", "uni001F" ] # a dict with key:value of font path that failed check : list of unacceptable glyph names failed_font_dict = {} for ttFont in ttFonts: font_failed = False unacceptable_glyphs_in_set = [] # a list of unacceptable glyph names identified glyph_name_set = set(ttFont["glyf"].glyphs.keys()) fontname = ttFont.reader.file.name for unacceptable_glyph_name in unacceptable_cc_list: if unacceptable_glyph_name in glyph_name_set: font_failed = True unacceptable_glyphs_in_set.append(unacceptable_glyph_name) if font_failed: failed_font_dict[fontname] = unacceptable_glyphs_in_set if len(failed_font_dict) > 0: unacceptable_cc_report_string = "The following unacceptable control characters were identified:\n" for fnt in failed_font_dict.keys(): unacceptable_cc_report_string += " {}: {}\n".format( fnt, ", ".join(failed_font_dict[fnt]) ) yield FAIL, ("{}".format(unacceptable_cc_report_string)) else: yield PASS, ("Unacceptable control characters were not identified.")
[ "def", "com_google_fonts_check_family_control_chars", "(", "ttFonts", ")", ":", "# list of unacceptable control character glyph names", "# definition includes the entire control character Unicode block except:", "# - .null (U+0000)", "# - CR (U+000D)", "unacceptable_cc_list", "=", "[", "\"uni0001\"", ",", "\"uni0002\"", ",", "\"uni0003\"", ",", "\"uni0004\"", ",", "\"uni0005\"", ",", "\"uni0006\"", ",", "\"uni0007\"", ",", "\"uni0008\"", ",", "\"uni0009\"", ",", "\"uni000A\"", ",", "\"uni000B\"", ",", "\"uni000C\"", ",", "\"uni000E\"", ",", "\"uni000F\"", ",", "\"uni0010\"", ",", "\"uni0011\"", ",", "\"uni0012\"", ",", "\"uni0013\"", ",", "\"uni0014\"", ",", "\"uni0015\"", ",", "\"uni0016\"", ",", "\"uni0017\"", ",", "\"uni0018\"", ",", "\"uni0019\"", ",", "\"uni001A\"", ",", "\"uni001B\"", ",", "\"uni001C\"", ",", "\"uni001D\"", ",", "\"uni001E\"", ",", "\"uni001F\"", "]", "# a dict with key:value of font path that failed check : list of unacceptable glyph names", "failed_font_dict", "=", "{", "}", "for", "ttFont", "in", "ttFonts", ":", "font_failed", "=", "False", "unacceptable_glyphs_in_set", "=", "[", "]", "# a list of unacceptable glyph names identified", "glyph_name_set", "=", "set", "(", "ttFont", "[", "\"glyf\"", "]", ".", "glyphs", ".", "keys", "(", ")", ")", "fontname", "=", "ttFont", ".", "reader", ".", "file", ".", "name", "for", "unacceptable_glyph_name", "in", "unacceptable_cc_list", ":", "if", "unacceptable_glyph_name", "in", "glyph_name_set", ":", "font_failed", "=", "True", "unacceptable_glyphs_in_set", ".", "append", "(", "unacceptable_glyph_name", ")", "if", "font_failed", ":", "failed_font_dict", "[", "fontname", "]", "=", "unacceptable_glyphs_in_set", "if", "len", "(", "failed_font_dict", ")", ">", "0", ":", "unacceptable_cc_report_string", "=", "\"The following unacceptable control characters were identified:\\n\"", "for", "fnt", "in", "failed_font_dict", ".", "keys", "(", ")", ":", "unacceptable_cc_report_string", "+=", "\" {}: {}\\n\"", ".", "format", "(", "fnt", ",", "\", \"", ".", "join", "(", "failed_font_dict", "[", "fnt", "]", ")", ")", "yield", "FAIL", ",", "(", "\"{}\"", ".", "format", "(", "unacceptable_cc_report_string", ")", ")", "else", ":", "yield", "PASS", ",", "(", "\"Unacceptable control characters were not identified.\"", ")" ]
Does font file include unacceptable control character glyphs?
[ "Does", "font", "file", "include", "unacceptable", "control", "character", "glyphs?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3818-L3882
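As the comments above note, the hard-coded list is just the C0 control block minus .null (U+0000) and CR (U+000D); a sketch of generating it programmatically instead of typing it out:

# builds the same glyph-name list used by the check above
unacceptable_cc_list = ["uni%04X" % cp for cp in range(0x0001, 0x0020) if cp != 0x000D]
print(unacceptable_cc_list[:3])  # ['uni0001', 'uni0002', 'uni0003']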
9,843
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
gfonts_repo_structure
def gfonts_repo_structure(fonts): """ The family at the given font path follows the files and directory structure typical of a font project hosted on the Google Fonts repo on GitHub? """ from fontbakery.utils import get_absolute_path # FIXME: Improve this with more details # about the expected structure. abspath = get_absolute_path(fonts[0]) return abspath.split(os.path.sep)[-3] in ["ufl", "ofl", "apache"]
python
def gfonts_repo_structure(fonts): """ The family at the given font path follows the files and directory structure typical of a font project hosted on the Google Fonts repo on GitHub? """ from fontbakery.utils import get_absolute_path # FIXME: Improve this with more details # about the expected structure. abspath = get_absolute_path(fonts[0]) return abspath.split(os.path.sep)[-3] in ["ufl", "ofl", "apache"]
[ "def", "gfonts_repo_structure", "(", "fonts", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_absolute_path", "# FIXME: Improve this with more details", "# about the expected structure.", "abspath", "=", "get_absolute_path", "(", "fonts", "[", "0", "]", ")", "return", "abspath", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "3", "]", "in", "[", "\"ufl\"", ",", "\"ofl\"", ",", "\"apache\"", "]" ]
The family at the given font path follows the files and directory structure typical of a font project hosted on the Google Fonts repo on GitHub?
[ "The", "family", "at", "the", "given", "font", "path", "follows", "the", "files", "and", "directory", "structure", "typical", "of", "a", "font", "project", "hosted", "on", "the", "Google", "Fonts", "repo", "on", "GitHub", "?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3886-L3896
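The directory test reduces to a path split; a sketch with a hypothetical Unix-style path:

import os

abspath = "/repos/fonts/ofl/familyname/FamilyName-Regular.ttf"  # hypothetical path
print(abspath.split(os.path.sep)[-3])  # 'ofl', one of the license directories expected above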
9,844
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_repo_dirname_match_nameid_1
def com_google_fonts_check_repo_dirname_match_nameid_1(fonts, gfonts_repo_structure): """Directory name in GFonts repo structure must match NameID 1 of the regular.""" from fontTools.ttLib import TTFont from fontbakery.utils import (get_name_entry_strings, get_absolute_path, get_regular) regular = get_regular(fonts) if not regular: yield FAIL, "The font seems to lack a regular." entry = get_name_entry_strings(TTFont(regular), NameID.FONT_FAMILY_NAME)[0] expected = entry.lower() expected = "".join(expected.split(' ')) expected = "".join(expected.split('-')) license, familypath, filename = get_absolute_path(regular).split(os.path.sep)[-3:] if familypath == expected: yield PASS, "OK" else: yield FAIL, (f"Family name on the name table ('{entry}') does not match" f" directory name in the repo structure ('{familypath}')." f" Expected '{expected}'.")
python
def com_google_fonts_check_repo_dirname_match_nameid_1(fonts, gfonts_repo_structure): """Directory name in GFonts repo structure must match NameID 1 of the regular.""" from fontTools.ttLib import TTFont from fontbakery.utils import (get_name_entry_strings, get_absolute_path, get_regular) regular = get_regular(fonts) if not regular: yield FAIL, "The font seems to lack a regular." entry = get_name_entry_strings(TTFont(regular), NameID.FONT_FAMILY_NAME)[0] expected = entry.lower() expected = "".join(expected.split(' ')) expected = "".join(expected.split('-')) license, familypath, filename = get_absolute_path(regular).split(os.path.sep)[-3:] if familypath == expected: yield PASS, "OK" else: yield FAIL, (f"Family name on the name table ('{entry}') does not match" f" directory name in the repo structure ('{familypath}')." f" Expected '{expected}'.")
[ "def", "com_google_fonts_check_repo_dirname_match_nameid_1", "(", "fonts", ",", "gfonts_repo_structure", ")", ":", "from", "fontTools", ".", "ttLib", "import", "TTFont", "from", "fontbakery", ".", "utils", "import", "(", "get_name_entry_strings", ",", "get_absolute_path", ",", "get_regular", ")", "regular", "=", "get_regular", "(", "fonts", ")", "if", "not", "regular", ":", "yield", "FAIL", ",", "\"The font seems to lack a regular.\"", "entry", "=", "get_name_entry_strings", "(", "TTFont", "(", "regular", ")", ",", "NameID", ".", "FONT_FAMILY_NAME", ")", "[", "0", "]", "expected", "=", "entry", ".", "lower", "(", ")", "expected", "=", "\"\"", ".", "join", "(", "expected", ".", "split", "(", "' '", ")", ")", "expected", "=", "\"\"", ".", "join", "(", "expected", ".", "split", "(", "'-'", ")", ")", "license", ",", "familypath", ",", "filename", "=", "get_absolute_path", "(", "regular", ")", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "3", ":", "]", "if", "familypath", "==", "expected", ":", "yield", "PASS", ",", "\"OK\"", "else", ":", "yield", "FAIL", ",", "(", "f\"Family name on the name table ('{entry}') does not match\"", "f\" directory name in the repo structure ('{familypath}').\"", "f\" Expected '{expected}'.\"", ")" ]
Directory name in GFonts repo structure must match NameID 1 of the regular.
[ "Directory", "name", "in", "GFonts", "repo", "structure", "must", "match", "NameID", "1", "of", "the", "regular", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3906-L3929
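The expected directory name is simply the family name lower-cased with spaces and hyphens removed; a sketch with a hypothetical family name:

entry = "Playfair Display"                 # hypothetical family name from nameID 1
expected = "".join(entry.lower().split(" "))
expected = "".join(expected.split("-"))
print(expected)                            # 'playfairdisplay'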
9,845
googlefonts/fontbakery
Lib/fontbakery/profiles/os2.py
com_google_fonts_check_family_panose_proportion
def com_google_fonts_check_family_panose_proportion(ttFonts): """Fonts have consistent PANOSE proportion?""" failed = False proportion = None for ttFont in ttFonts: if proportion is None: proportion = ttFont['OS/2'].panose.bProportion if proportion != ttFont['OS/2'].panose.bProportion: failed = True if failed: yield FAIL, ("PANOSE proportion is not" " the same across this family." " In order to fix this," " please make sure that the panose.bProportion value" " is the same in the OS/2 table of all of this family's" " font files.") else: yield PASS, "Fonts have consistent PANOSE proportion."
python
def com_google_fonts_check_family_panose_proportion(ttFonts): """Fonts have consistent PANOSE proportion?""" failed = False proportion = None for ttFont in ttFonts: if proportion is None: proportion = ttFont['OS/2'].panose.bProportion if proportion != ttFont['OS/2'].panose.bProportion: failed = True if failed: yield FAIL, ("PANOSE proportion is not" " the same across this family." " In order to fix this," " please make sure that the panose.bProportion value" " is the same in the OS/2 table of all of this family's" " font files.") else: yield PASS, "Fonts have consistent PANOSE proportion."
[ "def", "com_google_fonts_check_family_panose_proportion", "(", "ttFonts", ")", ":", "failed", "=", "False", "proportion", "=", "None", "for", "ttFont", "in", "ttFonts", ":", "if", "proportion", "is", "None", ":", "proportion", "=", "ttFont", "[", "'OS/2'", "]", ".", "panose", ".", "bProportion", "if", "proportion", "!=", "ttFont", "[", "'OS/2'", "]", ".", "panose", ".", "bProportion", ":", "failed", "=", "True", "if", "failed", ":", "yield", "FAIL", ",", "(", "\"PANOSE proportion is not\"", "\" the same accross this family.\"", "\" In order to fix this,\"", "\" please make sure that the panose.bProportion value\"", "\" is the same in the OS/2 table of all of this family\"", "\" font files.\"", ")", "else", ":", "yield", "PASS", ",", "\"Fonts have consistent PANOSE proportion.\"" ]
Fonts have consistent PANOSE proportion?
[ "Fonts", "have", "consistent", "PANOSE", "proportion?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/os2.py#L14-L32
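Both this check and the family-type check that follows read a single PANOSE byte per font; a sketch of collecting the values across a family (font paths are hypothetical):

from fontTools.ttLib import TTFont

fonts = ["MyFont-Regular.ttf", "MyFont-Bold.ttf"]  # hypothetical family members
values = {f: TTFont(f)["OS/2"].panose.bProportion for f in fonts}
print(values)  # the check expects a single distinct value across the family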
9,846
googlefonts/fontbakery
Lib/fontbakery/profiles/os2.py
com_google_fonts_check_family_panose_familytype
def com_google_fonts_check_family_panose_familytype(ttFonts): """Fonts have consistent PANOSE family type?""" failed = False familytype = None for ttfont in ttFonts: if familytype is None: familytype = ttfont['OS/2'].panose.bFamilyType if familytype != ttfont['OS/2'].panose.bFamilyType: failed = True if failed: yield FAIL, ("PANOSE family type is not" " the same across this family." " In order to fix this," " please make sure that the panose.bFamilyType value" " is the same in the OS/2 table of all of this family's" " font files.") else: yield PASS, "Fonts have consistent PANOSE family type."
python
def com_google_fonts_check_family_panose_familytype(ttFonts): """Fonts have consistent PANOSE family type?""" failed = False familytype = None for ttfont in ttFonts: if familytype is None: familytype = ttfont['OS/2'].panose.bFamilyType if familytype != ttfont['OS/2'].panose.bFamilyType: failed = True if failed: yield FAIL, ("PANOSE family type is not" " the same across this family." " In order to fix this," " please make sure that the panose.bFamilyType value" " is the same in the OS/2 table of all of this family's" " font files.") else: yield PASS, "Fonts have consistent PANOSE family type."
[ "def", "com_google_fonts_check_family_panose_familytype", "(", "ttFonts", ")", ":", "failed", "=", "False", "familytype", "=", "None", "for", "ttfont", "in", "ttFonts", ":", "if", "familytype", "is", "None", ":", "familytype", "=", "ttfont", "[", "'OS/2'", "]", ".", "panose", ".", "bFamilyType", "if", "familytype", "!=", "ttfont", "[", "'OS/2'", "]", ".", "panose", ".", "bFamilyType", ":", "failed", "=", "True", "if", "failed", ":", "yield", "FAIL", ",", "(", "\"PANOSE family type is not\"", "\" the same accross this family.\"", "\" In order to fix this,\"", "\" please make sure that the panose.bFamilyType value\"", "\" is the same in the OS/2 table of all of this family\"", "\" font files.\"", ")", "else", ":", "yield", "PASS", ",", "\"Fonts have consistent PANOSE family type.\"" ]
Fonts have consistent PANOSE family type?
[ "Fonts", "have", "consistent", "PANOSE", "family", "type?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/os2.py#L38-L56
9,847
googlefonts/fontbakery
Lib/fontbakery/profiles/os2.py
com_google_fonts_check_code_pages
def com_google_fonts_check_code_pages(ttFont): """Check code page character ranges""" if not hasattr(ttFont['OS/2'], "ulCodePageRange1") or \ not hasattr(ttFont['OS/2'], "ulCodePageRange2") or \ (ttFont['OS/2'].ulCodePageRange1 == 0 and \ ttFont['OS/2'].ulCodePageRange2 == 0): yield FAIL, ("No code pages defined in the OS/2 table" " ulCodePageRange1 and ulCodePageRange2 fields.") else: yield PASS, "At least one code page is defined."
python
def com_google_fonts_check_code_pages(ttFont): """Check code page character ranges""" if not hasattr(ttFont['OS/2'], "ulCodePageRange1") or \ not hasattr(ttFont['OS/2'], "ulCodePageRange2") or \ (ttFont['OS/2'].ulCodePageRange1 == 0 and \ ttFont['OS/2'].ulCodePageRange2 == 0): yield FAIL, ("No code pages defined in the OS/2 table" " ulCodePageRange1 and ulCodePageRange2 fields.") else: yield PASS, "At least one code page is defined."
[ "def", "com_google_fonts_check_code_pages", "(", "ttFont", ")", ":", "if", "not", "hasattr", "(", "ttFont", "[", "'OS/2'", "]", ",", "\"ulCodePageRange1\"", ")", "or", "not", "hasattr", "(", "ttFont", "[", "'OS/2'", "]", ",", "\"ulCodePageRange2\"", ")", "or", "(", "ttFont", "[", "'OS/2'", "]", ".", "ulCodePageRange1", "==", "0", "and", "ttFont", "[", "'OS/2'", "]", ".", "ulCodePageRange2", "==", "0", ")", ":", "yield", "FAIL", ",", "(", "\"No code pages defined in the OS/2 table\"", "\" ulCodePageRage1 and CodePageRage2 fields.\"", ")", "else", ":", "yield", "PASS", ",", "\"At least one code page is defined.\"" ]
Check code page character ranges
[ "Check", "code", "page", "character", "ranges" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/os2.py#L245-L255
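A sketch of reading the two bitfields directly (font path is hypothetical); getattr guards against old OS/2 table versions that lack them, mirroring the hasattr checks above:

from fontTools.ttLib import TTFont

os2 = TTFont("MyFont-Regular.ttf")["OS/2"]  # hypothetical path
print(getattr(os2, "ulCodePageRange1", None),
      getattr(os2, "ulCodePageRange2", None))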
9,848
googlefonts/fontbakery
Lib/fontbakery/profiles/glyf.py
com_google_fonts_check_glyf_unused_data
def com_google_fonts_check_glyf_unused_data(ttFont): """Is there any unused data at the end of the glyf table?""" try: expected_glyphs = len(ttFont.getGlyphOrder()) actual_glyphs = len(ttFont['glyf'].glyphs) diff = actual_glyphs - expected_glyphs if diff < 0: yield FAIL, Message("unreachable-data", ("Glyf table has unreachable data at the end of " " the table. Expected glyf table length {}" " (from loca table), got length" " {} (difference: {})").format( expected_glyphs, actual_glyphs, diff)) elif not diff: # negative diff -> exception below yield PASS, "There is no unused data at the end of the glyf table." else: raise Exception("Bug: fontTools did not raise an expected exception.") except fontTools.ttLib.TTLibError as error: if "not enough 'glyf' table data" in format(error): yield FAIL, Message("missing-data", ("Loca table references data beyond" " the end of the glyf table." " Expected glyf table length {}" " (from loca table).").format(expected_glyphs)) else: raise Exception("Bug: Unexpected fontTools exception.")
python
def com_google_fonts_check_glyf_unused_data(ttFont): """Is there any unused data at the end of the glyf table?""" try: expected_glyphs = len(ttFont.getGlyphOrder()) actual_glyphs = len(ttFont['glyf'].glyphs) diff = actual_glyphs - expected_glyphs if diff < 0: yield FAIL, Message("unreachable-data", ("Glyf table has unreachable data at the end of " " the table. Expected glyf table length {}" " (from loca table), got length" " {} (difference: {})").format( expected_glyphs, actual_glyphs, diff)) elif not diff: # negative diff -> exception below yield PASS, "There is no unused data at the end of the glyf table." else: raise Exception("Bug: fontTools did not raise an expected exception.") except fontTools.ttLib.TTLibError as error: if "not enough 'glyf' table data" in format(error): yield FAIL, Message("missing-data", ("Loca table references data beyond" " the end of the glyf table." " Expected glyf table length {}" " (from loca table).").format(expected_glyphs)) else: raise Exception("Bug: Unexpected fontTools exception.")
[ "def", "com_google_fonts_check_glyf_unused_data", "(", "ttFont", ")", ":", "try", ":", "expected_glyphs", "=", "len", "(", "ttFont", ".", "getGlyphOrder", "(", ")", ")", "actual_glyphs", "=", "len", "(", "ttFont", "[", "'glyf'", "]", ".", "glyphs", ")", "diff", "=", "actual_glyphs", "-", "expected_glyphs", "if", "diff", "<", "0", ":", "yield", "FAIL", ",", "Message", "(", "\"unreachable-data\"", ",", "(", "\"Glyf table has unreachable data at the end of \"", "\" the table. Expected glyf table length {}\"", "\" (from loca table), got length\"", "\" {} (difference: {})\"", ")", ".", "format", "(", "expected_glyphs", ",", "actual_glyphs", ",", "diff", ")", ")", "elif", "not", "diff", ":", "# negative diff -> exception below", "yield", "PASS", ",", "\"There is no unused data at the end of the glyf table.\"", "else", ":", "raise", "Exception", "(", "\"Bug: fontTools did not raise an expected exception.\"", ")", "except", "fontTools", ".", "ttLib", ".", "TTLibError", "as", "error", ":", "if", "\"not enough 'glyf' table data\"", "in", "format", "(", "error", ")", ":", "yield", "FAIL", ",", "Message", "(", "\"missing-data\"", ",", "(", "\"Loca table references data beyond\"", "\" the end of the glyf table.\"", "\" Expected glyf table length {}\"", "\" (from loca table).\"", ")", ".", "format", "(", "expected_glyphs", ")", ")", "else", ":", "raise", "Exception", "(", "\"Bug: Unexpected fontTools exception.\"", ")" ]
Is there any unused data at the end of the glyf table?
[ "Is", "there", "any", "unused", "data", "at", "the", "end", "of", "the", "glyf", "table?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/glyf.py#L13-L39
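The comparison itself is just two glyph counts; a sketch with a hypothetical path:

from fontTools.ttLib import TTFont

ttFont = TTFont("MyFont-Regular.ttf")  # hypothetical path
# glyph order (driven by loca/maxp) versus the glyphs actually parsed from glyf
print(len(ttFont.getGlyphOrder()), len(ttFont["glyf"].glyphs))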
9,849
googlefonts/fontbakery
Lib/fontbakery/profiles/glyf.py
com_google_fonts_check_points_out_of_bounds
def com_google_fonts_check_points_out_of_bounds(ttFont): """Check for points out of bounds.""" failed = False out_of_bounds = [] for glyphName in ttFont['glyf'].keys(): glyph = ttFont['glyf'][glyphName] coords = glyph.getCoordinates(ttFont['glyf'])[0] for x, y in coords: if x < glyph.xMin or x > glyph.xMax or \ y < glyph.yMin or y > glyph.yMax or \ abs(x) > 32766 or abs(y) > 32766: failed = True out_of_bounds.append((glyphName, x, y)) if failed: yield WARN, ("The following glyphs have coordinates which are" " out of bounds:\n{}\nThis happens a lot when points" " are not extremes, which is usually bad. However," " fixing this alert by adding points on extremes may" " do more harm than good, especially with italics," " calligraphic-script, handwriting, rounded and" " other fonts. So it is common to" " ignore this message".format(out_of_bounds)) else: yield PASS, "All glyph paths have coordinates within bounds!"
python
def com_google_fonts_check_points_out_of_bounds(ttFont): """Check for points out of bounds.""" failed = False out_of_bounds = [] for glyphName in ttFont['glyf'].keys(): glyph = ttFont['glyf'][glyphName] coords = glyph.getCoordinates(ttFont['glyf'])[0] for x, y in coords: if x < glyph.xMin or x > glyph.xMax or \ y < glyph.yMin or y > glyph.yMax or \ abs(x) > 32766 or abs(y) > 32766: failed = True out_of_bounds.append((glyphName, x, y)) if failed: yield WARN, ("The following glyphs have coordinates which are" " out of bounds:\n{}\nThis happens a lot when points" " are not extremes, which is usually bad. However," " fixing this alert by adding points on extremes may" " do more harm than good, especially with italics," " calligraphic-script, handwriting, rounded and" " other fonts. So it is common to" " ignore this message".format(out_of_bounds)) else: yield PASS, "All glyph paths have coordinates within bounds!"
[ "def", "com_google_fonts_check_points_out_of_bounds", "(", "ttFont", ")", ":", "failed", "=", "False", "out_of_bounds", "=", "[", "]", "for", "glyphName", "in", "ttFont", "[", "'glyf'", "]", ".", "keys", "(", ")", ":", "glyph", "=", "ttFont", "[", "'glyf'", "]", "[", "glyphName", "]", "coords", "=", "glyph", ".", "getCoordinates", "(", "ttFont", "[", "'glyf'", "]", ")", "[", "0", "]", "for", "x", ",", "y", "in", "coords", ":", "if", "x", "<", "glyph", ".", "xMin", "or", "x", ">", "glyph", ".", "xMax", "or", "y", "<", "glyph", ".", "yMin", "or", "y", ">", "glyph", ".", "yMax", "or", "abs", "(", "x", ")", ">", "32766", "or", "abs", "(", "y", ")", ">", "32766", ":", "failed", "=", "True", "out_of_bounds", ".", "append", "(", "(", "glyphName", ",", "x", ",", "y", ")", ")", "if", "failed", ":", "yield", "WARN", ",", "(", "\"The following glyphs have coordinates which are\"", "\" out of bounds:\\n{}\\nThis happens a lot when points\"", "\" are not extremes, which is usually bad. However,\"", "\" fixing this alert by adding points on extremes may\"", "\" do more harm than good, especially with italics,\"", "\" calligraphic-script, handwriting, rounded and\"", "\" other fonts. So it is common to\"", "\" ignore this message\"", ".", "format", "(", "out_of_bounds", ")", ")", "else", ":", "yield", "PASS", ",", "\"All glyph paths have coordinates within bounds!\"" ]
Check for points out of bounds.
[ "Check", "for", "points", "out", "of", "bounds", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/glyf.py#L51-L75
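For orientation, a minimal driver sketch for the check above. It assumes the generator shown in this record is available as a plain function in the current scope, and that MyFamily-Regular.ttf is a placeholder path to any TrueType font with a glyf table.

from fontTools.ttLib import TTFont

font = TTFont("MyFamily-Regular.ttf")  # placeholder path
# The check is a generator; each yielded item is a (status, message) pair.
for status, message in com_google_fonts_check_points_out_of_bounds(font):
    print(status, message)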
9,850
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_ufolint
def com_daltonmaag_check_ufolint(font): """Run ufolint on UFO source directory.""" import subprocess ufolint_cmd = ["ufolint", font] try: subprocess.check_output(ufolint_cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: yield FAIL, ("ufolint failed the UFO source. Output follows :" "\n\n{}\n").format(e.output.decode()) except OSError: yield ERROR, "ufolint is not available!" else: yield PASS, "ufolint passed the UFO source."
python
def com_daltonmaag_check_ufolint(font): """Run ufolint on UFO source directory.""" import subprocess ufolint_cmd = ["ufolint", font] try: subprocess.check_output(ufolint_cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: yield FAIL, ("ufolint failed the UFO source. Output follows :" "\n\n{}\n").format(e.output.decode()) except OSError: yield ERROR, "ufolint is not available!" else: yield PASS, "ufolint passed the UFO source."
[ "def", "com_daltonmaag_check_ufolint", "(", "font", ")", ":", "import", "subprocess", "ufolint_cmd", "=", "[", "\"ufolint\"", ",", "font", "]", "try", ":", "subprocess", ".", "check_output", "(", "ufolint_cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "yield", "FAIL", ",", "(", "\"ufolint failed the UFO source. Output follows :\"", "\"\\n\\n{}\\n\"", ")", ".", "format", "(", "e", ".", "output", ".", "decode", "(", ")", ")", "except", "OSError", ":", "yield", "ERROR", ",", "\"ufolint is not available!\"", "else", ":", "yield", "PASS", ",", "\"ufolint passed the UFO source.\"" ]
Run ufolint on UFO source directory.
[ "Run", "ufolint", "on", "UFO", "source", "directory", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L91-L104
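The error-handling pattern around subprocess.check_output generalizes to any external linter; a standalone stdlib-only sketch (the command and the MyFont.ufo path are placeholders):

import subprocess

def run_linter(cmd):
    # Returns (ok, output); CalledProcessError carries the tool's combined stdout/stderr.
    try:
        return True, subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode()
    except subprocess.CalledProcessError as e:
        return False, e.output.decode()
    except OSError:
        return False, "linter executable not found"

ok, log = run_linter(["ufolint", "MyFont.ufo"])  # placeholder target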
9,851
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_required_fields
def com_daltonmaag_check_required_fields(ufo_font): """Check that required fields are present in the UFO fontinfo. ufo2ft requires these info fields to compile a font binary: unitsPerEm, ascender, descender, xHeight, capHeight and familyName. """ recommended_fields = [] for field in [ "unitsPerEm", "ascender", "descender", "xHeight", "capHeight", "familyName" ]: if ufo_font.info.__dict__.get("_" + field) is None: recommended_fields.append(field) if recommended_fields: yield FAIL, f"Required field(s) missing: {recommended_fields}" else: yield PASS, "Required fields present."
python
def com_daltonmaag_check_required_fields(ufo_font): """Check that required fields are present in the UFO fontinfo. ufo2ft requires these info fields to compile a font binary: unitsPerEm, ascender, descender, xHeight, capHeight and familyName. """ recommended_fields = [] for field in [ "unitsPerEm", "ascender", "descender", "xHeight", "capHeight", "familyName" ]: if ufo_font.info.__dict__.get("_" + field) is None: recommended_fields.append(field) if recommended_fields: yield FAIL, f"Required field(s) missing: {recommended_fields}" else: yield PASS, "Required fields present."
[ "def", "com_daltonmaag_check_required_fields", "(", "ufo_font", ")", ":", "recommended_fields", "=", "[", "]", "for", "field", "in", "[", "\"unitsPerEm\"", ",", "\"ascender\"", ",", "\"descender\"", ",", "\"xHeight\"", ",", "\"capHeight\"", ",", "\"familyName\"", "]", ":", "if", "ufo_font", ".", "info", ".", "__dict__", ".", "get", "(", "\"_\"", "+", "field", ")", "is", "None", ":", "recommended_fields", ".", "append", "(", "field", ")", "if", "recommended_fields", ":", "yield", "FAIL", ",", "f\"Required field(s) missing: {recommended_fields}\"", "else", ":", "yield", "PASS", ",", "\"Required fields present.\"" ]
Check that required fields are present in the UFO fontinfo. ufo2ft requires these info fields to compile a font binary: unitsPerEm, ascender, descender, xHeight, capHeight and familyName.
[ "Check", "that", "required", "fields", "are", "present", "in", "the", "UFO", "fontinfo", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L111-L129
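The __dict__.get("_" + field) lookups above rely on the fontinfo object storing values in underscore-prefixed attributes; a toy stand-in (not the real fontinfo class) showing the same detection of unset fields:

class FakeInfo:
    def __init__(self):
        self._unitsPerEm = 1000   # set
        self._ascender = None     # present but unset
        # _descender is absent entirely

info = FakeInfo()
fields = ["unitsPerEm", "ascender", "descender"]
missing = [f for f in fields if info.__dict__.get("_" + f) is None]
print(missing)  # ['ascender', 'descender']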
9,852
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_recommended_fields
def com_daltonmaag_check_recommended_fields(ufo_font): """Check that recommended fields are present in the UFO fontinfo. This includes fields that should be in any production font. """ recommended_fields = [] for field in [ "postscriptUnderlineThickness", "postscriptUnderlinePosition", "versionMajor", "versionMinor", "styleName", "copyright", "openTypeOS2Panose" ]: if ufo_font.info.__dict__.get("_" + field) is None: recommended_fields.append(field) if recommended_fields: yield WARN, f"Recommended field(s) missing: {recommended_fields}" else: yield PASS, "Recommended fields present."
python
def com_daltonmaag_check_recommended_fields(ufo_font): """Check that recommended fields are present in the UFO fontinfo. This includes fields that should be in any production font. """ recommended_fields = [] for field in [ "postscriptUnderlineThickness", "postscriptUnderlinePosition", "versionMajor", "versionMinor", "styleName", "copyright", "openTypeOS2Panose" ]: if ufo_font.info.__dict__.get("_" + field) is None: recommended_fields.append(field) if recommended_fields: yield WARN, f"Recommended field(s) missing: {recommended_fields}" else: yield PASS, "Recommended fields present."
[ "def", "com_daltonmaag_check_recommended_fields", "(", "ufo_font", ")", ":", "recommended_fields", "=", "[", "]", "for", "field", "in", "[", "\"postscriptUnderlineThickness\"", ",", "\"postscriptUnderlinePosition\"", ",", "\"versionMajor\"", ",", "\"versionMinor\"", ",", "\"styleName\"", ",", "\"copyright\"", ",", "\"openTypeOS2Panose\"", "]", ":", "if", "ufo_font", ".", "info", ".", "__dict__", ".", "get", "(", "\"_\"", "+", "field", ")", "is", "None", ":", "recommended_fields", ".", "append", "(", "field", ")", "if", "recommended_fields", ":", "yield", "WARN", ",", "f\"Recommended field(s) missing: {recommended_fields}\"", "else", ":", "yield", "PASS", ",", "\"Recommended fields present.\"" ]
Check that recommended fields are present in the UFO fontinfo. This includes fields that should be in any production font.
[ "Check", "that", "recommended", "fields", "are", "present", "in", "the", "UFO", "fontinfo", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L136-L154
9,853
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_unnecessary_fields
def com_daltonmaag_check_unnecessary_fields(ufo_font): """Check that no unnecessary fields are present in the UFO fontinfo. ufo2ft will generate these. openTypeOS2UnicodeRanges and openTypeOS2CodePageRanges are exempted because it is useful to toggle a range when not _all_ the glyphs in that region are present. year is deprecated since UFO v2. """ unnecessary_fields = [] for field in [ "openTypeNameUniqueID", "openTypeNameVersion", "postscriptUniqueID", "year" ]: if ufo_font.info.__dict__.get("_" + field) is not None: unnecessary_fields.append(field) if unnecessary_fields: yield WARN, f"Unnecessary field(s) present: {unnecessary_fields}" else: yield PASS, "Unnecessary fields omitted."
python
def com_daltonmaag_check_unnecessary_fields(ufo_font): """Check that no unnecessary fields are present in the UFO fontinfo. ufo2ft will generate these. openTypeOS2UnicodeRanges and openTypeOS2CodePageRanges are exempted because it is useful to toggle a range when not _all_ the glyphs in that region are present. year is deprecated since UFO v2. """ unnecessary_fields = [] for field in [ "openTypeNameUniqueID", "openTypeNameVersion", "postscriptUniqueID", "year" ]: if ufo_font.info.__dict__.get("_" + field) is not None: unnecessary_fields.append(field) if unnecessary_fields: yield WARN, f"Unnecessary field(s) present: {unnecessary_fields}" else: yield PASS, "Unnecessary fields omitted."
[ "def", "com_daltonmaag_check_unnecessary_fields", "(", "ufo_font", ")", ":", "unnecessary_fields", "=", "[", "]", "for", "field", "in", "[", "\"openTypeNameUniqueID\"", ",", "\"openTypeNameVersion\"", ",", "\"postscriptUniqueID\"", ",", "\"year\"", "]", ":", "if", "ufo_font", ".", "info", ".", "__dict__", ".", "get", "(", "\"_\"", "+", "field", ")", "is", "not", "None", ":", "unnecessary_fields", ".", "append", "(", "field", ")", "if", "unnecessary_fields", ":", "yield", "WARN", ",", "f\"Unnecessary field(s) present: {unnecessary_fields}\"", "else", ":", "yield", "PASS", ",", "\"Unnecessary fields omitted.\"" ]
Check that no unnecessary fields are present in the UFO fontinfo. ufo2ft will generate these. openTypeOS2UnicodeRanges and openTypeOS2CodePageRanges are exempted because it is useful to toggle a range when not _all_ the glyphs in that region are present. year is deprecated since UFO v2.
[ "Check", "that", "no", "unnecessary", "fields", "are", "present", "in", "the", "UFO", "fontinfo", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L161-L184
9,854
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
UFOProfile.setup_argparse
def setup_argparse(self, argument_parser): """Set up custom arguments needed for this profile.""" import glob import logging import argparse def get_fonts(pattern): fonts_to_check = [] # use glob.glob to accept *.ufo for fullpath in glob.glob(pattern): fullpath_absolute = os.path.abspath(fullpath) if fullpath_absolute.lower().endswith(".ufo") and os.path.isdir( fullpath_absolute): fonts_to_check.append(fullpath) else: logging.warning( ("Skipping '{}' as it does not seem " "to be valid UFO source directory.").format(fullpath)) return fonts_to_check class MergeAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): target = [item for l in values for item in l] setattr(namespace, self.dest, target) argument_parser.add_argument( 'fonts', # To allow optional commands like "-L" to work without other input # files: nargs='*', type=get_fonts, action=MergeAction, help='font file path(s) to check.' ' Wildcards like *.ufo are allowed.') return ('fonts',)
python
def setup_argparse(self, argument_parser): """Set up custom arguments needed for this profile.""" import glob import logging import argparse def get_fonts(pattern): fonts_to_check = [] # use glob.glob to accept *.ufo for fullpath in glob.glob(pattern): fullpath_absolute = os.path.abspath(fullpath) if fullpath_absolute.lower().endswith(".ufo") and os.path.isdir( fullpath_absolute): fonts_to_check.append(fullpath) else: logging.warning( ("Skipping '{}' as it does not seem " "to be valid UFO source directory.").format(fullpath)) return fonts_to_check class MergeAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): target = [item for l in values for item in l] setattr(namespace, self.dest, target) argument_parser.add_argument( 'fonts', # To allow optional commands like "-L" to work without other input # files: nargs='*', type=get_fonts, action=MergeAction, help='font file path(s) to check.' ' Wildcards like *.ufo are allowed.') return ('fonts',)
[ "def", "setup_argparse", "(", "self", ",", "argument_parser", ")", ":", "import", "glob", "import", "logging", "import", "argparse", "def", "get_fonts", "(", "pattern", ")", ":", "fonts_to_check", "=", "[", "]", "# use glob.glob to accept *.ufo", "for", "fullpath", "in", "glob", ".", "glob", "(", "pattern", ")", ":", "fullpath_absolute", "=", "os", ".", "path", ".", "abspath", "(", "fullpath", ")", "if", "fullpath_absolute", ".", "lower", "(", ")", ".", "endswith", "(", "\".ufo\"", ")", "and", "os", ".", "path", ".", "isdir", "(", "fullpath_absolute", ")", ":", "fonts_to_check", ".", "append", "(", "fullpath", ")", "else", ":", "logging", ".", "warning", "(", "(", "\"Skipping '{}' as it does not seem \"", "\"to be valid UFO source directory.\"", ")", ".", "format", "(", "fullpath", ")", ")", "return", "fonts_to_check", "class", "MergeAction", "(", "argparse", ".", "Action", ")", ":", "def", "__call__", "(", "self", ",", "parser", ",", "namespace", ",", "values", ",", "option_string", "=", "None", ")", ":", "target", "=", "[", "item", "for", "l", "in", "values", "for", "item", "in", "l", "]", "setattr", "(", "namespace", ",", "self", ".", "dest", ",", "target", ")", "argument_parser", ".", "add_argument", "(", "'fonts'", ",", "# To allow optional commands like \"-L\" to work without other input", "# files:", "nargs", "=", "'*'", ",", "type", "=", "get_fonts", ",", "action", "=", "MergeAction", ",", "help", "=", "'font file path(s) to check.'", "' Wildcards like *.ufo are allowed.'", ")", "return", "(", "'fonts'", ",", ")" ]
Set up custom arguments needed for this profile.
[ "Set", "up", "custom", "arguments", "needed", "for", "this", "profile", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L11-L49
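The same glob-expanding type callable plus flattening action can be exercised standalone with nothing but the stdlib; the MyFont-*.ufo pattern is a placeholder:

import argparse
import glob
import os

def get_fonts(pattern):
    # Expand the glob and keep only paths that look like UFO source directories.
    return [p for p in glob.glob(pattern)
            if p.lower().endswith(".ufo") and os.path.isdir(os.path.abspath(p))]

class MergeAction(argparse.Action):
    # type=get_fonts turns every positional token into a list; flatten them here.
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, [item for sublist in values for item in sublist])

parser = argparse.ArgumentParser()
parser.add_argument("fonts", nargs="*", type=get_fonts, action=MergeAction)
args = parser.parse_args(["MyFont-*.ufo"])  # placeholder pattern
print(args.fonts)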
9,855
googlefonts/fontbakery
Lib/fontbakery/profiles/hmtx.py
com_google_fonts_check_whitespace_widths
def com_google_fonts_check_whitespace_widths(ttFont): """Whitespace and non-breaking space have the same width?""" from fontbakery.utils import get_glyph_name space_name = get_glyph_name(ttFont, 0x0020) nbsp_name = get_glyph_name(ttFont, 0x00A0) space_width = ttFont['hmtx'][space_name][0] nbsp_width = ttFont['hmtx'][nbsp_name][0] if space_width > 0 and space_width == nbsp_width: yield PASS, "Whitespace and non-breaking space have the same width." else: yield FAIL, ("Whitespace and non-breaking space have differing width:" " Whitespace ({}) is {} font units wide, non-breaking space" " ({}) is {} font units wide. Both should be positive and the" " same.").format(space_name, space_width, nbsp_name, nbsp_width)
python
def com_google_fonts_check_whitespace_widths(ttFont): """Whitespace and non-breaking space have the same width?""" from fontbakery.utils import get_glyph_name space_name = get_glyph_name(ttFont, 0x0020) nbsp_name = get_glyph_name(ttFont, 0x00A0) space_width = ttFont['hmtx'][space_name][0] nbsp_width = ttFont['hmtx'][nbsp_name][0] if space_width > 0 and space_width == nbsp_width: yield PASS, "Whitespace and non-breaking space have the same width." else: yield FAIL, ("Whitespace and non-breaking space have differing width:" " Whitespace ({}) is {} font units wide, non-breaking space" " ({}) is {} font units wide. Both should be positive and the" " same.").format(space_name, space_width, nbsp_name, nbsp_width)
[ "def", "com_google_fonts_check_whitespace_widths", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_glyph_name", "space_name", "=", "get_glyph_name", "(", "ttFont", ",", "0x0020", ")", "nbsp_name", "=", "get_glyph_name", "(", "ttFont", ",", "0x00A0", ")", "space_width", "=", "ttFont", "[", "'hmtx'", "]", "[", "space_name", "]", "[", "0", "]", "nbsp_width", "=", "ttFont", "[", "'hmtx'", "]", "[", "nbsp_name", "]", "[", "0", "]", "if", "space_width", ">", "0", "and", "space_width", "==", "nbsp_width", ":", "yield", "PASS", ",", "\"Whitespace and non-breaking space have the same width.\"", "else", ":", "yield", "FAIL", ",", "(", "\"Whitespace and non-breaking space have differing width:\"", "\" Whitespace ({}) is {} font units wide, non-breaking space\"", "\" ({}) is {} font units wide. Both should be positive and the\"", "\" same.\"", ")", ".", "format", "(", "space_name", ",", "space_width", ",", "nbsp_name", ",", "nbsp_width", ")" ]
Whitespace and non-breaking space have the same width?
[ "Whitespace", "and", "non", "-", "breaking", "space", "have", "the", "same", "width?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/hmtx.py#L13-L30
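The same hmtx lookup can be reproduced directly with fontTools; the font path is a placeholder, and resolving glyph names through getBestCmap() (instead of fontbakery's get_glyph_name helper) is a substitution made only to keep the sketch self-contained.

from fontTools.ttLib import TTFont

font = TTFont("MyFamily-Regular.ttf")       # placeholder path
cmap = font.getBestCmap()                   # {codepoint: glyph name}
space_name, nbsp_name = cmap[0x0020], cmap[0x00A0]
space_width = font["hmtx"][space_name][0]   # advance width in font units
nbsp_width = font["hmtx"][nbsp_name][0]
print(space_width > 0 and space_width == nbsp_width)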
9,856
Kuniwak/vint
vint/linting/policy_set.py
PolicySet.update_by_config
def update_by_config(self, config_dict): """ Update policies set by the config dictionary. Expect the policy_enabling_map structure to be (represented by YAML): - PolicyFoo: enabled: True - PolicyBar: enabled: False additional_field: 'is_ok' """ policy_enabling_map = self._get_enabling_map(config_dict) self.enabled_policies = [] for policy_name, is_policy_enabled in policy_enabling_map.items(): if not self._is_policy_exists(policy_name): self._warn_unexistent_policy(policy_name) continue if is_policy_enabled: enabled_policy = self._get_policy(policy_name) self.enabled_policies.append(enabled_policy)
python
def update_by_config(self, config_dict): """ Update policies set by the config dictionary. Expect the policy_enabling_map structure to be (represented by YAML): - PolicyFoo: enabled: True - PolicyBar: enabled: False additional_field: 'is_ok' """ policy_enabling_map = self._get_enabling_map(config_dict) self.enabled_policies = [] for policy_name, is_policy_enabled in policy_enabling_map.items(): if not self._is_policy_exists(policy_name): self._warn_unexistent_policy(policy_name) continue if is_policy_enabled: enabled_policy = self._get_policy(policy_name) self.enabled_policies.append(enabled_policy)
[ "def", "update_by_config", "(", "self", ",", "config_dict", ")", ":", "policy_enabling_map", "=", "self", ".", "_get_enabling_map", "(", "config_dict", ")", "self", ".", "enabled_policies", "=", "[", "]", "for", "policy_name", ",", "is_policy_enabled", "in", "policy_enabling_map", ".", "items", "(", ")", ":", "if", "not", "self", ".", "_is_policy_exists", "(", "policy_name", ")", ":", "self", ".", "_warn_unexistent_policy", "(", "policy_name", ")", "continue", "if", "is_policy_enabled", ":", "enabled_policy", "=", "self", ".", "_get_policy", "(", "policy_name", ")", "self", ".", "enabled_policies", ".", "append", "(", "enabled_policy", ")" ]
Update policies set by the config dictionary. Expect the policy_enabling_map structure to be (represented by YAML): - PolicyFoo: enabled: True - PolicyBar: enabled: False additional_field: 'is_ok'
[ "Update", "policies", "set", "by", "the", "config", "dictionary", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy_set.py#L48-L68
9,857
Kuniwak/vint
vint/linting/cli.py
_build_cmdargs
def _build_cmdargs(argv): """ Build command line arguments dict to use; - displaying usages - vint.linting.env.build_environment This method take an argv parameter to make function pure. """ parser = _build_arg_parser() namespace = parser.parse_args(argv[1:]) cmdargs = vars(namespace) return cmdargs
python
def _build_cmdargs(argv): """ Build command line arguments dict to use; - displaying usages - vint.linting.env.build_environment This method take an argv parameter to make function pure. """ parser = _build_arg_parser() namespace = parser.parse_args(argv[1:]) cmdargs = vars(namespace) return cmdargs
[ "def", "_build_cmdargs", "(", "argv", ")", ":", "parser", "=", "_build_arg_parser", "(", ")", "namespace", "=", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")", "cmdargs", "=", "vars", "(", "namespace", ")", "return", "cmdargs" ]
Build a command line arguments dict to use for: - displaying usages - vint.linting.env.build_environment This method takes an argv parameter to keep the function pure.
[ "Build", "command", "line", "arguments", "dict", "to", "use", ";", "-", "displaying", "usages", "-", "vint", ".", "linting", ".", "env", ".", "build_environment" ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/cli.py#L79-L90
9,858
Kuniwak/vint
vint/ast/parsing.py
Parser.parse
def parse(self, lint_target): # type: (AbstractLintTarget) -> Dict[str, Any] """ Parse vim script file and return the AST. """ decoder = Decoder(default_decoding_strategy) decoded = decoder.decode(lint_target.read()) decoded_and_lf_normalized = decoded.replace('\r\n', '\n') return self.parse_string(decoded_and_lf_normalized)
python
def parse(self, lint_target): # type: (AbstractLintTarget) -> Dict[str, Any] """ Parse vim script file and return the AST. """ decoder = Decoder(default_decoding_strategy) decoded = decoder.decode(lint_target.read()) decoded_and_lf_normalized = decoded.replace('\r\n', '\n') return self.parse_string(decoded_and_lf_normalized)
[ "def", "parse", "(", "self", ",", "lint_target", ")", ":", "# type: (AbstractLintTarget) -> Dict[str, Any]", "decoder", "=", "Decoder", "(", "default_decoding_strategy", ")", "decoded", "=", "decoder", ".", "decode", "(", "lint_target", ".", "read", "(", ")", ")", "decoded_and_lf_normalized", "=", "decoded", ".", "replace", "(", "'\\r\\n'", ",", "'\\n'", ")", "return", "self", ".", "parse_string", "(", "decoded_and_lf_normalized", ")" ]
Parse vim script file and return the AST.
[ "Parse", "vim", "script", "file", "and", "return", "the", "AST", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L19-L25
9,859
Kuniwak/vint
vint/ast/parsing.py
Parser.parse_string
def parse_string(self, string): # type: (str) -> Dict[str, Any] """ Parse vim script string and return the AST. """ lines = string.split('\n') reader = vimlparser.StringReader(lines) parser = vimlparser.VimLParser(self._enable_neovim) ast = parser.parse(reader) # TOPLEVEL does not have a pos, but we need pos for all nodes ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1} for plugin in self.plugins: plugin.process(ast) return ast
python
def parse_string(self, string): # type: (str) -> Dict[str, Any] """ Parse vim script string and return the AST. """ lines = string.split('\n') reader = vimlparser.StringReader(lines) parser = vimlparser.VimLParser(self._enable_neovim) ast = parser.parse(reader) # TOPLEVEL does not have a pos, but we need pos for all nodes ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1} for plugin in self.plugins: plugin.process(ast) return ast
[ "def", "parse_string", "(", "self", ",", "string", ")", ":", "# type: (str) -> Dict[str, Any]", "lines", "=", "string", ".", "split", "(", "'\\n'", ")", "reader", "=", "vimlparser", ".", "StringReader", "(", "lines", ")", "parser", "=", "vimlparser", ".", "VimLParser", "(", "self", ".", "_enable_neovim", ")", "ast", "=", "parser", ".", "parse", "(", "reader", ")", "# TOPLEVEL does not have a pos, but we need pos for all nodes", "ast", "[", "'pos'", "]", "=", "{", "'col'", ":", "1", ",", "'i'", ":", "0", ",", "'lnum'", ":", "1", "}", "for", "plugin", "in", "self", ".", "plugins", ":", "plugin", ".", "process", "(", "ast", ")", "return", "ast" ]
Parse vim script string and return the AST.
[ "Parse", "vim", "script", "string", "and", "return", "the", "AST", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L28-L42
9,860
Kuniwak/vint
vint/ast/parsing.py
Parser.parse_string_expr
def parse_string_expr(self, string_expr_node): """ Parse a string node content. """ string_expr_node_value = string_expr_node['value'] string_expr_str = string_expr_node_value[1:-1] # Care escaped string literals if string_expr_node_value[0] == "'": string_expr_str = string_expr_str.replace("''", "'") else: string_expr_str = string_expr_str.replace('\\"', '"') # NOTE: This is a hack to parse expr1. See :help expr1 raw_ast = self.parse_string('echo ' + string_expr_str) # We need the left node of ECHO node parsed_string_expr_nodes = raw_ast['body'][0]['list'] start_pos = string_expr_node['pos'] def adjust_position(node): pos = node['pos'] # Care 1-based index and the length of "echo ". pos['col'] += start_pos['col'] - 1 - 5 # Care the length of "echo ". pos['i'] += start_pos['i'] - 5 # Care 1-based index pos['lnum'] += start_pos['lnum'] - 1 for parsed_string_expr_node in parsed_string_expr_nodes: traverse(parsed_string_expr_node, on_enter=adjust_position) return parsed_string_expr_nodes
python
def parse_string_expr(self, string_expr_node): """ Parse a string node content. """ string_expr_node_value = string_expr_node['value'] string_expr_str = string_expr_node_value[1:-1] # Care escaped string literals if string_expr_node_value[0] == "'": string_expr_str = string_expr_str.replace("''", "'") else: string_expr_str = string_expr_str.replace('\\"', '"') # NOTE: This is a hack to parse expr1. See :help expr1 raw_ast = self.parse_string('echo ' + string_expr_str) # We need the left node of ECHO node parsed_string_expr_nodes = raw_ast['body'][0]['list'] start_pos = string_expr_node['pos'] def adjust_position(node): pos = node['pos'] # Care 1-based index and the length of "echo ". pos['col'] += start_pos['col'] - 1 - 5 # Care the length of "echo ". pos['i'] += start_pos['i'] - 5 # Care 1-based index pos['lnum'] += start_pos['lnum'] - 1 for parsed_string_expr_node in parsed_string_expr_nodes: traverse(parsed_string_expr_node, on_enter=adjust_position) return parsed_string_expr_nodes
[ "def", "parse_string_expr", "(", "self", ",", "string_expr_node", ")", ":", "string_expr_node_value", "=", "string_expr_node", "[", "'value'", "]", "string_expr_str", "=", "string_expr_node_value", "[", "1", ":", "-", "1", "]", "# Care escaped string literals", "if", "string_expr_node_value", "[", "0", "]", "==", "\"'\"", ":", "string_expr_str", "=", "string_expr_str", ".", "replace", "(", "\"''\"", ",", "\"'\"", ")", "else", ":", "string_expr_str", "=", "string_expr_str", ".", "replace", "(", "'\\\\\"'", ",", "'\"'", ")", "# NOTE: This is a hack to parse expr1. See :help expr1", "raw_ast", "=", "self", ".", "parse_string", "(", "'echo '", "+", "string_expr_str", ")", "# We need the left node of ECHO node", "parsed_string_expr_nodes", "=", "raw_ast", "[", "'body'", "]", "[", "0", "]", "[", "'list'", "]", "start_pos", "=", "string_expr_node", "[", "'pos'", "]", "def", "adjust_position", "(", "node", ")", ":", "pos", "=", "node", "[", "'pos'", "]", "# Care 1-based index and the length of \"echo \".", "pos", "[", "'col'", "]", "+=", "start_pos", "[", "'col'", "]", "-", "1", "-", "5", "# Care the length of \"echo \".", "pos", "[", "'i'", "]", "+=", "start_pos", "[", "'i'", "]", "-", "5", "# Care 1-based index", "pos", "[", "'lnum'", "]", "+=", "start_pos", "[", "'lnum'", "]", "-", "1", "for", "parsed_string_expr_node", "in", "parsed_string_expr_nodes", ":", "traverse", "(", "parsed_string_expr_node", ",", "on_enter", "=", "adjust_position", ")", "return", "parsed_string_expr_nodes" ]
Parse a string node content.
[ "Parse", "a", "string", "node", "content", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L87-L121
9,861
Kuniwak/vint
vint/ast/plugin/scope_plugin/scope_detector.py
is_builtin_variable
def is_builtin_variable(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin identifier. """ # Builtin variables are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if id_value.startswith('v:'): # It is an explicit builtin variable such as: "v:count", "v:char" # TODO: Add unknown builtin flag return True if is_builtin_function(id_node): return True if id_value in ['key', 'val']: # These builtin variable names are available on only map() or filter(). return is_on_lambda_string_context(id_node) # It is an implicit builtin variable such as: "count", "char" return id_value in BuiltinVariablesCanHaveImplicitScope
python
def is_builtin_variable(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin identifier. """ # Builtin variables are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if id_value.startswith('v:'): # It is an explicit builtin variable such as: "v:count", "v:char" # TODO: Add unknown builtin flag return True if is_builtin_function(id_node): return True if id_value in ['key', 'val']: # These builtin variable names are available on only map() or filter(). return is_on_lambda_string_context(id_node) # It is an implicit builtin variable such as: "count", "char" return id_value in BuiltinVariablesCanHaveImplicitScope
[ "def", "is_builtin_variable", "(", "id_node", ")", ":", "# type: (Dict[str, Any]) -> bool", "# Builtin variables are always IDENTIFIER.", "if", "NodeType", "(", "id_node", "[", "'type'", "]", ")", "is", "not", "NodeType", ".", "IDENTIFIER", ":", "return", "False", "id_value", "=", "id_node", "[", "'value'", "]", "if", "id_value", ".", "startswith", "(", "'v:'", ")", ":", "# It is an explicit builtin variable such as: \"v:count\", \"v:char\"", "# TODO: Add unknown builtin flag", "return", "True", "if", "is_builtin_function", "(", "id_node", ")", ":", "return", "True", "if", "id_value", "in", "[", "'key'", ",", "'val'", "]", ":", "# These builtin variable names are available on only map() or filter().", "return", "is_on_lambda_string_context", "(", "id_node", ")", "# It is an implicit builtin variable such as: \"count\", \"char\"", "return", "id_value", "in", "BuiltinVariablesCanHaveImplicitScope" ]
Whether the specified node is a builtin identifier.
[ "Whether", "the", "specified", "node", "is", "a", "builtin", "identifier", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_detector.py#L69-L90
9,862
Kuniwak/vint
vint/ast/plugin/scope_plugin/scope_detector.py
is_builtin_function
def is_builtin_function(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin function name identifier. The given identifier should be a child node of NodeType.CALL. """ # Builtin functions are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if not is_function_identifier(id_node): return False # There are difference between a function identifier and variable # identifier: # # let localtime = 0 # echo localtime " => 0 # echo localtime() " => 1420011455 return id_value in BuiltinFunctions
python
def is_builtin_function(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin function name identifier. The given identifier should be a child node of NodeType.CALL. """ # Builtin functions are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if not is_function_identifier(id_node): return False # There are difference between a function identifier and variable # identifier: # # let localtime = 0 # echo localtime " => 0 # echo localtime() " => 1420011455 return id_value in BuiltinFunctions
[ "def", "is_builtin_function", "(", "id_node", ")", ":", "# type: (Dict[str, Any]) -> bool", "# Builtin functions are always IDENTIFIER.", "if", "NodeType", "(", "id_node", "[", "'type'", "]", ")", "is", "not", "NodeType", ".", "IDENTIFIER", ":", "return", "False", "id_value", "=", "id_node", "[", "'value'", "]", "if", "not", "is_function_identifier", "(", "id_node", ")", ":", "return", "False", "# There are difference between a function identifier and variable", "# identifier:", "#", "# let localtime = 0", "# echo localtime \" => 0", "# echo localtime() \" => 1420011455", "return", "id_value", "in", "BuiltinFunctions" ]
Whether the specified node is a builtin function name identifier. The given identifier should be a child node of NodeType.CALL.
[ "Whether", "the", "specified", "node", "is", "a", "builtin", "function", "name", "identifier", ".", "The", "given", "identifier", "should", "be", "a", "child", "node", "of", "NodeType", ".", "CALL", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_detector.py#L93-L112
9,863
Kuniwak/vint
vint/ast/plugin/scope_plugin/identifier_classifier.py
IdentifierClassifier.attach_identifier_attributes
def attach_identifier_attributes(self, ast): # type: (Dict[str, Any]) -> Dict[str, Any] """ Attach 5 flags to the AST. - is dynamic: True if the identifier name can be determined by static analysis. - is member: True if the identifier is a member of a subscription/dot/slice node. - is declaring: True if the identifier is used to declare. - is autoload: True if the identifier is declared with autoload. - is function: True if the identifier is a function. Vim distinguish between function identifiers and variable identifiers. - is declarative parameter: True if the identifier is a declarative parameter. For example, the identifier "param" in Func(param) is a declarative parameter. - is on string expression context: True if the variable is on the string expression context. The string expression context is the string content on the 2nd argument of the map or filter function. - is lambda argument: True if the identifier is a lambda argument. """ redir_assignment_parser = RedirAssignmentParser() ast_with_parsed_redir = redir_assignment_parser.process(ast) map_and_filter_parser = CallNodeParser() ast_with_parse_map_and_filter_and_redir = \ map_and_filter_parser.process(ast_with_parsed_redir) traverse( ast_with_parse_map_and_filter_and_redir, on_enter=lambda node: self._enter_handler( node, is_on_lambda_str=None, is_on_lambda_body=None, ) ) return ast
python
def attach_identifier_attributes(self, ast): # type: (Dict[str, Any]) -> Dict[str, Any] """ Attach 5 flags to the AST. - is dynamic: True if the identifier name can be determined by static analysis. - is member: True if the identifier is a member of a subscription/dot/slice node. - is declaring: True if the identifier is used to declare. - is autoload: True if the identifier is declared with autoload. - is function: True if the identifier is a function. Vim distinguish between function identifiers and variable identifiers. - is declarative parameter: True if the identifier is a declarative parameter. For example, the identifier "param" in Func(param) is a declarative parameter. - is on string expression context: True if the variable is on the string expression context. The string expression context is the string content on the 2nd argument of the map or filter function. - is lambda argument: True if the identifier is a lambda argument. """ redir_assignment_parser = RedirAssignmentParser() ast_with_parsed_redir = redir_assignment_parser.process(ast) map_and_filter_parser = CallNodeParser() ast_with_parse_map_and_filter_and_redir = \ map_and_filter_parser.process(ast_with_parsed_redir) traverse( ast_with_parse_map_and_filter_and_redir, on_enter=lambda node: self._enter_handler( node, is_on_lambda_str=None, is_on_lambda_body=None, ) ) return ast
[ "def", "attach_identifier_attributes", "(", "self", ",", "ast", ")", ":", "# type: (Dict[str, Any]) -> Dict[str, Any]", "redir_assignment_parser", "=", "RedirAssignmentParser", "(", ")", "ast_with_parsed_redir", "=", "redir_assignment_parser", ".", "process", "(", "ast", ")", "map_and_filter_parser", "=", "CallNodeParser", "(", ")", "ast_with_parse_map_and_filter_and_redir", "=", "map_and_filter_parser", ".", "process", "(", "ast_with_parsed_redir", ")", "traverse", "(", "ast_with_parse_map_and_filter_and_redir", ",", "on_enter", "=", "lambda", "node", ":", "self", ".", "_enter_handler", "(", "node", ",", "is_on_lambda_str", "=", "None", ",", "is_on_lambda_body", "=", "None", ",", ")", ")", "return", "ast" ]
Attach 5 flags to the AST. - is dynamic: True if the identifier name can be determined by static analysis. - is member: True if the identifier is a member of a subscription/dot/slice node. - is declaring: True if the identifier is used to declare. - is autoload: True if the identifier is declared with autoload. - is function: True if the identifier is a function. Vim distinguish between function identifiers and variable identifiers. - is declarative parameter: True if the identifier is a declarative parameter. For example, the identifier "param" in Func(param) is a declarative parameter. - is on string expression context: True if the variable is on the string expression context. The string expression context is the string content on the 2nd argument of the map or filter function. - is lambda argument: True if the identifier is a lambda argument.
[ "Attach", "5", "flags", "to", "the", "AST", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/identifier_classifier.py#L118-L150
9,864
Kuniwak/vint
vint/linting/policy/abstract_policy.py
AbstractPolicy.create_violation_report
def create_violation_report(self, node, lint_context): """ Returns a violation report for the node. """ return { 'name': self.name, 'level': self.level, 'description': self.description, 'reference': self.reference, 'position': { 'line': node['pos']['lnum'], 'column': node['pos']['col'], 'path': lint_context['lint_target'].path, }, }
python
def create_violation_report(self, node, lint_context): """ Returns a violation report for the node. """ return { 'name': self.name, 'level': self.level, 'description': self.description, 'reference': self.reference, 'position': { 'line': node['pos']['lnum'], 'column': node['pos']['col'], 'path': lint_context['lint_target'].path, }, }
[ "def", "create_violation_report", "(", "self", ",", "node", ",", "lint_context", ")", ":", "return", "{", "'name'", ":", "self", ".", "name", ",", "'level'", ":", "self", ".", "level", ",", "'description'", ":", "self", ".", "description", ",", "'reference'", ":", "self", ".", "reference", ",", "'position'", ":", "{", "'line'", ":", "node", "[", "'pos'", "]", "[", "'lnum'", "]", ",", "'column'", ":", "node", "[", "'pos'", "]", "[", "'col'", "]", ",", "'path'", ":", "lint_context", "[", "'lint_target'", "]", ".", "path", ",", "}", ",", "}" ]
Returns a violation report for the node.
[ "Returns", "a", "violation", "report", "for", "the", "node", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L22-L34
9,865
Kuniwak/vint
vint/linting/policy/abstract_policy.py
AbstractPolicy.get_policy_config
def get_policy_config(self, lint_context): """ Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on config.policies.ProhibitSomethingEvil. """ policy_config = lint_context['config']\ .get('policies', {})\ .get(self.__class__.__name__, {}) return policy_config
python
def get_policy_config(self, lint_context): """ Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on config.policies.ProhibitSomethingEvil. """ policy_config = lint_context['config']\ .get('policies', {})\ .get(self.__class__.__name__, {}) return policy_config
[ "def", "get_policy_config", "(", "self", ",", "lint_context", ")", ":", "policy_config", "=", "lint_context", "[", "'config'", "]", ".", "get", "(", "'policies'", ",", "{", "}", ")", ".", "get", "(", "self", ".", "__class__", ".", "__name__", ",", "{", "}", ")", "return", "policy_config" ]
Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on config.policies.ProhibitSomethingEvil.
[ "Returns", "a", "config", "of", "the", "concrete", "policy", ".", "For", "example", "a", "config", "of", "ProhibitSomethingEvil", "is", "located", "on", "config", ".", "policies", ".", "ProhibitSomethingEvil", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L37-L46
9,866
Kuniwak/vint
vint/linting/policy/abstract_policy.py
AbstractPolicy.get_violation_if_found
def get_violation_if_found(self, node, lint_context): """ Returns a violation if the node is invalid. """ if self.is_valid(node, lint_context): return None return self.create_violation_report(node, lint_context)
python
def get_violation_if_found(self, node, lint_context): """ Returns a violation if the node is invalid. """ if self.is_valid(node, lint_context): return None return self.create_violation_report(node, lint_context)
[ "def", "get_violation_if_found", "(", "self", ",", "node", ",", "lint_context", ")", ":", "if", "self", ".", "is_valid", "(", "node", ",", "lint_context", ")", ":", "return", "None", "return", "self", ".", "create_violation_report", "(", "node", ",", "lint_context", ")" ]
Returns a violation if the node is invalid.
[ "Returns", "a", "violation", "if", "the", "node", "is", "invalid", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L49-L54
9,867
Kuniwak/vint
vint/bootstrap.py
import_all_policies
def import_all_policies(): """ Import all policies that were registered by vint.linting.policy_registry. Dynamic policy importing is comprised of the 3 steps 1. Try to import all policy modules (then we can't know what policies exist) 2. In policy module, register itself by using vint.linting.policy_registry 3. After all policies registered by itself, we can get policy classes """ pkg_name = _get_policy_package_name_for_test() pkg_path_list = pkg_name.split('.') pkg_path = str(Path(_get_vint_root(), *pkg_path_list).resolve()) for _, module_name, is_pkg in pkgutil.iter_modules([pkg_path]): if not is_pkg: module_fqn = pkg_name + '.' + module_name logging.debug('Loading the policy module: `{fqn}`'.format(fqn=module_fqn)) importlib.import_module(module_fqn)
python
def import_all_policies(): """ Import all policies that were registered by vint.linting.policy_registry. Dynamic policy importing is comprised of the 3 steps 1. Try to import all policy modules (then we can't know what policies exist) 2. In policy module, register itself by using vint.linting.policy_registry 3. After all policies registered by itself, we can get policy classes """ pkg_name = _get_policy_package_name_for_test() pkg_path_list = pkg_name.split('.') pkg_path = str(Path(_get_vint_root(), *pkg_path_list).resolve()) for _, module_name, is_pkg in pkgutil.iter_modules([pkg_path]): if not is_pkg: module_fqn = pkg_name + '.' + module_name logging.debug('Loading the policy module: `{fqn}`'.format(fqn=module_fqn)) importlib.import_module(module_fqn)
[ "def", "import_all_policies", "(", ")", ":", "pkg_name", "=", "_get_policy_package_name_for_test", "(", ")", "pkg_path_list", "=", "pkg_name", ".", "split", "(", "'.'", ")", "pkg_path", "=", "str", "(", "Path", "(", "_get_vint_root", "(", ")", ",", "*", "pkg_path_list", ")", ".", "resolve", "(", ")", ")", "for", "_", ",", "module_name", ",", "is_pkg", "in", "pkgutil", ".", "iter_modules", "(", "[", "pkg_path", "]", ")", ":", "if", "not", "is_pkg", ":", "module_fqn", "=", "pkg_name", "+", "'.'", "+", "module_name", "logging", ".", "debug", "(", "'Loading the policy module: `{fqn}`'", ".", "format", "(", "fqn", "=", "module_fqn", ")", ")", "importlib", ".", "import_module", "(", "module_fqn", ")" ]
Import all policies that were registered by vint.linting.policy_registry. Dynamic policy importing comprises 3 steps: 1. Try to import all policy modules (at this point we can't know what policies exist) 2. Each policy module registers itself by using vint.linting.policy_registry 3. After all policies have registered themselves, we can get the policy classes
[ "Import", "all", "policies", "that", "were", "registered", "by", "vint", ".", "linting", ".", "policy_registry", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/bootstrap.py#L24-L41
9,868
Kuniwak/vint
vint/ast/plugin/scope_plugin/scope_linker.py
ScopeLinker.process
def process(self, ast): # type: (Dict[str, Any]) -> None """ Build a scope tree and links between scopes and identifiers by the specified ast. You can access the built scope tree and the built links by .scope_tree and .link_registry. """ id_classifier = IdentifierClassifier() attached_ast = id_classifier.attach_identifier_attributes(ast) # We are already in script local scope. self._scope_tree_builder.enter_new_scope(ScopeVisibility.SCRIPT_LOCAL) traverse(attached_ast, on_enter=self._enter_handler, on_leave=self._leave_handler) self.scope_tree = self._scope_tree_builder.get_global_scope() self.link_registry = self._scope_tree_builder.link_registry
python
def process(self, ast): # type: (Dict[str, Any]) -> None """ Build a scope tree and links between scopes and identifiers by the specified ast. You can access the built scope tree and the built links by .scope_tree and .link_registry. """ id_classifier = IdentifierClassifier() attached_ast = id_classifier.attach_identifier_attributes(ast) # We are already in script local scope. self._scope_tree_builder.enter_new_scope(ScopeVisibility.SCRIPT_LOCAL) traverse(attached_ast, on_enter=self._enter_handler, on_leave=self._leave_handler) self.scope_tree = self._scope_tree_builder.get_global_scope() self.link_registry = self._scope_tree_builder.link_registry
[ "def", "process", "(", "self", ",", "ast", ")", ":", "# type: (Dict[str, Any]) -> None", "id_classifier", "=", "IdentifierClassifier", "(", ")", "attached_ast", "=", "id_classifier", ".", "attach_identifier_attributes", "(", "ast", ")", "# We are already in script local scope.", "self", ".", "_scope_tree_builder", ".", "enter_new_scope", "(", "ScopeVisibility", ".", "SCRIPT_LOCAL", ")", "traverse", "(", "attached_ast", ",", "on_enter", "=", "self", ".", "_enter_handler", ",", "on_leave", "=", "self", ".", "_leave_handler", ")", "self", ".", "scope_tree", "=", "self", ".", "_scope_tree_builder", ".", "get_global_scope", "(", ")", "self", ".", "link_registry", "=", "self", ".", "_scope_tree_builder", ".", "link_registry" ]
Build a scope tree and links between scopes and identifiers by the specified ast. You can access the built scope tree and the built links by .scope_tree and .link_registry.
[ "Build", "a", "scope", "tree", "and", "links", "between", "scopes", "and", "identifiers", "by", "the", "specified", "ast", ".", "You", "can", "access", "the", "built", "scope", "tree", "and", "the", "built", "links", "by", ".", "scope_tree", "and", ".", "link_registry", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_linker.py#L326-L342
9,869
mozilla/mozdownload
mozdownload/cli.py
cli
def cli(argv=None): """CLI entry point for mozdownload.""" kwargs = parse_arguments(argv or sys.argv[1:]) log_level = kwargs.pop('log_level') logging.basicConfig(format='%(levelname)s | %(message)s', level=log_level) logger = logging.getLogger(__name__) # Configure logging levels for sub modules. Set to ERROR by default. sub_log_level = logging.ERROR if log_level == logging.getLevelName(logging.DEBUG): sub_log_level = logging.DEBUG logging.getLogger('redo').setLevel(sub_log_level) logging.getLogger('requests').setLevel(sub_log_level) logging.getLogger('thclient').setLevel(sub_log_level) try: scraper_type = kwargs.pop('scraper_type') # If a URL has been specified use the direct scraper if kwargs.get('url'): scraper_type = 'direct' build = factory.FactoryScraper(scraper_type, **kwargs) if kwargs.get('print_url'): logger.info(build.url) else: build.download() except KeyboardInterrupt: logger.error('Download interrupted by the user')
python
def cli(argv=None): """CLI entry point for mozdownload.""" kwargs = parse_arguments(argv or sys.argv[1:]) log_level = kwargs.pop('log_level') logging.basicConfig(format='%(levelname)s | %(message)s', level=log_level) logger = logging.getLogger(__name__) # Configure logging levels for sub modules. Set to ERROR by default. sub_log_level = logging.ERROR if log_level == logging.getLevelName(logging.DEBUG): sub_log_level = logging.DEBUG logging.getLogger('redo').setLevel(sub_log_level) logging.getLogger('requests').setLevel(sub_log_level) logging.getLogger('thclient').setLevel(sub_log_level) try: scraper_type = kwargs.pop('scraper_type') # If a URL has been specified use the direct scraper if kwargs.get('url'): scraper_type = 'direct' build = factory.FactoryScraper(scraper_type, **kwargs) if kwargs.get('print_url'): logger.info(build.url) else: build.download() except KeyboardInterrupt: logger.error('Download interrupted by the user')
[ "def", "cli", "(", "argv", "=", "None", ")", ":", "kwargs", "=", "parse_arguments", "(", "argv", "or", "sys", ".", "argv", "[", "1", ":", "]", ")", "log_level", "=", "kwargs", ".", "pop", "(", "'log_level'", ")", "logging", ".", "basicConfig", "(", "format", "=", "'%(levelname)s | %(message)s'", ",", "level", "=", "log_level", ")", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# Configure logging levels for sub modules. Set to ERROR by default.", "sub_log_level", "=", "logging", ".", "ERROR", "if", "log_level", "==", "logging", ".", "getLevelName", "(", "logging", ".", "DEBUG", ")", ":", "sub_log_level", "=", "logging", ".", "DEBUG", "logging", ".", "getLogger", "(", "'redo'", ")", ".", "setLevel", "(", "sub_log_level", ")", "logging", ".", "getLogger", "(", "'requests'", ")", ".", "setLevel", "(", "sub_log_level", ")", "logging", ".", "getLogger", "(", "'thclient'", ")", ".", "setLevel", "(", "sub_log_level", ")", "try", ":", "scraper_type", "=", "kwargs", ".", "pop", "(", "'scraper_type'", ")", "# If a URL has been specified use the direct scraper", "if", "kwargs", ".", "get", "(", "'url'", ")", ":", "scraper_type", "=", "'direct'", "build", "=", "factory", ".", "FactoryScraper", "(", "scraper_type", ",", "*", "*", "kwargs", ")", "if", "kwargs", ".", "get", "(", "'print_url'", ")", ":", "logger", ".", "info", "(", "build", ".", "url", ")", "else", ":", "build", ".", "download", "(", ")", "except", "KeyboardInterrupt", ":", "logger", ".", "error", "(", "'Download interrupted by the user'", ")" ]
CLI entry point for mozdownload.
[ "CLI", "entry", "point", "for", "mozdownload", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/cli.py#L146-L175
9,870
mozilla/mozdownload
mozdownload/treeherder.py
Treeherder.query_builds_by_revision
def query_builds_by_revision(self, revision, job_type_name='Build', debug_build=False): """Retrieve build folders for a given revision with the help of Treeherder. :param revision: Revision of the build to download. :param job_type_name: Name of the job to look for. For builds it should be 'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`. :param debug_build: Download a debug build. """ builds = set() try: self.logger.info('Querying {url} for list of builds for revision: {revision}'.format( url=self.client.server_url, revision=revision)) # Retrieve the option hash to filter for type of build (opt, and debug for now) option_hash = None for key, values in self.client.get_option_collection_hash().iteritems(): for value in values: if value['name'] == ('debug' if debug_build else 'opt'): option_hash = key break if option_hash: break resultsets = self.client.get_pushes(self.branch, revision=revision) # Set filters to speed-up querying jobs kwargs = { 'option_collection_hash': option_hash, 'job_type_name': job_type_name, 'exclusion_profile': False, } kwargs.update(self.get_treeherder_platform(self.platform)) for resultset in resultsets: kwargs.update({'result_set_id': resultset['id']}) jobs = self.client.get_jobs(self.branch, **kwargs) for job in jobs: log_urls = self.client.get_job_log_url(self.branch, job_id=job['id']) for log_url in log_urls: if self.application in log_url['url']: self.logger.debug('Found build folder: {}'.format(log_url['url'])) builds.update([log_url['url']]) except Exception: self.logger.exception('Failure occurred when querying Treeherder for builds') return list(builds)
python
def query_builds_by_revision(self, revision, job_type_name='Build', debug_build=False): """Retrieve build folders for a given revision with the help of Treeherder. :param revision: Revision of the build to download. :param job_type_name: Name of the job to look for. For builds it should be 'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`. :param debug_build: Download a debug build. """ builds = set() try: self.logger.info('Querying {url} for list of builds for revision: {revision}'.format( url=self.client.server_url, revision=revision)) # Retrieve the option hash to filter for type of build (opt, and debug for now) option_hash = None for key, values in self.client.get_option_collection_hash().iteritems(): for value in values: if value['name'] == ('debug' if debug_build else 'opt'): option_hash = key break if option_hash: break resultsets = self.client.get_pushes(self.branch, revision=revision) # Set filters to speed-up querying jobs kwargs = { 'option_collection_hash': option_hash, 'job_type_name': job_type_name, 'exclusion_profile': False, } kwargs.update(self.get_treeherder_platform(self.platform)) for resultset in resultsets: kwargs.update({'result_set_id': resultset['id']}) jobs = self.client.get_jobs(self.branch, **kwargs) for job in jobs: log_urls = self.client.get_job_log_url(self.branch, job_id=job['id']) for log_url in log_urls: if self.application in log_url['url']: self.logger.debug('Found build folder: {}'.format(log_url['url'])) builds.update([log_url['url']]) except Exception: self.logger.exception('Failure occurred when querying Treeherder for builds') return list(builds)
[ "def", "query_builds_by_revision", "(", "self", ",", "revision", ",", "job_type_name", "=", "'Build'", ",", "debug_build", "=", "False", ")", ":", "builds", "=", "set", "(", ")", "try", ":", "self", ".", "logger", ".", "info", "(", "'Querying {url} for list of builds for revision: {revision}'", ".", "format", "(", "url", "=", "self", ".", "client", ".", "server_url", ",", "revision", "=", "revision", ")", ")", "# Retrieve the option hash to filter for type of build (opt, and debug for now)", "option_hash", "=", "None", "for", "key", ",", "values", "in", "self", ".", "client", ".", "get_option_collection_hash", "(", ")", ".", "iteritems", "(", ")", ":", "for", "value", "in", "values", ":", "if", "value", "[", "'name'", "]", "==", "(", "'debug'", "if", "debug_build", "else", "'opt'", ")", ":", "option_hash", "=", "key", "break", "if", "option_hash", ":", "break", "resultsets", "=", "self", ".", "client", ".", "get_pushes", "(", "self", ".", "branch", ",", "revision", "=", "revision", ")", "# Set filters to speed-up querying jobs", "kwargs", "=", "{", "'option_collection_hash'", ":", "option_hash", ",", "'job_type_name'", ":", "job_type_name", ",", "'exclusion_profile'", ":", "False", ",", "}", "kwargs", ".", "update", "(", "self", ".", "get_treeherder_platform", "(", "self", ".", "platform", ")", ")", "for", "resultset", "in", "resultsets", ":", "kwargs", ".", "update", "(", "{", "'result_set_id'", ":", "resultset", "[", "'id'", "]", "}", ")", "jobs", "=", "self", ".", "client", ".", "get_jobs", "(", "self", ".", "branch", ",", "*", "*", "kwargs", ")", "for", "job", "in", "jobs", ":", "log_urls", "=", "self", ".", "client", ".", "get_job_log_url", "(", "self", ".", "branch", ",", "job_id", "=", "job", "[", "'id'", "]", ")", "for", "log_url", "in", "log_urls", ":", "if", "self", ".", "application", "in", "log_url", "[", "'url'", "]", ":", "self", ".", "logger", ".", "debug", "(", "'Found build folder: {}'", ".", "format", "(", "log_url", "[", "'url'", "]", ")", ")", "builds", ".", "update", "(", "[", "log_url", "[", "'url'", "]", "]", ")", "except", "Exception", ":", "self", ".", "logger", ".", "exception", "(", "'Failure occurred when querying Treeherder for builds'", ")", "return", "list", "(", "builds", ")" ]
Retrieve build folders for a given revision with the help of Treeherder. :param revision: Revision of the build to download. :param job_type_name: Name of the job to look for. For builds it should be 'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`. :param debug_build: Download a debug build.
[ "Retrieve", "build", "folders", "for", "a", "given", "revision", "with", "the", "help", "of", "Treeherder", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/treeherder.py#L60-L107
9,871
mozilla/mozdownload
mozdownload/utils.py
urljoin
def urljoin(*fragments): """Concatenate multi part strings into urls.""" # Strip possible already existent final slashes of fragments except for the last one parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]] parts.append(fragments[-1]) return '/'.join(parts)
python
def urljoin(*fragments): """Concatenate multi part strings into urls.""" # Strip possible already existent final slashes of fragments except for the last one parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]] parts.append(fragments[-1]) return '/'.join(parts)
[ "def", "urljoin", "(", "*", "fragments", ")", ":", "# Strip possible already existent final slashes of fragments except for the last one", "parts", "=", "[", "fragment", ".", "rstrip", "(", "'/'", ")", "for", "fragment", "in", "fragments", "[", ":", "len", "(", "fragments", ")", "-", "1", "]", "]", "parts", ".", "append", "(", "fragments", "[", "-", "1", "]", ")", "return", "'/'", ".", "join", "(", "parts", ")" ]
Concatenate multi part strings into urls.
[ "Concatenate", "multi", "part", "strings", "into", "urls", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/utils.py#L12-L18
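A quick usage sketch of the urljoin helper in the record above; the function is re-declared inline so the snippet runs without mozdownload installed, and the example URL fragments are made up for illustration.

# Minimal, self-contained sketch of the urljoin helper shown above
# (re-declared here so the example runs without mozdownload installed).
def urljoin(*fragments):
    # Trailing slashes are stripped from every fragment except the last one.
    parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]]
    parts.append(fragments[-1])
    return '/'.join(parts)

print(urljoin('https://archive.mozilla.org/', 'pub/', 'firefox/nightly/'))
# -> https://archive.mozilla.org/pub/firefox/nightly/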
9,872
mozilla/mozdownload
mozdownload/utils.py
create_md5
def create_md5(path): """Create the md5 hash of a file using the hashlib library.""" m = hashlib.md5() # rb necessary to run correctly in windows. with open(path, "rb") as f: while True: data = f.read(8192) if not data: break m.update(data) return m.hexdigest()
python
def create_md5(path): """Create the md5 hash of a file using the hashlib library.""" m = hashlib.md5() # rb necessary to run correctly in windows. with open(path, "rb") as f: while True: data = f.read(8192) if not data: break m.update(data) return m.hexdigest()
[ "def", "create_md5", "(", "path", ")", ":", "m", "=", "hashlib", ".", "md5", "(", ")", "# rb necessary to run correctly in windows.", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "while", "True", ":", "data", "=", "f", ".", "read", "(", "8192", ")", "if", "not", "data", ":", "break", "m", ".", "update", "(", "data", ")", "return", "m", ".", "hexdigest", "(", ")" ]
Create the md5 hash of a file using the hashlib library.
[ "Create", "the", "md5", "hash", "of", "a", "file", "using", "the", "hashlib", "library", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/utils.py#L21-L32
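The record above hashes the file in fixed-size chunks so large archives never have to fit in memory. A self-contained sketch of the same pattern follows; the helper name md5_of_file and the temporary test file are ours, not part of the library.

# Chunked MD5 computation, mirroring the create_md5 record above.
import hashlib
import tempfile

def md5_of_file(path, chunk_size=8192):
    m = hashlib.md5()
    with open(path, 'rb') as f:  # binary mode so Windows does not mangle line endings
        while True:
            data = f.read(chunk_size)
            if not data:
                break
            m.update(data)
    return m.hexdigest()

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')
print(md5_of_file(tmp.name))  # 5eb63bbbe01eeed093cb22bb8f5acdc3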
9,873
mozilla/mozdownload
mozdownload/parser.py
DirectoryParser.filter
def filter(self, filter): """Filter entries by calling function or applying regex.""" if hasattr(filter, '__call__'): return [entry for entry in self.entries if filter(entry)] else: pattern = re.compile(filter, re.IGNORECASE) return [entry for entry in self.entries if pattern.match(entry)]
python
def filter(self, filter): """Filter entries by calling function or applying regex.""" if hasattr(filter, '__call__'): return [entry for entry in self.entries if filter(entry)] else: pattern = re.compile(filter, re.IGNORECASE) return [entry for entry in self.entries if pattern.match(entry)]
[ "def", "filter", "(", "self", ",", "filter", ")", ":", "if", "hasattr", "(", "filter", ",", "'__call__'", ")", ":", "return", "[", "entry", "for", "entry", "in", "self", ".", "entries", "if", "filter", "(", "entry", ")", "]", "else", ":", "pattern", "=", "re", ".", "compile", "(", "filter", ",", "re", ".", "IGNORECASE", ")", "return", "[", "entry", "for", "entry", "in", "self", ".", "entries", "if", "pattern", ".", "match", "(", "entry", ")", "]" ]
Filter entries by calling function or applying regex.
[ "Filter", "entries", "by", "calling", "function", "or", "applying", "regex", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L53-L59
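A stand-alone illustration of the dual behaviour documented above: the filter argument may be either a callable predicate or a regular-expression string (matched case-insensitively). The function name filter_entries and the sample entries are invented for this demo.

import re

def filter_entries(entries, flt):
    if callable(flt):
        return [entry for entry in entries if flt(entry)]
    pattern = re.compile(flt, re.IGNORECASE)
    return [entry for entry in entries if pattern.match(entry)]

entries = ['2015-01-01-03-02-04-mozilla-central', 'latest-mozilla-central', 'README']
print(filter_entries(entries, r'^\d{4}-\d{2}-\d{2}'))    # date-named folders only
print(filter_entries(entries, lambda e: 'latest' in e))  # callable predicate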
9,874
mozilla/mozdownload
mozdownload/parser.py
DirectoryParser.handle_starttag
def handle_starttag(self, tag, attrs): """Callback for when a tag gets opened.""" if not tag == 'a': return for attr in attrs: if attr[0] == 'href': # Links look like: /pub/firefox/nightly/2015/ # We have to trim the fragment down to the last item. Also to ensure we # always get it, we remove a possible final slash first url = urllib.unquote(attr[1]) self.active_url = url.rstrip('/').split('/')[-1] return
python
def handle_starttag(self, tag, attrs): """Callback for when a tag gets opened.""" if not tag == 'a': return for attr in attrs: if attr[0] == 'href': # Links look like: /pub/firefox/nightly/2015/ # We have to trim the fragment down to the last item. Also to ensure we # always get it, we remove a possible final slash first url = urllib.unquote(attr[1]) self.active_url = url.rstrip('/').split('/')[-1] return
[ "def", "handle_starttag", "(", "self", ",", "tag", ",", "attrs", ")", ":", "if", "not", "tag", "==", "'a'", ":", "return", "for", "attr", "in", "attrs", ":", "if", "attr", "[", "0", "]", "==", "'href'", ":", "# Links look like: /pub/firefox/nightly/2015/", "# We have to trim the fragment down to the last item. Also to ensure we", "# always get it, we remove a possible final slash first", "url", "=", "urllib", ".", "unquote", "(", "attr", "[", "1", "]", ")", "self", ".", "active_url", "=", "url", ".", "rstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "return" ]
Callback for when a tag gets opened.
[ "Callback", "for", "when", "a", "tag", "gets", "opened", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L61-L74
9,875
mozilla/mozdownload
mozdownload/parser.py
DirectoryParser.handle_data
def handle_data(self, data): """Callback when the data of a tag has been collected.""" # Only process the data when we are in an active a tag and have an URL. if not self.active_url: return # The visible text can have a final slash so strip it off if data.strip('/') == self.active_url: self.entries.append(self.active_url)
python
def handle_data(self, data): """Callback when the data of a tag has been collected.""" # Only process the data when we are in an active a tag and have an URL. if not self.active_url: return # The visible text can have a final slash so strip it off if data.strip('/') == self.active_url: self.entries.append(self.active_url)
[ "def", "handle_data", "(", "self", ",", "data", ")", ":", "# Only process the data when we are in an active a tag and have an URL.", "if", "not", "self", ".", "active_url", ":", "return", "# The visible text can have a final slash so strip it off", "if", "data", ".", "strip", "(", "'/'", ")", "==", "self", ".", "active_url", ":", "self", ".", "entries", ".", "append", "(", "self", ".", "active_url", ")" ]
Callback when the data of a tag has been collected.
[ "Callback", "when", "the", "data", "of", "a", "tag", "has", "been", "collected", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L81-L89
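The two parser callbacks above pair a link's href with its visible text and keep an entry only when they agree. Below is a hypothetical, trimmed-down port to Python 3 (html.parser, urllib.parse) showing just that matching logic; the real DirectoryParser also fetches the remote listing and does more bookkeeping.

from html.parser import HTMLParser
from urllib.parse import unquote

class ListingParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.entries = []
        self.active_url = None

    def handle_starttag(self, tag, attrs):
        if tag != 'a':
            return
        for name, value in attrs:
            if name == 'href':
                # Keep only the last path component of the link target.
                self.active_url = unquote(value).rstrip('/').split('/')[-1]
                return

    def handle_data(self, data):
        # The visible text may carry a trailing slash; strip it before comparing.
        if self.active_url and data.strip('/') == self.active_url:
            self.entries.append(self.active_url)

parser = ListingParser()
parser.feed('<a href="/pub/firefox/nightly/2015/">2015/</a>')
print(parser.entries)  # ['2015']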
9,876
mozilla/mozdownload
mozdownload/timezones.py
PacificTimezone.dst
def dst(self, dt): """Calculate delta for daylight saving.""" # Daylight saving starts on the second Sunday of March at 2AM standard dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \ + timedelta(hours=2) # Daylight saving ends on the first Sunday of November at 2AM standard dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2) if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date: return timedelta(hours=1) else: return timedelta(0)
python
def dst(self, dt): """Calculate delta for daylight saving.""" # Daylight saving starts on the second Sunday of March at 2AM standard dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \ + timedelta(hours=2) # Daylight saving ends on the first Sunday of November at 2AM standard dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2) if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date: return timedelta(hours=1) else: return timedelta(0)
[ "def", "dst", "(", "self", ",", "dt", ")", ":", "# Daylight saving starts on the second Sunday of March at 2AM standard", "dst_start_date", "=", "self", ".", "first_sunday", "(", "dt", ".", "year", ",", "3", ")", "+", "timedelta", "(", "days", "=", "7", ")", "+", "timedelta", "(", "hours", "=", "2", ")", "# Daylight saving ends on the first Sunday of November at 2AM standard", "dst_end_date", "=", "self", ".", "first_sunday", "(", "dt", ".", "year", ",", "11", ")", "+", "timedelta", "(", "hours", "=", "2", ")", "if", "dst_start_date", "<=", "dt", ".", "replace", "(", "tzinfo", "=", "None", ")", "<", "dst_end_date", ":", "return", "timedelta", "(", "hours", "=", "1", ")", "else", ":", "return", "timedelta", "(", "0", ")" ]
Calculate delta for daylight saving.
[ "Calculate", "delta", "for", "daylight", "saving", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/timezones.py#L23-L34
9,877
mozilla/mozdownload
mozdownload/timezones.py
PacificTimezone.first_sunday
def first_sunday(self, year, month): """Get the first sunday of a month.""" date = datetime(year, month, 1, 0) days_until_sunday = 6 - date.weekday() return date + timedelta(days=days_until_sunday)
python
def first_sunday(self, year, month): """Get the first sunday of a month.""" date = datetime(year, month, 1, 0) days_until_sunday = 6 - date.weekday() return date + timedelta(days=days_until_sunday)
[ "def", "first_sunday", "(", "self", ",", "year", ",", "month", ")", ":", "date", "=", "datetime", "(", "year", ",", "month", ",", "1", ",", "0", ")", "days_until_sunday", "=", "6", "-", "date", ".", "weekday", "(", ")", "return", "date", "+", "timedelta", "(", "days", "=", "days_until_sunday", ")" ]
Get the first sunday of a month.
[ "Get", "the", "first", "sunday", "of", "a", "month", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/timezones.py#L36-L41
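A worked example of the two timezone helpers above: compute the first Sunday of a month and derive the US daylight-saving window that PacificTimezone.dst checks against (second Sunday of March to first Sunday of November, both at 02:00 standard time; values shown for 2015).

from datetime import datetime, timedelta

def first_sunday(year, month):
    date = datetime(year, month, 1)
    return date + timedelta(days=6 - date.weekday())

year = 2015
dst_start = first_sunday(year, 3) + timedelta(days=7, hours=2)
dst_end = first_sunday(year, 11) + timedelta(hours=2)
print(dst_start)  # 2015-03-08 02:00:00
print(dst_end)    # 2015-11-01 02:00:00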
9,878
mozilla/mozdownload
mozdownload/scraper.py
Scraper.binary
def binary(self): """Return the name of the build.""" def _get_binary(): # Retrieve all entries from the remote virtual folder parser = self._create_directory_parser(self.path) if not parser.entries: raise errors.NotFoundError('No entries found', self.path) # Download the first matched directory entry pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: self._binary = pattern.match(entry).group() break except Exception: # No match, continue with next entry continue else: raise errors.NotFoundError("Binary not found in folder", self.path) self._retry_check_404(_get_binary) return self._binary
python
def binary(self): """Return the name of the build.""" def _get_binary(): # Retrieve all entries from the remote virtual folder parser = self._create_directory_parser(self.path) if not parser.entries: raise errors.NotFoundError('No entries found', self.path) # Download the first matched directory entry pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: self._binary = pattern.match(entry).group() break except Exception: # No match, continue with next entry continue else: raise errors.NotFoundError("Binary not found in folder", self.path) self._retry_check_404(_get_binary) return self._binary
[ "def", "binary", "(", "self", ")", ":", "def", "_get_binary", "(", ")", ":", "# Retrieve all entries from the remote virtual folder", "parser", "=", "self", ".", "_create_directory_parser", "(", "self", ".", "path", ")", "if", "not", "parser", ".", "entries", ":", "raise", "errors", ".", "NotFoundError", "(", "'No entries found'", ",", "self", ".", "path", ")", "# Download the first matched directory entry", "pattern", "=", "re", ".", "compile", "(", "self", ".", "binary_regex", ",", "re", ".", "IGNORECASE", ")", "for", "entry", "in", "parser", ".", "entries", ":", "try", ":", "self", ".", "_binary", "=", "pattern", ".", "match", "(", "entry", ")", ".", "group", "(", ")", "break", "except", "Exception", ":", "# No match, continue with next entry", "continue", "else", ":", "raise", "errors", ".", "NotFoundError", "(", "\"Binary not found in folder\"", ",", "self", ".", "path", ")", "self", ".", "_retry_check_404", "(", "_get_binary", ")", "return", "self", ".", "_binary" ]
Return the name of the build.
[ "Return", "the", "name", "of", "the", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L165-L188
9,879
mozilla/mozdownload
mozdownload/scraper.py
Scraper.url
def url(self): """Return the URL of the build.""" return urllib.quote(urljoin(self.path, self.binary), safe='%/:=&?~#+!$,;\'@()*[]|')
python
def url(self): """Return the URL of the build.""" return urllib.quote(urljoin(self.path, self.binary), safe='%/:=&?~#+!$,;\'@()*[]|')
[ "def", "url", "(", "self", ")", ":", "return", "urllib", ".", "quote", "(", "urljoin", "(", "self", ".", "path", ",", "self", ".", "binary", ")", ",", "safe", "=", "'%/:=&?~#+!$,;\\'@()*[]|'", ")" ]
Return the URL of the build.
[ "Return", "the", "URL", "of", "the", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L196-L199
9,880
mozilla/mozdownload
mozdownload/scraper.py
Scraper.filename
def filename(self): """Return the local filename of the build.""" if self._filename is None: if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise create it from the build details target_file = os.path.join(self.destination, self.build_filename(self.binary)) self._filename = os.path.abspath(target_file) return self._filename
python
def filename(self): """Return the local filename of the build.""" if self._filename is None: if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise create it from the build details target_file = os.path.join(self.destination, self.build_filename(self.binary)) self._filename = os.path.abspath(target_file) return self._filename
[ "def", "filename", "(", "self", ")", ":", "if", "self", ".", "_filename", "is", "None", ":", "if", "os", ".", "path", ".", "splitext", "(", "self", ".", "destination", ")", "[", "1", "]", ":", "# If the filename has been given make use of it", "target_file", "=", "self", ".", "destination", "else", ":", "# Otherwise create it from the build details", "target_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "destination", ",", "self", ".", "build_filename", "(", "self", ".", "binary", ")", ")", "self", ".", "_filename", "=", "os", ".", "path", ".", "abspath", "(", "target_file", ")", "return", "self", ".", "_filename" ]
Return the local filename of the build.
[ "Return", "the", "local", "filename", "of", "the", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L217-L230
9,881
mozilla/mozdownload
mozdownload/scraper.py
Scraper.download
def download(self): """Download the specified file.""" def total_seconds(td): # Keep backward compatibility with Python 2.6 which doesn't have # this method if hasattr(td, 'total_seconds'): return td.total_seconds() else: return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6 # Don't re-download the file if os.path.isfile(os.path.abspath(self.filename)): self.logger.info("File has already been downloaded: %s" % (self.filename)) return self.filename directory = os.path.dirname(self.filename) if not os.path.isdir(directory): os.makedirs(directory) self.logger.info('Downloading from: %s' % self.url) self.logger.info('Saving as: %s' % self.filename) tmp_file = self.filename + ".part" def _download(): try: start_time = datetime.now() # Enable streaming mode so we can download content in chunks r = self.session.get(self.url, stream=True) r.raise_for_status() content_length = r.headers.get('Content-length') # ValueError: Value out of range if only total_size given if content_length: total_size = int(content_length.strip()) max_value = ((total_size / CHUNK_SIZE) + 1) * CHUNK_SIZE bytes_downloaded = 0 log_level = self.logger.getEffectiveLevel() if log_level <= logging.INFO and content_length: widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.FileTransferSpeed()] pbar = pb.ProgressBar(widgets=widgets, maxval=max_value).start() with open(tmp_file, 'wb') as f: for chunk in r.iter_content(CHUNK_SIZE): f.write(chunk) bytes_downloaded += CHUNK_SIZE if log_level <= logging.INFO and content_length: pbar.update(bytes_downloaded) t1 = total_seconds(datetime.now() - start_time) if self.timeout_download and \ t1 >= self.timeout_download: raise errors.TimeoutError if log_level <= logging.INFO and content_length: pbar.finish() except Exception: if os.path.isfile(tmp_file): os.remove(tmp_file) raise self._retry(_download, retry_exceptions=(requests.exceptions.RequestException, errors.TimeoutError)) os.rename(tmp_file, self.filename) return self.filename
python
def download(self): """Download the specified file.""" def total_seconds(td): # Keep backward compatibility with Python 2.6 which doesn't have # this method if hasattr(td, 'total_seconds'): return td.total_seconds() else: return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6 # Don't re-download the file if os.path.isfile(os.path.abspath(self.filename)): self.logger.info("File has already been downloaded: %s" % (self.filename)) return self.filename directory = os.path.dirname(self.filename) if not os.path.isdir(directory): os.makedirs(directory) self.logger.info('Downloading from: %s' % self.url) self.logger.info('Saving as: %s' % self.filename) tmp_file = self.filename + ".part" def _download(): try: start_time = datetime.now() # Enable streaming mode so we can download content in chunks r = self.session.get(self.url, stream=True) r.raise_for_status() content_length = r.headers.get('Content-length') # ValueError: Value out of range if only total_size given if content_length: total_size = int(content_length.strip()) max_value = ((total_size / CHUNK_SIZE) + 1) * CHUNK_SIZE bytes_downloaded = 0 log_level = self.logger.getEffectiveLevel() if log_level <= logging.INFO and content_length: widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.FileTransferSpeed()] pbar = pb.ProgressBar(widgets=widgets, maxval=max_value).start() with open(tmp_file, 'wb') as f: for chunk in r.iter_content(CHUNK_SIZE): f.write(chunk) bytes_downloaded += CHUNK_SIZE if log_level <= logging.INFO and content_length: pbar.update(bytes_downloaded) t1 = total_seconds(datetime.now() - start_time) if self.timeout_download and \ t1 >= self.timeout_download: raise errors.TimeoutError if log_level <= logging.INFO and content_length: pbar.finish() except Exception: if os.path.isfile(tmp_file): os.remove(tmp_file) raise self._retry(_download, retry_exceptions=(requests.exceptions.RequestException, errors.TimeoutError)) os.rename(tmp_file, self.filename) return self.filename
[ "def", "download", "(", "self", ")", ":", "def", "total_seconds", "(", "td", ")", ":", "# Keep backward compatibility with Python 2.6 which doesn't have", "# this method", "if", "hasattr", "(", "td", ",", "'total_seconds'", ")", ":", "return", "td", ".", "total_seconds", "(", ")", "else", ":", "return", "(", "td", ".", "microseconds", "+", "(", "td", ".", "seconds", "+", "td", ".", "days", "*", "24", "*", "3600", ")", "*", "10", "**", "6", ")", "/", "10", "**", "6", "# Don't re-download the file", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "abspath", "(", "self", ".", "filename", ")", ")", ":", "self", ".", "logger", ".", "info", "(", "\"File has already been downloaded: %s\"", "%", "(", "self", ".", "filename", ")", ")", "return", "self", ".", "filename", "directory", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "filename", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "self", ".", "logger", ".", "info", "(", "'Downloading from: %s'", "%", "self", ".", "url", ")", "self", ".", "logger", ".", "info", "(", "'Saving as: %s'", "%", "self", ".", "filename", ")", "tmp_file", "=", "self", ".", "filename", "+", "\".part\"", "def", "_download", "(", ")", ":", "try", ":", "start_time", "=", "datetime", ".", "now", "(", ")", "# Enable streaming mode so we can download content in chunks", "r", "=", "self", ".", "session", ".", "get", "(", "self", ".", "url", ",", "stream", "=", "True", ")", "r", ".", "raise_for_status", "(", ")", "content_length", "=", "r", ".", "headers", ".", "get", "(", "'Content-length'", ")", "# ValueError: Value out of range if only total_size given", "if", "content_length", ":", "total_size", "=", "int", "(", "content_length", ".", "strip", "(", ")", ")", "max_value", "=", "(", "(", "total_size", "/", "CHUNK_SIZE", ")", "+", "1", ")", "*", "CHUNK_SIZE", "bytes_downloaded", "=", "0", "log_level", "=", "self", ".", "logger", ".", "getEffectiveLevel", "(", ")", "if", "log_level", "<=", "logging", ".", "INFO", "and", "content_length", ":", "widgets", "=", "[", "pb", ".", "Percentage", "(", ")", ",", "' '", ",", "pb", ".", "Bar", "(", ")", ",", "' '", ",", "pb", ".", "ETA", "(", ")", ",", "' '", ",", "pb", ".", "FileTransferSpeed", "(", ")", "]", "pbar", "=", "pb", ".", "ProgressBar", "(", "widgets", "=", "widgets", ",", "maxval", "=", "max_value", ")", ".", "start", "(", ")", "with", "open", "(", "tmp_file", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "r", ".", "iter_content", "(", "CHUNK_SIZE", ")", ":", "f", ".", "write", "(", "chunk", ")", "bytes_downloaded", "+=", "CHUNK_SIZE", "if", "log_level", "<=", "logging", ".", "INFO", "and", "content_length", ":", "pbar", ".", "update", "(", "bytes_downloaded", ")", "t1", "=", "total_seconds", "(", "datetime", ".", "now", "(", ")", "-", "start_time", ")", "if", "self", ".", "timeout_download", "and", "t1", ">=", "self", ".", "timeout_download", ":", "raise", "errors", ".", "TimeoutError", "if", "log_level", "<=", "logging", ".", "INFO", "and", "content_length", ":", "pbar", ".", "finish", "(", ")", "except", "Exception", ":", "if", "os", ".", "path", ".", "isfile", "(", "tmp_file", ")", ":", "os", ".", "remove", "(", "tmp_file", ")", "raise", "self", ".", "_retry", "(", "_download", ",", "retry_exceptions", "=", "(", "requests", ".", "exceptions", ".", "RequestException", ",", "errors", ".", "TimeoutError", ")", ")", "os", ".", "rename", "(", "tmp_file", ",", "self", ".", "filename", ")", "return", "self", 
".", "filename" ]
Download the specified file.
[ "Download", "the", "specified", "file", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L249-L324
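Scraper.download above streams the payload into a temporary ".part" file and renames it only once the transfer completes. A hedged, self-contained sketch of just that core pattern; the progress bar, retry logic and download timeout from the original are left out, and the chunk size shown is illustrative.

import os
import requests

CHUNK_SIZE = 16 * 1024  # illustrative value; the real scraper defines its own

def download(url, filename):
    if os.path.isfile(filename):
        return filename  # don't re-download
    tmp_file = filename + '.part'
    r = requests.get(url, stream=True)  # streaming mode: fetch content in chunks
    r.raise_for_status()
    try:
        with open(tmp_file, 'wb') as f:
            for chunk in r.iter_content(CHUNK_SIZE):
                f.write(chunk)
    except Exception:
        # Never leave a half-written ".part" file behind.
        if os.path.isfile(tmp_file):
            os.remove(tmp_file)
        raise
    os.rename(tmp_file, filename)  # only now does the final name exist
    return filename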
9,882
mozilla/mozdownload
mozdownload/scraper.py
Scraper.show_matching_builds
def show_matching_builds(self, builds): """Output the matching builds.""" self.logger.info('Found %s build%s: %s' % ( len(builds), len(builds) > 1 and 's' or '', len(builds) > 10 and ' ... '.join([', '.join(builds[:5]), ', '.join(builds[-5:])]) or ', '.join(builds)))
python
def show_matching_builds(self, builds): """Output the matching builds.""" self.logger.info('Found %s build%s: %s' % ( len(builds), len(builds) > 1 and 's' or '', len(builds) > 10 and ' ... '.join([', '.join(builds[:5]), ', '.join(builds[-5:])]) or ', '.join(builds)))
[ "def", "show_matching_builds", "(", "self", ",", "builds", ")", ":", "self", ".", "logger", ".", "info", "(", "'Found %s build%s: %s'", "%", "(", "len", "(", "builds", ")", ",", "len", "(", "builds", ")", ">", "1", "and", "'s'", "or", "''", ",", "len", "(", "builds", ")", ">", "10", "and", "' ... '", ".", "join", "(", "[", "', '", ".", "join", "(", "builds", "[", ":", "5", "]", ")", ",", "', '", ".", "join", "(", "builds", "[", "-", "5", ":", "]", ")", "]", ")", "or", "', '", ".", "join", "(", "builds", ")", ")", ")" ]
Output the matching builds.
[ "Output", "the", "matching", "builds", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L326-L333
9,883
mozilla/mozdownload
mozdownload/scraper.py
DailyScraper.is_build_dir
def is_build_dir(self, folder_name): """Return whether or not the given dir contains a build.""" # Cannot move up to base scraper due to parser.entries call in # get_build_info_for_date (see below) url = '%s/' % urljoin(self.base_url, self.monthly_build_list_regex, folder_name) if self.application in APPLICATIONS_MULTI_LOCALE \ and self.locale != 'multi': url = '%s/' % urljoin(url, self.locale) parser = self._create_directory_parser(url) pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: pattern.match(entry).group() return True except Exception: # No match, continue with next entry continue return False
python
def is_build_dir(self, folder_name): """Return whether or not the given dir contains a build.""" # Cannot move up to base scraper due to parser.entries call in # get_build_info_for_date (see below) url = '%s/' % urljoin(self.base_url, self.monthly_build_list_regex, folder_name) if self.application in APPLICATIONS_MULTI_LOCALE \ and self.locale != 'multi': url = '%s/' % urljoin(url, self.locale) parser = self._create_directory_parser(url) pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: pattern.match(entry).group() return True except Exception: # No match, continue with next entry continue return False
[ "def", "is_build_dir", "(", "self", ",", "folder_name", ")", ":", "# Cannot move up to base scraper due to parser.entries call in", "# get_build_info_for_date (see below)", "url", "=", "'%s/'", "%", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "monthly_build_list_regex", ",", "folder_name", ")", "if", "self", ".", "application", "in", "APPLICATIONS_MULTI_LOCALE", "and", "self", ".", "locale", "!=", "'multi'", ":", "url", "=", "'%s/'", "%", "urljoin", "(", "url", ",", "self", ".", "locale", ")", "parser", "=", "self", ".", "_create_directory_parser", "(", "url", ")", "pattern", "=", "re", ".", "compile", "(", "self", ".", "binary_regex", ",", "re", ".", "IGNORECASE", ")", "for", "entry", "in", "parser", ".", "entries", ":", "try", ":", "pattern", ".", "match", "(", "entry", ")", ".", "group", "(", ")", "return", "True", "except", "Exception", ":", "# No match, continue with next entry", "continue", "return", "False" ]
Return whether or not the given dir contains a build.
[ "Return", "whether", "or", "not", "the", "given", "dir", "contains", "a", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L435-L455
9,884
mozilla/mozdownload
mozdownload/scraper.py
DailyScraper.get_build_info_for_date
def get_build_info_for_date(self, date, build_index=None): """Return the build information for a given date.""" url = urljoin(self.base_url, self.monthly_build_list_regex) has_time = date and date.time() self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) regex = r'%(DATE)s-(\d+-)+%(BRANCH)s%(L10N)s%(PLATFORM)s$' % { 'DATE': date.strftime('%Y-%m-%d'), 'BRANCH': self.branch, # ensure to select the correct subfolder for localized builds 'L10N': '(-l10n)?' if self.locale_build else '', 'PLATFORM': '' if self.application not in ( 'fennec') else '-' + self.platform } parser.entries = parser.filter(regex) parser.entries = parser.filter(self.is_build_dir) if has_time: # If a time is included in the date, use it to determine the # build's index regex = r'.*%s.*' % date.strftime('%H-%M-%S') parser.entries = parser.filter(regex) if not parser.entries: date_format = '%Y-%m-%d-%H-%M-%S' if has_time else '%Y-%m-%d' message = 'Folder for builds on %s has not been found' % \ self.date.strftime(date_format) raise errors.NotFoundError(message, url) # If no index has been given, set it to the last build of the day. self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
python
def get_build_info_for_date(self, date, build_index=None): """Return the build information for a given date.""" url = urljoin(self.base_url, self.monthly_build_list_regex) has_time = date and date.time() self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) regex = r'%(DATE)s-(\d+-)+%(BRANCH)s%(L10N)s%(PLATFORM)s$' % { 'DATE': date.strftime('%Y-%m-%d'), 'BRANCH': self.branch, # ensure to select the correct subfolder for localized builds 'L10N': '(-l10n)?' if self.locale_build else '', 'PLATFORM': '' if self.application not in ( 'fennec') else '-' + self.platform } parser.entries = parser.filter(regex) parser.entries = parser.filter(self.is_build_dir) if has_time: # If a time is included in the date, use it to determine the # build's index regex = r'.*%s.*' % date.strftime('%H-%M-%S') parser.entries = parser.filter(regex) if not parser.entries: date_format = '%Y-%m-%d-%H-%M-%S' if has_time else '%Y-%m-%d' message = 'Folder for builds on %s has not been found' % \ self.date.strftime(date_format) raise errors.NotFoundError(message, url) # If no index has been given, set it to the last build of the day. self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
[ "def", "get_build_info_for_date", "(", "self", ",", "date", ",", "build_index", "=", "None", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "monthly_build_list_regex", ")", "has_time", "=", "date", "and", "date", ".", "time", "(", ")", "self", ".", "logger", ".", "info", "(", "'Retrieving list of builds from %s'", "%", "url", ")", "parser", "=", "self", ".", "_create_directory_parser", "(", "url", ")", "regex", "=", "r'%(DATE)s-(\\d+-)+%(BRANCH)s%(L10N)s%(PLATFORM)s$'", "%", "{", "'DATE'", ":", "date", ".", "strftime", "(", "'%Y-%m-%d'", ")", ",", "'BRANCH'", ":", "self", ".", "branch", ",", "# ensure to select the correct subfolder for localized builds", "'L10N'", ":", "'(-l10n)?'", "if", "self", ".", "locale_build", "else", "''", ",", "'PLATFORM'", ":", "''", "if", "self", ".", "application", "not", "in", "(", "'fennec'", ")", "else", "'-'", "+", "self", ".", "platform", "}", "parser", ".", "entries", "=", "parser", ".", "filter", "(", "regex", ")", "parser", ".", "entries", "=", "parser", ".", "filter", "(", "self", ".", "is_build_dir", ")", "if", "has_time", ":", "# If a time is included in the date, use it to determine the", "# build's index", "regex", "=", "r'.*%s.*'", "%", "date", ".", "strftime", "(", "'%H-%M-%S'", ")", "parser", ".", "entries", "=", "parser", ".", "filter", "(", "regex", ")", "if", "not", "parser", ".", "entries", ":", "date_format", "=", "'%Y-%m-%d-%H-%M-%S'", "if", "has_time", "else", "'%Y-%m-%d'", "message", "=", "'Folder for builds on %s has not been found'", "%", "self", ".", "date", ".", "strftime", "(", "date_format", ")", "raise", "errors", ".", "NotFoundError", "(", "message", ",", "url", ")", "# If no index has been given, set it to the last build of the day.", "self", ".", "show_matching_builds", "(", "parser", ".", "entries", ")", "# If no index has been given, set it to the last build of the day.", "if", "build_index", "is", "None", ":", "# Find the most recent non-empty entry.", "build_index", "=", "len", "(", "parser", ".", "entries", ")", "for", "build", "in", "reversed", "(", "parser", ".", "entries", ")", ":", "build_index", "-=", "1", "if", "not", "build_index", "or", "self", ".", "is_build_dir", "(", "build", ")", ":", "break", "self", ".", "logger", ".", "info", "(", "'Selected build: %s'", "%", "parser", ".", "entries", "[", "build_index", "]", ")", "return", "(", "parser", ".", "entries", ",", "build_index", ")" ]
Return the build information for a given date.
[ "Return", "the", "build", "information", "for", "a", "given", "date", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L457-L500
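The method above selects monthly nightly folders with a date/branch regex. A simplified demonstration of that pattern follows; the l10n and platform parts of the real regex are omitted, and the folder names are made up (they follow the real <date>-<time parts>-<branch> naming scheme under nightly/<year>/<month>/).

import re

date, branch = '2015-06-01', 'mozilla-central'
regex = r'%(DATE)s-(\d+-)+%(BRANCH)s$' % {'DATE': date, 'BRANCH': branch}

folders = [
    '2015-06-01-03-02-04-mozilla-central',
    '2015-06-01-00-40-07-mozilla-aurora',
    '2015-05-31-03-02-08-mozilla-central',
]
print([f for f in folders if re.match(regex, f)])
# ['2015-06-01-03-02-04-mozilla-central']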
9,885
mozilla/mozdownload
mozdownload/scraper.py
DailyScraper.monthly_build_list_regex
def monthly_build_list_regex(self): """Return the regex for the folder containing builds of a month.""" # Regex for possible builds for the given date return r'nightly/%(YEAR)s/%(MONTH)s/' % { 'YEAR': self.date.year, 'MONTH': str(self.date.month).zfill(2)}
python
def monthly_build_list_regex(self): """Return the regex for the folder containing builds of a month.""" # Regex for possible builds for the given date return r'nightly/%(YEAR)s/%(MONTH)s/' % { 'YEAR': self.date.year, 'MONTH': str(self.date.month).zfill(2)}
[ "def", "monthly_build_list_regex", "(", "self", ")", ":", "# Regex for possible builds for the given date", "return", "r'nightly/%(YEAR)s/%(MONTH)s/'", "%", "{", "'YEAR'", ":", "self", ".", "date", ".", "year", ",", "'MONTH'", ":", "str", "(", "self", ".", "date", ".", "month", ")", ".", "zfill", "(", "2", ")", "}" ]
Return the regex for the folder containing builds of a month.
[ "Return", "the", "regex", "for", "the", "folder", "containing", "builds", "of", "a", "month", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L544-L549
9,886
mozilla/mozdownload
mozdownload/scraper.py
DirectScraper.filename
def filename(self): """File name of the downloaded file.""" if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise determine it from the url. parsed_url = urlparse(self.url) source_filename = (parsed_url.path.rpartition('/')[-1] or parsed_url.hostname) target_file = os.path.join(self.destination, source_filename) return os.path.abspath(target_file)
python
def filename(self): """File name of the downloaded file.""" if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise determine it from the url. parsed_url = urlparse(self.url) source_filename = (parsed_url.path.rpartition('/')[-1] or parsed_url.hostname) target_file = os.path.join(self.destination, source_filename) return os.path.abspath(target_file)
[ "def", "filename", "(", "self", ")", ":", "if", "os", ".", "path", ".", "splitext", "(", "self", ".", "destination", ")", "[", "1", "]", ":", "# If the filename has been given make use of it", "target_file", "=", "self", ".", "destination", "else", ":", "# Otherwise determine it from the url.", "parsed_url", "=", "urlparse", "(", "self", ".", "url", ")", "source_filename", "=", "(", "parsed_url", ".", "path", ".", "rpartition", "(", "'/'", ")", "[", "-", "1", "]", "or", "parsed_url", ".", "hostname", ")", "target_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "destination", ",", "source_filename", ")", "return", "os", ".", "path", ".", "abspath", "(", "target_file", ")" ]
File name of the downloaded file.
[ "File", "name", "of", "the", "downloaded", "file", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L577-L589
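A small demo of the URL-to-filename rule above: use the last path component of the URL, or fall back to the hostname when there is none. The sketch is ported to Python 3's urllib.parse, and the helper name target_filename is ours.

import os
from urllib.parse import urlparse

def target_filename(url, destination):
    if os.path.splitext(destination)[1]:
        return os.path.abspath(destination)  # explicit filename given
    parsed_url = urlparse(url)
    source_filename = parsed_url.path.rpartition('/')[-1] or parsed_url.hostname
    return os.path.abspath(os.path.join(destination, source_filename))

print(target_filename('https://example.com/pub/firefox-38.0.tar.bz2', 'downloads'))
# .../downloads/firefox-38.0.tar.bz2
print(target_filename('https://example.com/', 'downloads'))
# .../downloads/example.com  (no path component, so the hostname is used)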
9,887
mozilla/mozdownload
mozdownload/scraper.py
ReleaseScraper.query_versions
def query_versions(self, version=None): """Check specified version and resolve special values.""" if version not in RELEASE_AND_CANDIDATE_LATEST_VERSIONS: return [version] url = urljoin(self.base_url, 'releases/') parser = self._create_directory_parser(url) if version: versions = parser.filter(RELEASE_AND_CANDIDATE_LATEST_VERSIONS[version]) from distutils.version import LooseVersion versions.sort(key=LooseVersion) return [versions[-1]] else: return parser.entries
python
def query_versions(self, version=None): """Check specified version and resolve special values.""" if version not in RELEASE_AND_CANDIDATE_LATEST_VERSIONS: return [version] url = urljoin(self.base_url, 'releases/') parser = self._create_directory_parser(url) if version: versions = parser.filter(RELEASE_AND_CANDIDATE_LATEST_VERSIONS[version]) from distutils.version import LooseVersion versions.sort(key=LooseVersion) return [versions[-1]] else: return parser.entries
[ "def", "query_versions", "(", "self", ",", "version", "=", "None", ")", ":", "if", "version", "not", "in", "RELEASE_AND_CANDIDATE_LATEST_VERSIONS", ":", "return", "[", "version", "]", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "'releases/'", ")", "parser", "=", "self", ".", "_create_directory_parser", "(", "url", ")", "if", "version", ":", "versions", "=", "parser", ".", "filter", "(", "RELEASE_AND_CANDIDATE_LATEST_VERSIONS", "[", "version", "]", ")", "from", "distutils", ".", "version", "import", "LooseVersion", "versions", ".", "sort", "(", "key", "=", "LooseVersion", ")", "return", "[", "versions", "[", "-", "1", "]", "]", "else", ":", "return", "parser", ".", "entries" ]
Check specified version and resolve special values.
[ "Check", "specified", "version", "and", "resolve", "special", "values", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L657-L670
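This record shows why query_versions sorts with a version-aware key rather than plain string order. LooseVersion (used above) comes from the now-deprecated distutils, so the sketch substitutes a tiny hand-rolled numeric key purely for illustration; it ignores non-numeric parts such as beta suffixes.

versions = ['38.0.5', '45.0', '9.0', '38.0']

def version_key(version):
    # Compare dot-separated numeric components instead of raw characters.
    return [int(part) for part in version.split('.') if part.isdigit()]

print(sorted(versions))                       # ['38.0', '38.0.5', '45.0', '9.0'] -- '9' sorts after '4'
print(sorted(versions, key=version_key))      # ['9.0', '38.0', '38.0.5', '45.0']
print(sorted(versions, key=version_key)[-1])  # '45.0' -> what 'latest' resolves to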
9,888
mozilla/mozdownload
mozdownload/scraper.py
TinderboxScraper.build_list_regex
def build_list_regex(self): """Return the regex for the folder which contains the list of builds.""" regex = 'tinderbox-builds/%(BRANCH)s-%(PLATFORM)s%(L10N)s%(DEBUG)s/' return regex % { 'BRANCH': self.branch, 'PLATFORM': '' if self.locale_build else self.platform_regex, 'L10N': 'l10n' if self.locale_build else '', 'DEBUG': '-debug' if self.debug_build else ''}
python
def build_list_regex(self): """Return the regex for the folder which contains the list of builds.""" regex = 'tinderbox-builds/%(BRANCH)s-%(PLATFORM)s%(L10N)s%(DEBUG)s/' return regex % { 'BRANCH': self.branch, 'PLATFORM': '' if self.locale_build else self.platform_regex, 'L10N': 'l10n' if self.locale_build else '', 'DEBUG': '-debug' if self.debug_build else ''}
[ "def", "build_list_regex", "(", "self", ")", ":", "regex", "=", "'tinderbox-builds/%(BRANCH)s-%(PLATFORM)s%(L10N)s%(DEBUG)s/'", "return", "regex", "%", "{", "'BRANCH'", ":", "self", ".", "branch", ",", "'PLATFORM'", ":", "''", "if", "self", ".", "locale_build", "else", "self", ".", "platform_regex", ",", "'L10N'", ":", "'l10n'", "if", "self", ".", "locale_build", "else", "''", ",", "'DEBUG'", ":", "'-debug'", "if", "self", ".", "debug_build", "else", "''", "}" ]
Return the regex for the folder which contains the list of builds.
[ "Return", "the", "regex", "for", "the", "folder", "which", "contains", "the", "list", "of", "builds", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L845-L853
9,889
mozilla/mozdownload
mozdownload/scraper.py
TinderboxScraper.date_matches
def date_matches(self, timestamp): """Determine whether the timestamp date is equal to the argument date.""" if self.date is None: return False timestamp = datetime.fromtimestamp(float(timestamp), self.timezone) if self.date.date() == timestamp.date(): return True return False
python
def date_matches(self, timestamp): """Determine whether the timestamp date is equal to the argument date.""" if self.date is None: return False timestamp = datetime.fromtimestamp(float(timestamp), self.timezone) if self.date.date() == timestamp.date(): return True return False
[ "def", "date_matches", "(", "self", ",", "timestamp", ")", ":", "if", "self", ".", "date", "is", "None", ":", "return", "False", "timestamp", "=", "datetime", ".", "fromtimestamp", "(", "float", "(", "timestamp", ")", ",", "self", ".", "timezone", ")", "if", "self", ".", "date", ".", "date", "(", ")", "==", "timestamp", ".", "date", "(", ")", ":", "return", "True", "return", "False" ]
Determine whether the timestamp date is equal to the argument date.
[ "Determine", "whether", "the", "timestamp", "date", "is", "equal", "to", "the", "argument", "date", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L855-L864
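A worked example of the comparison in date_matches above: tinderbox folder names are Unix timestamps, and a folder matches when its timestamp falls on the requested calendar date. UTC is used here to keep the sketch self-contained (the real scraper uses its configured timezone), and the timestamp values below were picked for the demo.

from datetime import datetime, timezone

requested = datetime(2015, 6, 1)
timestamps = ['1433170200', '1433256600']  # made-up folder names

for ts in timestamps:
    build_date = datetime.fromtimestamp(float(ts), timezone.utc)
    print(ts, build_date.date(), requested.date() == build_date.date())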
9,890
mozilla/mozdownload
mozdownload/scraper.py
TinderboxScraper.get_build_info_for_index
def get_build_info_for_index(self, build_index=None): """Get additional information for the build at the given index.""" url = urljoin(self.base_url, self.build_list_regex) self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) parser.entries = parser.filter(r'^\d+$') if self.timestamp: # If a timestamp is given, retrieve the folder with the timestamp # as name parser.entries = self.timestamp in parser.entries and \ [self.timestamp] elif self.date: # If date is given, retrieve the subset of builds on that date parser.entries = filter(self.date_matches, parser.entries) if not parser.entries: message = 'No builds have been found' raise errors.NotFoundError(message, url) self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
python
def get_build_info_for_index(self, build_index=None): """Get additional information for the build at the given index.""" url = urljoin(self.base_url, self.build_list_regex) self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) parser.entries = parser.filter(r'^\d+$') if self.timestamp: # If a timestamp is given, retrieve the folder with the timestamp # as name parser.entries = self.timestamp in parser.entries and \ [self.timestamp] elif self.date: # If date is given, retrieve the subset of builds on that date parser.entries = filter(self.date_matches, parser.entries) if not parser.entries: message = 'No builds have been found' raise errors.NotFoundError(message, url) self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
[ "def", "get_build_info_for_index", "(", "self", ",", "build_index", "=", "None", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "build_list_regex", ")", "self", ".", "logger", ".", "info", "(", "'Retrieving list of builds from %s'", "%", "url", ")", "parser", "=", "self", ".", "_create_directory_parser", "(", "url", ")", "parser", ".", "entries", "=", "parser", ".", "filter", "(", "r'^\\d+$'", ")", "if", "self", ".", "timestamp", ":", "# If a timestamp is given, retrieve the folder with the timestamp", "# as name", "parser", ".", "entries", "=", "self", ".", "timestamp", "in", "parser", ".", "entries", "and", "[", "self", ".", "timestamp", "]", "elif", "self", ".", "date", ":", "# If date is given, retrieve the subset of builds on that date", "parser", ".", "entries", "=", "filter", "(", "self", ".", "date_matches", ",", "parser", ".", "entries", ")", "if", "not", "parser", ".", "entries", ":", "message", "=", "'No builds have been found'", "raise", "errors", ".", "NotFoundError", "(", "message", ",", "url", ")", "self", ".", "show_matching_builds", "(", "parser", ".", "entries", ")", "# If no index has been given, set it to the last build of the day.", "if", "build_index", "is", "None", ":", "# Find the most recent non-empty entry.", "build_index", "=", "len", "(", "parser", ".", "entries", ")", "for", "build", "in", "reversed", "(", "parser", ".", "entries", ")", ":", "build_index", "-=", "1", "if", "not", "build_index", "or", "self", ".", "is_build_dir", "(", "build", ")", ":", "break", "self", ".", "logger", ".", "info", "(", "'Selected build: %s'", "%", "parser", ".", "entries", "[", "build_index", "]", ")", "return", "(", "parser", ".", "entries", ",", "build_index", ")" ]
Get additional information for the build at the given index.
[ "Get", "additional", "information", "for", "the", "build", "at", "the", "given", "index", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L900-L935
9,891
romanz/trezor-agent
libagent/device/ui.py
create_default_options_getter
def create_default_options_getter(): """Return current TTY and DISPLAY settings for GnuPG pinentry.""" options = [] try: ttyname = subprocess.check_output(args=['tty']).strip() options.append(b'ttyname=' + ttyname) except subprocess.CalledProcessError as e: log.warning('no TTY found: %s', e) display = os.environ.get('DISPLAY') if display is not None: options.append('display={}'.format(display).encode('ascii')) else: log.warning('DISPLAY not defined') log.info('using %s for pinentry options', options) return lambda: options
python
def create_default_options_getter(): """Return current TTY and DISPLAY settings for GnuPG pinentry.""" options = [] try: ttyname = subprocess.check_output(args=['tty']).strip() options.append(b'ttyname=' + ttyname) except subprocess.CalledProcessError as e: log.warning('no TTY found: %s', e) display = os.environ.get('DISPLAY') if display is not None: options.append('display={}'.format(display).encode('ascii')) else: log.warning('DISPLAY not defined') log.info('using %s for pinentry options', options) return lambda: options
[ "def", "create_default_options_getter", "(", ")", ":", "options", "=", "[", "]", "try", ":", "ttyname", "=", "subprocess", ".", "check_output", "(", "args", "=", "[", "'tty'", "]", ")", ".", "strip", "(", ")", "options", ".", "append", "(", "b'ttyname='", "+", "ttyname", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "log", ".", "warning", "(", "'no TTY found: %s'", ",", "e", ")", "display", "=", "os", ".", "environ", ".", "get", "(", "'DISPLAY'", ")", "if", "display", "is", "not", "None", ":", "options", ".", "append", "(", "'display={}'", ".", "format", "(", "display", ")", ".", "encode", "(", "'ascii'", ")", ")", "else", ":", "log", ".", "warning", "(", "'DISPLAY not defined'", ")", "log", ".", "info", "(", "'using %s for pinentry options'", ",", "options", ")", "return", "lambda", ":", "options" ]
Return current TTY and DISPLAY settings for GnuPG pinentry.
[ "Return", "current", "TTY", "and", "DISPLAY", "settings", "for", "GnuPG", "pinentry", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L64-L80
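A self-contained sketch of the option gathering above: probe the controlling terminal with the tty command and read DISPLAY from the environment, silently skipping whichever is unavailable (tty exits non-zero when stdin is not a terminal, and may be missing entirely on some platforms). The helper name pinentry_options is ours.

import os
import subprocess

def pinentry_options():
    options = []
    try:
        ttyname = subprocess.check_output(['tty']).strip()
        options.append(b'ttyname=' + ttyname)
    except (subprocess.CalledProcessError, OSError):
        pass  # no controlling terminal available
    display = os.environ.get('DISPLAY')
    if display is not None:
        options.append('display={}'.format(display).encode('ascii'))
    return options

print(pinentry_options())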
9,892
romanz/trezor-agent
libagent/device/ui.py
write
def write(p, line): """Send and flush a single line to the subprocess' stdin.""" log.debug('%s <- %r', p.args, line) p.stdin.write(line) p.stdin.flush()
python
def write(p, line): """Send and flush a single line to the subprocess' stdin.""" log.debug('%s <- %r', p.args, line) p.stdin.write(line) p.stdin.flush()
[ "def", "write", "(", "p", ",", "line", ")", ":", "log", ".", "debug", "(", "'%s <- %r'", ",", "p", ".", "args", ",", "line", ")", "p", ".", "stdin", ".", "write", "(", "line", ")", "p", ".", "stdin", ".", "flush", "(", ")" ]
Send and flush a single line to the subprocess' stdin.
[ "Send", "and", "flush", "a", "single", "line", "to", "the", "subprocess", "stdin", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L83-L87
9,893
romanz/trezor-agent
libagent/device/ui.py
expect
def expect(p, prefixes, confidential=False): """Read a line and return it without required prefix.""" resp = p.stdout.readline() log.debug('%s -> %r', p.args, resp if not confidential else '********') for prefix in prefixes: if resp.startswith(prefix): return resp[len(prefix):] raise UnexpectedError(resp)
python
def expect(p, prefixes, confidential=False): """Read a line and return it without required prefix.""" resp = p.stdout.readline() log.debug('%s -> %r', p.args, resp if not confidential else '********') for prefix in prefixes: if resp.startswith(prefix): return resp[len(prefix):] raise UnexpectedError(resp)
[ "def", "expect", "(", "p", ",", "prefixes", ",", "confidential", "=", "False", ")", ":", "resp", "=", "p", ".", "stdout", ".", "readline", "(", ")", "log", ".", "debug", "(", "'%s -> %r'", ",", "p", ".", "args", ",", "resp", "if", "not", "confidential", "else", "'********'", ")", "for", "prefix", "in", "prefixes", ":", "if", "resp", ".", "startswith", "(", "prefix", ")", ":", "return", "resp", "[", "len", "(", "prefix", ")", ":", "]", "raise", "UnexpectedError", "(", "resp", ")" ]
Read a line and return it without required prefix.
[ "Read", "a", "line", "and", "return", "it", "without", "required", "prefix", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L94-L101
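The essence of expect() above is prefix matching on Assuan response lines. A tiny demo of that logic on raw byte strings, using ValueError in place of the module's UnexpectedError:

def strip_prefix(resp, prefixes):
    for prefix in prefixes:
        if resp.startswith(prefix):
            return resp[len(prefix):]
    raise ValueError('unexpected response: {!r}'.format(resp))

print(strip_prefix(b'D s3cret\n', [b'OK', b'D ']))  # b's3cret\n'
print(strip_prefix(b'OK\n', [b'OK', b'D ']))        # b'\n'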
9,894
romanz/trezor-agent
libagent/device/ui.py
interact
def interact(title, description, prompt, binary, options): """Use GPG pinentry program to interact with the user.""" args = [binary] p = subprocess.Popen(args=args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=os.environ) p.args = args # TODO: remove after Python 2 deprecation. expect(p, [b'OK']) title = util.assuan_serialize(title.encode('ascii')) write(p, b'SETTITLE ' + title + b'\n') expect(p, [b'OK']) if description: description = util.assuan_serialize(description.encode('ascii')) write(p, b'SETDESC ' + description + b'\n') expect(p, [b'OK']) if prompt: prompt = util.assuan_serialize(prompt.encode('ascii')) write(p, b'SETPROMPT ' + prompt + b'\n') expect(p, [b'OK']) log.debug('setting %d options', len(options)) for opt in options: write(p, b'OPTION ' + opt + b'\n') expect(p, [b'OK', b'ERR']) write(p, b'GETPIN\n') pin = expect(p, [b'OK', b'D '], confidential=True) p.communicate() # close stdin and wait for the process to exit exit_code = p.wait() if exit_code: raise subprocess.CalledProcessError(exit_code, binary) return pin.decode('ascii').strip()
python
def interact(title, description, prompt, binary, options): """Use GPG pinentry program to interact with the user.""" args = [binary] p = subprocess.Popen(args=args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=os.environ) p.args = args # TODO: remove after Python 2 deprecation. expect(p, [b'OK']) title = util.assuan_serialize(title.encode('ascii')) write(p, b'SETTITLE ' + title + b'\n') expect(p, [b'OK']) if description: description = util.assuan_serialize(description.encode('ascii')) write(p, b'SETDESC ' + description + b'\n') expect(p, [b'OK']) if prompt: prompt = util.assuan_serialize(prompt.encode('ascii')) write(p, b'SETPROMPT ' + prompt + b'\n') expect(p, [b'OK']) log.debug('setting %d options', len(options)) for opt in options: write(p, b'OPTION ' + opt + b'\n') expect(p, [b'OK', b'ERR']) write(p, b'GETPIN\n') pin = expect(p, [b'OK', b'D '], confidential=True) p.communicate() # close stdin and wait for the process to exit exit_code = p.wait() if exit_code: raise subprocess.CalledProcessError(exit_code, binary) return pin.decode('ascii').strip()
[ "def", "interact", "(", "title", ",", "description", ",", "prompt", ",", "binary", ",", "options", ")", ":", "args", "=", "[", "binary", "]", "p", "=", "subprocess", ".", "Popen", "(", "args", "=", "args", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "env", "=", "os", ".", "environ", ")", "p", ".", "args", "=", "args", "# TODO: remove after Python 2 deprecation.", "expect", "(", "p", ",", "[", "b'OK'", "]", ")", "title", "=", "util", ".", "assuan_serialize", "(", "title", ".", "encode", "(", "'ascii'", ")", ")", "write", "(", "p", ",", "b'SETTITLE '", "+", "title", "+", "b'\\n'", ")", "expect", "(", "p", ",", "[", "b'OK'", "]", ")", "if", "description", ":", "description", "=", "util", ".", "assuan_serialize", "(", "description", ".", "encode", "(", "'ascii'", ")", ")", "write", "(", "p", ",", "b'SETDESC '", "+", "description", "+", "b'\\n'", ")", "expect", "(", "p", ",", "[", "b'OK'", "]", ")", "if", "prompt", ":", "prompt", "=", "util", ".", "assuan_serialize", "(", "prompt", ".", "encode", "(", "'ascii'", ")", ")", "write", "(", "p", ",", "b'SETPROMPT '", "+", "prompt", "+", "b'\\n'", ")", "expect", "(", "p", ",", "[", "b'OK'", "]", ")", "log", ".", "debug", "(", "'setting %d options'", ",", "len", "(", "options", ")", ")", "for", "opt", "in", "options", ":", "write", "(", "p", ",", "b'OPTION '", "+", "opt", "+", "b'\\n'", ")", "expect", "(", "p", ",", "[", "b'OK'", ",", "b'ERR'", "]", ")", "write", "(", "p", ",", "b'GETPIN\\n'", ")", "pin", "=", "expect", "(", "p", ",", "[", "b'OK'", ",", "b'D '", "]", ",", "confidential", "=", "True", ")", "p", ".", "communicate", "(", ")", "# close stdin and wait for the process to exit", "exit_code", "=", "p", ".", "wait", "(", ")", "if", "exit_code", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "exit_code", ",", "binary", ")", "return", "pin", ".", "decode", "(", "'ascii'", ")", ".", "strip", "(", ")" ]
Use GPG pinentry program to interact with the user.
[ "Use", "GPG", "pinentry", "program", "to", "interact", "with", "the", "user", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L104-L141
9,895
romanz/trezor-agent
libagent/device/ui.py
UI.get_passphrase
def get_passphrase(self, prompt='Passphrase:'): """Ask the user for passphrase.""" passphrase = None if self.cached_passphrase_ack: passphrase = self.cached_passphrase_ack.get() if passphrase is None: passphrase = interact( title='{} passphrase'.format(self.device_name), prompt=prompt, description=None, binary=self.passphrase_entry_binary, options=self.options_getter()) if self.cached_passphrase_ack: self.cached_passphrase_ack.set(passphrase) return passphrase
python
def get_passphrase(self, prompt='Passphrase:'): """Ask the user for passphrase.""" passphrase = None if self.cached_passphrase_ack: passphrase = self.cached_passphrase_ack.get() if passphrase is None: passphrase = interact( title='{} passphrase'.format(self.device_name), prompt=prompt, description=None, binary=self.passphrase_entry_binary, options=self.options_getter()) if self.cached_passphrase_ack: self.cached_passphrase_ack.set(passphrase) return passphrase
[ "def", "get_passphrase", "(", "self", ",", "prompt", "=", "'Passphrase:'", ")", ":", "passphrase", "=", "None", "if", "self", ".", "cached_passphrase_ack", ":", "passphrase", "=", "self", ".", "cached_passphrase_ack", ".", "get", "(", ")", "if", "passphrase", "is", "None", ":", "passphrase", "=", "interact", "(", "title", "=", "'{} passphrase'", ".", "format", "(", "self", ".", "device_name", ")", ",", "prompt", "=", "prompt", ",", "description", "=", "None", ",", "binary", "=", "self", ".", "passphrase_entry_binary", ",", "options", "=", "self", ".", "options_getter", "(", ")", ")", "if", "self", ".", "cached_passphrase_ack", ":", "self", ".", "cached_passphrase_ack", ".", "set", "(", "passphrase", ")", "return", "passphrase" ]
Ask the user for passphrase.
[ "Ask", "the", "user", "for", "passphrase", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L43-L57
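UI.get_passphrase above only prompts when no cached value is available; later calls reuse the cache. The self-contained sketch below mirrors just that control flow with stand-in objects (the Cache class and the ask callable are hypothetical, introduced only for illustration; the real code uses cached_passphrase_ack and the pinentry interaction shown earlier).

# Self-contained sketch of the caching behaviour in get_passphrase above.
class Cache:
    def __init__(self):
        self._value = None

    def get(self):
        return self._value

    def set(self, value):
        self._value = value


def get_passphrase(cache, ask):
    passphrase = cache.get() if cache else None
    if passphrase is None:
        passphrase = ask()            # would invoke pinentry in the real code
        if cache:
            cache.set(passphrase)
    return passphrase


cache = Cache()
print(get_passphrase(cache, ask=lambda: 'secret'))   # prompts (here: faked)
print(get_passphrase(cache, ask=lambda: 'ignored'))  # served from the cache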
9,896
romanz/trezor-agent
libagent/ssh/client.py
Client.export_public_keys
def export_public_keys(self, identities): """Export SSH public keys from the device.""" public_keys = [] with self.device: for i in identities: pubkey = self.device.pubkey(identity=i) vk = formats.decompress_pubkey(pubkey=pubkey, curve_name=i.curve_name) public_key = formats.export_public_key(vk=vk, label=i.to_string()) public_keys.append(public_key) return public_keys
python
def export_public_keys(self, identities): """Export SSH public keys from the device.""" public_keys = [] with self.device: for i in identities: pubkey = self.device.pubkey(identity=i) vk = formats.decompress_pubkey(pubkey=pubkey, curve_name=i.curve_name) public_key = formats.export_public_key(vk=vk, label=i.to_string()) public_keys.append(public_key) return public_keys
[ "def", "export_public_keys", "(", "self", ",", "identities", ")", ":", "public_keys", "=", "[", "]", "with", "self", ".", "device", ":", "for", "i", "in", "identities", ":", "pubkey", "=", "self", ".", "device", ".", "pubkey", "(", "identity", "=", "i", ")", "vk", "=", "formats", ".", "decompress_pubkey", "(", "pubkey", "=", "pubkey", ",", "curve_name", "=", "i", ".", "curve_name", ")", "public_key", "=", "formats", ".", "export_public_key", "(", "vk", "=", "vk", ",", "label", "=", "i", ".", "to_string", "(", ")", ")", "public_keys", ".", "append", "(", "public_key", ")", "return", "public_keys" ]
Export SSH public keys from the device.
[ "Export", "SSH", "public", "keys", "from", "the", "device", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/client.py#L21-L32
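export_public_keys opens a single device session and, for each requested identity, turns the device's compressed public key into an OpenSSH-style text line. The sketch below fakes both the device and the formats helpers so the session-and-loop structure can be run without hardware; FakeDevice and the produced key line are illustrative stand-ins, not the library's real output.

# Illustrative stand-ins for the device session used by export_public_keys.
class FakeDevice:
    def __enter__(self):              # "open" the device for the session
        return self

    def __exit__(self, *exc):         # "close" it afterwards
        return False

    def pubkey(self, identity):
        return b'\x02' + b'\x00' * 32     # toy compressed point, not a real key


def export_public_keys(device, identities):
    lines = []
    with device:                      # one session covers all identities
        for label in identities:
            blob = device.pubkey(identity=label)
            # formats.decompress_pubkey()/export_public_key() would run here;
            # we only fake the resulting authorized_keys-style line.
            lines.append('ecdsa-sha2-nistp256 <base64-key> {}'.format(label))
    return lines


for line in export_public_keys(FakeDevice(), ['ssh://git@github.com']):
    print(line)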
9,897
romanz/trezor-agent
libagent/ssh/client.py
Client.sign_ssh_challenge
def sign_ssh_challenge(self, blob, identity): """Sign given blob using a private key on the device.""" msg = _parse_ssh_blob(blob) log.debug('%s: user %r via %r (%r)', msg['conn'], msg['user'], msg['auth'], msg['key_type']) log.debug('nonce: %r', msg['nonce']) fp = msg['public_key']['fingerprint'] log.debug('fingerprint: %s', fp) log.debug('hidden challenge size: %d bytes', len(blob)) log.info('please confirm user "%s" login to "%s" using %s...', msg['user'].decode('ascii'), identity.to_string(), self.device) with self.device: return self.device.sign(blob=blob, identity=identity)
python
def sign_ssh_challenge(self, blob, identity): """Sign given blob using a private key on the device.""" msg = _parse_ssh_blob(blob) log.debug('%s: user %r via %r (%r)', msg['conn'], msg['user'], msg['auth'], msg['key_type']) log.debug('nonce: %r', msg['nonce']) fp = msg['public_key']['fingerprint'] log.debug('fingerprint: %s', fp) log.debug('hidden challenge size: %d bytes', len(blob)) log.info('please confirm user "%s" login to "%s" using %s...', msg['user'].decode('ascii'), identity.to_string(), self.device) with self.device: return self.device.sign(blob=blob, identity=identity)
[ "def", "sign_ssh_challenge", "(", "self", ",", "blob", ",", "identity", ")", ":", "msg", "=", "_parse_ssh_blob", "(", "blob", ")", "log", ".", "debug", "(", "'%s: user %r via %r (%r)'", ",", "msg", "[", "'conn'", "]", ",", "msg", "[", "'user'", "]", ",", "msg", "[", "'auth'", "]", ",", "msg", "[", "'key_type'", "]", ")", "log", ".", "debug", "(", "'nonce: %r'", ",", "msg", "[", "'nonce'", "]", ")", "fp", "=", "msg", "[", "'public_key'", "]", "[", "'fingerprint'", "]", "log", ".", "debug", "(", "'fingerprint: %s'", ",", "fp", ")", "log", ".", "debug", "(", "'hidden challenge size: %d bytes'", ",", "len", "(", "blob", ")", ")", "log", ".", "info", "(", "'please confirm user \"%s\" login to \"%s\" using %s...'", ",", "msg", "[", "'user'", "]", ".", "decode", "(", "'ascii'", ")", ",", "identity", ".", "to_string", "(", ")", ",", "self", ".", "device", ")", "with", "self", ".", "device", ":", "return", "self", ".", "device", ".", "sign", "(", "blob", "=", "blob", ",", "identity", "=", "identity", ")" ]
Sign given blob using a private key on the device.
[ "Sign", "given", "blob", "using", "a", "private", "key", "on", "the", "device", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/client.py#L34-L49
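The blob handed to sign_ssh_challenge is the SSH public-key userauth request defined in RFC 4252 section 7: a length-prefixed session identifier, the SSH_MSG_USERAUTH_REQUEST byte, then user, service, method, a flag, the key algorithm name and the public key blob. The sketch below builds such a blob by hand so the challenge structure is visible; the frame() helper and every field value are placeholders, and _parse_ssh_blob's exact return keys are only known from the log calls above.

# Hand-built SSH userauth-request blob (RFC 4252, section 7); all values
# are placeholders and frame() is a local helper, not part of the library.
import struct

def frame(data):
    return struct.pack('>L', len(data)) + data      # 4-byte length prefix

session_id = b'\x00' * 32                           # placeholder session identifier
key_blob = frame(b'ssh-ed25519') + frame(b'\x00' * 32)

blob = (frame(session_id) +
        b'\x32' +                                   # SSH_MSG_USERAUTH_REQUEST (50)
        frame(b'alice') +                           # user name
        frame(b'ssh-connection') +                  # service
        frame(b'publickey') +                       # auth method
        b'\x01' +                                   # TRUE: a signature follows
        frame(b'ssh-ed25519') +                     # key algorithm
        frame(key_blob))                            # public key blob
print('challenge size: %d bytes' % len(blob))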
9,898
romanz/trezor-agent
libagent/formats.py
fingerprint
def fingerprint(blob): """ Compute SSH fingerprint for specified blob. See https://en.wikipedia.org/wiki/Public_key_fingerprint for details. """ digest = hashlib.md5(blob).digest() return ':'.join('{:02x}'.format(c) for c in bytearray(digest))
python
def fingerprint(blob): """ Compute SSH fingerprint for specified blob. See https://en.wikipedia.org/wiki/Public_key_fingerprint for details. """ digest = hashlib.md5(blob).digest() return ':'.join('{:02x}'.format(c) for c in bytearray(digest))
[ "def", "fingerprint", "(", "blob", ")", ":", "digest", "=", "hashlib", ".", "md5", "(", "blob", ")", ".", "digest", "(", ")", "return", "':'", ".", "join", "(", "'{:02x}'", ".", "format", "(", "c", ")", "for", "c", "in", "bytearray", "(", "digest", ")", ")" ]
Compute SSH fingerprint for specified blob. See https://en.wikipedia.org/wiki/Public_key_fingerprint for details.
[ "Compute", "SSH", "fingerprint", "for", "specified", "blob", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/formats.py#L34-L41
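The fingerprint() helper above produces the classic colon-separated MD5 form. It can be reproduced directly with hashlib, as in the sketch below; the blob used here is a toy placeholder, not a real SSH public key.

# Standalone reproduction of the MD5 fingerprint format shown above.
import hashlib

blob = b'\x00\x00\x00\x0bssh-ed25519' + b'\x00' * 36   # toy blob, not a real key
digest = hashlib.md5(blob).digest()
print(':'.join('{:02x}'.format(c) for c in bytearray(digest)))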
9,899
romanz/trezor-agent
libagent/formats.py
parse_pubkey
def parse_pubkey(blob): """ Parse SSH public key from given blob. Construct a verifier for ECDSA signatures. The verifier returns the signatures in the required SSH format. Currently, NIST256P1 and ED25519 elliptic curves are supported. """ fp = fingerprint(blob) s = io.BytesIO(blob) key_type = util.read_frame(s) log.debug('key type: %s', key_type) assert key_type in SUPPORTED_KEY_TYPES, key_type result = {'blob': blob, 'type': key_type, 'fingerprint': fp} if key_type == SSH_NIST256_KEY_TYPE: curve_name = util.read_frame(s) log.debug('curve name: %s', curve_name) point = util.read_frame(s) assert s.read() == b'' _type, point = point[:1], point[1:] assert _type == SSH_NIST256_DER_OCTET size = len(point) // 2 assert len(point) == 2 * size coords = (util.bytes2num(point[:size]), util.bytes2num(point[size:])) curve = ecdsa.NIST256p point = ecdsa.ellipticcurve.Point(curve.curve, *coords) def ecdsa_verifier(sig, msg): assert len(sig) == 2 * size sig_decode = ecdsa.util.sigdecode_string vk = ecdsa.VerifyingKey.from_public_point(point, curve, hashfunc) vk.verify(signature=sig, data=msg, sigdecode=sig_decode) parts = [sig[:size], sig[size:]] return b''.join([util.frame(b'\x00' + p) for p in parts]) result.update(point=coords, curve=CURVE_NIST256, verifier=ecdsa_verifier) if key_type == SSH_ED25519_KEY_TYPE: pubkey = util.read_frame(s) assert s.read() == b'' def ed25519_verify(sig, msg): assert len(sig) == 64 vk = ed25519.VerifyingKey(pubkey) vk.verify(sig, msg) return sig result.update(curve=CURVE_ED25519, verifier=ed25519_verify) return result
python
def parse_pubkey(blob): """ Parse SSH public key from given blob. Construct a verifier for ECDSA signatures. The verifier returns the signatures in the required SSH format. Currently, NIST256P1 and ED25519 elliptic curves are supported. """ fp = fingerprint(blob) s = io.BytesIO(blob) key_type = util.read_frame(s) log.debug('key type: %s', key_type) assert key_type in SUPPORTED_KEY_TYPES, key_type result = {'blob': blob, 'type': key_type, 'fingerprint': fp} if key_type == SSH_NIST256_KEY_TYPE: curve_name = util.read_frame(s) log.debug('curve name: %s', curve_name) point = util.read_frame(s) assert s.read() == b'' _type, point = point[:1], point[1:] assert _type == SSH_NIST256_DER_OCTET size = len(point) // 2 assert len(point) == 2 * size coords = (util.bytes2num(point[:size]), util.bytes2num(point[size:])) curve = ecdsa.NIST256p point = ecdsa.ellipticcurve.Point(curve.curve, *coords) def ecdsa_verifier(sig, msg): assert len(sig) == 2 * size sig_decode = ecdsa.util.sigdecode_string vk = ecdsa.VerifyingKey.from_public_point(point, curve, hashfunc) vk.verify(signature=sig, data=msg, sigdecode=sig_decode) parts = [sig[:size], sig[size:]] return b''.join([util.frame(b'\x00' + p) for p in parts]) result.update(point=coords, curve=CURVE_NIST256, verifier=ecdsa_verifier) if key_type == SSH_ED25519_KEY_TYPE: pubkey = util.read_frame(s) assert s.read() == b'' def ed25519_verify(sig, msg): assert len(sig) == 64 vk = ed25519.VerifyingKey(pubkey) vk.verify(sig, msg) return sig result.update(curve=CURVE_ED25519, verifier=ed25519_verify) return result
[ "def", "parse_pubkey", "(", "blob", ")", ":", "fp", "=", "fingerprint", "(", "blob", ")", "s", "=", "io", ".", "BytesIO", "(", "blob", ")", "key_type", "=", "util", ".", "read_frame", "(", "s", ")", "log", ".", "debug", "(", "'key type: %s'", ",", "key_type", ")", "assert", "key_type", "in", "SUPPORTED_KEY_TYPES", ",", "key_type", "result", "=", "{", "'blob'", ":", "blob", ",", "'type'", ":", "key_type", ",", "'fingerprint'", ":", "fp", "}", "if", "key_type", "==", "SSH_NIST256_KEY_TYPE", ":", "curve_name", "=", "util", ".", "read_frame", "(", "s", ")", "log", ".", "debug", "(", "'curve name: %s'", ",", "curve_name", ")", "point", "=", "util", ".", "read_frame", "(", "s", ")", "assert", "s", ".", "read", "(", ")", "==", "b''", "_type", ",", "point", "=", "point", "[", ":", "1", "]", ",", "point", "[", "1", ":", "]", "assert", "_type", "==", "SSH_NIST256_DER_OCTET", "size", "=", "len", "(", "point", ")", "//", "2", "assert", "len", "(", "point", ")", "==", "2", "*", "size", "coords", "=", "(", "util", ".", "bytes2num", "(", "point", "[", ":", "size", "]", ")", ",", "util", ".", "bytes2num", "(", "point", "[", "size", ":", "]", ")", ")", "curve", "=", "ecdsa", ".", "NIST256p", "point", "=", "ecdsa", ".", "ellipticcurve", ".", "Point", "(", "curve", ".", "curve", ",", "*", "coords", ")", "def", "ecdsa_verifier", "(", "sig", ",", "msg", ")", ":", "assert", "len", "(", "sig", ")", "==", "2", "*", "size", "sig_decode", "=", "ecdsa", ".", "util", ".", "sigdecode_string", "vk", "=", "ecdsa", ".", "VerifyingKey", ".", "from_public_point", "(", "point", ",", "curve", ",", "hashfunc", ")", "vk", ".", "verify", "(", "signature", "=", "sig", ",", "data", "=", "msg", ",", "sigdecode", "=", "sig_decode", ")", "parts", "=", "[", "sig", "[", ":", "size", "]", ",", "sig", "[", "size", ":", "]", "]", "return", "b''", ".", "join", "(", "[", "util", ".", "frame", "(", "b'\\x00'", "+", "p", ")", "for", "p", "in", "parts", "]", ")", "result", ".", "update", "(", "point", "=", "coords", ",", "curve", "=", "CURVE_NIST256", ",", "verifier", "=", "ecdsa_verifier", ")", "if", "key_type", "==", "SSH_ED25519_KEY_TYPE", ":", "pubkey", "=", "util", ".", "read_frame", "(", "s", ")", "assert", "s", ".", "read", "(", ")", "==", "b''", "def", "ed25519_verify", "(", "sig", ",", "msg", ")", ":", "assert", "len", "(", "sig", ")", "==", "64", "vk", "=", "ed25519", ".", "VerifyingKey", "(", "pubkey", ")", "vk", ".", "verify", "(", "sig", ",", "msg", ")", "return", "sig", "result", ".", "update", "(", "curve", "=", "CURVE_ED25519", ",", "verifier", "=", "ed25519_verify", ")", "return", "result" ]
Parse SSH public key from given blob. Construct a verifier for ECDSA signatures. The verifier returns the signatures in the required SSH format. Currently, NIST256P1 and ED25519 elliptic curves are supported.
[ "Parse", "SSH", "public", "key", "from", "given", "blob", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/formats.py#L44-L97
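parse_pubkey walks the SSH wire format: a length-prefixed key-type string followed by curve-name and point frames (NIST P-256) or a single 32-byte key frame (Ed25519). The sketch below builds a toy Ed25519-shaped blob and reads the frames back with local helpers; frame() and read_frame() are stand-ins for util.frame/util.read_frame, and the zero-filled key is not a usable public key.

# Toy walk-through of the wire format parse_pubkey consumes.
import io
import struct

def frame(data):
    return struct.pack('>L', len(data)) + data

def read_frame(stream):
    size, = struct.unpack('>L', stream.read(4))
    return stream.read(size)

blob = frame(b'ssh-ed25519') + frame(b'\x00' * 32)
s = io.BytesIO(blob)
print(read_frame(s))           # b'ssh-ed25519'  -> the key_type check
print(len(read_frame(s)))      # 32              -> the raw public key bytes
assert s.read() == b''         # nothing left over, as parse_pubkey asserts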