idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
53,800 | def com_google_fonts_check_metadata_nameid_font_name ( ttFont , style , font_metadata ) : from fontbakery . utils import get_name_entry_strings from fontbakery . constants import RIBBI_STYLE_NAMES if style in RIBBI_STYLE_NAMES : font_familynames = get_name_entry_strings ( ttFont , NameID . FONT_FAMILY_NAME ) nameid = NameID . FONT_FAMILY_NAME else : font_familynames = get_name_entry_strings ( ttFont , NameID . TYPOGRAPHIC_FAMILY_NAME ) nameid = NameID . TYPOGRAPHIC_FAMILY_NAME if len ( font_familynames ) == 0 : yield FAIL , Message ( "lacks-entry" , ( f"This font lacks a {NameID(nameid).name} entry" f" (nameID={nameid}) in the name table." ) ) else : for font_familyname in font_familynames : if font_familyname != font_metadata . name : yield FAIL , Message ( "mismatch" , ( "Unmatched familyname in font:" " TTF has \"{}\" while METADATA.pb has" " name=\"{}\"." ) . format ( font_familyname , font_metadata . name ) ) else : yield PASS , ( "OK: Family name \"{}\" is identical" " in METADATA.pb and on the" " TTF file." ) . format ( font_metadata . name ) | METADATA . pb font . name value should be same as the family name declared on the name table . |
53,801 | def com_google_fonts_check_metadata_match_fullname_postscript ( font_metadata ) : import re regex = re . compile ( r"\W" ) post_script_name = regex . sub ( "" , font_metadata . post_script_name ) fullname = regex . sub ( "" , font_metadata . full_name ) if fullname != post_script_name : yield FAIL , ( "METADATA.pb font full_name=\"{}\"" " does not match post_script_name =" " \"{}\"" ) . format ( font_metadata . full_name , font_metadata . post_script_name ) else : yield PASS , ( "METADATA.pb font fields \"full_name\" and" " \"post_script_name\" have equivalent values." ) | METADATA . pb font . full_name and font . post_script_name fields have equivalent values ? |
53,802 | def com_google_fonts_check_metadata_match_filename_postscript ( font_metadata ) : post_script_name = font_metadata . post_script_name filename = os . path . splitext ( font_metadata . filename ) [ 0 ] if filename != post_script_name : yield FAIL , ( "METADATA.pb font filename=\"{}\" does not match" " post_script_name=\"{}\"." "" ) . format ( font_metadata . filename , font_metadata . post_script_name ) else : yield PASS , ( "METADATA.pb font fields \"filename\" and" " \"post_script_name\" have equivalent values." ) | METADATA . pb font . filename and font . post_script_name fields have equivalent values? |
53,803 | def com_google_fonts_check_metadata_valid_name_values ( style , font_metadata , font_familynames , typographic_familynames ) : from fontbakery . constants import RIBBI_STYLE_NAMES if style in RIBBI_STYLE_NAMES : familynames = font_familynames else : familynames = typographic_familynames failed = False for font_familyname in familynames : if font_familyname not in font_metadata . name : failed = True yield FAIL , ( "METADATA.pb font.name field (\"{}\")" " does not match correct font name format (\"{}\")." "" ) . format ( font_metadata . name , font_familyname ) if not failed : yield PASS , ( "METADATA.pb font.name field contains" " font name in right format." ) | METADATA . pb font . name field contains font name in right format? |
53,804 | def com_google_fonts_check_metadata_valid_full_name_values ( style , font_metadata , font_familynames , typographic_familynames ) : from fontbakery . constants import RIBBI_STYLE_NAMES if style in RIBBI_STYLE_NAMES : familynames = font_familynames if familynames == [ ] : yield SKIP , "No FONT_FAMILYNAME" else : familynames = typographic_familynames if familynames == [ ] : yield SKIP , "No TYPOGRAPHIC_FAMILYNAME" for font_familyname in familynames : if font_familyname in font_metadata . full_name : yield PASS , ( "METADATA.pb font.full_name field contains" " font name in right format." " ('{}' in '{}')" ) . format ( font_familyname , font_metadata . full_name ) else : yield FAIL , ( "METADATA.pb font.full_name field (\"{}\")" " does not match correct font name format (\"{}\")." "" ) . format ( font_metadata . full_name , font_familyname ) | METADATA . pb font . full_name field contains font name in right format? |
53,805 | def com_google_fonts_check_metadata_valid_filename_values ( font , family_metadata ) : expected = os . path . basename ( font ) failed = True for font_metadata in family_metadata . fonts : if font_metadata . filename == expected : failed = False yield PASS , ( "METADATA.pb filename field contains" " font name in right format." ) break if failed : yield FAIL , ( "None of the METADATA.pb filename fields match" f" correct font name format (\"{expected}\")." ) | METADATA . pb font . filename field contains font name in right format? |
53,806 | def com_google_fonts_check_metadata_valid_post_script_name_values ( font_metadata , font_familynames ) : for font_familyname in font_familynames : psname = "" . join ( str ( font_familyname ) . split ( ) ) if psname in "" . join ( font_metadata . post_script_name . split ( "-" ) ) : yield PASS , ( "METADATA.pb postScriptName field" " contains font name in right format." ) else : yield FAIL , ( "METADATA.pb postScriptName (\"{}\")" " does not match correct font name format (\"{}\")." "" ) . format ( font_metadata . post_script_name , font_familyname ) | METADATA . pb font . post_script_name field contains font name in right format? |
53,807 | def com_google_fonts_check_metadata_valid_copyright ( font_metadata ) : import re string = font_metadata . copyright does_match = re . search ( r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)' , string ) if does_match : yield PASS , "METADATA.pb copyright string is good" else : yield FAIL , ( "METADATA.pb: Copyright notices should match" " a pattern similar to:" " 'Copyright 2017 The Familyname" " Project Authors (git url)'\n" "But instead we have got:" " '{}'" ) . format ( string ) | Copyright notices match canonical pattern in METADATA . pb |
53,808 | def com_google_fonts_check_font_copyright ( ttFont ) : import re from fontbakery . utils import get_name_entry_strings failed = False for string in get_name_entry_strings ( ttFont , NameID . COPYRIGHT_NOTICE ) : does_match = re . search ( r'Copyright [0-9]{4} The .* Project Authors \([^\@]*\)' , string ) if does_match : yield PASS , ( "Name Table entry: Copyright field '{}'" " matches canonical pattern." ) . format ( string ) else : failed = True yield FAIL , ( "Name Table entry: Copyright notices should match" " a pattern similar to:" " 'Copyright 2017 The Familyname" " Project Authors (git url)'\n" "But instead we have got:" " '{}'" ) . format ( string ) if not failed : yield PASS , "Name table copyright entries are good" | Copyright notices match canonical pattern in fonts |
53,809 | def com_google_fonts_check_metadata_italic_style ( ttFont , font_metadata ) : from fontbakery . utils import get_name_entry_strings from fontbakery . constants import MacStyle if font_metadata . style != "italic" : yield SKIP , "This check only applies to italic fonts." else : font_fullname = get_name_entry_strings ( ttFont , NameID . FULL_FONT_NAME ) if len ( font_fullname ) == 0 : yield SKIP , "Font lacks fullname entries in name table." else : font_fullname = font_fullname [ 0 ] if not bool ( ttFont [ "head" ] . macStyle & MacStyle . ITALIC ) : yield FAIL , Message ( "bad-macstyle" , "METADATA.pb style has been set to italic" " but font macStyle is improperly set." ) elif not font_fullname . split ( "-" ) [ - 1 ] . endswith ( "Italic" ) : yield FAIL , Message ( "bad-fullfont-name" , ( "Font macStyle Italic bit is set" " but nameID {} (\"{}\") is not ended with" " \"Italic\"" ) . format ( NameID . FULL_FONT_NAME , font_fullname ) ) else : yield PASS , ( "OK: METADATA.pb font.style \"italic\"" " matches font internals." ) | METADATA . pb font . style italic matches font internals? |
53,810 | def com_google_fonts_check_metadata_normal_style ( ttFont , font_metadata ) : from fontbakery . utils import get_name_entry_strings from fontbakery . constants import MacStyle if font_metadata . style != "normal" : yield SKIP , "This check only applies to normal fonts." else : font_familyname = get_name_entry_strings ( ttFont , NameID . FONT_FAMILY_NAME ) font_fullname = get_name_entry_strings ( ttFont , NameID . FULL_FONT_NAME ) if len ( font_familyname ) == 0 or len ( font_fullname ) == 0 : yield SKIP , ( "Font lacks familyname and/or" " fullname entries in name table." ) else : font_familyname = font_familyname [ 0 ] font_fullname = font_fullname [ 0 ] if bool ( ttFont [ "head" ] . macStyle & MacStyle . ITALIC ) : yield FAIL , Message ( "bad-macstyle" , ( "METADATA.pb style has been set to normal" " but font macStyle is improperly set." ) ) elif font_familyname . split ( "-" ) [ - 1 ] . endswith ( 'Italic' ) : yield FAIL , Message ( "familyname-italic" , ( "Font macStyle indicates a non-Italic font, but" " nameID {} (FONT_FAMILY_NAME: \"{}\") ends with" " \"Italic\"." ) . format ( NameID . FONT_FAMILY_NAME , font_familyname ) ) elif font_fullname . split ( "-" ) [ - 1 ] . endswith ( "Italic" ) : yield FAIL , Message ( "fullfont-italic" , ( "Font macStyle indicates a non-Italic font but" " nameID {} (FULL_FONT_NAME: \"{}\") ends with" " \"Italic\"." ) . format ( NameID . FULL_FONT_NAME , font_fullname ) ) else : yield PASS , ( "METADATA.pb font.style \"normal\"" " matches font internals." ) | METADATA . pb font . style normal matches font internals? |
53,811 | def com_google_fonts_check_metadata_nameid_family_and_full_names ( ttFont , font_metadata ) : from fontbakery . utils import get_name_entry_strings font_familynames = get_name_entry_strings ( ttFont , NameID . TYPOGRAPHIC_FAMILY_NAME ) if font_familynames : font_familyname = font_familynames [ 0 ] else : font_familyname = get_name_entry_strings ( ttFont , NameID . FONT_FAMILY_NAME ) [ 0 ] font_fullname = get_name_entry_strings ( ttFont , NameID . FULL_FONT_NAME ) [ 0 ] if font_fullname != font_metadata . full_name : yield FAIL , Message ( "fullname-mismatch" , ( "METADATA.pb: Fullname (\"{}\")" " does not match name table" " entry \"{}\" !" ) . format ( font_metadata . full_name , font_fullname ) ) elif font_familyname != font_metadata . name : yield FAIL , Message ( "familyname-mismatch" , ( "METADATA.pb Family name \"{}\")" " does not match name table" " entry \"{}\" !" ) . format ( font_metadata . name , font_familyname ) ) else : yield PASS , ( "METADATA.pb familyname and fullName fields" " match corresponding name table entries." ) | METADATA . pb font . name and font . full_name fields match the values declared on the name table? |
53,812 | def com_google_fonts_check_metadata_match_weight_postscript ( font_metadata ) : WEIGHTS = { "Thin" : 100 , "ThinItalic" : 100 , "ExtraLight" : 200 , "ExtraLightItalic" : 200 , "Light" : 300 , "LightItalic" : 300 , "Regular" : 400 , "Italic" : 400 , "Medium" : 500 , "MediumItalic" : 500 , "SemiBold" : 600 , "SemiBoldItalic" : 600 , "Bold" : 700 , "BoldItalic" : 700 , "ExtraBold" : 800 , "ExtraBoldItalic" : 800 , "Black" : 900 , "BlackItalic" : 900 } pair = [ ] for k , weight in WEIGHTS . items ( ) : if weight == font_metadata . weight : pair . append ( ( k , weight ) ) if not pair : yield FAIL , ( "METADATA.pb: Font weight value ({})" " is invalid." ) . format ( font_metadata . weight ) elif not ( font_metadata . post_script_name . endswith ( '-' + pair [ 0 ] [ 0 ] ) or font_metadata . post_script_name . endswith ( '-' + pair [ 1 ] [ 0 ] ) ) : yield FAIL , ( "METADATA.pb: Mismatch between postScriptName (\"{}\")" " and weight value ({}). The name must be" " ended with \"{}\" or \"{}\"." "" ) . format ( font_metadata . post_script_name , pair [ 0 ] [ 1 ] , pair [ 0 ] [ 0 ] , pair [ 1 ] [ 0 ] ) else : yield PASS , "Weight value matches postScriptName." | METADATA . pb weight matches postScriptName . |
53,813 | def com_google_fonts_check_unitsperem_strict ( ttFont ) : upm_height = ttFont [ "head" ] . unitsPerEm ACCEPTABLE = [ 16 , 32 , 64 , 128 , 256 , 500 , 512 , 1000 , 1024 , 2000 , 2048 ] if upm_height not in ACCEPTABLE : yield FAIL , ( f"Font em size (unitsPerEm) is {upm_height}." " If possible, please consider using 1000" " or even 2000 (which is ideal for" " Variable Fonts)." " The acceptable values for unitsPerEm," f" though, are: {ACCEPTABLE}." ) elif upm_height != 2000 : yield WARN , ( f"Even though unitsPerEm ({upm_height}) in" " this font is reasonable. It is strongly" " advised to consider changing it to 2000," " since it will likely improve the quality of" " Variable Fonts by avoiding excessive" " rounding of coordinates on interpolations." ) else : yield PASS , "Font em size is good (unitsPerEm = 2000)." | Stricter unitsPerEm criteria for Google Fonts . |
53,814 | def remote_styles ( family_metadata ) : def download_family_from_Google_Fonts ( family_name ) : from zipfile import ZipFile from fontbakery . utils import download_file url_prefix = 'https://fonts.google.com/download?family=' url = '{}{}' . format ( url_prefix , family_name . replace ( ' ' , '+' ) ) return ZipFile ( download_file ( url ) ) def fonts_from_zip ( zipfile ) : from fontTools . ttLib import TTFont from io import BytesIO fonts = [ ] for file_name in zipfile . namelist ( ) : if file_name . lower ( ) . endswith ( ".ttf" ) : file_obj = BytesIO ( zipfile . open ( file_name ) . read ( ) ) fonts . append ( [ file_name , TTFont ( file_obj ) ] ) return fonts if ( not listed_on_gfonts_api ( family_metadata ) or not family_metadata ) : return None remote_fonts_zip = download_family_from_Google_Fonts ( family_metadata . name ) rstyles = { } for remote_filename , remote_font in fonts_from_zip ( remote_fonts_zip ) : remote_style = os . path . splitext ( remote_filename ) [ 0 ] if '-' in remote_style : remote_style = remote_style . split ( '-' ) [ 1 ] rstyles [ remote_style ] = remote_font return rstyles | Get a dictionary of TTFont objects of all font files of a given family as currently hosted at Google Fonts . |
53,815 | def github_gfonts_ttFont ( ttFont , license ) : if not license : return from fontbakery . utils import download_file from fontTools . ttLib import TTFont from urllib . request import HTTPError LICENSE_DIRECTORY = { "OFL.txt" : "ofl" , "UFL.txt" : "ufl" , "LICENSE.txt" : "apache" } filename = os . path . basename ( ttFont . reader . file . name ) fontname = filename . split ( '-' ) [ 0 ] . lower ( ) url = ( "https://github.com/google/fonts/raw/master" "/{}/{}/{}" ) . format ( LICENSE_DIRECTORY [ license ] , fontname , filename ) try : fontfile = download_file ( url ) return TTFont ( fontfile ) except HTTPError : return None | Get a TTFont object of a font downloaded from Google Fonts git repository . |
53,816 | def com_google_fonts_check_version_bump ( ttFont , api_gfonts_ttFont , github_gfonts_ttFont ) : v_number = ttFont [ "head" ] . fontRevision api_gfonts_v_number = api_gfonts_ttFont [ "head" ] . fontRevision github_gfonts_v_number = github_gfonts_ttFont [ "head" ] . fontRevision failed = False if v_number == api_gfonts_v_number : failed = True yield FAIL , ( "Version number {} is equal to" " version on Google Fonts." ) . format ( v_number ) if v_number < api_gfonts_v_number : failed = True yield FAIL , ( "Version number {} is less than" " version on Google Fonts ({})." "" ) . format ( v_number , api_gfonts_v_number ) if v_number == github_gfonts_v_number : failed = True yield FAIL , ( "Version number {} is equal to" " version on Google Fonts GitHub repo." "" ) . format ( v_number ) if v_number < github_gfonts_v_number : failed = True yield FAIL , ( "Version number {} is less than" " version on Google Fonts GitHub repo ({})." "" ) . format ( v_number , github_gfonts_v_number ) if not failed : yield PASS , ( "Version number {} is greater than" " version on Google Fonts GitHub ({})" " and production servers ({})." "" ) . format ( v_number , github_gfonts_v_number , api_gfonts_v_number ) | Version number has increased since previous release on Google Fonts? |
53,817 | def com_google_fonts_check_production_glyphs_similarity ( ttFont , api_gfonts_ttFont ) : def glyphs_surface_area ( ttFont ) : from fontTools . pens . areaPen import AreaPen glyphs = { } glyph_set = ttFont . getGlyphSet ( ) area_pen = AreaPen ( glyph_set ) for glyph in glyph_set . keys ( ) : glyph_set [ glyph ] . draw ( area_pen ) area = area_pen . value area_pen . value = 0 glyphs [ glyph ] = area return glyphs bad_glyphs = [ ] these_glyphs = glyphs_surface_area ( ttFont ) gfonts_glyphs = glyphs_surface_area ( api_gfonts_ttFont ) shared_glyphs = set ( these_glyphs ) & set ( gfonts_glyphs ) this_upm = ttFont [ 'head' ] . unitsPerEm gfonts_upm = api_gfonts_ttFont [ 'head' ] . unitsPerEm for glyph in shared_glyphs : this_glyph_area = ( these_glyphs [ glyph ] / this_upm ) * gfonts_upm gfont_glyph_area = ( gfonts_glyphs [ glyph ] / gfonts_upm ) * this_upm if abs ( this_glyph_area - gfont_glyph_area ) > 7000 : bad_glyphs . append ( glyph ) if bad_glyphs : yield WARN , ( "Following glyphs differ greatly from" " Google Fonts version: [{}]" ) . format ( ", " . join ( bad_glyphs ) ) else : yield PASS , ( "Glyphs are similar in" " comparison to the Google Fonts version." ) | Glyphs are similiar to Google Fonts version? |
53,818 | def com_google_fonts_check_italic_angle ( ttFont , style ) : failed = False value = ttFont [ "post" ] . italicAngle if value > 0 : failed = True yield FAIL , Message ( "positive" , ( "The value of post.italicAngle is positive, which" " is likely a mistake and should become negative," " from {} to {}." ) . format ( value , - value ) ) if abs ( value ) > 30 : failed = True yield FAIL , Message ( "over -30 degrees" , ( "The value of post.italicAngle ({}) is very" " high (over -30°!) and should be" " confirmed." ) . format ( value ) ) elif abs ( value ) > 20 : failed = True yield WARN , Message ( "over -20 degrees" , ( "The value of post.italicAngle ({}) seems very" " high (over -20°!) and should be" " confirmed." ) . format ( value ) ) if "Italic" in style : if ttFont [ 'post' ] . italicAngle == 0 : failed = True yield FAIL , Message ( "zero-italic" , ( "Font is italic, so post.italicAngle" " should be non-zero." ) ) else : if ttFont [ "post" ] . italicAngle != 0 : failed = True yield FAIL , Message ( "non-zero-normal" , ( "Font is not italic, so post.italicAngle" " should be equal to zero." ) ) if not failed : yield PASS , ( "Value of post.italicAngle is {}" " with style='{}'." ) . format ( value , style ) | Checking post . italicAngle value . |
53,819 | def com_google_fonts_check_mac_style ( ttFont , style ) : from fontbakery . utils import check_bit_entry from fontbakery . constants import MacStyle expected = "Italic" in style yield check_bit_entry ( ttFont , "head" , "macStyle" , expected , bitmask = MacStyle . ITALIC , bitname = "ITALIC" ) expected = style in [ "Bold" , "BoldItalic" ] yield check_bit_entry ( ttFont , "head" , "macStyle" , expected , bitmask = MacStyle . BOLD , bitname = "BOLD" ) | Checking head . macStyle value . |
53,820 | def com_google_fonts_check_contour_count ( ttFont ) : from fontbakery . glyphdata import desired_glyph_data as glyph_data from fontbakery . utils import ( get_font_glyph_data , pretty_print_list ) desired_glyph_data = { } for glyph in glyph_data : desired_glyph_data [ glyph [ 'unicode' ] ] = glyph bad_glyphs = [ ] desired_glyph_contours = { f : desired_glyph_data [ f ] [ 'contours' ] for f in desired_glyph_data } font_glyph_data = get_font_glyph_data ( ttFont ) if font_glyph_data is None : yield FAIL , "This font lacks cmap data." else : font_glyph_contours = { f [ 'unicode' ] : list ( f [ 'contours' ] ) [ 0 ] for f in font_glyph_data } shared_glyphs = set ( desired_glyph_contours ) & set ( font_glyph_contours ) for glyph in shared_glyphs : if font_glyph_contours [ glyph ] not in desired_glyph_contours [ glyph ] : bad_glyphs . append ( [ glyph , font_glyph_contours [ glyph ] , desired_glyph_contours [ glyph ] ] ) if len ( bad_glyphs ) > 0 : cmap = ttFont [ 'cmap' ] . getcmap ( PlatformID . WINDOWS , WindowsEncodingID . UNICODE_BMP ) . cmap bad_glyphs_name = [ ( "Glyph name: {}\t" "Contours detected: {}\t" "Expected: {}" ) . format ( cmap [ name ] , count , pretty_print_list ( expected , shorten = None , glue = "or" ) ) for name , count , expected in bad_glyphs ] yield WARN , ( ( "This check inspects the glyph outlines and detects the" " total number of contours in each of them. The expected" " values are infered from the typical ammounts of" " contours observed in a large collection of reference" " font families. The divergences listed below may simply" " indicate a significantly different design on some of" " your glyphs. On the other hand, some of these may flag" " actual bugs in the font such as glyphs mapped to an" " incorrect codepoint. Please consider reviewing" " the design and codepoint assignment of these to make" " sure they are correct.\n" "\n" "The following glyphs do not have the recommended" " number of contours:\n" "\n{}" ) . format ( '\n' . join ( bad_glyphs_name ) ) ) else : yield PASS , "All glyphs have the recommended amount of contours" | Check if each glyph has the recommended amount of contours . |
53,821 | def com_google_fonts_check_metadata_nameid_copyright ( ttFont , font_metadata ) : failed = False for nameRecord in ttFont [ 'name' ] . names : string = nameRecord . string . decode ( nameRecord . getEncoding ( ) ) if nameRecord . nameID == NameID . COPYRIGHT_NOTICE and string != font_metadata . copyright : failed = True yield FAIL , ( "Copyright field for this font on METADATA.pb ('{}')" " differs from a copyright notice entry" " on the name table:" " '{}'" ) . format ( font_metadata . copyright , string ) if not failed : yield PASS , ( "Copyright field for this font on METADATA.pb matches" " copyright notice entries on the name table." ) | Copyright field for this font on METADATA . pb matches all copyright notice entries on the name table ? |
53,822 | def com_google_fonts_check_name_mandatory_entries ( ttFont , style ) : from fontbakery . utils import get_name_entry_strings from fontbakery . constants import RIBBI_STYLE_NAMES required_nameIDs = [ NameID . FONT_FAMILY_NAME , NameID . FONT_SUBFAMILY_NAME , NameID . FULL_FONT_NAME , NameID . POSTSCRIPT_NAME ] if style not in RIBBI_STYLE_NAMES : required_nameIDs += [ NameID . TYPOGRAPHIC_FAMILY_NAME , NameID . TYPOGRAPHIC_SUBFAMILY_NAME ] failed = False for nameId in required_nameIDs : if len ( get_name_entry_strings ( ttFont , nameId ) ) == 0 : failed = True yield FAIL , ( f"Font lacks entry with nameId={nameId}" f" ({NameID(nameId).name})" ) if not failed : yield PASS , "Font contains values for all mandatory name table entries." | Font has all mandatory name table entries ? |
53,823 | def com_google_fonts_check_name_copyright_length ( ttFont ) : from fontbakery . utils import get_name_entries failed = False for notice in get_name_entries ( ttFont , NameID . COPYRIGHT_NOTICE ) : notice_str = notice . string . decode ( notice . getEncoding ( ) ) if len ( notice_str ) > 500 : failed = True yield FAIL , ( "The length of the following copyright notice ({})" " exceeds 500 chars: '{}'" "" ) . format ( len ( notice_str ) , notice_str ) if not failed : yield PASS , ( "All copyright notice name entries on the" " 'name' table are shorter than 500 characters." ) | Length of copyright notice must not exceed 500 characters . |
53,824 | def com_google_fonts_check_fontv ( ttFont ) : from fontv . libfv import FontVersion fv = FontVersion ( ttFont ) if fv . version and ( fv . is_development or fv . is_release ) : yield PASS , "Font version string looks GREAT!" else : yield INFO , ( "Version string is: \"{}\"\n" "The version string must ideally include a git commit hash" " and either a 'dev' or a 'release' suffix such as in the" " example below:\n" "\"Version 1.3; git-0d08353-release\"" "" ) . format ( fv . get_name_id5_version_string ( ) ) | Check for font - v versioning |
53,825 | def com_google_fonts_check_negative_advance_width ( ttFont ) : failed = False for glyphName in ttFont [ "glyf" ] . glyphs : coords = ttFont [ "glyf" ] [ glyphName ] . coordinates rightX = coords [ - 3 ] [ 0 ] leftX = coords [ - 4 ] [ 0 ] advwidth = rightX - leftX if advwidth < 0 : failed = True yield FAIL , ( "glyph '{}' has bad coordinates on the glyf table," " which may lead to the advance width to be" " interpreted as a negative" " value ({})." ) . format ( glyphName , advwidth ) if not failed : yield PASS , "The x-coordinates of all glyphs look good." | Check that advance widths cannot be inferred as negative . |
53,826 | def com_google_fonts_check_varfont_generate_static ( ttFont ) : import tempfile from fontTools . varLib import mutator try : loc = { k . axisTag : float ( ( k . maxValue + k . minValue ) / 2 ) for k in ttFont [ 'fvar' ] . axes } with tempfile . TemporaryFile ( ) as instance : font = mutator . instantiateVariableFont ( ttFont , loc ) font . save ( instance ) yield PASS , ( "fontTools.varLib.mutator generated a static font " "instance" ) except Exception as e : yield FAIL , ( "fontTools.varLib.mutator failed to generated a static font " "instance\n{}" . format ( repr ( e ) ) ) | Check a static ttf can be generated from a variable font . |
53,827 | def com_google_fonts_check_smart_dropout ( ttFont ) : INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d" if ( "prep" in ttFont and INSTRUCTIONS in ttFont [ "prep" ] . program . getBytecode ( ) ) : yield PASS , ( "'prep' table contains instructions" " enabling smart dropout control." ) else : yield FAIL , ( "'prep' table does not contain TrueType " " instructions enabling smart dropout control." " To fix, export the font with autohinting enabled," " or run ttfautohint on the font, or run the " " `gftools fix-nonhinting` script." ) | Font enables smart dropout control in prep table instructions? |
53,828 | def com_google_fonts_check_aat ( ttFont ) : UNWANTED_TABLES = { 'EBSC' , 'Zaph' , 'acnt' , 'ankr' , 'bdat' , 'bhed' , 'bloc' , 'bmap' , 'bsln' , 'fdsc' , 'feat' , 'fond' , 'gcid' , 'just' , 'kerx' , 'lcar' , 'ltag' , 'mort' , 'morx' , 'opbd' , 'prop' , 'trak' , 'xref' } unwanted_tables_found = [ ] for table in ttFont . keys ( ) : if table in UNWANTED_TABLES : unwanted_tables_found . append ( table ) if len ( unwanted_tables_found ) > 0 : yield FAIL , ( "Unwanted AAT tables were found" " in the font and should be removed, either by" " fonttools/ttx or by editing them using the tool" " they built with:" " {}" ) . format ( ", " . join ( unwanted_tables_found ) ) else : yield PASS , "There are no unwanted AAT tables." | Are there unwanted Apple tables? |
53,829 | def com_google_fonts_check_fvar_name_entries ( ttFont ) : failed = False for instance in ttFont [ "fvar" ] . instances : entries = [ entry for entry in ttFont [ "name" ] . names if entry . nameID == instance . subfamilyNameID ] if len ( entries ) == 0 : failed = True yield FAIL , ( f"Named instance with coordinates {instance.coordinates}" f" lacks an entry on the name table (nameID={instance.subfamilyNameID})." ) if not failed : yield PASS , "OK" | All name entries referenced by fvar instances exist on the name table? |
53,830 | def com_google_fonts_check_varfont_weight_instances ( ttFont ) : failed = False for instance in ttFont [ "fvar" ] . instances : if 'wght' in instance . coordinates and instance . coordinates [ 'wght' ] % 100 != 0 : failed = True yield FAIL , ( "Found an variable font instance with" f" 'wght'={instance.coordinates['wght']}." " This should instead be a multiple of 100." ) if not failed : yield PASS , "OK" | Variable font weight coordinates must be multiples of 100 . |
53,831 | def com_google_fonts_check_family_tnum_horizontal_metrics ( fonts ) : from fontbakery . constants import RIBBI_STYLE_NAMES from fontTools . ttLib import TTFont RIBBI_ttFonts = [ TTFont ( f ) for f in fonts if style ( f ) in RIBBI_STYLE_NAMES ] tnum_widths = { } for ttFont in RIBBI_ttFonts : glyphs = ttFont . getGlyphSet ( ) tnum_glyphs = [ ( glyph_id , glyphs [ glyph_id ] ) for glyph_id in glyphs . keys ( ) if glyph_id . endswith ( ".tnum" ) ] for glyph_id , glyph in tnum_glyphs : if glyph . width not in tnum_widths : tnum_widths [ glyph . width ] = [ glyph_id ] else : tnum_widths [ glyph . width ] . append ( glyph_id ) if len ( tnum_widths . keys ( ) ) > 1 : max_num = 0 most_common_width = None for width , glyphs in tnum_widths . items ( ) : if len ( glyphs ) > max_num : max_num = len ( glyphs ) most_common_width = width del tnum_widths [ most_common_width ] yield FAIL , ( f"The most common tabular glyph width is {most_common_width}." " But there are other tabular glyphs with different widths" f" such as the following ones:\n\t{tnum_widths}." ) else : yield PASS , "OK" | All tabular figures must have the same width across the RIBBI - family . |
53,832 | def com_google_fonts_check_ligature_carets ( ttFont , ligature_glyphs ) : if ligature_glyphs == - 1 : yield FAIL , Message ( "malformed" , "Failed to lookup ligatures." " This font file seems to be malformed." " For more info, read:" " https://github.com" "/googlefonts/fontbakery/issues/1596" ) elif "GDEF" not in ttFont : yield WARN , Message ( "GDEF-missing" , ( "GDEF table is missing, but it is mandatory" " to declare it on fonts that provide ligature" " glyphs because the caret (text cursor)" " positioning for each ligature must be" " provided in this table." ) ) else : lig_caret_list = ttFont [ "GDEF" ] . table . LigCaretList if lig_caret_list is None : missing = set ( ligature_glyphs ) else : missing = set ( ligature_glyphs ) - set ( lig_caret_list . Coverage . glyphs ) if lig_caret_list is None or lig_caret_list . LigGlyphCount == 0 : yield WARN , Message ( "lacks-caret-pos" , ( "This font lacks caret position values for" " ligature glyphs on its GDEF table." ) ) elif missing : missing = "\n\t- " . join ( missing ) yield WARN , Message ( "incomplete-caret-pos-data" , ( "This font lacks caret positioning" " values for these ligature glyphs:" f"\n\t- {missing}\n\n " ) ) else : yield PASS , "Looks good!" | Are there caret positions declared for every ligature? |
53,833 | def com_google_fonts_check_kerning_for_non_ligated_sequences ( ttFont , ligatures , has_kerning_info ) : def look_for_nonligated_kern_info ( table ) : for pairpos in table . SubTable : for i , glyph in enumerate ( pairpos . Coverage . glyphs ) : if not hasattr ( pairpos , 'PairSet' ) : continue for pairvalue in pairpos . PairSet [ i ] . PairValueRecord : kern_pair = ( glyph , pairvalue . SecondGlyph ) if kern_pair in ligature_pairs : ligature_pairs . remove ( kern_pair ) def ligatures_str ( pairs ) : result = [ f"\t- {first} + {second}" for first , second in pairs ] return "\n" . join ( result ) if ligatures == - 1 : yield FAIL , Message ( "malformed" , "Failed to lookup ligatures." " This font file seems to be malformed." " For more info, read:" " https://github.com" "/googlefonts/fontbakery/issues/1596" ) else : ligature_pairs = [ ] for first , comp in ligatures . items ( ) : for components in comp : while components : pair = ( first , components [ 0 ] ) if pair not in ligature_pairs : ligature_pairs . append ( pair ) first = components [ 0 ] components . pop ( 0 ) for record in ttFont [ "GSUB" ] . table . FeatureList . FeatureRecord : if record . FeatureTag == 'kern' : for index in record . Feature . LookupListIndex : lookup = ttFont [ "GSUB" ] . table . LookupList . Lookup [ index ] look_for_nonligated_kern_info ( lookup ) if ligature_pairs : yield WARN , Message ( "lacks-kern-info" , ( "GPOS table lacks kerning info for the following" " non-ligated sequences:\n" "{}\n\n " ) . format ( ligatures_str ( ligature_pairs ) ) ) else : yield PASS , ( "GPOS table provides kerning info for " "all non-ligated sequences." ) | Is there kerning info for non - ligated sequences? |
53,834 | def com_google_fonts_check_name_family_and_style_max_length ( ttFont ) : from fontbakery . utils import ( get_name_entries , get_name_entry_strings ) failed = False for familyname in get_name_entries ( ttFont , NameID . FONT_FAMILY_NAME ) : plat = familyname . platformID familyname_str = familyname . string . decode ( familyname . getEncoding ( ) ) for stylename_str in get_name_entry_strings ( ttFont , NameID . FONT_SUBFAMILY_NAME , platformID = plat ) : if len ( familyname_str + stylename_str ) > 27 : failed = True yield WARN , ( "The combined length of family and style" " exceeds 27 chars in the following '{}' entries:" " FONT_FAMILY_NAME = '{}' / SUBFAMILY_NAME = '{}'" "" ) . format ( PlatformID ( plat ) . name , familyname_str , stylename_str ) yield WARN , ( "Please take a look at the conversation at" " https://github.com/googlefonts/fontbakery/issues/2179" " in order to understand the reasoning behing these" " name table records max-length criteria." ) if not failed : yield PASS , "All name entries are good." | Combined length of family and style must not exceed 27 characters . |
53,835 | def com_google_fonts_check_family_control_chars ( ttFonts ) : unacceptable_cc_list = [ "uni0001" , "uni0002" , "uni0003" , "uni0004" , "uni0005" , "uni0006" , "uni0007" , "uni0008" , "uni0009" , "uni000A" , "uni000B" , "uni000C" , "uni000E" , "uni000F" , "uni0010" , "uni0011" , "uni0012" , "uni0013" , "uni0014" , "uni0015" , "uni0016" , "uni0017" , "uni0018" , "uni0019" , "uni001A" , "uni001B" , "uni001C" , "uni001D" , "uni001E" , "uni001F" ] failed_font_dict = { } for ttFont in ttFonts : font_failed = False unacceptable_glyphs_in_set = [ ] glyph_name_set = set ( ttFont [ "glyf" ] . glyphs . keys ( ) ) fontname = ttFont . reader . file . name for unacceptable_glyph_name in unacceptable_cc_list : if unacceptable_glyph_name in glyph_name_set : font_failed = True unacceptable_glyphs_in_set . append ( unacceptable_glyph_name ) if font_failed : failed_font_dict [ fontname ] = unacceptable_glyphs_in_set if len ( failed_font_dict ) > 0 : unacceptable_cc_report_string = "The following unacceptable control characters were identified:\n" for fnt in failed_font_dict . keys ( ) : unacceptable_cc_report_string += " {}: {}\n" . format ( fnt , ", " . join ( failed_font_dict [ fnt ] ) ) yield FAIL , ( "{}" . format ( unacceptable_cc_report_string ) ) else : yield PASS , ( "Unacceptable control characters were not identified." ) | Does font file include unacceptable control character glyphs? |
53,836 | def gfonts_repo_structure ( fonts ) : from fontbakery . utils import get_absolute_path abspath = get_absolute_path ( fonts [ 0 ] ) return abspath . split ( os . path . sep ) [ - 3 ] in [ "ufl" , "ofl" , "apache" ] | The family at the given font path follows the files and directory structure typical of a font project hosted on the Google Fonts repo on GitHub ? |
53,837 | def com_google_fonts_check_repo_dirname_match_nameid_1 ( fonts , gfonts_repo_structure ) : from fontTools . ttLib import TTFont from fontbakery . utils import ( get_name_entry_strings , get_absolute_path , get_regular ) regular = get_regular ( fonts ) if not regular : yield FAIL , "The font seems to lack a regular." entry = get_name_entry_strings ( TTFont ( regular ) , NameID . FONT_FAMILY_NAME ) [ 0 ] expected = entry . lower ( ) expected = "" . join ( expected . split ( ' ' ) ) expected = "" . join ( expected . split ( '-' ) ) license , familypath , filename = get_absolute_path ( regular ) . split ( os . path . sep ) [ - 3 : ] if familypath == expected : yield PASS , "OK" else : yield FAIL , ( f"Family name on the name table ('{entry}') does not match" f" directory name in the repo structure ('{familypath}')." f" Expected '{expected}'." ) | Directory name in GFonts repo structure must match NameID 1 of the regular . |
53,838 | def com_google_fonts_check_family_panose_proportion ( ttFonts ) : failed = False proportion = None for ttFont in ttFonts : if proportion is None : proportion = ttFont [ 'OS/2' ] . panose . bProportion if proportion != ttFont [ 'OS/2' ] . panose . bProportion : failed = True if failed : yield FAIL , ( "PANOSE proportion is not" " the same accross this family." " In order to fix this," " please make sure that the panose.bProportion value" " is the same in the OS/2 table of all of this family" " font files." ) else : yield PASS , "Fonts have consistent PANOSE proportion." | Fonts have consistent PANOSE proportion? |
53,839 | def com_google_fonts_check_family_panose_familytype ( ttFonts ) : failed = False familytype = None for ttfont in ttFonts : if familytype is None : familytype = ttfont [ 'OS/2' ] . panose . bFamilyType if familytype != ttfont [ 'OS/2' ] . panose . bFamilyType : failed = True if failed : yield FAIL , ( "PANOSE family type is not" " the same accross this family." " In order to fix this," " please make sure that the panose.bFamilyType value" " is the same in the OS/2 table of all of this family" " font files." ) else : yield PASS , "Fonts have consistent PANOSE family type." | Fonts have consistent PANOSE family type? |
53,840 | def com_google_fonts_check_code_pages ( ttFont ) : if not hasattr ( ttFont [ 'OS/2' ] , "ulCodePageRange1" ) or not hasattr ( ttFont [ 'OS/2' ] , "ulCodePageRange2" ) or ( ttFont [ 'OS/2' ] . ulCodePageRange1 == 0 and ttFont [ 'OS/2' ] . ulCodePageRange2 == 0 ) : yield FAIL , ( "No code pages defined in the OS/2 table" " ulCodePageRage1 and CodePageRage2 fields." ) else : yield PASS , "At least one code page is defined." | Check code page character ranges |
53,841 | def com_google_fonts_check_glyf_unused_data ( ttFont ) : try : expected_glyphs = len ( ttFont . getGlyphOrder ( ) ) actual_glyphs = len ( ttFont [ 'glyf' ] . glyphs ) diff = actual_glyphs - expected_glyphs if diff < 0 : yield FAIL , Message ( "unreachable-data" , ( "Glyf table has unreachable data at the end of " " the table. Expected glyf table length {}" " (from loca table), got length" " {} (difference: {})" ) . format ( expected_glyphs , actual_glyphs , diff ) ) elif not diff : yield PASS , "There is no unused data at the end of the glyf table." else : raise Exception ( "Bug: fontTools did not raise an expected exception." ) except fontTools . ttLib . TTLibError as error : if "not enough 'glyf' table data" in format ( error ) : yield FAIL , Message ( "missing-data" , ( "Loca table references data beyond" " the end of the glyf table." " Expected glyf table length {}" " (from loca table)." ) . format ( expected_glyphs ) ) else : raise Exception ( "Bug: Unexpected fontTools exception." ) | Is there any unused data at the end of the glyf table? |
53,842 | def com_google_fonts_check_points_out_of_bounds ( ttFont ) : failed = False out_of_bounds = [ ] for glyphName in ttFont [ 'glyf' ] . keys ( ) : glyph = ttFont [ 'glyf' ] [ glyphName ] coords = glyph . getCoordinates ( ttFont [ 'glyf' ] ) [ 0 ] for x , y in coords : if x < glyph . xMin or x > glyph . xMax or y < glyph . yMin or y > glyph . yMax or abs ( x ) > 32766 or abs ( y ) > 32766 : failed = True out_of_bounds . append ( ( glyphName , x , y ) ) if failed : yield WARN , ( "The following glyphs have coordinates which are" " out of bounds:\n{}\nThis happens a lot when points" " are not extremes, which is usually bad. However," " fixing this alert by adding points on extremes may" " do more harm than good, especially with italics," " calligraphic-script, handwriting, rounded and" " other fonts. So it is common to" " ignore this message" . format ( out_of_bounds ) ) else : yield PASS , "All glyph paths have coordinates within bounds!" | Check for points out of bounds . |
53,843 | def com_daltonmaag_check_ufolint ( font ) : import subprocess ufolint_cmd = [ "ufolint" , font ] try : subprocess . check_output ( ufolint_cmd , stderr = subprocess . STDOUT ) except subprocess . CalledProcessError as e : yield FAIL , ( "ufolint failed the UFO source. Output follows :" "\n\n{}\n" ) . format ( e . output . decode ( ) ) except OSError : yield ERROR , "ufolint is not available!" else : yield PASS , "ufolint passed the UFO source." | Run ufolint on UFO source directory . |
53,844 | def com_daltonmaag_check_required_fields ( ufo_font ) : recommended_fields = [ ] for field in [ "unitsPerEm" , "ascender" , "descender" , "xHeight" , "capHeight" , "familyName" ] : if ufo_font . info . __dict__ . get ( "_" + field ) is None : recommended_fields . append ( field ) if recommended_fields : yield FAIL , f"Required field(s) missing: {recommended_fields}" else : yield PASS , "Required fields present." | Check that required fields are present in the UFO fontinfo . |
53,845 | def com_daltonmaag_check_recommended_fields ( ufo_font ) : recommended_fields = [ ] for field in [ "postscriptUnderlineThickness" , "postscriptUnderlinePosition" , "versionMajor" , "versionMinor" , "styleName" , "copyright" , "openTypeOS2Panose" ] : if ufo_font . info . __dict__ . get ( "_" + field ) is None : recommended_fields . append ( field ) if recommended_fields : yield WARN , f"Recommended field(s) missing: {recommended_fields}" else : yield PASS , "Recommended fields present." | Check that recommended fields are present in the UFO fontinfo . |
53,846 | def com_daltonmaag_check_unnecessary_fields ( ufo_font ) : unnecessary_fields = [ ] for field in [ "openTypeNameUniqueID" , "openTypeNameVersion" , "postscriptUniqueID" , "year" ] : if ufo_font . info . __dict__ . get ( "_" + field ) is not None : unnecessary_fields . append ( field ) if unnecessary_fields : yield WARN , f"Unnecessary field(s) present: {unnecessary_fields}" else : yield PASS , "Unnecessary fields omitted." | Check that no unnecessary fields are present in the UFO fontinfo . |
53,847 | def setup_argparse ( self , argument_parser ) : import glob import logging import argparse def get_fonts ( pattern ) : fonts_to_check = [ ] for fullpath in glob . glob ( pattern ) : fullpath_absolute = os . path . abspath ( fullpath ) if fullpath_absolute . lower ( ) . endswith ( ".ufo" ) and os . path . isdir ( fullpath_absolute ) : fonts_to_check . append ( fullpath ) else : logging . warning ( ( "Skipping '{}' as it does not seem " "to be valid UFO source directory." ) . format ( fullpath ) ) return fonts_to_check class MergeAction ( argparse . Action ) : def __call__ ( self , parser , namespace , values , option_string = None ) : target = [ item for l in values for item in l ] setattr ( namespace , self . dest , target ) argument_parser . add_argument ( 'fonts' , nargs = '*' , type = get_fonts , action = MergeAction , help = 'font file path(s) to check.' ' Wildcards like *.ufo are allowed.' ) return ( 'fonts' , ) | Set up custom arguments needed for this profile . |
53,848 | def com_google_fonts_check_whitespace_widths ( ttFont ) : from fontbakery . utils import get_glyph_name space_name = get_glyph_name ( ttFont , 0x0020 ) nbsp_name = get_glyph_name ( ttFont , 0x00A0 ) space_width = ttFont [ 'hmtx' ] [ space_name ] [ 0 ] nbsp_width = ttFont [ 'hmtx' ] [ nbsp_name ] [ 0 ] if space_width > 0 and space_width == nbsp_width : yield PASS , "Whitespace and non-breaking space have the same width." else : yield FAIL , ( "Whitespace and non-breaking space have differing width:" " Whitespace ({}) is {} font units wide, non-breaking space" " ({}) is {} font units wide. Both should be positive and the" " same." ) . format ( space_name , space_width , nbsp_name , nbsp_width ) | Whitespace and non - breaking space have the same width? |
53,849 | def update_by_config ( self , config_dict ) : policy_enabling_map = self . _get_enabling_map ( config_dict ) self . enabled_policies = [ ] for policy_name , is_policy_enabled in policy_enabling_map . items ( ) : if not self . _is_policy_exists ( policy_name ) : self . _warn_unexistent_policy ( policy_name ) continue if is_policy_enabled : enabled_policy = self . _get_policy ( policy_name ) self . enabled_policies . append ( enabled_policy ) | Update policies set by the config dictionary . |
53,850 | def _build_cmdargs ( argv ) : parser = _build_arg_parser ( ) namespace = parser . parse_args ( argv [ 1 : ] ) cmdargs = vars ( namespace ) return cmdargs | Build command line arguments dict to use ; - displaying usages - vint . linting . env . build_environment |
53,851 | def parse ( self , lint_target ) : decoder = Decoder ( default_decoding_strategy ) decoded = decoder . decode ( lint_target . read ( ) ) decoded_and_lf_normalized = decoded . replace ( '\r\n' , '\n' ) return self . parse_string ( decoded_and_lf_normalized ) | Parse vim script file and return the AST . |
53,852 | def parse_string ( self , string ) : lines = string . split ( '\n' ) reader = vimlparser . StringReader ( lines ) parser = vimlparser . VimLParser ( self . _enable_neovim ) ast = parser . parse ( reader ) ast [ 'pos' ] = { 'col' : 1 , 'i' : 0 , 'lnum' : 1 } for plugin in self . plugins : plugin . process ( ast ) return ast | Parse vim script string and return the AST . |
53,853 | def parse_string_expr ( self , string_expr_node ) : string_expr_node_value = string_expr_node [ 'value' ] string_expr_str = string_expr_node_value [ 1 : - 1 ] if string_expr_node_value [ 0 ] == "'" : string_expr_str = string_expr_str . replace ( "''" , "'" ) else : string_expr_str = string_expr_str . replace ( '\\"' , '"' ) raw_ast = self . parse_string ( 'echo ' + string_expr_str ) parsed_string_expr_nodes = raw_ast [ 'body' ] [ 0 ] [ 'list' ] start_pos = string_expr_node [ 'pos' ] def adjust_position ( node ) : pos = node [ 'pos' ] pos [ 'col' ] += start_pos [ 'col' ] - 1 - 5 pos [ 'i' ] += start_pos [ 'i' ] - 5 pos [ 'lnum' ] += start_pos [ 'lnum' ] - 1 for parsed_string_expr_node in parsed_string_expr_nodes : traverse ( parsed_string_expr_node , on_enter = adjust_position ) return parsed_string_expr_nodes | Parse a string node content . |
53,854 | def is_builtin_variable ( id_node ) : if NodeType ( id_node [ 'type' ] ) is not NodeType . IDENTIFIER : return False id_value = id_node [ 'value' ] if id_value . startswith ( 'v:' ) : return True if is_builtin_function ( id_node ) : return True if id_value in [ 'key' , 'val' ] : return is_on_lambda_string_context ( id_node ) return id_value in BuiltinVariablesCanHaveImplicitScope | Whether the specified node is a builtin identifier . |
53,855 | def is_builtin_function ( id_node ) : if NodeType ( id_node [ 'type' ] ) is not NodeType . IDENTIFIER : return False id_value = id_node [ 'value' ] if not is_function_identifier ( id_node ) : return False return id_value in BuiltinFunctions | Whether the specified node is a builtin function name identifier . The given identifier should be a child node of NodeType . CALL . |
53,856 | def attach_identifier_attributes ( self , ast ) : redir_assignment_parser = RedirAssignmentParser ( ) ast_with_parsed_redir = redir_assignment_parser . process ( ast ) map_and_filter_parser = CallNodeParser ( ) ast_with_parse_map_and_filter_and_redir = map_and_filter_parser . process ( ast_with_parsed_redir ) traverse ( ast_with_parse_map_and_filter_and_redir , on_enter = lambda node : self . _enter_handler ( node , is_on_lambda_str = None , is_on_lambda_body = None , ) ) return ast | Attach 5 flags to the AST . |
53,857 | def create_violation_report ( self , node , lint_context ) : return { 'name' : self . name , 'level' : self . level , 'description' : self . description , 'reference' : self . reference , 'position' : { 'line' : node [ 'pos' ] [ 'lnum' ] , 'column' : node [ 'pos' ] [ 'col' ] , 'path' : lint_context [ 'lint_target' ] . path , } , } | Returns a violation report for the node . |
53,858 | def get_policy_config ( self , lint_context ) : policy_config = lint_context [ 'config' ] . get ( 'policies' , { } ) . get ( self . __class__ . __name__ , { } ) return policy_config | Returns a config of the concrete policy . For example a config of ProhibitSomethingEvil is located on config . policies . ProhibitSomethingEvil . |
53,859 | def get_violation_if_found ( self , node , lint_context ) : if self . is_valid ( node , lint_context ) : return None return self . create_violation_report ( node , lint_context ) | Returns a violation if the node is invalid . |
53,860 | def import_all_policies ( ) : pkg_name = _get_policy_package_name_for_test ( ) pkg_path_list = pkg_name . split ( '.' ) pkg_path = str ( Path ( _get_vint_root ( ) , * pkg_path_list ) . resolve ( ) ) for _ , module_name , is_pkg in pkgutil . iter_modules ( [ pkg_path ] ) : if not is_pkg : module_fqn = pkg_name + '.' + module_name logging . debug ( 'Loading the policy module: `{fqn}`' . format ( fqn = module_fqn ) ) importlib . import_module ( module_fqn ) | Import all policies that were registered by vint . linting . policy_registry . |
53,861 | def process ( self , ast ) : id_classifier = IdentifierClassifier ( ) attached_ast = id_classifier . attach_identifier_attributes ( ast ) self . _scope_tree_builder . enter_new_scope ( ScopeVisibility . SCRIPT_LOCAL ) traverse ( attached_ast , on_enter = self . _enter_handler , on_leave = self . _leave_handler ) self . scope_tree = self . _scope_tree_builder . get_global_scope ( ) self . link_registry = self . _scope_tree_builder . link_registry | Build a scope tree and links between scopes and identifiers by the specified ast . You can access the built scope tree and the built links by . scope_tree and . link_registry . |
53,862 | def cli ( argv = None ) : kwargs = parse_arguments ( argv or sys . argv [ 1 : ] ) log_level = kwargs . pop ( 'log_level' ) logging . basicConfig ( format = '%(levelname)s | %(message)s' , level = log_level ) logger = logging . getLogger ( __name__ ) sub_log_level = logging . ERROR if log_level == logging . getLevelName ( logging . DEBUG ) : sub_log_level = logging . DEBUG logging . getLogger ( 'redo' ) . setLevel ( sub_log_level ) logging . getLogger ( 'requests' ) . setLevel ( sub_log_level ) logging . getLogger ( 'thclient' ) . setLevel ( sub_log_level ) try : scraper_type = kwargs . pop ( 'scraper_type' ) if kwargs . get ( 'url' ) : scraper_type = 'direct' build = factory . FactoryScraper ( scraper_type , ** kwargs ) if kwargs . get ( 'print_url' ) : logger . info ( build . url ) else : build . download ( ) except KeyboardInterrupt : logger . error ( 'Download interrupted by the user' ) | CLI entry point for mozdownload . |
53,863 | def query_builds_by_revision ( self , revision , job_type_name = 'Build' , debug_build = False ) : builds = set ( ) try : self . logger . info ( 'Querying {url} for list of builds for revision: {revision}' . format ( url = self . client . server_url , revision = revision ) ) option_hash = None for key , values in self . client . get_option_collection_hash ( ) . iteritems ( ) : for value in values : if value [ 'name' ] == ( 'debug' if debug_build else 'opt' ) : option_hash = key break if option_hash : break resultsets = self . client . get_pushes ( self . branch , revision = revision ) kwargs = { 'option_collection_hash' : option_hash , 'job_type_name' : job_type_name , 'exclusion_profile' : False , } kwargs . update ( self . get_treeherder_platform ( self . platform ) ) for resultset in resultsets : kwargs . update ( { 'result_set_id' : resultset [ 'id' ] } ) jobs = self . client . get_jobs ( self . branch , ** kwargs ) for job in jobs : log_urls = self . client . get_job_log_url ( self . branch , job_id = job [ 'id' ] ) for log_url in log_urls : if self . application in log_url [ 'url' ] : self . logger . debug ( 'Found build folder: {}' . format ( log_url [ 'url' ] ) ) builds . update ( [ log_url [ 'url' ] ] ) except Exception : self . logger . exception ( 'Failure occurred when querying Treeherder for builds' ) return list ( builds ) | Retrieve build folders for a given revision with the help of Treeherder . |
53,864 | def urljoin ( * fragments ) : parts = [ fragment . rstrip ( '/' ) for fragment in fragments [ : len ( fragments ) - 1 ] ] parts . append ( fragments [ - 1 ] ) return '/' . join ( parts ) | Concatenate multi part strings into urls . |
53,865 | def create_md5 ( path ) : m = hashlib . md5 ( ) with open ( path , "rb" ) as f : while True : data = f . read ( 8192 ) if not data : break m . update ( data ) return m . hexdigest ( ) | Create the md5 hash of a file using the hashlib library . |
53,866 | def filter ( self , filter ) : if hasattr ( filter , '__call__' ) : return [ entry for entry in self . entries if filter ( entry ) ] else : pattern = re . compile ( filter , re . IGNORECASE ) return [ entry for entry in self . entries if pattern . match ( entry ) ] | Filter entries by calling function or applying regex . |
53,867 | def handle_starttag ( self , tag , attrs ) : if not tag == 'a' : return for attr in attrs : if attr [ 0 ] == 'href' : url = urllib . unquote ( attr [ 1 ] ) self . active_url = url . rstrip ( '/' ) . split ( '/' ) [ - 1 ] return | Callback for when a tag gets opened . |
53,868 | def handle_data ( self , data ) : if not self . active_url : return if data . strip ( '/' ) == self . active_url : self . entries . append ( self . active_url ) | Callback when the data of a tag has been collected . |
53,869 | def dst ( self , dt ) : dst_start_date = self . first_sunday ( dt . year , 3 ) + timedelta ( days = 7 ) + timedelta ( hours = 2 ) dst_end_date = self . first_sunday ( dt . year , 11 ) + timedelta ( hours = 2 ) if dst_start_date <= dt . replace ( tzinfo = None ) < dst_end_date : return timedelta ( hours = 1 ) else : return timedelta ( 0 ) | Calculate delta for daylight saving . |
53,870 | def first_sunday ( self , year , month ) : date = datetime ( year , month , 1 , 0 ) days_until_sunday = 6 - date . weekday ( ) return date + timedelta ( days = days_until_sunday ) | Get the first sunday of a month . |
53,871 | def binary ( self ) : def _get_binary ( ) : parser = self . _create_directory_parser ( self . path ) if not parser . entries : raise errors . NotFoundError ( 'No entries found' , self . path ) pattern = re . compile ( self . binary_regex , re . IGNORECASE ) for entry in parser . entries : try : self . _binary = pattern . match ( entry ) . group ( ) break except Exception : continue else : raise errors . NotFoundError ( "Binary not found in folder" , self . path ) self . _retry_check_404 ( _get_binary ) return self . _binary | Return the name of the build . |
53,872 | def url ( self ) : return urllib . quote ( urljoin ( self . path , self . binary ) , safe = '%/:=&?~#+!$,;\'@()*[]|' ) | Return the URL of the build . |
53,873 | def filename ( self ) : if self . _filename is None : if os . path . splitext ( self . destination ) [ 1 ] : target_file = self . destination else : target_file = os . path . join ( self . destination , self . build_filename ( self . binary ) ) self . _filename = os . path . abspath ( target_file ) return self . _filename | Return the local filename of the build . |
53,874 | def download ( self ) : def total_seconds ( td ) : if hasattr ( td , 'total_seconds' ) : return td . total_seconds ( ) else : return ( td . microseconds + ( td . seconds + td . days * 24 * 3600 ) * 10 ** 6 ) / 10 ** 6 if os . path . isfile ( os . path . abspath ( self . filename ) ) : self . logger . info ( "File has already been downloaded: %s" % ( self . filename ) ) return self . filename directory = os . path . dirname ( self . filename ) if not os . path . isdir ( directory ) : os . makedirs ( directory ) self . logger . info ( 'Downloading from: %s' % self . url ) self . logger . info ( 'Saving as: %s' % self . filename ) tmp_file = self . filename + ".part" def _download ( ) : try : start_time = datetime . now ( ) r = self . session . get ( self . url , stream = True ) r . raise_for_status ( ) content_length = r . headers . get ( 'Content-length' ) if content_length : total_size = int ( content_length . strip ( ) ) max_value = ( ( total_size / CHUNK_SIZE ) + 1 ) * CHUNK_SIZE bytes_downloaded = 0 log_level = self . logger . getEffectiveLevel ( ) if log_level <= logging . INFO and content_length : widgets = [ pb . Percentage ( ) , ' ' , pb . Bar ( ) , ' ' , pb . ETA ( ) , ' ' , pb . FileTransferSpeed ( ) ] pbar = pb . ProgressBar ( widgets = widgets , maxval = max_value ) . start ( ) with open ( tmp_file , 'wb' ) as f : for chunk in r . iter_content ( CHUNK_SIZE ) : f . write ( chunk ) bytes_downloaded += CHUNK_SIZE if log_level <= logging . INFO and content_length : pbar . update ( bytes_downloaded ) t1 = total_seconds ( datetime . now ( ) - start_time ) if self . timeout_download and t1 >= self . timeout_download : raise errors . TimeoutError if log_level <= logging . INFO and content_length : pbar . finish ( ) except Exception : if os . path . isfile ( tmp_file ) : os . remove ( tmp_file ) raise self . _retry ( _download , retry_exceptions = ( requests . exceptions . RequestException , errors . TimeoutError ) ) os . rename ( tmp_file , self . filename ) return self . filename | Download the specified file . |
53,875 | def show_matching_builds ( self , builds ) : self . logger . info ( 'Found %s build%s: %s' % ( len ( builds ) , len ( builds ) > 1 and 's' or '' , len ( builds ) > 10 and ' ... ' . join ( [ ', ' . join ( builds [ : 5 ] ) , ', ' . join ( builds [ - 5 : ] ) ] ) or ', ' . join ( builds ) ) ) | Output the matching builds . |
53,876 | def is_build_dir ( self , folder_name ) : url = '%s/' % urljoin ( self . base_url , self . monthly_build_list_regex , folder_name ) if self . application in APPLICATIONS_MULTI_LOCALE and self . locale != 'multi' : url = '%s/' % urljoin ( url , self . locale ) parser = self . _create_directory_parser ( url ) pattern = re . compile ( self . binary_regex , re . IGNORECASE ) for entry in parser . entries : try : pattern . match ( entry ) . group ( ) return True except Exception : continue return False | Return whether or not the given dir contains a build . |
53,877 | def get_build_info_for_date ( self , date , build_index = None ) : url = urljoin ( self . base_url , self . monthly_build_list_regex ) has_time = date and date . time ( ) self . logger . info ( 'Retrieving list of builds from %s' % url ) parser = self . _create_directory_parser ( url ) regex = r'%(DATE)s-(\d+-)+%(BRANCH)s%(L10N)s%(PLATFORM)s$' % { 'DATE' : date . strftime ( '%Y-%m-%d' ) , 'BRANCH' : self . branch , 'L10N' : '(-l10n)?' if self . locale_build else '' , 'PLATFORM' : '' if self . application not in ( 'fennec' ) else '-' + self . platform } parser . entries = parser . filter ( regex ) parser . entries = parser . filter ( self . is_build_dir ) if has_time : regex = r'.*%s.*' % date . strftime ( '%H-%M-%S' ) parser . entries = parser . filter ( regex ) if not parser . entries : date_format = '%Y-%m-%d-%H-%M-%S' if has_time else '%Y-%m-%d' message = 'Folder for builds on %s has not been found' % self . date . strftime ( date_format ) raise errors . NotFoundError ( message , url ) self . show_matching_builds ( parser . entries ) if build_index is None : build_index = len ( parser . entries ) for build in reversed ( parser . entries ) : build_index -= 1 if not build_index or self . is_build_dir ( build ) : break self . logger . info ( 'Selected build: %s' % parser . entries [ build_index ] ) return ( parser . entries , build_index ) | Return the build information for a given date . |
53,878 | def monthly_build_list_regex ( self ) : return r'nightly/%(YEAR)s/%(MONTH)s/' % { 'YEAR' : self . date . year , 'MONTH' : str ( self . date . month ) . zfill ( 2 ) } | Return the regex for the folder containing builds of a month . |
53,879 | def filename ( self ) : if os . path . splitext ( self . destination ) [ 1 ] : target_file = self . destination else : parsed_url = urlparse ( self . url ) source_filename = ( parsed_url . path . rpartition ( '/' ) [ - 1 ] or parsed_url . hostname ) target_file = os . path . join ( self . destination , source_filename ) return os . path . abspath ( target_file ) | File name of the downloaded file . |
53,880 | def query_versions ( self , version = None ) : if version not in RELEASE_AND_CANDIDATE_LATEST_VERSIONS : return [ version ] url = urljoin ( self . base_url , 'releases/' ) parser = self . _create_directory_parser ( url ) if version : versions = parser . filter ( RELEASE_AND_CANDIDATE_LATEST_VERSIONS [ version ] ) from distutils . version import LooseVersion versions . sort ( key = LooseVersion ) return [ versions [ - 1 ] ] else : return parser . entries | Check specified version and resolve special values . |
53,881 | def build_list_regex ( self ) : regex = 'tinderbox-builds/%(BRANCH)s-%(PLATFORM)s%(L10N)s%(DEBUG)s/' return regex % { 'BRANCH' : self . branch , 'PLATFORM' : '' if self . locale_build else self . platform_regex , 'L10N' : 'l10n' if self . locale_build else '' , 'DEBUG' : '-debug' if self . debug_build else '' } | Return the regex for the folder which contains the list of builds . |
53,882 | def date_matches ( self , timestamp ) : if self . date is None : return False timestamp = datetime . fromtimestamp ( float ( timestamp ) , self . timezone ) if self . date . date ( ) == timestamp . date ( ) : return True return False | Determine whether the timestamp date is equal to the argument date . |
53,883 | def get_build_info_for_index ( self , build_index = None ) : url = urljoin ( self . base_url , self . build_list_regex ) self . logger . info ( 'Retrieving list of builds from %s' % url ) parser = self . _create_directory_parser ( url ) parser . entries = parser . filter ( r'^\d+$' ) if self . timestamp : parser . entries = self . timestamp in parser . entries and [ self . timestamp ] elif self . date : parser . entries = filter ( self . date_matches , parser . entries ) if not parser . entries : message = 'No builds have been found' raise errors . NotFoundError ( message , url ) self . show_matching_builds ( parser . entries ) if build_index is None : build_index = len ( parser . entries ) for build in reversed ( parser . entries ) : build_index -= 1 if not build_index or self . is_build_dir ( build ) : break self . logger . info ( 'Selected build: %s' % parser . entries [ build_index ] ) return ( parser . entries , build_index ) | Get additional information for the build at the given index . |
53,884 | def create_default_options_getter ( ) : options = [ ] try : ttyname = subprocess . check_output ( args = [ 'tty' ] ) . strip ( ) options . append ( b'ttyname=' + ttyname ) except subprocess . CalledProcessError as e : log . warning ( 'no TTY found: %s' , e ) display = os . environ . get ( 'DISPLAY' ) if display is not None : options . append ( 'display={}' . format ( display ) . encode ( 'ascii' ) ) else : log . warning ( 'DISPLAY not defined' ) log . info ( 'using %s for pinentry options' , options ) return lambda : options | Return current TTY and DISPLAY settings for GnuPG pinentry . |
53,885 | def write ( p , line ) : log . debug ( '%s <- %r' , p . args , line ) p . stdin . write ( line ) p . stdin . flush ( ) | Send and flush a single line to the subprocess stdin . |
53,886 | def expect ( p , prefixes , confidential = False ) : resp = p . stdout . readline ( ) log . debug ( '%s -> %r' , p . args , resp if not confidential else '********' ) for prefix in prefixes : if resp . startswith ( prefix ) : return resp [ len ( prefix ) : ] raise UnexpectedError ( resp ) | Read a line and return it without required prefix . |
53,887 | def interact ( title , description , prompt , binary , options ) : args = [ binary ] p = subprocess . Popen ( args = args , stdin = subprocess . PIPE , stdout = subprocess . PIPE , env = os . environ ) p . args = args expect ( p , [ b'OK' ] ) title = util . assuan_serialize ( title . encode ( 'ascii' ) ) write ( p , b'SETTITLE ' + title + b'\n' ) expect ( p , [ b'OK' ] ) if description : description = util . assuan_serialize ( description . encode ( 'ascii' ) ) write ( p , b'SETDESC ' + description + b'\n' ) expect ( p , [ b'OK' ] ) if prompt : prompt = util . assuan_serialize ( prompt . encode ( 'ascii' ) ) write ( p , b'SETPROMPT ' + prompt + b'\n' ) expect ( p , [ b'OK' ] ) log . debug ( 'setting %d options' , len ( options ) ) for opt in options : write ( p , b'OPTION ' + opt + b'\n' ) expect ( p , [ b'OK' , b'ERR' ] ) write ( p , b'GETPIN\n' ) pin = expect ( p , [ b'OK' , b'D ' ] , confidential = True ) p . communicate ( ) exit_code = p . wait ( ) if exit_code : raise subprocess . CalledProcessError ( exit_code , binary ) return pin . decode ( 'ascii' ) . strip ( ) | Use GPG pinentry program to interact with the user . |
53,888 | def get_passphrase ( self , prompt = 'Passphrase:' ) : passphrase = None if self . cached_passphrase_ack : passphrase = self . cached_passphrase_ack . get ( ) if passphrase is None : passphrase = interact ( title = '{} passphrase' . format ( self . device_name ) , prompt = prompt , description = None , binary = self . passphrase_entry_binary , options = self . options_getter ( ) ) if self . cached_passphrase_ack : self . cached_passphrase_ack . set ( passphrase ) return passphrase | Ask the user for passphrase . |
53,889 | def export_public_keys ( self , identities ) : public_keys = [ ] with self . device : for i in identities : pubkey = self . device . pubkey ( identity = i ) vk = formats . decompress_pubkey ( pubkey = pubkey , curve_name = i . curve_name ) public_key = formats . export_public_key ( vk = vk , label = i . to_string ( ) ) public_keys . append ( public_key ) return public_keys | Export SSH public keys from the device . |
53,890 | def sign_ssh_challenge ( self , blob , identity ) : msg = _parse_ssh_blob ( blob ) log . debug ( '%s: user %r via %r (%r)' , msg [ 'conn' ] , msg [ 'user' ] , msg [ 'auth' ] , msg [ 'key_type' ] ) log . debug ( 'nonce: %r' , msg [ 'nonce' ] ) fp = msg [ 'public_key' ] [ 'fingerprint' ] log . debug ( 'fingerprint: %s' , fp ) log . debug ( 'hidden challenge size: %d bytes' , len ( blob ) ) log . info ( 'please confirm user "%s" login to "%s" using %s...' , msg [ 'user' ] . decode ( 'ascii' ) , identity . to_string ( ) , self . device ) with self . device : return self . device . sign ( blob = blob , identity = identity ) | Sign given blob using a private key on the device . |
53,891 | def fingerprint ( blob ) : digest = hashlib . md5 ( blob ) . digest ( ) return ':' . join ( '{:02x}' . format ( c ) for c in bytearray ( digest ) ) | Compute SSH fingerprint for specified blob . |
53,892 | def parse_pubkey ( blob ) : fp = fingerprint ( blob ) s = io . BytesIO ( blob ) key_type = util . read_frame ( s ) log . debug ( 'key type: %s' , key_type ) assert key_type in SUPPORTED_KEY_TYPES , key_type result = { 'blob' : blob , 'type' : key_type , 'fingerprint' : fp } if key_type == SSH_NIST256_KEY_TYPE : curve_name = util . read_frame ( s ) log . debug ( 'curve name: %s' , curve_name ) point = util . read_frame ( s ) assert s . read ( ) == b'' _type , point = point [ : 1 ] , point [ 1 : ] assert _type == SSH_NIST256_DER_OCTET size = len ( point ) // 2 assert len ( point ) == 2 * size coords = ( util . bytes2num ( point [ : size ] ) , util . bytes2num ( point [ size : ] ) ) curve = ecdsa . NIST256p point = ecdsa . ellipticcurve . Point ( curve . curve , * coords ) def ecdsa_verifier ( sig , msg ) : assert len ( sig ) == 2 * size sig_decode = ecdsa . util . sigdecode_string vk = ecdsa . VerifyingKey . from_public_point ( point , curve , hashfunc ) vk . verify ( signature = sig , data = msg , sigdecode = sig_decode ) parts = [ sig [ : size ] , sig [ size : ] ] return b'' . join ( [ util . frame ( b'\x00' + p ) for p in parts ] ) result . update ( point = coords , curve = CURVE_NIST256 , verifier = ecdsa_verifier ) if key_type == SSH_ED25519_KEY_TYPE : pubkey = util . read_frame ( s ) assert s . read ( ) == b'' def ed25519_verify ( sig , msg ) : assert len ( sig ) == 64 vk = ed25519 . VerifyingKey ( pubkey ) vk . verify ( sig , msg ) return sig result . update ( curve = CURVE_ED25519 , verifier = ed25519_verify ) return result | Parse SSH public key from given blob . |
53,893 | def export_public_key ( vk , label ) : key_type , blob = serialize_verifying_key ( vk ) log . debug ( 'fingerprint: %s' , fingerprint ( blob ) ) b64 = base64 . b64encode ( blob ) . decode ( 'ascii' ) return u'{} {} {}\n' . format ( key_type . decode ( 'ascii' ) , b64 , label ) | Export public key to text format . |
53,894 | def import_public_key ( line ) : log . debug ( 'loading SSH public key: %r' , line ) file_type , base64blob , name = line . split ( ) blob = base64 . b64decode ( base64blob ) result = parse_pubkey ( blob ) result [ 'name' ] = name . encode ( 'utf-8' ) assert result [ 'type' ] == file_type . encode ( 'ascii' ) log . debug ( 'loaded %s public key: %s' , file_type , result [ 'fingerprint' ] ) return result | Parse public key textual format as saved at a . pub file . |
53,895 | def parse_packets ( stream ) : reader = util . Reader ( stream ) while True : try : value = reader . readfmt ( 'B' ) except EOFError : return log . debug ( 'prefix byte: %s' , bin ( value ) ) assert util . bit ( value , 7 ) == 1 tag = util . low_bits ( value , 6 ) if util . bit ( value , 6 ) == 0 : length_type = util . low_bits ( tag , 2 ) tag = tag >> 2 fmt = { 0 : '>B' , 1 : '>H' , 2 : '>L' } [ length_type ] packet_size = reader . readfmt ( fmt ) else : first = reader . readfmt ( 'B' ) if first < 192 : packet_size = first elif first < 224 : packet_size = ( ( first - 192 ) << 8 ) + reader . readfmt ( 'B' ) + 192 elif first == 255 : packet_size = reader . readfmt ( '>L' ) else : log . error ( 'Partial Body Lengths unsupported' ) log . debug ( 'packet length: %d' , packet_size ) packet_data = reader . read ( packet_size ) packet_type = PACKET_TYPES . get ( tag ) p = { 'type' : 'unknown' , 'tag' : tag , 'raw' : packet_data } if packet_type is not None : try : p = packet_type ( util . Reader ( io . BytesIO ( packet_data ) ) ) p [ 'tag' ] = tag except ValueError : log . exception ( 'Skipping packet: %s' , util . hexlify ( packet_data ) ) log . debug ( 'packet "%s": %s' , p [ 'type' ] , p ) yield p | Support iterative parsing of available GPG packets . |
53,896 | def digest_packets ( packets , hasher ) : data_to_hash = io . BytesIO ( ) for p in packets : data_to_hash . write ( p [ '_to_hash' ] ) hasher . update ( data_to_hash . getvalue ( ) ) return hasher . digest ( ) | Compute digest on specified packets according to _to_hash field . |
53,897 | def load_by_keygrip ( pubkey_bytes , keygrip ) : stream = io . BytesIO ( pubkey_bytes ) packets = list ( parse_packets ( stream ) ) packets_per_pubkey = [ ] for p in packets : if p [ 'type' ] == 'pubkey' : packets_per_pubkey . append ( [ ] ) packets_per_pubkey [ - 1 ] . append ( p ) for packets in packets_per_pubkey : user_ids = [ p for p in packets if p [ 'type' ] == 'user_id' ] for p in packets : if p . get ( 'keygrip' ) == keygrip : return p , user_ids raise KeyError ( '{} keygrip not found' . format ( util . hexlify ( keygrip ) ) ) | Return public key and first user ID for specified keygrip . |
53,898 | def load_signature ( stream , original_data ) : signature , = list ( parse_packets ( ( stream ) ) ) hash_alg = HASH_ALGORITHMS [ signature [ 'hash_alg' ] ] digest = digest_packets ( [ { '_to_hash' : original_data } , signature ] , hasher = hashlib . new ( hash_alg ) ) assert signature [ 'hash_prefix' ] == digest [ : 2 ] return signature , digest | Load signature from stream and compute GPG digest for verification . |
53,899 | def remove_armor ( armored_data ) : stream = io . BytesIO ( armored_data ) lines = stream . readlines ( ) [ 3 : - 1 ] data = base64 . b64decode ( b'' . join ( lines ) ) payload , checksum = data [ : - 3 ] , data [ - 3 : ] assert util . crc24 ( payload ) == checksum return payload | Decode armored data into its binary form . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.