idx: int64, range 0–63k
question: string, lengths 53–5.28k
target: string, lengths 5–805
58,300
def with_updated_configuration ( self , options = None , attribute_options = None ) : return self . _mapping . with_updated_configuration ( options = options , attribute_options = attribute_options )
Returns a context in which this representer is updated with the given options and attribute options .
58,301
def jsPath ( path ) : shortPath = path . replace ( "C:\\Users\\scheinerbock\\Desktop\\" + "ideogram\\scrapeSource\\test\\" , "" ) noDash = shortPath . replace ( "-" , "_dash_" ) jsPath = noDash . replace ( "\\" , "_slash_" ) . replace ( "." , "_dot_" ) return jsPath
Returns a relative path with \, - and . replaced so that the string plays nicely with JavaScript.
58,302
def jsName ( path , name ) : shortPath = path . replace ( "C:\\Users\\scheinerbock\\Desktop\\" + "ideogram\\scrapeSource\\test\\" , "" ) noDash = shortPath . replace ( "-" , "_dash_" ) jsPath = noDash . replace ( "\\" , "_slash_" ) . replace ( "." , "_dot_" ) jsName = jsPath + '_slash_' + name return jsName
Returns a name string with \, - and . replaced so that the string plays nicely with JavaScript.
58,303
def getStartNodes ( fdefs , calls ) : s = [ ] for source in fdefs : for fn in fdefs [ source ] : inboundEdges = False for call in calls : if call . target == fn : inboundEdges = True if not inboundEdges : s . append ( fn ) return s
Return a list of nodes in fdefs that have no inbound edges
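A minimal usage sketch of getStartNodes, assuming the calls are edge objects with source and target attributes (the Call namedtuple below is a hypothetical stand-in for whatever edge type the scraper uses):

from collections import namedtuple

Call = namedtuple('Call', ['source', 'target'])

fdefs = {'a.py': ['main', 'helper'], 'b.py': ['util']}
calls = [Call('main', 'helper'), Call('helper', 'util')]

# 'main' is the only function nothing calls, so it is the sole start node.
print(getStartNodes(fdefs, calls))  # ['main']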
58,304
def getChildren ( current , calls , blacklist = [ ] ) : return [ c . target for c in calls if c . source == current and c . target not in blacklist ]
Return a list of the children of current that are not in blacklist.
58,305
def tagAttributes ( fdef_master_list , node , depth = 0 ) : if type ( node ) == list : for i in node : depth += 1 tagAttributes ( fdef_master_list , i , depth ) if type ( node ) == dict : for x in fdef_master_list : if jsName ( x . path , x . name ) == node [ 'name' ] : node [ 'path' ] = x . path node [ 'depth' ] = depth if "children" not in node : node [ "size" ] = x . weight for i in node . values ( ) : depth += 1 tagAttributes ( fdef_master_list , i , depth ) return node
recursively tag objects with sizes, depths and path names
58,306
def tagAttributes_while ( fdef_master_list , root ) : depth = 0 current = root untagged_nodes = [ root ] while untagged_nodes : current = untagged_nodes . pop ( ) for x in fdef_master_list : if jsName ( x . path , x . name ) == current [ 'name' ] : current [ 'path' ] = x . path if 'children' in current : for child in current [ 'children' ] : child [ "depth" ] = depth untagged_nodes . append ( child ) if 'depth' not in current : current [ "depth" ] = depth depth += 1 return root
Tag each node under root with the appropriate depth .
58,307
def noEmptyNests ( node ) : if type ( node ) == list : for i in node : noEmptyNests ( i ) if type ( node ) == dict : for i in node . values ( ) : noEmptyNests ( i ) if node [ "children" ] == [ ] : node . pop ( "children" ) return node
recursively make sure that no dictionaries inside node contain empty children lists
58,308
def remove_old_tmp_files ( profiles = None , max_lifetime = ( 7 * 24 ) ) : assert isinstance ( profiles , ( list , tuple ) ) or profiles is None if profiles is None : profiles = dju_settings . DJU_IMG_UPLOAD_PROFILES . keys ( ) profiles = set ( ( 'default' , ) + tuple ( profiles ) ) total = removed = 0 old_dt = datetime . datetime . utcnow ( ) - datetime . timedelta ( hours = max_lifetime ) for profile in profiles : conf = get_profile_configs ( profile = profile ) root_path = os . path . join ( settings . MEDIA_ROOT , dju_settings . DJU_IMG_UPLOAD_SUBDIR , conf [ 'PATH' ] ) for file_path in get_files_recursive ( root_path ) : m = re_tmp . match ( os . path . basename ( file_path ) ) if m is None : continue total += 1 fdt = dtstr_to_datetime ( m . group ( 'dtstr' ) ) if fdt and old_dt > fdt : os . remove ( file_path ) removed += 1 return removed , total
Removes old temp files that are older than max_lifetime hours. If profiles is None then all profiles are used.
58,309
def next_task ( self , item , ** kwargs ) : filename = os . path . basename ( item ) try : self . tx_importer . import_batch ( filename = filename ) except TransactionImporterError as e : raise TransactionsFileQueueError ( e ) from e else : self . archive ( filename )
Calls import_batch for the next filename in the queue and archives the file .
58,310
def get_public_comments_for_model ( model ) : if not IS_INSTALLED : return CommentModelStub . objects . none ( ) else : return CommentModel . objects . for_model ( model ) . filter ( is_public = True , is_removed = False )
Get visible comments for the model .
58,311
def get_comments_are_open ( instance ) : if not IS_INSTALLED : return False try : mod = moderator . _registry [ instance . __class__ ] except KeyError : return True return CommentModerator . allow ( mod , None , instance , None )
Check if comments are open for the instance
58,312
def get_comments_are_moderated ( instance ) : if not IS_INSTALLED : return False try : mod = moderator . _registry [ instance . __class__ ] except KeyError : return False return CommentModerator . moderate ( mod , None , instance , None )
Check if comments are moderated for the instance
58,313
def calc_local_indices ( shape , num_partitions , coordinate ) : dimension = len ( shape ) assert dimension == len ( num_partitions ) decomposed_shapes = [ ] for idx in range ( dimension ) : local_shape = shape [ idx ] // num_partitions [ idx ] temp_shape_list = [ ] for _ in range ( num_partitions [ idx ] ) : temp_shape_list . append ( local_shape ) for j in range ( shape [ idx ] % num_partitions [ idx ] ) : temp_shape_list [ j ] += 1 decomposed_shapes . append ( temp_shape_list ) indices = [ ] for i in range ( dimension ) : temp_index_list = [ ] start_idx = 0 end_idx = 0 for j in range ( num_partitions [ i ] ) : end_idx = end_idx + decomposed_shapes [ i ] [ j ] temp_index_list . append ( [ start_idx , end_idx ] ) start_idx = end_idx indices . append ( temp_index_list ) start_index = [ ] stop_index = [ ] shape = [ ] for idx in range ( dimension ) : start_index . append ( indices [ idx ] [ coordinate [ idx ] ] [ 0 ] ) stop_index . append ( indices [ idx ] [ coordinate [ idx ] ] [ 1 ] ) shape . append ( decomposed_shapes [ idx ] [ coordinate [ idx ] ] ) shape = tuple ( shape ) start_index = tuple ( start_index ) stop_index = tuple ( stop_index ) return start_index , stop_index , shape
Calculate local indices; returns start and stop index per dimension per process for the local data field.
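A worked example of the decomposition, assuming calc_local_indices is importable as defined above: a length-10 axis split across 3 partitions yields local sizes 4, 3, 3, with the remainder spread over the leading partitions:

# Partition a length-10 axis across 3 processes.
start, stop, shape = calc_local_indices((10,), (3,), (0,))
print(start, stop, shape)  # (0,) (4,) (4,)

start, stop, shape = calc_local_indices((10,), (3,), (1,))
print(start, stop, shape)  # (4,) (7,) (3,)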
58,314
def load_file ( self , filename ) : with open ( filename , 'r' ) as sourcefile : self . set_string ( sourcefile . read ( ) )
Read in file contents and set the current string .
58,315
def set_string ( self , string ) : self . string = string self . length = len ( string ) self . reset_position ( )
Set the working string and its length then reset positions .
58,316
def add_string ( self , string ) : self . string += string self . length += len ( string ) self . eos = 0
Add to the working string and its length and reset eos .
58,317
def reset_position ( self ) : self . pos = 0 self . col = 0 self . row = 1 self . eos = 0
Reset all current positions .
58,318
def has_space ( self , length = 1 , offset = 0 ) : return self . pos + ( length + offset ) - 1 < self . length
Returns True if self.pos + length + offset - 1 is within the working string length.
58,319
def eol_distance_next ( self , offset = 0 ) : distance = 0 for char in self . string [ self . pos + offset : ] : if char == '\n' : break else : distance += 1 return distance
Return the number of characters until the next newline.
58,320
def eol_distance_last ( self , offset = 0 ) : distance = 0 for char in reversed ( self . string [ : self . pos + offset ] ) : if char == '\n' : break else : distance += 1 return distance
Return the number of characters until the last newline.
58,321
def spew_length ( self , length ) : pos = self . pos if not pos or length > pos : return None row = self . row for char in reversed ( self . string [ pos - length : pos ] ) : pos -= 1 if char == '\n' : row -= 1 self . pos = pos self . col = self . eol_distance_last ( ) self . row = row if self . has_space ( ) : self . eos = 0
Move current position backwards by length .
58,322
def eat_length ( self , length ) : pos = self . pos if self . eos or pos + length > self . length : return None col = self . col row = self . row for char in self . string [ pos : pos + length ] : col += 1 pos += 1 if char == '\n' : col = 0 row += 1 self . pos = pos self . col = col self . row = row if not self . has_space ( ) : self . eos = 1
Move current position forward by length and set eos if needed.
58,323
def eat_string ( self , string ) : pos = self . pos if self . eos or pos + len ( string ) > self . length : return None col = self . col row = self . row for char in string : col += 1 pos += 1 if char == '\n' : col = 0 row += 1 self . pos = pos self . col = col self . row = row if not self . has_space ( ) : self . eos = 1
Move current position by length of string and count lines by \ n .
58,324
def eat_line ( self ) : if self . eos : return None eat_length = self . eat_length get_char = self . get_char has_space = self . has_space while has_space ( ) and get_char ( ) != '\n' : eat_length ( 1 ) eat_length ( 1 )
Move current position forward until the next line .
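A short usage sketch of the scanner methods above, assuming they live on a SourceString class with a no-argument constructor (hypothetical; the actual constructor is not shown in this dataset):

s = SourceString()
s.set_string('first\nsecond\n')

s.eat_line()         # consume 'first' plus its newline
print(s.row, s.col)  # 2 0 -- now at the start of 'second'
print(s.get_char())  # 's'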
58,325
def get_char ( self , offset = 0 ) : if not self . has_space ( offset = offset ) : return '' return self . string [ self . pos + offset ]
Return the current character in the working string .
58,326
def get_length ( self , length , trim = 0 , offset = 0 ) : if trim and not self . has_space ( offset + length ) : return self . string [ self . pos + offset : ] elif self . has_space ( offset + length ) : return self . string [ self . pos + offset : self . pos + offset + length ] else : return ''
Return the string at current position + length. If trim is true then get as much as possible before eos.
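A sketch of the trim semantics, using the same hypothetical SourceString instance as above:

s = SourceString()
s.set_string('abcde')
s.eat_length(2)                  # position now at 'c'
print(s.get_length(10))          # '' -- not enough characters left
print(s.get_length(10, trim=1))  # 'cde' -- trim returns what remains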
58,327
def get_string ( self , offset = 0 ) : if not self . has_space ( offset = offset ) : return '' string = self . string pos = self . pos + offset for i , char in enumerate ( string [ pos : ] ) : if char . isspace ( ) : return string [ pos : pos + i ] else : return string [ pos : ]
Return non-space chars from the current position until a whitespace.
58,328
def rest_of_string ( self , offset = 0 ) : if self . has_space ( offset = offset ) : return self . string [ self . pos + offset : ] else : return ''
Return a copy of the string from the current position to the end of the source string.
58,329
def get_current_line ( self ) : if not self . has_space ( ) : return None pos = self . pos - self . col string = self . string end = self . length output = [ ] while pos < len ( string ) and string [ pos ] != '\n' : output . append ( string [ pos ] ) pos += 1 if pos == end : break else : output . append ( string [ pos ] ) if not output : return None return SourceLine ( '' . join ( output ) , self . row )
Return a SourceLine of the current line .
58,330
def get_lines ( self , first , last ) : line = 1 linestring = [ ] linestrings = [ ] for char in self . string : if line >= first and line <= last : linestring . append ( char ) if char == '\n' : linestrings . append ( ( '' . join ( linestring ) , line ) ) linestring = [ ] elif line > last : break if char == '\n' : line += 1 if linestring : linestrings . append ( ( '' . join ( linestring ) , line ) ) elif not linestrings : return None return [ SourceLine ( string , lineno ) for string , lineno in linestrings ]
Return SourceLines for lines between and including first & last .
58,331
def get_surrounding_lines ( self , past = 1 , future = 1 ) : string = self . string pos = self . pos - self . col end = self . length row = self . row linesback = 0 while linesback > - past : if pos <= 0 : break elif string [ pos - 2 ] == '\n' : linesback -= 1 pos -= 1 output = [ ] linestring = [ ] lines = future + 1 while linesback < lines : if pos >= end : linestring . append ( string [ pos - 1 ] ) output . append ( SourceLine ( '' . join ( linestring [ : - 1 ] ) , row + linesback ) ) break elif string [ pos ] == '\n' : linestring . append ( string [ pos ] ) pos += 1 output . append ( SourceLine ( '' . join ( linestring ) , row + linesback ) ) linesback += 1 linestring = [ ] linestring . append ( string [ pos ] ) pos += 1 return output
Return the current line plus the given number of previous (past) and future lines. Returns a list of SourceLines.
58,332
def get_all_lines ( self ) : output = [ ] line = [ ] lineno = 1 for char in self . string : line . append ( char ) if char == '\n' : output . append ( SourceLine ( '' . join ( line ) , lineno ) ) line = [ ] lineno += 1 if line : output . append ( SourceLine ( '' . join ( line ) , lineno ) ) return output
Return all lines of the SourceString as a list of SourceLines.
58,333
def match_string ( self , string , word = 0 , offset = 0 ) : if word : return self . get_string ( offset ) == string return self . get_length ( len ( string ) , offset = offset ) == string
Returns 1 if string can be matched against the SourceString's current position.
58,334
def match_any_string ( self , strings , word = 0 , offset = 0 ) : if word : current = self . get_string ( offset ) return current if current in strings else '' current = '' currentlength = 0 length = 0 for string in strings : length = len ( string ) if length != currentlength : current = self . get_length ( length , offset = offset ) currentlength = length if string == current : return string return ''
Attempts to match each string in strings in order . Will return the string that matches or an empty string if no match .
58,335
def match_any_char ( self , chars , offset = 0 ) : if not self . has_space ( offset = offset ) : return '' current = self . string [ self . pos + offset ] return current if current in chars else ''
Match and return the current SourceString char if it's in chars.
58,336
def match_function_pattern ( self , first , rest = None , least = 1 , offset = 0 ) : if not self . has_space ( offset = offset ) : return '' firstchar = self . string [ self . pos + offset ] if not first ( firstchar ) : return '' output = [ firstchar ] pattern = first if rest is None else rest for char in self . string [ self . pos + offset + 1 : ] : if pattern ( char ) : output . append ( char ) else : break if len ( output ) < least : return '' return '' . join ( output )
Match each char sequentially from the current SourceString position until the pattern doesn't match, and return all matches.
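For example, an identifier-like token can be matched with character predicates (same hypothetical SourceString instance as in the earlier sketches):

s = SourceString()
s.set_string('foo42 + bar')

ident = s.match_function_pattern(lambda c: c.isalpha(),
                                 lambda c: c.isalnum())
print(ident)  # 'foo42' -- matching stops at the space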
58,337
def count_indents_last_line ( self , spacecount , tabs = 0 , back = 5 ) : if not self . has_space ( ) : return 0 lines = self . get_surrounding_lines ( back , 0 ) for line in reversed ( lines ) : if not line . string . isspace ( ) : return line . count_indents ( spacecount , tabs ) return 0
Finds the last meaningful line and returns its indent level. back specifies the number of lines to look back for a non-whitespace line.
58,338
def count_indents_length_last_line ( self , spacecount , tabs = 0 , back = 5 ) : if not self . has_space ( ) : return ( 0 , 0 ) lines = self . get_surrounding_lines ( back , 0 ) for line in reversed ( lines ) : if not line . string . isspace ( ) : return line . count_indents_length ( spacecount , tabs ) return ( 0 , 0 )
Finds the last meaningful line and returns its indent level and character length. back specifies the number of lines to look back for a non-whitespace line.
58,339
def skip_whitespace ( self , newlines = 0 ) : if newlines : while not self . eos : if self . get_char ( ) . isspace ( ) : self . eat_length ( 1 ) else : break else : char = '' while not self . eos : char = self . get_char ( ) if char . isspace ( ) and char != '\n' : self . eat_length ( 1 ) else : break
Moves the position forward to the next non-whitespace character, leaving newlines in place. If newlines >= 1, newlines are skipped as whitespace too.
58,340
def pretty_print ( self , carrot = False ) : lineno = self . lineno padding = 0 if lineno < 1000 : padding = 1 if lineno < 100 : padding = 2 if lineno < 10 : padding = 3 string = str ( lineno ) + ( ' ' * padding ) + '|' + self . string if carrot : string += '\n' + ( ' ' * ( self . col + 5 ) ) + '^' return string
Return a string of this line including the line number. If carrot is True then a line is added under the string with a caret under the current character position.
58,341
def safe_exit ( output ) : try : sys . stdout . write ( output ) sys . stdout . flush ( ) except IOError : pass
exit without breaking pipes .
58,342
def frag2text ( endpoint , stype , selector , clean = False , raw = False , verbose = False ) : try : return main ( endpoint , stype , selector , clean , raw , verbose ) except StandardError as err : return err
returns Markdown text of selected fragment .
58,343
def read ( self , _file ) : with open ( _file ) as fh : data = fh . read ( ) if self . verbose : sys . stdout . write ( "read %d bytes from %s\n" % ( fh . tell ( ) , _file ) ) return data
return local file contents as endpoint .
58,344
def GET ( self , url ) : r = requests . get ( url ) if self . verbose : sys . stdout . write ( "%s %s\n" % ( r . status_code , r . encoding ) ) sys . stdout . write ( str ( r . headers ) + "\n" ) self . encoding = r . encoding return r . text
returns text content of HTTP GET response .
58,345
def select ( self , html , stype , expression ) : etree = html5lib . parse ( html , treebuilder = 'lxml' , namespaceHTMLElements = False ) if stype == 'css' : selector = lxml . cssselect . CSSSelector ( expression ) frag = list ( selector ( etree ) ) else : frag = etree . xpath ( expression ) if not frag : raise RuntimeError ( "Nothing found for: %s" % expression ) return "" . join ( [ lxml . etree . tostring ( x ) for x in frag ] )
returns WHATWG spec HTML fragment from selector expression .
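A hedged usage sketch (Python 2 era, matching the code's use of html5lib and lxml; `scraper` is an assumed instance of the class these methods belong to):

html = '<html><body><p>one</p><p>two</p></body></html>'
print(scraper.select(html, 'css', 'p'))
# '<p>one</p><p>two</p>'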
58,346
def clean ( self , html ) : return lxml . html . clean . clean_html ( unicode ( html , self . encoding ) )
removes evil HTML per lxml . html . clean defaults .
58,347
def filesystem_repository ( _context , name = None , make_default = False , aggregate_class = None , repository_class = None , directory = None , content_type = None ) : cnf = { } if not directory is None : cnf [ 'directory' ] = directory if not content_type is None : cnf [ 'content_type' ] = content_type _repository ( _context , name , make_default , aggregate_class , repository_class , REPOSITORY_TYPES . FILE_SYSTEM , 'add_filesystem_repository' , cnf )
Directive for registering a file - system based repository .
58,348
def rdb_repository ( _context , name = None , make_default = False , aggregate_class = None , repository_class = None , db_string = None , metadata_factory = None ) : cnf = { } if not db_string is None : cnf [ 'db_string' ] = db_string if not metadata_factory is None : cnf [ 'metadata_factory' ] = metadata_factory _repository ( _context , name , make_default , aggregate_class , repository_class , REPOSITORY_TYPES . RDB , 'add_rdb_repository' , cnf )
Directive for registering a RDBM based repository .
58,349
def messaging ( _context , repository , reset_on_start = False ) : discriminator = ( 'messaging' , repository ) reg = get_current_registry ( ) config = Configurator ( reg , package = _context . package ) _context . action ( discriminator = discriminator , callable = config . setup_system_repository , args = ( repository , ) , kw = dict ( reset_on_start = reset_on_start ) )
Directive for setting up the user message resource in the appropriate repository .
58,350
def _filter ( self , dict , keep ) : if not keep : return dict result = { } for key , value in dict . iteritems ( ) : if key in keep : result [ key ] = value return result
Remove any keys not in keep
58,351
def main ( upload = 'usbasp' , core = 'arduino' , replace_existing = True , ) : def install ( mcu , f_cpu , kbyte ) : board = AutoBunch ( ) board . name = TEMPL_NAME . format ( mcu = mcu , f_cpu = format_freq ( f_cpu ) , upload = upload ) board_id = TEMPL_ID . format ( mcu = mcu , f_cpu = ( f_cpu ) , upload = upload ) board . upload . using = upload board . upload . maximum_size = kbyte * 1024 board . build . mcu = mcu board . build . f_cpu = str ( f_cpu ) + 'L' board . build . core = core board . build . variant = 'standard' install_board ( board_id , board , replace_existing = replace_existing ) install ( 'atmega8' , 1000000 , 8 ) install ( 'atmega8' , 8000000 , 8 ) install ( 'atmega8' , 12000000 , 8 ) install ( 'atmega88' , 1000000 , 8 ) install ( 'atmega88' , 8000000 , 8 ) install ( 'atmega88' , 12000000 , 8 ) install ( 'atmega88' , 20000000 , 8 ) install ( 'atmega328p' , 20000000 , 32 ) install ( 'atmega328p' , 16000000 , 32 ) install ( 'atmega328p' , 8000000 , 32 ) install ( 'atmega328p' , 1000000 , 32 )
install custom boards .
58,352
def write_county_estimate ( self , table , variable , code , datum ) : try : division = Division . objects . get ( code = "{}{}" . format ( datum [ "state" ] , datum [ "county" ] ) , level = self . COUNTY_LEVEL , ) CensusEstimate . objects . update_or_create ( division = division , variable = variable , defaults = { "estimate" : datum [ code ] or 0 } , ) except ObjectDoesNotExist : print ( "ERROR: {}, {}" . format ( datum [ "NAME" ] , datum [ "state" ] ) )
Creates new estimate from a census series .
58,353
def get_district_estimates_by_state ( self , api , table , variable , estimate , state ) : state = Division . objects . get ( level = self . STATE_LEVEL , code = state ) district_data = api . get ( ( "NAME" , estimate ) , { "for" : "congressional district:*" , "in" : "state:{}" . format ( state . code ) , } , year = int ( table . year ) , ) for datum in district_data : self . write_district_estimate ( table , variable , estimate , datum )
Calls API for all districts in a state and a given estimate .
58,354
def get_county_estimates_by_state ( self , api , table , variable , estimate , state ) : state = Division . objects . get ( level = self . STATE_LEVEL , code = state ) county_data = api . get ( ( "NAME" , estimate ) , { "for" : "county:*" , "in" : "state:{}" . format ( state . code ) } , year = int ( table . year ) , ) for datum in county_data : self . write_county_estimate ( table , variable , estimate , datum )
Calls API for all counties in a state and a given estimate .
58,355
def get_state_estimates_by_state ( self , api , table , variable , estimate , state ) : state = Division . objects . get ( level = self . STATE_LEVEL , code = state ) state_data = api . get ( ( "NAME" , estimate ) , { "for" : "state:{}" . format ( state . code ) } , year = int ( table . year ) , ) for datum in state_data : self . write_state_estimate ( table , variable , estimate , datum )
Calls API for a state and a given estimate .
58,356
def aggregate_variable ( estimate , id ) : estimates = [ variable . estimates . get ( division__id = id ) . estimate for variable in estimate . variable . label . variables . all ( ) ] method = estimate . variable . label . aggregation if method == "s" : aggregate = sum ( estimates ) elif method == "a" : aggregate = statistics . mean ( estimates ) elif method == "m" : aggregate = statistics . median ( estimates ) else : aggregate = None return aggregate
Aggregate census table variables by a custom label .
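The three aggregation codes map onto the standard library like so (a standalone illustration of the dispatch, not the model layer itself):

import statistics

estimates = [10, 20, 40]
print(sum(estimates))                # method 's' -> 70
print(statistics.mean(estimates))    # method 'a' -> 23.33...
print(statistics.median(estimates))  # method 'm' -> 20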
58,357
def aggregate_national_estimates_by_district ( self ) : data = { } fips = "00" aggregated_labels = [ ] states = Division . objects . filter ( level = self . DISTRICT_LEVEL ) estimates = CensusEstimate . objects . filter ( division__level = self . DISTRICT_LEVEL ) for estimate in estimates : series = estimate . variable . table . series year = estimate . variable . table . year table = estimate . variable . table . code label = estimate . variable . label . label table_label = "{}{}" . format ( table , label ) code = estimate . variable . code if series not in data : data [ series ] = { } if year not in data [ series ] : data [ series ] [ year ] = { } if table not in data [ series ] [ year ] : data [ series ] [ year ] [ table ] = { } if fips not in data [ series ] [ year ] [ table ] : data [ series ] [ year ] [ table ] [ fips ] = { } if label is not None : if table_label not in aggregated_labels : aggregated_labels . append ( table_label ) data [ series ] [ year ] [ table ] [ fips ] [ label ] = [ self . aggregate_variable ( estimate , division . id ) for division in states if len ( CensusEstimate . objects . filter ( variable = estimate . variable , division = division . id , ) ) > 0 ] else : if code in data [ series ] [ year ] [ table ] [ fips ] : data [ series ] [ year ] [ table ] [ fips ] [ code ] . append ( estimate . estimate ) else : data [ series ] [ year ] [ table ] [ fips ] [ code ] = [ estimate . estimate ] return data
Aggregates district - level estimates for each table within the country .
58,358
def aggregate_state_estimates_by_county ( self , parent ) : data = { } for division in tqdm ( Division . objects . filter ( level = self . COUNTY_LEVEL , parent = parent ) ) : fips = division . code id = division . id aggregated_labels = [ ] for estimate in division . census_estimates . all ( ) : series = estimate . variable . table . series year = estimate . variable . table . year table = estimate . variable . table . code label = estimate . variable . label . label table_label = "{}{}" . format ( table , label ) code = estimate . variable . code if series not in data : data [ series ] = { } if year not in data [ series ] : data [ series ] [ year ] = { } if table not in data [ series ] [ year ] : data [ series ] [ year ] [ table ] = { } if fips not in data [ series ] [ year ] [ table ] : data [ series ] [ year ] [ table ] [ fips ] = { } if label is not None : if table_label not in aggregated_labels : aggregated_labels . append ( table_label ) data [ series ] [ year ] [ table ] [ fips ] [ label ] = self . aggregate_variable ( estimate , id ) else : data [ series ] [ year ] [ table ] [ division . code ] [ code ] = estimate . estimate return data
Aggregates county - level estimates for each table within a given state .
58,359
def xml ( self , fn = None , src = 'word/document.xml' , XMLClass = XML , ** params ) : "return the src with the given transformation applied, if any." if src in self . xml_cache : return self . xml_cache [ src ] if src not in self . zipfile . namelist ( ) : return x = XMLClass ( fn = fn or ( self . fn and self . fn . replace ( '.docx' , '.xml' ) ) or None , root = self . zipfile . read ( src ) ) self . xml_cache [ src ] = x return x
return the src with the given transformation applied if any .
58,360
def endnotemap ( self , cache = True ) : if self . __endnotemap is not None and cache == True : return self . __endnotemap else : x = self . xml ( src = 'word/endnotes.xml' ) d = Dict ( ) if x is None : return d for endnote in x . root . xpath ( "w:endnote" , namespaces = self . NS ) : id = endnote . get ( "{%(w)s}id" % self . NS ) typ = endnote . get ( "{%(w)s}type" % self . NS ) d [ id ] = Dict ( id = id , type = typ , elem = endnote ) if cache == True : self . __endnotemap = d return d
return the endnotes from the docx keyed to string id .
58,361
def footnotemap ( self , cache = True ) : if self . __footnotemap is not None and cache == True : return self . __footnotemap else : x = self . xml ( src = 'word/footnotes.xml' ) d = Dict ( ) if x is None : return d for footnote in x . root . xpath ( "w:footnote" , namespaces = self . NS ) : id = footnote . get ( "{%(w)s}id" % self . NS ) typ = footnote . get ( "{%(w)s}type" % self . NS ) d [ id ] = Dict ( id = id , type = typ , elem = footnote ) if cache == True : self . __footnotemap = d return d
return the footnotes from the docx keyed to string id .
58,362
def commentmap ( self , cache = True ) : if self . __commentmap is not None and cache == True : return self . __commentmap else : x = self . xml ( src = 'word/comments.xml' ) d = Dict ( ) if x is None : return d for comment in x . root . xpath ( "w:comment" , namespaces = self . NS ) : id = comment . get ( "{%(w)s}id" % self . NS ) typ = comment . get ( "{%(w)s}type" % self . NS ) d [ id ] = Dict ( id = id , type = typ , elem = comment ) if cache == True : self . __commentmap = d return d
return the comments from the docx keyed to string id .
58,363
def selector ( C , style ) : clas = C . classname ( style . name ) if style . type == 'paragraph' : outlineLvl = int ( ( style . properties . get ( 'outlineLvl' ) or { } ) . get ( 'val' ) or 8 ) + 1 if outlineLvl < 9 : tag = 'h%d' % outlineLvl else : tag = 'p' elif style . type == 'character' : tag = 'span' elif style . type == 'table' : tag = 'table' elif style . type == 'numbering' : tag = 'ol' return "%s.%s" % ( tag , clas )
return the selector for the given stylemap style
58,364
def load_collection_from_stream ( resource , stream , content_type ) : coll = create_staging_collection ( resource ) load_into_collection_from_stream ( coll , stream , content_type ) return coll
Creates a new collection for the registered resource and calls load_into_collection_from_stream with it .
58,365
def load_into_collection_from_file ( collection , filename , content_type = None ) : if content_type is None : ext = os . path . splitext ( filename ) [ 1 ] try : content_type = MimeTypeRegistry . get_type_for_extension ( ext ) except KeyError : raise ValueError ( 'Could not infer MIME type for file extension ' '"%s".' % ext ) load_into_collection_from_stream ( collection , open ( filename , 'rU' ) , content_type )
Loads resources from the specified file into the given collection resource .
58,366
def load_collection_from_file ( resource , filename , content_type = None ) : coll = create_staging_collection ( resource ) load_into_collection_from_file ( coll , filename , content_type = content_type ) return coll
Creates a new collection for the registered resource and calls load_into_collection_from_file with it .
58,367
def load_into_collection_from_url ( collection , url , content_type = None ) : parsed = urlparse . urlparse ( url ) scheme = parsed . scheme if scheme == 'file' : load_into_collection_from_file ( collection , parsed . path , content_type = content_type ) else : raise ValueError ( 'Unsupported URL scheme "%s".' % scheme )
Loads resources from the representation contained in the given URL into the given collection resource .
58,368
def load_collection_from_url ( resource , url , content_type = None ) : coll = create_staging_collection ( resource ) load_into_collection_from_url ( coll , url , content_type = content_type ) return coll
Creates a new collection for the registered resource and calls load_into_collection_from_url with it .
58,369
def load_into_collections_from_zipfile ( collections , zipfile ) : with ZipFile ( zipfile ) as zipf : names = zipf . namelist ( ) name_map = dict ( [ ( os . path . splitext ( name ) [ 0 ] , index ) for ( index , name ) in enumerate ( names ) ] ) for coll in collections : coll_name = get_collection_name ( coll ) index = name_map . get ( coll_name ) if index is None : continue coll_fn = names [ index ] ext = os . path . splitext ( coll_fn ) [ 1 ] try : content_type = MimeTypeRegistry . get_type_for_extension ( ext ) except KeyError : raise ValueError ( 'Could not infer MIME type for file ' 'extension "%s".' % ext ) coll_data = DecodingStream ( zipf . open ( coll_fn , 'r' ) ) load_into_collection_from_stream ( coll , coll_data , content_type )
Loads resources contained in the given ZIP archive into each of the given collections .
58,370
def build_resource_dependency_graph ( resource_classes , include_backrefs = False ) : def visit ( mb_cls , grph , path , incl_backrefs ) : for attr_name in get_resource_class_attribute_names ( mb_cls ) : if is_resource_class_terminal_attribute ( mb_cls , attr_name ) : continue child_descr = getattr ( mb_cls , attr_name ) child_mb_cls = get_member_class ( child_descr . attr_type ) if len ( path ) > 0 and child_mb_cls is path [ - 1 ] and not incl_backrefs : continue if not grph . has_node ( child_mb_cls ) : grph . add_node ( child_mb_cls ) path . append ( mb_cls ) visit ( child_mb_cls , grph , path , incl_backrefs ) path . pop ( ) if not grph . has_edge ( ( mb_cls , child_mb_cls ) ) : grph . add_edge ( ( mb_cls , child_mb_cls ) ) dep_grph = digraph ( ) for resource_class in resource_classes : mb_cls = get_member_class ( resource_class ) if not dep_grph . has_node ( mb_cls ) : dep_grph . add_node ( mb_cls ) visit ( mb_cls , dep_grph , [ ] , include_backrefs ) return dep_grph
Builds a graph of dependencies among the given resource classes .
58,371
def build_resource_graph ( resource , dependency_graph = None ) : def visit ( rc , grph , dep_grph ) : mb_cls = type ( rc ) attr_map = get_resource_class_attributes ( mb_cls ) for attr_name , attr in iteritems_ ( attr_map ) : if is_resource_class_terminal_attribute ( mb_cls , attr_name ) : continue child_mb_cls = get_member_class ( attr . attr_type ) if not dep_grph . has_edge ( ( mb_cls , child_mb_cls ) ) : continue child_rc = getattr ( rc , attr_name ) if is_resource_class_collection_attribute ( mb_cls , attr_name ) : for child_mb in child_rc : if not grph . has_node ( child_mb ) : grph . add_node ( child_mb ) grph . add_edge ( ( rc , child_mb ) ) visit ( child_mb , grph , dep_grph ) else : if not grph . has_node ( child_rc ) : grph . add_node ( child_rc ) grph . add_edge ( ( rc , child_rc ) ) visit ( child_rc , grph , dep_grph ) if dependency_graph is None : dependency_graph = build_resource_dependency_graph ( [ get_member_class ( resource ) ] ) graph = ResourceGraph ( ) if provides_member_resource ( resource ) : rcs = [ resource ] else : rcs = resource for rc in rcs : graph . add_node ( rc ) visit ( rc , graph , dependency_graph ) return graph
Traverses the graph of resources that is reachable from the given resource .
58,372
def find_connected_resources ( resource , dependency_graph = None ) : resource_graph = build_resource_graph ( resource , dependency_graph = dependency_graph ) entity_map = OrderedDict ( ) for mb in topological_sorting ( resource_graph ) : mb_cls = get_member_class ( mb ) ents = entity_map . get ( mb_cls ) if ents is None : ents = [ ] entity_map [ mb_cls ] = ents ents . append ( mb . get_entity ( ) ) return entity_map
Collects all resources connected to the given resource and returns a dictionary mapping member resource classes to new collections containing the members found .
58,373
def to_files ( self , resource , directory ) : collections = self . __collect ( resource ) for ( mb_cls , coll ) in iteritems_ ( collections ) : fn = get_write_collection_path ( mb_cls , self . __content_type , directory = directory ) with open_text ( os . path . join ( directory , fn ) ) as strm : dump_resource ( coll , strm , content_type = self . __content_type )
Dumps the given resource and all resources linked to it into a set of representation files in the given directory .
58,374
def to_zipfile ( self , resource , zipfile ) : rpr_map = self . to_strings ( resource ) with ZipFile ( zipfile , 'w' ) as zipf : for ( mb_cls , rpr_string ) in iteritems_ ( rpr_map ) : fn = get_collection_filename ( mb_cls , self . __content_type ) zipf . writestr ( fn , rpr_string , compress_type = ZIP_DEFLATED )
Dumps the given resource and all resources linked to it into the given ZIP file .
58,375
def read ( self ) : p = os . path . join ( self . path , self . name ) try : with open ( p ) as f : json_text = f . read ( ) except FileNotFoundError as e : raise JSONFileError ( e ) from e try : json . loads ( json_text ) except ( json . JSONDecodeError , TypeError ) as e : raise JSONFileError ( f"{e} Got {p}" ) from e return json_text
Returns the file contents as validated JSON text .
58,376
def exists ( self , batch_id = None ) : try : self . model . objects . get ( batch_id = batch_id ) except self . model . DoesNotExist : return False return True
Returns True if batch_id exists in the history .
58,377
def update ( self , filename = None , batch_id = None , prev_batch_id = None , producer = None , count = None , ) : if not filename : raise BatchHistoryError ( "Invalid filename. Got None" ) if not batch_id : raise BatchHistoryError ( "Invalid batch_id. Got None" ) if not prev_batch_id : raise BatchHistoryError ( "Invalid prev_batch_id. Got None" ) if not producer : raise BatchHistoryError ( "Invalid producer. Got None" ) if self . exists ( batch_id = batch_id ) : raise IntegrityError ( "Duplicate batch_id" ) try : obj = self . model . objects . get ( batch_id = batch_id ) except self . model . DoesNotExist : obj = self . model ( filename = filename , batch_id = batch_id , prev_batch_id = prev_batch_id , producer = producer , total = count , ) obj . transaction_file . name = filename obj . save ( ) return obj
Creates a history model instance.
58,378
def populate ( self , deserialized_txs = None , filename = None , retry = None ) : if not deserialized_txs : raise BatchError ( "Failed to populate batch. There are no objects to add." ) self . filename = filename if not self . filename : raise BatchError ( "Invalid filename. Got None" ) try : for deserialized_tx in deserialized_txs : self . peek ( deserialized_tx ) self . objects . append ( deserialized_tx . object ) break for deserialized_tx in deserialized_txs : self . objects . append ( deserialized_tx . object ) except DeserializationError as e : raise BatchDeserializationError ( e ) from e except JSONFileError as e : raise BatchDeserializationError ( e ) from e
Populates the batch with unsaved model instances from a generator of deserialized objects .
58,379
def peek ( self , deserialized_tx ) : self . batch_id = deserialized_tx . object . batch_id self . prev_batch_id = deserialized_tx . object . prev_batch_id self . producer = deserialized_tx . object . producer if self . batch_history . exists ( batch_id = self . batch_id ) : raise BatchAlreadyProcessed ( f"Batch {self.batch_id} has already been processed" ) if self . prev_batch_id != self . batch_id : if not self . batch_history . exists ( batch_id = self . prev_batch_id ) : raise InvalidBatchSequence ( f"Invalid import sequence. History does not exist for prev_batch_id. " f"Got file='{self.filename}', prev_batch_id=" f"{self.prev_batch_id}, batch_id={self.batch_id}." )
Peeks into the first tx and sets self attrs, or raises.
58,380
def save ( self ) : saved = 0 if not self . objects : raise BatchError ( "Save failed. Batch is empty" ) for deserialized_tx in self . objects : try : self . model . objects . get ( pk = deserialized_tx . pk ) except self . model . DoesNotExist : data = { } for field in self . model . _meta . get_fields ( ) : try : data . update ( { field . name : getattr ( deserialized_tx , field . name ) } ) except AttributeError : pass self . model . objects . create ( ** data ) saved += 1 return saved
Saves all model instances in the batch.
58,381
def import_batch ( self , filename ) : batch = self . batch_cls ( ) json_file = self . json_file_cls ( name = filename , path = self . path ) try : deserialized_txs = json_file . deserialized_objects except JSONFileError as e : raise TransactionImporterError ( e ) from e try : batch . populate ( deserialized_txs = deserialized_txs , filename = json_file . name ) except ( BatchDeserializationError , InvalidBatchSequence , BatchAlreadyProcessed , ) as e : raise TransactionImporterError ( e ) from e batch . save ( ) batch . update_history ( ) return batch
Imports the batch of outgoing transactions into model IncomingTransaction .
58,382
def timelimit ( timeout ) : def _1 ( function ) : def _2 ( * args , ** kw ) : class Dispatch ( threading . Thread ) : def __init__ ( self ) : threading . Thread . __init__ ( self ) self . result = None self . error = None self . setDaemon ( True ) self . start ( ) def run ( self ) : try : self . result = function ( * args , ** kw ) except : self . error = sys . exc_info ( ) c = Dispatch ( ) c . join ( timeout ) if c . isAlive ( ) : raise TimeoutError , 'took too long' if c . error : raise c . error [ 0 ] , c . error [ 1 ] return c . result return _2 return _1
borrowed from web . py
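Hypothetical usage of the decorator (Python 2 era, matching the thread API used above):

import time

@timelimit(2.0)
def slow():
    time.sleep(5)
    return 'done'

slow()  # raises TimeoutError after roughly 2 seconds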
58,383
def _populateBuffer ( self , stream , n ) : try : for x in xrange ( n ) : output = stream . next ( ) self . _buffer . write ( output ) except StopIteration , e : self . _deferred . callback ( None ) except Exception , e : self . _deferred . errback ( e ) else : self . delayedCall = reactor . callLater ( CALL_DELAY , self . _populateBuffer , stream , n )
Iterator that returns N steps of the genshi stream .
58,384
def create_message ( json_meta , data , data_type = 0 , version = b'\x00\x01@\x00' ) : __check_data ( data ) meta = __prepare_meta ( json_meta ) data = __compress ( json_meta , data ) header = __create_machine_header ( json_meta , data , data_type , version ) return header + meta + data
Create message containing meta and data in df - envelope format .
58,385
def parse_from_file ( filename , nodata = False ) : header = None with open ( filename , "rb" ) as file : header = read_machine_header ( file ) meta_raw = file . read ( header [ 'meta_len' ] ) meta = __parse_meta ( meta_raw , header ) data = b'' if not nodata : data = __decompress ( meta , file . read ( header [ 'data_len' ] ) ) return header , meta , data
Parse df message from file .
58,386
def parse_message ( message , nodata = False ) : header = read_machine_header ( message ) h_len = __get_machine_header_length ( header ) meta_raw = message [ h_len : h_len + header [ 'meta_len' ] ] meta = __parse_meta ( meta_raw , header ) data_start = h_len + header [ 'meta_len' ] data = b'' if not nodata : data = __decompress ( meta , message [ data_start : data_start + header [ 'data_len' ] ] ) return header , meta , data
Parse df message from bytearray .
58,387
def read_machine_header ( data ) : if isinstance ( data , ( bytes , bytearray ) ) : stream = io . BytesIO ( data ) elif isinstance ( data , io . BufferedReader ) : stream = data else : raise ValueError ( "data should be either bytearray or file 'rb' mode." ) header = dict ( ) header_type = stream . read ( 6 ) if header_type == b"#!\x00\x01@\x00" : header [ 'type' ] = header_type [ 2 : 6 ] header [ 'time' ] = struct . unpack ( '>I' , stream . read ( 4 ) ) [ 0 ] header [ 'meta_type' ] = struct . unpack ( '>I' , stream . read ( 4 ) ) [ 0 ] header [ 'meta_len' ] = struct . unpack ( '>I' , stream . read ( 4 ) ) [ 0 ] header [ 'data_type' ] = struct . unpack ( '>I' , stream . read ( 4 ) ) [ 0 ] header [ 'data_len' ] = struct . unpack ( '>I' , stream . read ( 4 ) ) [ 0 ] stream . read ( 4 ) elif header_type == b"#~DF02" : header [ 'type' ] = header_type [ 2 : 6 ] header [ 'meta_type' ] = stream . read ( 2 ) header [ 'meta_len' ] = struct . unpack ( '>I' , stream . read ( 4 ) ) [ 0 ] header [ 'data_len' ] = struct . unpack ( '>I' , stream . read ( 4 ) ) [ 0 ] stream . read ( 4 ) else : raise NotImplementedError ( "Parser for machine header %s not implemented" % ( header_type . decode ( ) ) ) return header
Parse binary header .
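A sketch of the '#~DF02' header layout implied by the parser: 6-byte magic, 2-byte meta_type, two big-endian uint32 lengths, then 4 reserved bytes. The field values below are made up for illustration:

import struct

raw = (b'#~DF02'               # magic; header['type'] becomes b'DF02'
       + b'JS'                 # meta_type (2 bytes)
       + struct.pack('>I', 2)  # meta_len
       + struct.pack('>I', 0)  # data_len
       + b'\x00' * 4)          # reserved
header = read_machine_header(raw)
print(header['meta_len'], header['data_len'])  # 2 0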
58,388
def get_messages_from_stream ( data ) : messages = [ ] iterator = HEADER_RE . finditer ( data ) last_pos = 0 for match in iterator : pos = match . span ( ) [ 0 ] header = read_machine_header ( data [ pos : ] ) h_len = __get_machine_header_length ( header ) cur_last_pos = pos + h_len + header [ 'meta_len' ] + header [ 'data_len' ] if cur_last_pos > len ( data ) : break header , meta , bin_data = parse_message ( data [ pos : ] ) messages . append ( { 'header' : header , 'meta' : meta , 'data' : bin_data } ) last_pos = cur_last_pos data = data [ last_pos : ] return messages , data
Extract complete messages from the stream and cut them out of the stream.
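A hypothetical streaming loop built on top of it; read_chunks and handle are assumed helpers, not part of the module:

buffer = b''
for chunk in read_chunks():  # assumed byte-chunk source, e.g. a socket
    buffer += chunk
    # Pull out every complete message; keep the remainder for next time.
    messages, buffer = get_messages_from_stream(buffer)
    for msg in messages:
        handle(msg['meta'], msg['data'])  # assumed consumer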
58,389
def clone ( self , options = None , attribute_options = None ) : copied_cfg = self . __configurations [ - 1 ] . copy ( ) upd_cfg = type ( copied_cfg ) ( options = options , attribute_options = attribute_options ) copied_cfg . update ( upd_cfg ) return self . __class__ ( self . __mp_reg , self . __mapped_cls , self . __de_cls , copied_cfg )
Returns a clone of this mapping that is configured with the given option and attribute option dictionaries .
58,390
def update ( self , options = None , attribute_options = None ) : attr_map = self . __get_attribute_map ( self . __mapped_cls , None , 0 ) for attributes in attribute_options : for attr_name in attributes : if not attr_name in attr_map : raise AttributeError ( 'Trying to configure non-existing ' 'resource attribute "%s"' % ( attr_name ) ) cfg = RepresenterConfiguration ( options = options , attribute_options = attribute_options ) self . configuration . update ( cfg )
Updates this mapping with the given option and attribute option maps .
58,391
def get_attribute_map ( self , mapped_class = None , key = None ) : if mapped_class is None : mapped_class = self . __mapped_cls if key is None : key = MappedAttributeKey ( ( ) ) return OrderedDict ( [ ( attr . resource_attr , attr ) for attr in self . _attribute_iterator ( mapped_class , key ) ] )
Returns an ordered map of the mapped attributes for the given mapped class and attribute key .
58,392
def create_data_element ( self , mapped_class = None ) : if not mapped_class is None and mapped_class != self . __mapped_cls : mp = self . __mp_reg . find_or_create_mapping ( mapped_class ) data_el = mp . create_data_element ( ) else : data_el = self . __de_cls . create ( ) return data_el
Returns a new data element for the given mapped class .
58,393
def create_linked_data_element ( self , url , kind , id = None , relation = None , title = None ) : mp = self . __mp_reg . find_or_create_mapping ( Link ) return mp . data_element_class . create ( url , kind , id = id , relation = relation , title = title )
Returns a new linked data element for the given url and kind .
58,394
def create_data_element_from_resource ( self , resource ) : mp = self . __mp_reg . find_or_create_mapping ( type ( resource ) ) return mp . data_element_class . create_from_resource ( resource )
Returns a new data element for the given resource object .
58,395
def create_linked_data_element_from_resource ( self , resource ) : mp = self . __mp_reg . find_or_create_mapping ( Link ) return mp . data_element_class . create_from_resource ( resource )
Returns a new linked data element for the given resource object .
58,396
def map_to_resource ( self , data_element , resource = None ) : if not IDataElement . providedBy ( data_element ) : raise ValueError ( 'Expected data element, got %s.' % data_element ) if resource is None : coll = create_staging_collection ( data_element . mapping . mapped_class ) agg = coll . get_aggregate ( ) agg . add ( data_element ) if IMemberDataElement . providedBy ( data_element ) : ent = next ( iter ( agg ) ) resource = data_element . mapping . mapped_class . create_from_entity ( ent ) else : resource = coll else : resource . update ( data_element ) return resource
Maps the given data element to a new resource or updates the given resource .
58,397
def map_to_data_element ( self , resource ) : trv = ResourceTreeTraverser ( resource , self . as_pruning ( ) ) visitor = DataElementBuilderResourceTreeVisitor ( self ) trv . run ( visitor ) return visitor . data_element
Maps the given resource to a data element tree .
58,398
def push_configuration ( self , configuration ) : self . __mapped_attr_cache . clear ( ) self . __configurations . append ( configuration )
Pushes the given configuration object on the stack of configurations managed by this mapping and makes it the active configuration .
58,399
def pop_configuration ( self ) : if len ( self . __configurations ) == 1 : raise IndexError ( 'Can not pop the last configuration from the ' 'stack of configurations.' ) self . __configurations . pop ( ) self . __mapped_attr_cache . clear ( )
Pops the currently active configuration from the stack of configurations managed by this mapping.
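A plausible way this push/pop pair backs the context returned by with_updated_configuration above; a sketch only, mirroring the copy-and-update pattern from clone, with mapping.configuration assumed from the update method. The library's actual context manager may differ:

from contextlib import contextmanager

@contextmanager
def updated_configuration(mapping, options=None, attribute_options=None):
    # Build an updated copy of the active configuration, make it
    # active for the duration of the block, then restore the old one.
    cfg = mapping.configuration.copy()
    cfg.update(type(cfg)(options=options,
                         attribute_options=attribute_options))
    mapping.push_configuration(cfg)
    try:
        yield mapping
    finally:
        mapping.pop_configuration()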