idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
12,400 | def model ( self , ** kwargs ) : for key , td in self . tds . items ( ) : td . model ( ** kwargs ) | Run the forward modeling for all frequencies . |
12,401 | def measurements ( self ) : m_all = np . array ( [ self . tds [ key ] . measurements ( ) for key in sorted ( self . tds . keys ( ) ) ] ) return m_all | Return modeled measurements |
12,402 | def get_measurement_responses ( self ) : configs = self . tds [ sorted ( self . tds . keys ( ) ) [ 0 ] ] . configs . configs measurements = self . measurements ( ) responses = { } for config , sip_measurement in zip ( configs , np . rollaxis ( measurements , 1 ) ) : sip = sip_response ( frequencies = self . frequencies , rmag = sip_measurement [ : , 0 ] , rpha = sip_measurement [ : , 1 ] ) responses [ tuple ( config ) ] = sip return responses | Return a dictionary of sip_responses for the modeled SIP spectra |
12,403 | def create_database ( name , number = 1 , force_clear = False ) : print 'Got:' print 'name' , name , type ( name ) print 'number' , number , type ( number ) print 'force_clear' , force_clear , type ( force_clear ) | Command to create a database |
12,404 | def _get_long_path_name ( path ) : buf = ctypes . create_unicode_buffer ( len ( path ) + 1 ) GetLongPathNameW = ctypes . windll . kernel32 . GetLongPathNameW res = GetLongPathNameW ( path , buf , len ( path ) + 1 ) if res == 0 or res > 260 : return path else : return buf . value | Returns the long path name for a Windows path i . e . the properly cased path of an existing file or directory . |
12,405 | def get_dependency_walker ( ) : for dirname in os . getenv ( 'PATH' , '' ) . split ( os . pathsep ) : filename = os . path . join ( dirname , 'depends.exe' ) if os . path . isfile ( filename ) : logger . info ( 'Dependency Walker found at "{}"' . format ( filename ) ) return filename temp_exe = os . path . join ( tempfile . gettempdir ( ) , 'depends.exe' ) temp_dll = os . path . join ( tempfile . gettempdir ( ) , 'depends.dll' ) if os . path . isfile ( temp_exe ) : logger . info ( 'Dependency Walker found at "{}"' . format ( temp_exe ) ) return temp_exe logger . info ( 'Dependency Walker not found. Downloading ...' ) with urlopen ( 'http://dependencywalker.com/depends22_x64.zip' ) as fp : data = fp . read ( ) logger . info ( 'Extracting Dependency Walker to "{}"' . format ( temp_exe ) ) with zipfile . ZipFile ( io . BytesIO ( data ) ) as fp : with fp . open ( 'depends.exe' ) as src : with open ( temp_exe , 'wb' ) as dst : shutil . copyfileobj ( src , dst ) with fp . open ( 'depends.dll' ) as src : with open ( temp_dll , 'wb' ) as dst : shutil . copyfileobj ( src , dst ) return temp_exe | Checks if depends . exe is in the system PATH . If not it will be downloaded and extracted to a temporary directory . Note that the file will not be deleted afterwards . |
12,406 | def prepare ( self , setup_func ) : assert inspect . isfunction ( setup_func ) argsspec = inspect . getargspec ( setup_func ) if argsspec . args : raise ValueError ( "prepare function shouldn't have any arguments" ) def decorator ( command_func ) : @ functools . wraps ( command_func ) def wrapper ( * args , ** kwgs ) : setup_func ( ) return command_func ( * args , ** kwgs ) return wrapper return decorator | This decorator wrap a function which setup a environment before running a command |
12,407 | def addPort ( n : LNode , intf : Interface ) : d = PortTypeFromDir ( intf . _direction ) ext_p = LayoutExternalPort ( n , name = intf . _name , direction = d , node2lnode = n . _node2lnode ) ext_p . originObj = originObjOfPort ( intf ) n . children . append ( ext_p ) addPortToLNode ( ext_p , intf , reverseDirection = True ) return ext_p | Add LayoutExternalPort for interface |
12,408 | def drawtree ( self ) : self . win . erase ( ) self . line = 0 for child , depth in self . traverse ( ) : child . curline = self . curline child . picked = self . picked child . expanded = self . expanded child . sized = self . sized if depth == 0 : continue if self . line == self . curline : self . color . curline ( child . name , child . picked ) children = child . children name = child . name else : self . color . default ( child . name , child . picked ) if child . name in self . sized and not self . sized [ child . name ] : self . sized [ child . name ] = " [" + du ( child . name ) + "]" child . drawline ( depth , self . line , self . win ) self . line += 1 self . win . refresh ( ) self . mkheader ( name ) self . mkfooter ( name , children ) | Loop over the object process path attribute sets and drawlines based on their current contents . |
12,409 | def import_config ( config_path ) : if not os . path . isfile ( config_path ) : raise ConfigBuilderError ( 'Could not find config file: ' + config_path ) loader = importlib . machinery . SourceFileLoader ( config_path , config_path ) module = loader . load_module ( ) if not hasattr ( module , 'config' ) or not isinstance ( module . config , Config ) : raise ConfigBuilderError ( 'Could not load config file "{}": config files must contain ' 'a variable called "config" that is ' 'assigned to a Config object.' . format ( config_path ) ) return module . config | Import a Config from a given path relative to the current directory . |
12,410 | def grid ( fitness_function , no_dimensions , step_size ) : best_fitness = float ( "-inf" ) best_arguments = None for arguments in make_lists ( no_dimensions , step_size ) : fitness = fitness_function ( tuple ( arguments ) ) if fitness > best_fitness : best_fitness = fitness best_arguments = tuple ( arguments ) return best_arguments | Grid search using a fitness function over a given number of dimensions and a given step size between inclusive limits of 0 and 1 . |
12,411 | def make_lists ( no_dimensions , step_size , centre_steps = True ) : if no_dimensions == 0 : return [ [ ] ] sub_lists = make_lists ( no_dimensions - 1 , step_size , centre_steps = centre_steps ) return [ [ step_size * value + ( 0.5 * step_size if centre_steps else 0 ) ] + sub_list for value in range ( 0 , int ( ( 1 / step_size ) ) ) for sub_list in sub_lists ] | Create a list of lists of floats covering every combination across no_dimensions of points of integer step size between 0 and 1 inclusive . |
12,412 | def portCnt ( port ) : if port . children : return sum ( map ( lambda p : portCnt ( p ) , port . children ) ) else : return 1 | recursively count number of ports without children |
12,413 | def copyPort ( port , targetLNode , reverseDir , topPortName = None ) : newP = _copyPort ( port , targetLNode , reverseDir ) if topPortName is not None : newP . name = topPortName return newP | Create identical port on targetNode |
12,414 | def walkSignalPorts ( rootPort : LPort ) : if rootPort . children : for ch in rootPort . children : yield from walkSignalPorts ( ch ) else : yield rootPort | recursively walk ports without any children |
12,415 | def agent_error ( e : requests . HTTPError , fatal = True ) : try : data = e . response . json ( ) details = data [ 'detail' ] except JSONDecodeError : details = e . response . text or str ( e . response ) lines = ( '[AGENT] {}' . format ( line ) for line in details . splitlines ( ) ) msg = '\n' + '\n' . join ( lines ) if fatal : fatal_error ( msg ) else : error ( msg ) | Prints an agent error and exits |
12,416 | def parse_stack_refs ( stack_references : List [ str ] ) -> List [ str ] : stack_names = [ ] references = list ( stack_references ) references . reverse ( ) while references : current = references . pop ( ) file_path = os . path . abspath ( current ) if os . path . exists ( file_path ) and os . path . isfile ( file_path ) : try : with open ( file_path ) as fd : data = yaml . safe_load ( fd ) current = data [ 'SenzaInfo' ] [ 'StackName' ] except ( KeyError , TypeError , YAMLError ) : raise click . UsageError ( 'Invalid senza definition {}' . format ( current ) ) stack_names . append ( current ) return stack_names | Check if items included in stack_references are Senza definition file paths or stack name reference . If Senza definition file path substitute the definition file path by the stack name in the same position on the list . |
12,417 | def list_stacks ( stack_ref : List [ str ] , all : bool , remote : str , region : str , watch : int , output : str ) : lizzy = setup_lizzy_client ( remote ) stack_references = parse_stack_refs ( stack_ref ) while True : rows = [ ] for stack in lizzy . get_stacks ( stack_references , region = region ) : creation_time = dateutil . parser . parse ( stack [ 'creation_time' ] ) rows . append ( { 'stack_name' : stack [ 'stack_name' ] , 'version' : stack [ 'version' ] , 'status' : stack [ 'status' ] , 'creation_time' : creation_time . timestamp ( ) , 'description' : stack [ 'description' ] } ) rows . sort ( key = lambda x : ( x [ 'stack_name' ] , x [ 'version' ] ) ) with OutputFormat ( output ) : print_table ( 'stack_name version status creation_time description' . split ( ) , rows , styles = STYLES , titles = TITLES ) if watch : time . sleep ( watch ) click . clear ( ) else : break | List Lizzy stacks |
12,418 | def traffic ( stack_name : str , stack_version : Optional [ str ] , percentage : Optional [ int ] , region : Optional [ str ] , remote : Optional [ str ] , output : Optional [ str ] ) : lizzy = setup_lizzy_client ( remote ) if percentage is None : stack_reference = [ stack_name ] with Action ( 'Requesting traffic info..' ) : stack_weights = [ ] for stack in lizzy . get_stacks ( stack_reference , region = region ) : if stack [ 'status' ] in [ 'CREATE_COMPLETE' , 'UPDATE_COMPLETE' ] : stack_id = '{stack_name}-{version}' . format_map ( stack ) traffic = lizzy . get_traffic ( stack_id , region = region ) stack_weights . append ( { 'stack_name' : stack_name , 'version' : stack [ 'version' ] , 'identifier' : stack_id , 'weight%' : traffic [ 'weight' ] } ) cols = 'stack_name version identifier weight%' . split ( ) with OutputFormat ( output ) : print_table ( cols , sorted ( stack_weights , key = lambda x : x [ 'identifier' ] ) ) else : with Action ( 'Requesting traffic change..' ) : stack_id = '{stack_name}-{stack_version}' . format_map ( locals ( ) ) lizzy . traffic ( stack_id , percentage , region = region ) | Manage stack traffic |
12,419 | def scale ( stack_name : str , stack_version : Optional [ str ] , new_scale : int , region : Optional [ str ] , remote : Optional [ str ] ) : lizzy = setup_lizzy_client ( remote ) with Action ( 'Requesting rescale..' ) : stack_id = '{stack_name}-{stack_version}' . format_map ( locals ( ) ) lizzy . scale ( stack_id , new_scale , region = region ) | Rescale a stack |
12,420 | def delete ( stack_ref : List [ str ] , region : str , dry_run : bool , force : bool , remote : str ) : lizzy = setup_lizzy_client ( remote ) stack_refs = get_stack_refs ( stack_ref ) all_with_version = all ( stack . version is not None for stack in stack_refs ) if ( not all_with_version and not dry_run and not force ) : fatal_error ( 'Error: {} matching stacks found. ' . format ( len ( stack_refs ) ) + 'Please use the "--force" flag if you really want to delete multiple stacks.' ) output = '' for stack in stack_refs : if stack . version is not None : stack_id = '{stack.name}-{stack.version}' . format ( stack = stack ) else : stack_id = stack . name with Action ( "Requesting stack '{stack_id}' deletion.." , stack_id = stack_id ) : output = lizzy . delete ( stack_id , region = region , dry_run = dry_run ) print ( output ) | Delete Cloud Formation stacks |
12,421 | def pydict2xml ( filename , metadata_dict , ** kwargs ) : try : f = open ( filename , 'w' ) f . write ( pydict2xmlstring ( metadata_dict , ** kwargs ) . encode ( 'utf-8' ) ) f . close ( ) except : raise MetadataGeneratorException ( 'Failed to create an XML file. Filename: %s' % ( filename ) ) | Create an XML file . |
12,422 | def pydict2xmlstring ( metadata_dict , ** kwargs ) : ordering = kwargs . get ( 'ordering' , UNTL_XML_ORDER ) root_label = kwargs . get ( 'root_label' , 'metadata' ) root_namespace = kwargs . get ( 'root_namespace' , None ) elements_namespace = kwargs . get ( 'elements_namespace' , None ) namespace_map = kwargs . get ( 'namespace_map' , None ) root_attributes = kwargs . get ( 'root_attributes' , None ) if root_namespace and namespace_map : root = Element ( root_namespace + root_label , nsmap = namespace_map ) elif namespace_map : root = Element ( root_label , nsmap = namespace_map ) else : root = Element ( root_label ) if root_attributes : for key , value in root_attributes . items ( ) : root . attrib [ key ] = value for metadata_key in ordering : if metadata_key in metadata_dict : for element in metadata_dict [ metadata_key ] : if 'content' in element and 'qualifier' in element : create_dict_subelement ( root , metadata_key , element [ 'content' ] , attribs = { 'qualifier' : element [ 'qualifier' ] } , namespace = elements_namespace , ) elif 'content' in element and 'role' in element : create_dict_subelement ( root , metadata_key , element [ 'content' ] , attribs = { 'role' : element [ 'role' ] } , namespace = elements_namespace , ) elif 'content' in element and 'scheme' in element : create_dict_subelement ( root , metadata_key , element [ 'content' ] , attribs = { 'scheme' : element [ 'scheme' ] } , namespace = elements_namespace , ) elif 'content' in element : create_dict_subelement ( root , metadata_key , element [ 'content' ] , namespace = elements_namespace , ) return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring ( root , pretty_print = True ) | Create an XML string from a metadata dictionary . |
12,423 | def create_dict_subelement ( root , subelement , content , ** kwargs ) : attribs = kwargs . get ( 'attribs' , None ) namespace = kwargs . get ( 'namespace' , None ) key = subelement if namespace and attribs : subelement = SubElement ( root , namespace + subelement , attribs ) elif namespace : subelement = SubElement ( root , namespace + subelement ) elif attribs : subelement = SubElement ( root , subelement , attribs ) else : subelement = SubElement ( root , subelement ) if not isinstance ( content , dict ) : subelement . text = content elif key == 'degree' : for degree_order_key in DEGREE_ORDER : for descriptor , value in content . items ( ) : if descriptor == degree_order_key : sub_descriptors = SubElement ( subelement , descriptor ) sub_descriptors . text = value else : for descriptor , value in content . items ( ) : sub_descriptors = SubElement ( subelement , descriptor ) sub_descriptors . text = value | Create a XML subelement from a Python dictionary . |
12,424 | def highwiredict2xmlstring ( highwire_elements , ordering = HIGHWIRE_ORDER ) : highwire_elements . sort ( key = lambda obj : ordering . index ( obj . name ) ) root = Element ( 'metadata' ) for element in highwire_elements : attribs = { 'name' : element . name , 'content' : element . content } SubElement ( root , 'meta' , attribs ) return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring ( root , pretty_print = True ) | Create an XML string from the highwire data dictionary . |
12,425 | def get ( binary_name ) : if binary_name not in binaries : raise Exception ( 'binary_name: {0} not found' . format ( binary_name ) ) system = platform . system ( ) binary_list = binaries [ binary_name ] [ system ] for filename in binary_list : valid_file = shutil . which ( filename ) if valid_file : return os . path . abspath ( valid_file ) | return a valid path to the given binary . Return an error if no existing binary can be found . |
12,426 | def get_upgrade_lock ( dbname , connect_str , timeout = LOCK_TIMEOUT ) : engine = sqlalchemy . create_engine ( connect_str ) cursor = engine . execute ( "SELECT GET_LOCK('upgrade_{}', {})" . format ( dbname , timeout ) ) lock = cursor . scalar ( ) cursor . close ( ) while not lock : logger . info ( 'Cannot acquire {} upgrade lock. Sleeping {} seconds.' . format ( dbname , timeout ) ) time . sleep ( timeout ) cursor = engine . execute ( "SELECT GET_LOCK('upgrade_{}', {})" . format ( dbname , timeout ) ) lock = cursor . scalar ( ) cursor . close ( ) logger . info ( 'Acquired {} upgrade lock' . format ( dbname ) ) yield lock cursor = engine . execute ( "SELECT RELEASE_LOCK('upgrade_{}')" . format ( dbname ) ) cursor . close ( ) engine . dispose ( ) logger . info ( 'Released {} upgrade lock' . format ( dbname ) ) | Wait until you can get the lock then yield it and eventually release it . |
12,427 | def upgrade ( dbname , connect_str , alembic_conf ) : if not sqlalchemy_utils . database_exists ( connect_str ) : logger . info ( 'Creating {}' . format ( dbname ) ) try : sqlalchemy_utils . create_database ( connect_str ) except sqlalchemy . exc . ProgrammingError as exc : if not sqlalchemy_utils . database_exists ( connect_str ) : logger . error ( 'Could not create {}' . format ( dbname ) ) raise exc with get_upgrade_lock ( dbname , connect_str ) : alembic_config = alembic . config . Config ( alembic_conf , attributes = { 'configure_logger' : False } ) logger . info ( 'Upgrading {} to head' . format ( dbname ) ) alembic . command . upgrade ( alembic_config , 'head' ) | Get the database s upgrade lock and run alembic . |
12,428 | def write_to_file ( self , filename ) : fid = open ( filename , 'w' ) for key in self . key_order : if ( key == - 1 ) : fid . write ( '\n' ) else : fid . write ( '{0}\n' . format ( self [ key ] ) ) fid . close ( ) | Write the configuration to a file . Use the correct order of values . |
12,429 | def parse ( self , importpath ) : self . native = False self . _prefix = "" self . _package = "" url = re . sub ( r'http://' , '' , importpath ) url = re . sub ( r'https://' , '' , url ) if url . split ( '/' ) [ 0 ] in self . native_packages [ "packages" ] : self . native = True return self for regex in self . known_ipprefixes : match = re . search ( regex , url ) if match : self . _prefix = match . group ( 1 ) if match . group ( 3 ) : self . _package = match . group ( 3 ) return self raise ValueError ( "Import path prefix for '%s' not recognized" % importpath ) | Parse import path . Determine if the path is native or starts with known prefix . |
12,430 | def sub_retab ( match ) : r before = match . group ( 1 ) tabs = len ( match . group ( 2 ) ) return before + ( ' ' * ( TAB_SIZE * tabs - len ( before ) % TAB_SIZE ) ) | r Remove all tabs and convert them into spaces . |
12,431 | def handle_whitespace ( text ) : r text = re_retab . sub ( sub_retab , text ) text = re_whitespace . sub ( '' , text ) . strip ( ) return text | r Handles whitespace cleanup . |
12,432 | def get_variables ( text ) : variables = { var : value for var , value in re_vars . findall ( text ) } text = re_vars . sub ( '' , text ) return text , variables | Extracts variables that can be used in templating engines . |
12,433 | def get_references ( text ) : references = { } for ref_id , link , _ , title in re_references . findall ( text ) : ref_id = re . sub ( r'<(.*?)>' , r'\1' , ref_id ) . lower ( ) . strip ( ) references [ ref_id ] = ( link , title ) text = re_references . sub ( '' , text ) return text , references | Retrieves all link references within the text . |
12,434 | def get_footnote_backreferences ( text , markdown_obj ) : footnotes = OrderedDict ( ) for footnote_id , footnote in re_footnote_backreferences . findall ( text ) : footnote_id = re . sub ( r'<(.*?)>' , r'\1' , footnote_id ) . lower ( ) . strip ( ) footnote = re . sub ( r'^[ ]{0,4}' , '' , footnote , flags = re . M ) footnotes [ footnote_id ] = footnote text = re_footnote_backreferences . sub ( '' , text ) return text , footnotes | Retrieves all footnote backreferences within the text . |
12,435 | def hash_blocks ( text , hashes ) : def sub ( match ) : block = match . group ( 1 ) hashed = hash_text ( block , 'block' ) hashes [ hashed ] = block return '\n\n' + hashed + '\n\n' return re_block . sub ( sub , text ) | Hashes HTML block tags . |
12,436 | def hash_lists ( text , hashes , markdown_obj ) : for style , marker in ( ( 'u' , '[+*-]' ) , ( 'o' , r'\d+\.' ) ) : list_re = re . compile ( re_list % ( marker , marker ) , re . S | re . X ) for match in list_re . finditer ( text ) : if not match : continue lst = match . group ( 1 ) items = re . split ( r'(?:\n|\A) {0,3}%s ' % marker , lst ) [ 1 : ] whole_list = '' for item in items : item = re . sub ( r'^ {1,4}' , '' , item , flags = re . M ) item = markdown_obj . convert ( item ) par_match = re . match ( '<p>(.*?)</p>' , item , flags = re . S ) if par_match and par_match . group ( 0 ) == item . strip ( ) : item = par_match . group ( 1 ) whole_list += '<li>{}</li>\n' . format ( item ) whole_list = '<{0}l>\n{1}\n</{0}l>' . format ( style , re . sub ( '^' , ' ' , whole_list . strip ( ) , flags = re . M ) ) hashed = hash_text ( whole_list , 'list' ) hashes [ hashed ] = whole_list start = text . index ( match . group ( 0 ) ) end = start + len ( match . group ( 0 ) ) text = text [ : start ] + '\n\n' + hashed + '\n\n' + text [ end : ] return text | Hashes ordered and unordered lists . |
12,437 | def hash_blockquotes ( text , hashes , markdown_obj ) : def sub ( match ) : block = match . group ( 1 ) . strip ( ) block = re . sub ( r'(?:(?<=\n)|(?<=\A))> ?' , '' , block ) block = markdown_obj . convert ( block ) block = '<blockquote>{}</blockquote>' . format ( block ) hashed = hash_text ( block , 'blockquote' ) hashes [ hashed ] = block return '\n\n' + hashed + '\n\n' return re_blockquote . sub ( sub , text ) | Hashes block quotes . |
12,438 | def hash_codes ( text , hashes ) : def sub ( match ) : code = '<code>{}</code>' . format ( escape ( match . group ( 2 ) ) ) hashed = hash_text ( code , 'code' ) hashes [ hashed ] = code return hashed return re_code . sub ( sub , text ) | Hashes inline code tags . |
12,439 | def hash_tags ( text , hashes ) : def sub ( match ) : hashed = hash_text ( match . group ( 0 ) , 'tag' ) hashes [ hashed ] = match . group ( 0 ) return hashed return re_tag . sub ( sub , text ) | Hashes any non - block tags . |
12,440 | def unhash ( text , hashes ) : def retrieve_match ( match ) : return hashes [ match . group ( 0 ) ] while re_hash . search ( text ) : text = re_hash . sub ( retrieve_match , text ) text = re_pre_tag . sub ( lambda m : re . sub ( '^' + m . group ( 1 ) , '' , m . group ( 0 ) , flags = re . M ) , text ) return text | Unhashes all hashed entites in the hashes dictionary . |
12,441 | def paragraph_sub ( match ) : text = re . sub ( r' \n' , r'\n<br/>\n' , match . group ( 0 ) . strip ( ) ) return '<p>{}</p>' . format ( text ) | Captures paragraphs . |
12,442 | def truncateGraph ( graph , root_nodes ) : subgraph = Graph ( ) for node in root_nodes : subgraph = GraphUtils . joinGraphs ( subgraph , GraphUtils . getReacheableSubgraph ( graph , node ) ) return subgraph | Create a set of all nodes containg the root_nodes and all nodes reacheable from them |
12,443 | def filterGraph ( graph , node_fnc ) : nodes = filter ( lambda l : node_fnc ( l ) , graph . nodes ( ) ) edges = { } gedges = graph . edges ( ) for u in gedges : if u not in nodes : continue for v in gedges [ u ] : if v not in nodes : continue try : edges [ u ] . append ( v ) except KeyError : edges [ u ] = [ v ] return Graph ( nodes , edges ) | Remove all nodes for with node_fnc does not hold |
12,444 | def listdir ( self , path ) : for f in os . listdir ( path ) : if not f . startswith ( '.' ) : yield f | Return a list of all non dotfiles in a given directory . |
12,445 | def getchildren ( self ) : try : if self . hidden : return [ os . path . join ( self . name , child ) for child in sorted ( self . listdir ( self . name ) ) ] else : return [ os . path . join ( self . name , child ) for child in sorted ( os . listdir ( self . name ) ) ] except OSError : return None | Create list of absolute paths to be used to instantiate path objects for traversal based on whether or not hidden attribute is set . |
12,446 | def getpaths ( self ) : self . children = self . getchildren ( ) if self . children is None : return if self . paths is None : self . paths = [ Paths ( self . screen , os . path . join ( self . name , child ) , self . hidden , self . picked , self . expanded , self . sized ) for child in self . children ] return self . paths | If we have children use a list comprehension to instantiate new paths objects to traverse . |
12,447 | def traverse ( self ) : yield self , 0 if self . name in self . expanded : for path in self . getpaths ( ) : for child , depth in path . traverse ( ) : yield child , depth + 1 | Recursive generator that lazily unfolds the filesystem . |
12,448 | def line_line_intersect ( x , y ) : A = x [ 0 ] * y [ 1 ] - y [ 0 ] * x [ 1 ] B = x [ 2 ] * y [ 3 ] - y [ 2 ] * x [ 3 ] C = ( x [ 0 ] - x [ 1 ] ) * ( y [ 2 ] - y [ 3 ] ) - ( y [ 0 ] - y [ 1 ] ) * ( x [ 2 ] - x [ 3 ] ) Ix = ( A * ( x [ 2 ] - x [ 3 ] ) - ( x [ 0 ] - x [ 1 ] ) * B ) / C Iy = ( A * ( y [ 2 ] - y [ 3 ] ) - ( y [ 0 ] - y [ 1 ] ) * B ) / C return Ix , Iy | Compute the intersection point of two lines |
12,449 | def pkg_data_filename ( resource_name , filename = None ) : resource_filename = pkg_resources . resource_filename ( tripleohelper . __name__ , resource_name ) if filename is not None : resource_filename = os . path . join ( resource_filename , filename ) return resource_filename | Returns the path of a file installed along the package |
12,450 | def merge ( config ) : repo = config . repo active_branch = repo . active_branch if active_branch . name == "master" : error_out ( "You're already on the master branch." ) if repo . is_dirty ( ) : error_out ( 'Repo is "dirty". ({})' . format ( ", " . join ( [ repr ( x . b_path ) for x in repo . index . diff ( None ) ] ) ) ) branch_name = active_branch . name state = read ( config . configfile ) origin_name = state . get ( "ORIGIN_NAME" , "origin" ) upstream_remote = None for remote in repo . remotes : if remote . name == origin_name : upstream_remote = remote break if not upstream_remote : error_out ( "No remote called {!r} found" . format ( origin_name ) ) repo . heads . master . checkout ( ) upstream_remote . pull ( repo . heads . master ) repo . git . merge ( branch_name ) repo . git . branch ( "-d" , branch_name ) success_out ( "Branch {!r} deleted." . format ( branch_name ) ) info_out ( "NOW, you might want to run:\n" ) info_out ( "git push origin master\n\n" ) push_for_you = input ( "Run that push? [Y/n] " ) . lower ( ) . strip ( ) != "n" if push_for_you : upstream_remote . push ( "master" ) success_out ( "Current master pushed to {}" . format ( upstream_remote . name ) ) | Merge the current branch into master . |
12,451 | def chord_task ( * args , ** kwargs ) : u given_backend = kwargs . get ( u'backend' , None ) if not isinstance ( given_backend , ChordableDjangoBackend ) : kwargs [ u'backend' ] = ChordableDjangoBackend ( kwargs . get ( 'app' , current_app ) ) return task ( * args , ** kwargs ) | u Override of the default task decorator to specify use of this backend . |
12,452 | def _cleanup ( self , status , expires_multiplier = 1 ) : u expires = self . expires if isinstance ( self . expires , timedelta ) else timedelta ( seconds = self . expires ) expires = expires * expires_multiplier chords_to_delete = ChordData . objects . filter ( callback_result__date_done__lte = datetime . now ( ) - expires , callback_result__status = status ) . iterator ( ) for _chord in chords_to_delete : subtask_ids = [ subtask . task_id for subtask in _chord . completed_results . all ( ) ] _chord . completed_results . clear ( ) TaskMeta . objects . filter ( task_id__in = subtask_ids ) . delete ( ) _chord . callback_result . delete ( ) _chord . delete ( ) | u Clean up expired records . |
12,453 | def on_chord_part_return ( self , task , state , result , propagate = False ) : u with transaction . atomic ( ) : chord_data = ChordData . objects . select_for_update ( ) . get ( callback_result__task_id = task . request . chord [ u'options' ] [ u'task_id' ] ) _ = TaskMeta . objects . update_or_create ( task_id = task . request . id , defaults = { u'status' : state , u'result' : result } ) if chord_data . is_ready ( ) : self . get_suitable_app ( current_app ) . tasks [ u'celery.backend_cleanup' ] . apply_async ( ) chord_data . execute_callback ( ) | u Update the linking ChordData object and execute callback if needed . |
12,454 | def apply_chord ( self , header , partial_args , group_id , body , ** options ) : u callback_entry = TaskMeta . objects . create ( task_id = body . id ) chord_data = ChordData . objects . create ( callback_result = callback_entry ) for subtask in header : subtask_entry = TaskMeta . objects . create ( task_id = subtask . id ) chord_data . completed_results . add ( subtask_entry ) if body . options . get ( u'use_iterator' , None ) is None : body . options [ u'use_iterator' ] = True chord_data . serialized_callback = json . dumps ( body ) chord_data . save ( ) return header ( * partial_args , task_id = group_id ) | u Instantiate a linking ChordData object before executing subtasks . |
12,455 | def get_suitable_app ( cls , given_app ) : u if not isinstance ( getattr ( given_app , 'backend' , None ) , ChordableDjangoBackend ) : return_app = deepcopy ( given_app ) return_app . backend = ChordableDjangoBackend ( return_app ) return return_app else : return given_app | u Return a clone of given_app with ChordableDjangoBackend if needed . |
12,456 | def linked_model_for_class ( self , cls , make_constants_variable = False , ** kwargs ) : constructor_args = inspect . getfullargspec ( cls ) . args attribute_tuples = self . attribute_tuples new_model = PriorModel ( cls ) for attribute_tuple in attribute_tuples : name = attribute_tuple . name if name in constructor_args or ( is_tuple_like_attribute_name ( name ) and tuple_name ( name ) in constructor_args ) : attribute = kwargs [ name ] if name in kwargs else attribute_tuple . value if make_constants_variable and isinstance ( attribute , Constant ) : new_attribute = getattr ( new_model , name ) if isinstance ( new_attribute , Prior ) : new_attribute . mean = attribute . value continue setattr ( new_model , name , attribute ) return new_model | Create a PriorModel wrapping the specified class with attributes from this instance . Priors can be overridden using keyword arguments . Any constructor arguments of the new class for which there is no attribute associated with this class and no keyword argument are created from config . |
12,457 | def instance_for_arguments ( self , arguments : { Prior : float } ) : for prior , value in arguments . items ( ) : prior . assert_within_limits ( value ) model_arguments = { t . name : arguments [ t . prior ] for t in self . direct_prior_tuples } constant_arguments = { t . name : t . constant . value for t in self . direct_constant_tuples } for tuple_prior in self . tuple_prior_tuples : model_arguments [ tuple_prior . name ] = tuple_prior . prior . value_for_arguments ( arguments ) for prior_model_tuple in self . direct_prior_model_tuples : model_arguments [ prior_model_tuple . name ] = prior_model_tuple . prior_model . instance_for_arguments ( arguments ) return self . cls ( ** { ** model_arguments , ** constant_arguments } ) | Create an instance of the associated class for a set of arguments |
12,458 | def gaussian_prior_model_for_arguments ( self , arguments ) : new_model = copy . deepcopy ( self ) model_arguments = { t . name : arguments [ t . prior ] for t in self . direct_prior_tuples } for tuple_prior_tuple in self . tuple_prior_tuples : setattr ( new_model , tuple_prior_tuple . name , tuple_prior_tuple . prior . gaussian_tuple_prior_for_arguments ( arguments ) ) for prior_tuple in self . direct_prior_tuples : setattr ( new_model , prior_tuple . name , model_arguments [ prior_tuple . name ] ) for constant_tuple in self . constant_tuples : setattr ( new_model , constant_tuple . name , constant_tuple . constant ) for name , prior_model in self . direct_prior_model_tuples : setattr ( new_model , name , prior_model . gaussian_prior_model_for_arguments ( arguments ) ) return new_model | Create a new instance of model mapper with a set of Gaussian priors based on tuples provided by a previous \ nonlinear search . |
12,459 | def load_post ( self , wp_post_id ) : path = "sites/{}/posts/{}" . format ( self . site_id , wp_post_id ) response = self . get ( path ) if response . ok and response . text : api_post = response . json ( ) self . get_ref_data_map ( bulk_mode = False ) self . load_wp_post ( api_post , bulk_mode = False ) try : post = Post . objects . get ( site_id = self . site_id , wp_id = wp_post_id ) except Exception as ex : logger . exception ( "Unable to load post with wp_post_id={}:\n{}" . format ( wp_post_id , ex . message ) ) else : return post else : logger . warning ( "Unable to load post with wp_post_id={}:\n{}" . format ( wp_post_id , response . text ) ) | Refresh local content for a single post from the the WordPress REST API . This can be called from a webhook on the WordPress side when a post is updated . |
12,460 | def load_categories ( self , max_pages = 30 ) : logger . info ( "loading categories" ) if self . purge_first : Category . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/categories" . format ( self . site_id ) params = { "number" : 100 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < max_pages : logger . info ( " - page: %d" , page ) api_categories = response . json ( ) . get ( "categories" ) if not api_categories : break categories = [ ] for api_category in api_categories : existing_category = Category . objects . filter ( site_id = self . site_id , wp_id = api_category [ "ID" ] ) . first ( ) if existing_category : self . update_existing_category ( existing_category , api_category ) else : categories . append ( self . get_new_category ( api_category ) ) if categories : Category . objects . bulk_create ( categories ) elif not self . full : break page += 1 params [ "page" ] = page response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return | Load all WordPress categories from the given site . |
12,461 | def get_new_category ( self , api_category ) : return Category ( site_id = self . site_id , wp_id = api_category [ "ID" ] , ** self . api_object_data ( "category" , api_category ) ) | Instantiate a new Category from api data . |
12,462 | def load_tags ( self , max_pages = 30 ) : logger . info ( "loading tags" ) if self . purge_first : Tag . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/tags" . format ( self . site_id ) params = { "number" : 1000 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < max_pages : logger . info ( " - page: %d" , page ) api_tags = response . json ( ) . get ( "tags" ) if not api_tags : break tags = [ ] for api_tag in api_tags : existing_tag = Tag . objects . filter ( site_id = self . site_id , wp_id = api_tag [ "ID" ] ) . first ( ) if existing_tag : self . update_existing_tag ( existing_tag , api_tag ) else : tags . append ( self . get_new_tag ( api_tag ) ) if tags : Tag . objects . bulk_create ( tags ) elif not self . full : break page += 1 params [ "page" ] = page response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return | Load all WordPress tags from the given site . |
12,463 | def get_new_tag ( self , api_tag ) : return Tag ( site_id = self . site_id , wp_id = api_tag [ "ID" ] , ** self . api_object_data ( "tag" , api_tag ) ) | Instantiate a new Tag from api data . |
12,464 | def load_authors ( self , max_pages = 10 ) : logger . info ( "loading authors" ) if self . purge_first : Author . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/users" . format ( self . site_id ) params = { "number" : 100 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < max_pages : logger . info ( " - page: %d" , page ) api_users = response . json ( ) . get ( "users" ) if not api_users : break authors = [ ] for api_author in api_users : existing_author = Author . objects . filter ( site_id = self . site_id , wp_id = api_author [ "ID" ] ) . first ( ) if existing_author : self . update_existing_author ( existing_author , api_author ) else : authors . append ( self . get_new_author ( api_author ) ) if authors : Author . objects . bulk_create ( authors ) elif not self . full : break params [ "offset" ] = page * 100 page += 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return | Load all WordPress authors from the given site . |
12,465 | def get_new_author ( self , api_author ) : return Author ( site_id = self . site_id , wp_id = api_author [ "ID" ] , ** self . api_object_data ( "author" , api_author ) ) | Instantiate a new Author from api data . |
12,466 | def load_media ( self , max_pages = 150 ) : logger . info ( "loading media" ) if self . purge_first : logger . warning ( "purging ALL media from site %s" , self . site_id ) Media . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/media" . format ( self . site_id ) params = { "number" : 100 } self . set_media_params_after ( params ) page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < max_pages : logger . info ( " - page: %d" , page ) api_medias = response . json ( ) . get ( "media" ) if not api_medias : break medias = [ ] for api_media in api_medias : if api_media [ "post_ID" ] != 0 : existing_media = Media . objects . filter ( site_id = self . site_id , wp_id = api_media [ "ID" ] ) . first ( ) if existing_media : self . update_existing_media ( existing_media , api_media ) else : medias . append ( self . get_new_media ( api_media ) ) if medias : Media . objects . bulk_create ( medias ) page += 1 params [ "page" ] = page response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return | Load all WordPress media from the given site . |
12,467 | def get_new_media ( self , api_media ) : return Media ( site_id = self . site_id , wp_id = api_media [ "ID" ] , ** self . api_object_data ( "media" , api_media ) ) | Instantiate a new Media from api data . |
12,468 | def get_ref_data_map ( self , bulk_mode = True ) : if bulk_mode : self . ref_data_map = { "authors" : { a . wp_id : a for a in Author . objects . filter ( site_id = self . site_id ) } , "categories" : { c . wp_id : c for c in Category . objects . filter ( site_id = self . site_id ) } , "tags" : { t . wp_id : t for t in Tag . objects . filter ( site_id = self . site_id ) } , "media" : { m . wp_id : m for m in Media . objects . filter ( site_id = self . site_id ) } } else : self . ref_data_map = { "authors" : { } , "categories" : { } , "tags" : { } , "media" : { } } | Get referential data from the local db into the self . ref_data_map dictionary . This allows for fast FK lookups when looping through posts . |
12,469 | def load_posts ( self , post_type = None , max_pages = 200 , status = None ) : logger . info ( "loading posts with post_type=%s" , post_type ) if self . purge_first : Post . objects . filter ( site_id = self . site_id , post_type = post_type ) . delete ( ) path = "sites/{}/posts" . format ( self . site_id ) if not post_type : post_type = "post" if not status : status = "publish" params = { "number" : self . batch_size , "type" : post_type , "status" : status } self . set_posts_param_modified_after ( params , post_type , status ) response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) self . process_posts_response ( response , path , params , max_pages ) | Load all WordPress posts of a given post_type from a site . |
12,470 | def set_posts_param_modified_after ( self , params , post_type , status ) : if not self . purge_first and not self . full and not self . modified_after : if status == "any" : latest = Post . objects . filter ( post_type = post_type ) . order_by ( "-modified" ) . first ( ) else : latest = Post . objects . filter ( post_type = post_type , status = status ) . order_by ( "-modified" ) . first ( ) if latest : self . modified_after = latest . modified if self . modified_after : params [ "modified_after" ] = self . modified_after . isoformat ( ) logger . info ( "getting posts after: %s" , params [ "modified_after" ] ) | Set modified_after date to continue where we left off if appropriate |
12,471 | def load_wp_post ( self , api_post , bulk_mode = True , post_categories = None , post_tags = None , post_media_attachments = None , posts = None ) : if post_categories is None : post_categories = { } if post_tags is None : post_tags = { } if post_media_attachments is None : post_media_attachments = { } if posts is None : posts = [ ] author = None if api_post [ "author" ] . get ( "ID" ) : author = self . process_post_author ( bulk_mode , api_post [ "author" ] ) self . process_post_categories ( bulk_mode , api_post , post_categories ) self . process_post_tags ( bulk_mode , api_post , post_tags ) self . process_post_media_attachments ( bulk_mode , api_post , post_media_attachments ) existing_post = Post . objects . filter ( site_id = self . site_id , wp_id = api_post [ "ID" ] ) . first ( ) if existing_post : self . process_existing_post ( existing_post , api_post , author , post_categories , post_tags , post_media_attachments ) else : self . process_new_post ( bulk_mode , api_post , posts , author , post_categories , post_tags , post_media_attachments ) if api_post [ "type" ] == "post" : self . sync_deleted_attachments ( api_post ) | Load a single post from API data . |
12,472 | def process_post_author ( self , bulk_mode , api_author ) : if bulk_mode : author = self . ref_data_map [ "authors" ] . get ( api_author [ "ID" ] ) if author : self . update_existing_author ( author , api_author ) else : author = Author . objects . create ( site_id = self . site_id , wp_id = api_author [ "ID" ] , ** self . api_object_data ( "author" , api_author ) ) else : author , created = self . get_or_create_author ( api_author ) if author and not created : self . update_existing_author ( author , api_author ) if author : self . ref_data_map [ "authors" ] [ api_author [ "ID" ] ] = author return author | Create or update an Author related to a post . |
12,473 | def get_or_create_author ( self , api_author ) : return Author . objects . get_or_create ( site_id = self . site_id , wp_id = api_author [ "ID" ] , defaults = self . api_object_data ( "author" , api_author ) ) | Find or create an Author object given API data . |
12,474 | def process_post_categories ( self , bulk_mode , api_post , post_categories ) : post_categories [ api_post [ "ID" ] ] = [ ] for api_category in six . itervalues ( api_post [ "categories" ] ) : category = self . process_post_category ( bulk_mode , api_category ) if category : post_categories [ api_post [ "ID" ] ] . append ( category ) | Create or update Categories related to a post . |
12,475 | def process_post_category ( self , bulk_mode , api_category ) : category = None if bulk_mode : category = self . ref_data_map [ "categories" ] . get ( api_category [ "ID" ] ) if not category : category , created = Category . objects . get_or_create ( site_id = self . site_id , wp_id = api_category [ "ID" ] , defaults = self . api_object_data ( "category" , api_category ) ) if category and not created : self . update_existing_category ( category , api_category ) if category : self . ref_data_map [ "categories" ] [ api_category [ "ID" ] ] = category return category | Create or update a Category related to a post . |
12,476 | def process_post_tags ( self , bulk_mode , api_post , post_tags ) : post_tags [ api_post [ "ID" ] ] = [ ] for api_tag in six . itervalues ( api_post [ "tags" ] ) : tag = self . process_post_tag ( bulk_mode , api_tag ) if tag : post_tags [ api_post [ "ID" ] ] . append ( tag ) | Create or update Tags related to a post . |
12,477 | def process_post_tag ( self , bulk_mode , api_tag ) : tag = None if bulk_mode : tag = self . ref_data_map [ "tags" ] . get ( api_tag [ "ID" ] ) if not tag : tag , created = Tag . objects . get_or_create ( site_id = self . site_id , wp_id = api_tag [ "ID" ] , defaults = self . api_object_data ( "tag" , api_tag ) ) if tag and not created : self . update_existing_tag ( tag , api_tag ) if tag : self . ref_data_map [ "tags" ] [ api_tag [ "ID" ] ] = tag return tag | Create or update a Tag related to a post . |
12,478 | def process_post_media_attachments ( self , bulk_mode , api_post , post_media_attachments ) : post_media_attachments [ api_post [ "ID" ] ] = [ ] for api_attachment in six . itervalues ( api_post [ "attachments" ] ) : attachment = self . process_post_media_attachment ( bulk_mode , api_attachment ) if attachment : post_media_attachments [ api_post [ "ID" ] ] . append ( attachment ) | Create or update Media objects related to a post . |
12,479 | def process_post_media_attachment ( self , bulk_mode , api_media_attachment ) : attachment = None if bulk_mode : attachment = self . ref_data_map [ "media" ] . get ( api_media_attachment [ "ID" ] ) if not attachment : attachment , created = self . get_or_create_media ( api_media_attachment ) if attachment and not created : self . update_existing_media ( attachment , api_media_attachment ) if attachment : self . ref_data_map [ "media" ] [ api_media_attachment [ "ID" ] ] = attachment return attachment | Create or update a Media attached to a post . |
12,480 | def get_or_create_media ( self , api_media ) : return Media . objects . get_or_create ( site_id = self . site_id , wp_id = api_media [ "ID" ] , defaults = self . api_object_data ( "media" , api_media ) ) | Find or create a Media object given API data . |
12,481 | def process_existing_post ( existing_post , api_post , author , post_categories , post_tags , post_media_attachments ) : existing_post . author = author existing_post . post_date = api_post [ "date" ] existing_post . modified = api_post [ "modified" ] existing_post . title = api_post [ "title" ] existing_post . url = api_post [ "URL" ] existing_post . short_url = api_post [ "short_URL" ] existing_post . content = api_post [ "content" ] existing_post . excerpt = api_post [ "excerpt" ] existing_post . slug = api_post [ "slug" ] existing_post . guid = api_post [ "guid" ] existing_post . status = api_post [ "status" ] existing_post . sticky = api_post [ "sticky" ] existing_post . password = api_post [ "password" ] existing_post . parent = api_post [ "parent" ] existing_post . post_type = api_post [ "type" ] existing_post . likes_enabled = api_post [ "likes_enabled" ] existing_post . sharing_enabled = api_post [ "sharing_enabled" ] existing_post . like_count = api_post [ "like_count" ] existing_post . global_ID = api_post [ "global_ID" ] existing_post . featured_image = api_post [ "featured_image" ] existing_post . format = api_post [ "format" ] existing_post . menu_order = api_post [ "menu_order" ] existing_post . metadata = api_post [ "metadata" ] existing_post . post_thumbnail = api_post [ "post_thumbnail" ] WPAPILoader . process_post_many_to_many_field ( existing_post , "categories" , post_categories ) WPAPILoader . process_post_many_to_many_field ( existing_post , "tags" , post_tags ) WPAPILoader . process_post_many_to_many_field ( existing_post , "attachments" , post_media_attachments ) existing_post . save ( ) | Sync attributes for a single post from WP API data . |
12,482 | def process_post_many_to_many_field ( existing_post , field , related_objects ) : to_add = set ( related_objects . get ( existing_post . wp_id , set ( ) ) ) - set ( getattr ( existing_post , field ) . all ( ) ) to_remove = set ( getattr ( existing_post , field ) . all ( ) ) - set ( related_objects . get ( existing_post . wp_id , set ( ) ) ) if to_add : getattr ( existing_post , field ) . add ( * to_add ) if to_remove : getattr ( existing_post , field ) . remove ( * to_remove ) | Sync data for a many - to - many field related to a post using set differences . |
12,483 | def bulk_create_posts ( self , posts , post_categories , post_tags , post_media_attachments ) : Post . objects . bulk_create ( posts ) for post_wp_id , categories in six . iteritems ( post_categories ) : Post . objects . get ( site_id = self . site_id , wp_id = post_wp_id ) . categories . add ( * categories ) for post_id , tags in six . iteritems ( post_tags ) : Post . objects . get ( site_id = self . site_id , wp_id = post_id ) . tags . add ( * tags ) for post_id , attachments in six . iteritems ( post_media_attachments ) : Post . objects . get ( site_id = self . site_id , wp_id = post_id ) . attachments . add ( * attachments ) | Actually do a db bulk creation of posts and link up the many - to - many fields |
12,484 | def sync_deleted_attachments ( self , api_post ) : existing_IDs = set ( Post . objects . filter ( site_id = self . site_id , post_type = "attachment" , parent__icontains = '"ID":{}' . format ( api_post [ "ID" ] ) ) . values_list ( "wp_id" , flat = True ) ) if existing_IDs : api_IDs = set ( ) path = "sites/{}/posts/" . format ( self . site_id ) params = { "type" : "attachment" , "parent_id" : api_post [ "ID" ] , "fields" : "ID" , "number" : 100 } page = 1 response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) while response . ok and response . text and page < 10 : api_json = response . json ( ) api_attachments = api_json . get ( "posts" , [ ] ) api_IDs |= set ( a [ "ID" ] for a in api_attachments ) page += 1 next_page_handle = api_json . get ( "meta" , { } ) . get ( "next_page" ) if next_page_handle : params [ "page_handle" ] = next_page_handle else : break response = self . get ( path , params ) if not response . ok : logger . warning ( "Response NOT OK! status_code=%s\n%s" , response . status_code , response . text ) return to_remove = existing_IDs - api_IDs if to_remove : Post . objects . filter ( site_id = self . site_id , post_type = "attachment" , parent__icontains = '"ID":{}' . format ( api_post [ "ID" ] ) , wp_id__in = list ( to_remove ) ) . delete ( ) | Remove Posts with post_type = attachment that have been removed from the given Post on the WordPress side . |
12,485 | def nextparent ( self , parent , depth ) : if depth > 1 : pdir = os . path . dirname ( self . name ) line = 0 for c , d in parent . traverse ( ) : if line > parent . curline and c . name . startswith ( pdir ) : parent . curline += 1 line += 1 else : line = - 1 for c , d in parent . traverse ( ) : if line > parent . curline : parent . curline += 1 if os . path . isdir ( c . name ) and c . name in parent . children [ 0 : ] : break line += 1 | Add lines to current line by traversing the grandparent object again and once we reach our current line counting every line that is prefixed with the parent directory . |
12,486 | def prevparent ( self , parent , depth ) : pdir = os . path . dirname ( self . name ) if depth > 1 : for c , d in parent . traverse ( ) : if c . name == self . name : break if c . name . startswith ( pdir ) : parent . curline -= 1 else : pdir = self . name line = - 1 for c , d in parent . traverse ( ) : if c . name == self . name : break if os . path . isdir ( c . name ) and c . name in parent . children [ 0 : ] : parent . curline = line line += 1 return pdir | Subtract lines from our curline if the name of a node is prefixed with the parent directory when traversing the grandparent object . |
12,487 | def token ( config , token ) : if not token : info_out ( "To generate a personal API token, go to:\n\n\t" "https://github.com/settings/tokens\n\n" "To read more about it, go to:\n\n\t" "https://help.github.com/articles/creating-an-access" "-token-for-command-line-use/\n\n" 'Remember to enable "repo" in the scopes.' ) token = getpass . getpass ( "GitHub API Token: " ) . strip ( ) url = urllib . parse . urljoin ( config . github_url , "/user" ) assert url . startswith ( "https://" ) , url response = requests . get ( url , headers = { "Authorization" : "token {}" . format ( token ) } ) if response . status_code == 200 : update ( config . configfile , { "GITHUB" : { "github_url" : config . github_url , "token" : token , "login" : response . json ( ) [ "login" ] , } } , ) name = response . json ( ) [ "name" ] or response . json ( ) [ "login" ] success_out ( "Hi! {}" . format ( name ) ) else : error_out ( "Failed - {} ({})" . format ( response . status_code , response . content ) ) | Store and fetch a GitHub access token |
12,488 | def log_response ( handler ) : content_type = handler . _headers . get ( 'Content-Type' , None ) headers_str = handler . _generate_headers ( ) block = 'Response Infomations:\n' + headers_str . strip ( ) if content_type and ( 'text' in content_type or 'json' in content_type ) : limit = 0 if 'LOG_RESPONSE_LINE_LIMIT' in settings : limit = settings [ 'LOG_RESPONSE_LINE_LIMIT' ] def cut ( s ) : if limit and len ( s ) > limit : return [ s [ : limit ] ] + cut ( s [ limit : ] ) else : return [ s ] body = '' . join ( handler . _write_buffer ) lines = [ ] for i in body . split ( '\n' ) : lines += [ '| ' + j for j in cut ( i ) ] block += '\nBody:\n' + '\n' . join ( lines ) app_log . info ( block ) | Acturally logging response is not a server s responsibility you should use http tools like Chrome Developer Tools to analyse the response . |
12,489 | def log_request ( handler ) : block = 'Request Infomations:\n' + _format_headers_log ( handler . request . headers ) if handler . request . arguments : block += '+----Arguments----+\n' for k , v in handler . request . arguments . items ( ) : block += '| {0:<15} | {1:<15} \n' . format ( repr ( k ) , repr ( v ) ) app_log . info ( block ) | Logging request is opposite to response sometime its necessary feel free to enable it . |
12,490 | def _exception_default_handler ( self , e ) : if isinstance ( e , HTTPError ) : if e . log_message : format = "%d %s: " + e . log_message args = [ e . status_code , self . _request_summary ( ) ] + list ( e . args ) app_log . warning ( format , * args ) if e . status_code not in httplib . responses : app_log . error ( "Bad HTTP status code: %d" , e . status_code ) self . send_error ( 500 , exc_info = sys . exc_info ( ) ) else : self . send_error ( e . status_code , exc_info = sys . exc_info ( ) ) else : app_log . error ( "Uncaught exception %s\n%r" , self . _request_summary ( ) , self . request , exc_info = True ) self . send_error ( 500 , exc_info = sys . exc_info ( ) ) | This method is a copy of tornado . web . RequestHandler . _handle_request_exception |
12,491 | def _handle_request_exception ( self , e ) : handle_func = self . _exception_default_handler if self . EXCEPTION_HANDLERS : for excs , func_name in self . EXCEPTION_HANDLERS . items ( ) : if isinstance ( e , excs ) : handle_func = getattr ( self , func_name ) break handle_func ( e ) if not self . _finished : self . finish ( ) | This method handle HTTPError exceptions the same as how tornado does leave other exceptions to be handled by user defined handler function maped in class attribute EXCEPTION_HANDLERS |
12,492 | def flush ( self , * args , ** kwgs ) : if settings [ 'LOG_RESPONSE' ] and not self . _status_code == 500 : log_response ( self ) super ( BaseHandler , self ) . flush ( * args , ** kwgs ) | Before RequestHandler . flush was called we got the final _write_buffer . |
12,493 | def write_json ( self , chunk , code = None , headers = None ) : assert chunk is not None , 'None cound not be written in write_json' self . set_header ( "Content-Type" , "application/json; charset=UTF-8" ) if isinstance ( chunk , dict ) or isinstance ( chunk , list ) : chunk = self . json_encode ( chunk ) try : chunk = utf8 ( chunk ) except Exception : app_log . error ( 'chunk encoding error, repr: %s' % repr ( chunk ) ) raise_exc_info ( sys . exc_info ( ) ) self . write ( chunk ) if code : self . set_status ( code ) if headers : for k , v in headers . items ( ) : self . set_header ( k , v ) | A convenient method that binds chunk code headers together |
12,494 | def write_file ( self , file_path , mime_type = None ) : if not os . path . exists ( file_path ) : raise HTTPError ( 404 ) if not os . path . isfile ( file_path ) : raise HTTPError ( 403 , "%s is not a file" , file_path ) stat_result = os . stat ( file_path ) modified = datetime . datetime . fromtimestamp ( stat_result [ stat . ST_MTIME ] ) self . set_header ( "Last-Modified" , modified ) if not mime_type : mime_type , _encoding = mimetypes . guess_type ( file_path ) if mime_type : self . set_header ( "Content-Type" , mime_type ) ims_value = self . request . headers . get ( "If-Modified-Since" ) if ims_value is not None : date_tuple = email . utils . parsedate ( ims_value ) if_since = datetime . datetime . fromtimestamp ( time . mktime ( date_tuple ) ) if if_since >= modified : self . set_status ( 304 ) return with open ( file_path , "rb" ) as file : data = file . read ( ) hasher = hashlib . sha1 ( ) hasher . update ( data ) self . set_header ( "Etag" , '"%s"' % hasher . hexdigest ( ) ) self . write ( data ) | Copy from tornado . web . StaticFileHandler |
12,495 | def prepare ( self ) : if settings [ 'LOG_REQUEST' ] : log_request ( self ) for i in self . PREPARES : getattr ( self , 'prepare_' + i ) ( ) if self . _finished : return | Behaves like a middleware between raw request and handling process |
12,496 | def csv_to_dicts ( file , header = None ) : with open ( file ) as csvfile : return [ row for row in csv . DictReader ( csvfile , fieldnames = header ) ] | Reads a csv and returns a List of Dicts with keys given by header row . |
12,497 | def cli ( config , configfile , verbose ) : config . verbose = verbose config . configfile = configfile if not os . path . isfile ( configfile ) : state . write ( configfile , { } ) | A glorious command line tool to make your life with git GitHub and Bugzilla much easier . |
12,498 | def parse_uri ( self , uri = None ) : if not uri : return rdflib . term . URIRef ( self . root ) elif type ( uri ) == str : if type ( uri ) == str and not uri . startswith ( 'http' ) : return rdflib . term . URIRef ( "%s%s" % ( self . root , uri ) ) else : return rdflib . term . URIRef ( uri ) elif type ( uri ) == rdflib . term . URIRef : return uri else : raise TypeError ( 'invalid URI input' ) | parses and cleans up possible uri inputs return instance of rdflib . term . URIRef |
12,499 | def create_resource ( self , resource_type = None , uri = None ) : if resource_type in [ NonRDFSource , Binary , BasicContainer , DirectContainer , IndirectContainer ] : return resource_type ( self , uri ) else : raise TypeError ( "expecting Resource type, such as BasicContainer or NonRDFSource" ) | Convenience method for creating a new resource |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.