idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
12,400
def model(self, **kwargs):
    """Run the forward modeling for all frequencies.

    Forwards *kwargs* to the ``model`` method of every stored
    time-domain object in ``self.tds``.
    """
    for td_obj in self.tds.values():
        td_obj.model(**kwargs)
Run the forward modeling for all frequencies .
12,401
def measurements(self):
    """Return modeled measurements as a single stacked array.

    The measurement arrays of all time-domain objects are collected in
    sorted key order and stacked into one numpy array.
    """
    ordered_keys = sorted(self.tds)
    return np.array([self.tds[k].measurements() for k in ordered_keys])
Return modeled measurements
12,402
def get_measurement_responses ( self ) : configs = self . tds [ sorted ( self . tds . keys ( ) ) [ 0 ] ] . configs . configs measurements = self . measurements ( ) responses = { } for config , sip_measurement in zip ( configs , np . rollaxis ( measurements , 1 ) ) : sip = sip_response ( frequencies = self . frequencies...
Return a dictionary of sip_responses for the modeled SIP spectra
12,403
def create_database(name, number=1, force_clear=False):
    """Command to create a database.

    Currently only echoes the received arguments and their types; the
    actual database creation is not implemented in this block.

    Parameters
    ----------
    name : str
        Name of the database to create.
    number : int, optional
        How many databases to create (default 1).
    force_clear : bool, optional
        Whether an existing database should be cleared first.
    """
    # Converted from Python 2 print statements (a SyntaxError on
    # Python 3) to the print() function; output format is unchanged.
    print('Got:')
    print('name', name, type(name))
    print('number', number, type(number))
    print('force_clear', force_clear, type(force_clear))
Command to create a database
12,404
def _get_long_path_name(path):
    """Return the long (properly cased) name of an existing Windows path.

    Falls back to the unmodified *path* when the Win32 call fails
    (returns 0) or reports a length beyond MAX_PATH (260).
    """
    buf_size = len(path) + 1
    buf = ctypes.create_unicode_buffer(buf_size)
    result = ctypes.windll.kernel32.GetLongPathNameW(path, buf, buf_size)
    if 0 < result <= 260:
        return buf.value
    return path
Returns the long path name for a Windows path i . e . the properly cased path of an existing file or directory .
12,405
def get_dependency_walker ( ) : for dirname in os . getenv ( 'PATH' , '' ) . split ( os . pathsep ) : filename = os . path . join ( dirname , 'depends.exe' ) if os . path . isfile ( filename ) : logger . info ( 'Dependency Walker found at "{}"' . format ( filename ) ) return filename temp_exe = os . path . join ( tempf...
Checks if depends . exe is in the system PATH . If not it will be downloaded and extracted to a temporary directory . Note that the file will not be deleted afterwards .
12,406
def prepare ( self , setup_func ) : assert inspect . isfunction ( setup_func ) argsspec = inspect . getargspec ( setup_func ) if argsspec . args : raise ValueError ( "prepare function shouldn't have any arguments" ) def decorator ( command_func ) : @ functools . wraps ( command_func ) def wrapper ( * args , ** kwgs ) :...
This decorator wrap a function which setup a environment before running a command
12,407
def addPort ( n : LNode , intf : Interface ) : d = PortTypeFromDir ( intf . _direction ) ext_p = LayoutExternalPort ( n , name = intf . _name , direction = d , node2lnode = n . _node2lnode ) ext_p . originObj = originObjOfPort ( intf ) n . children . append ( ext_p ) addPortToLNode ( ext_p , intf , reverseDirection = T...
Add LayoutExternalPort for interface
12,408
def drawtree ( self ) : self . win . erase ( ) self . line = 0 for child , depth in self . traverse ( ) : child . curline = self . curline child . picked = self . picked child . expanded = self . expanded child . sized = self . sized if depth == 0 : continue if self . line == self . curline : self . color . curline ( c...
Loop over the object process path attribute sets and drawlines based on their current contents .
12,409
def import_config ( config_path ) : if not os . path . isfile ( config_path ) : raise ConfigBuilderError ( 'Could not find config file: ' + config_path ) loader = importlib . machinery . SourceFileLoader ( config_path , config_path ) module = loader . load_module ( ) if not hasattr ( module , 'config' ) or not isinstan...
Import a Config from a given path relative to the current directory .
12,410
def grid ( fitness_function , no_dimensions , step_size ) : best_fitness = float ( "-inf" ) best_arguments = None for arguments in make_lists ( no_dimensions , step_size ) : fitness = fitness_function ( tuple ( arguments ) ) if fitness > best_fitness : best_fitness = fitness best_arguments = tuple ( arguments ) return ...
Grid search using a fitness function over a given number of dimensions and a given step size between inclusive limits of 0 and 1 .
12,411
def make_lists ( no_dimensions , step_size , centre_steps = True ) : if no_dimensions == 0 : return [ [ ] ] sub_lists = make_lists ( no_dimensions - 1 , step_size , centre_steps = centre_steps ) return [ [ step_size * value + ( 0.5 * step_size if centre_steps else 0 ) ] + sub_list for value in range ( 0 , int ( ( 1 / s...
Create a list of lists of floats covering every combination across no_dimensions of points of integer step size between 0 and 1 inclusive .
12,412
def portCnt(port):
    """Recursively count the number of leaf ports (ports without children)."""
    children = port.children
    if not children:
        return 1
    return sum(portCnt(child) for child in children)
recursively count number of ports without children
12,413
def copyPort(port, targetLNode, reverseDir, topPortName=None):
    """Create an identical port on *targetLNode*.

    When *topPortName* is given, the copied port is renamed to it.
    Returns the newly created port.
    """
    copied = _copyPort(port, targetLNode, reverseDir)
    if topPortName is not None:
        copied.name = topPortName
    return copied
Create identical port on targetNode
12,414
def walkSignalPorts(rootPort: LPort):
    """Recursively yield every leaf port (port without any children)."""
    if not rootPort.children:
        yield rootPort
    else:
        for child in rootPort.children:
            yield from walkSignalPorts(child)
recursively walk ports without any children
12,415
def agent_error ( e : requests . HTTPError , fatal = True ) : try : data = e . response . json ( ) details = data [ 'detail' ] except JSONDecodeError : details = e . response . text or str ( e . response ) lines = ( '[AGENT] {}' . format ( line ) for line in details . splitlines ( ) ) msg = '\n' + '\n' . join ( lines )...
Prints an agent error and exits
12,416
def parse_stack_refs ( stack_references : List [ str ] ) -> List [ str ] : stack_names = [ ] references = list ( stack_references ) references . reverse ( ) while references : current = references . pop ( ) file_path = os . path . abspath ( current ) if os . path . exists ( file_path ) and os . path . isfile ( file_pat...
Check if items included in stack_references are Senza definition file paths or stack name reference . If Senza definition file path substitute the definition file path by the stack name in the same position on the list .
12,417
def list_stacks ( stack_ref : List [ str ] , all : bool , remote : str , region : str , watch : int , output : str ) : lizzy = setup_lizzy_client ( remote ) stack_references = parse_stack_refs ( stack_ref ) while True : rows = [ ] for stack in lizzy . get_stacks ( stack_references , region = region ) : creation_time = ...
List Lizzy stacks
12,418
def traffic ( stack_name : str , stack_version : Optional [ str ] , percentage : Optional [ int ] , region : Optional [ str ] , remote : Optional [ str ] , output : Optional [ str ] ) : lizzy = setup_lizzy_client ( remote ) if percentage is None : stack_reference = [ stack_name ] with Action ( 'Requesting traffic info....
Manage stack traffic
12,419
def scale ( stack_name : str , stack_version : Optional [ str ] , new_scale : int , region : Optional [ str ] , remote : Optional [ str ] ) : lizzy = setup_lizzy_client ( remote ) with Action ( 'Requesting rescale..' ) : stack_id = '{stack_name}-{stack_version}' . format_map ( locals ( ) ) lizzy . scale ( stack_id , ne...
Rescale a stack
12,420
def delete ( stack_ref : List [ str ] , region : str , dry_run : bool , force : bool , remote : str ) : lizzy = setup_lizzy_client ( remote ) stack_refs = get_stack_refs ( stack_ref ) all_with_version = all ( stack . version is not None for stack in stack_refs ) if ( not all_with_version and not dry_run and not force )...
Delete Cloud Formation stacks
12,421
def pydict2xml(filename, metadata_dict, **kwargs):
    """Create an XML file from a metadata dictionary.

    Parameters
    ----------
    filename : str
        Destination path of the XML file.
    metadata_dict : dict
        Metadata serialized via ``pydict2xmlstring``.
    **kwargs
        Passed through to ``pydict2xmlstring``.

    Raises
    ------
    MetadataGeneratorException
        If the XML string cannot be generated or the file written.
    """
    try:
        # Open in text mode with an explicit encoding instead of writing
        # manually .encode()d bytes to a text-mode handle (a TypeError on
        # Python 3); the context manager closes the file even on error.
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(pydict2xmlstring(metadata_dict, **kwargs))
    except Exception as exc:
        # Keep the original catch-all contract, but preserve the real
        # failure as the cause instead of discarding it (bare except).
        raise MetadataGeneratorException(
            'Failed to create an XML file. Filename: %s' % (filename)
        ) from exc
Create an XML file .
12,422
def pydict2xmlstring ( metadata_dict , ** kwargs ) : ordering = kwargs . get ( 'ordering' , UNTL_XML_ORDER ) root_label = kwargs . get ( 'root_label' , 'metadata' ) root_namespace = kwargs . get ( 'root_namespace' , None ) elements_namespace = kwargs . get ( 'elements_namespace' , None ) namespace_map = kwargs . get ( ...
Create an XML string from a metadata dictionary .
12,423
def create_dict_subelement ( root , subelement , content , ** kwargs ) : attribs = kwargs . get ( 'attribs' , None ) namespace = kwargs . get ( 'namespace' , None ) key = subelement if namespace and attribs : subelement = SubElement ( root , namespace + subelement , attribs ) elif namespace : subelement = SubElement ( ...
Create a XML subelement from a Python dictionary .
12,424
def highwiredict2xmlstring ( highwire_elements , ordering = HIGHWIRE_ORDER ) : highwire_elements . sort ( key = lambda obj : ordering . index ( obj . name ) ) root = Element ( 'metadata' ) for element in highwire_elements : attribs = { 'name' : element . name , 'content' : element . content } SubElement ( root , 'meta'...
Create an XML string from the highwire data dictionary .
12,425
def get ( binary_name ) : if binary_name not in binaries : raise Exception ( 'binary_name: {0} not found' . format ( binary_name ) ) system = platform . system ( ) binary_list = binaries [ binary_name ] [ system ] for filename in binary_list : valid_file = shutil . which ( filename ) if valid_file : return os . path . ...
return a valid path to the given binary . Return an error if no existing binary can be found .
12,426
def get_upgrade_lock ( dbname , connect_str , timeout = LOCK_TIMEOUT ) : engine = sqlalchemy . create_engine ( connect_str ) cursor = engine . execute ( "SELECT GET_LOCK('upgrade_{}', {})" . format ( dbname , timeout ) ) lock = cursor . scalar ( ) cursor . close ( ) while not lock : logger . info ( 'Cannot acquire {} u...
Wait until you can get the lock then yield it and eventually release it .
12,427
def upgrade ( dbname , connect_str , alembic_conf ) : if not sqlalchemy_utils . database_exists ( connect_str ) : logger . info ( 'Creating {}' . format ( dbname ) ) try : sqlalchemy_utils . create_database ( connect_str ) except sqlalchemy . exc . ProgrammingError as exc : if not sqlalchemy_utils . database_exists ( c...
Get the database's upgrade lock and run alembic .
12,428
def write_to_file(self, filename):
    """Write the configuration to *filename* in ``self.key_order`` order.

    The sentinel key ``-1`` emits an empty line; any other key writes
    ``self[key]`` followed by a newline.
    """
    # Context manager guarantees the handle is closed even if a key
    # lookup fails mid-write (original used bare open()/close()).
    with open(filename, 'w') as fid:
        for key in self.key_order:
            if key == -1:
                fid.write('\n')
            else:
                fid.write('{0}\n'.format(self[key]))
Write the configuration to a file . Use the correct order of values .
12,429
def parse ( self , importpath ) : self . native = False self . _prefix = "" self . _package = "" url = re . sub ( r'http://' , '' , importpath ) url = re . sub ( r'https://' , '' , url ) if url . split ( '/' ) [ 0 ] in self . native_packages [ "packages" ] : self . native = True return self for regex in self . known_ip...
Parse import path . Determine if the path is native or starts with known prefix .
12,430
def sub_retab(match):
    r"""Replace a run of tabs with spaces, preserving column alignment.

    ``match.group(1)`` is the text preceding the tabs and
    ``match.group(2)`` is the run of tab characters.
    """
    prefix = match.group(1)
    tab_count = len(match.group(2))
    pad = TAB_SIZE * tab_count - len(prefix) % TAB_SIZE
    return prefix + ' ' * pad
r Remove all tabs and convert them into spaces .
12,431
def handle_whitespace(text):
    r"""Clean up whitespace: expand tabs via ``sub_retab``, drop matched
    whitespace runs, and strip the result."""
    expanded = re_retab.sub(sub_retab, text)
    return re_whitespace.sub('', expanded).strip()
r Handles whitespace cleanup .
12,432
def get_variables(text):
    """Extract template-engine variables from *text*.

    Returns the text with the variable definitions removed, together
    with a dict mapping variable names to their values.
    """
    found = dict(re_vars.findall(text))
    remaining = re_vars.sub('', text)
    return remaining, found
Extracts variables that can be used in templating engines .
12,433
def get_references(text):
    """Collect all link references defined in *text*.

    Returns the text with reference definitions stripped, plus a dict
    mapping each lowercase reference id to a ``(link, title)`` pair.
    Angle brackets around the id are removed before normalization.
    """
    refs = {}
    for raw_id, link, _, title in re_references.findall(text):
        clean_id = re.sub(r'<(.*?)>', r'\1', raw_id).lower().strip()
        refs[clean_id] = (link, title)
    return re_references.sub('', text), refs
Retrieves all link references within the text .
12,434
def get_footnote_backreferences ( text , markdown_obj ) : footnotes = OrderedDict ( ) for footnote_id , footnote in re_footnote_backreferences . findall ( text ) : footnote_id = re . sub ( r'<(.*?)>' , r'\1' , footnote_id ) . lower ( ) . strip ( ) footnote = re . sub ( r'^[ ]{0,4}' , '' , footnote , flags = re . M ) fo...
Retrieves all footnote backreferences within the text .
12,435
def hash_blocks(text, hashes):
    """Replace HTML block tags with placeholder hashes.

    Every matched block is stored in *hashes* under its hash so that
    it can later be restored by ``unhash``.
    """
    def _replace(match):
        original = match.group(1)
        key = hash_text(original, 'block')
        hashes[key] = original
        return '\n\n{}\n\n'.format(key)
    return re_block.sub(_replace, text)
Hashes HTML block tags .
12,436
def hash_lists ( text , hashes , markdown_obj ) : for style , marker in ( ( 'u' , '[+*-]' ) , ( 'o' , r'\d+\.' ) ) : list_re = re . compile ( re_list % ( marker , marker ) , re . S | re . X ) for match in list_re . finditer ( text ) : if not match : continue lst = match . group ( 1 ) items = re . split ( r'(?:\n|\A) {0...
Hashes ordered and unordered lists .
12,437
def hash_blockquotes ( text , hashes , markdown_obj ) : def sub ( match ) : block = match . group ( 1 ) . strip ( ) block = re . sub ( r'(?:(?<=\n)|(?<=\A))> ?' , '' , block ) block = markdown_obj . convert ( block ) block = '<blockquote>{}</blockquote>' . format ( block ) hashed = hash_text ( block , 'blockquote' ) ha...
Hashes block quotes .
12,438
def hash_codes(text, hashes):
    """Replace inline code spans with placeholder hashes.

    The escaped, ``<code>``-wrapped markup is stored in *hashes* under
    its hash key.
    """
    def _replace(match):
        rendered = '<code>{}</code>'.format(escape(match.group(2)))
        key = hash_text(rendered, 'code')
        hashes[key] = rendered
        return key
    return re_code.sub(_replace, text)
Hashes inline code tags .
12,439
def hash_tags(text, hashes):
    """Replace any non-block HTML tags with placeholder hashes."""
    def _replace(match):
        tag = match.group(0)
        key = hash_text(tag, 'tag')
        hashes[key] = tag
        return key
    return re_tag.sub(_replace, text)
Hashes any non - block tags .
12,440
def unhash(text, hashes):
    """Restore every hashed entity recorded in *hashes*.

    Substitution repeats until no hash placeholders remain (hashed
    content may itself contain hashes), then leading indentation
    captured for ``<pre>`` tags is stripped line by line.
    """
    def _lookup(match):
        return hashes[match.group(0)]
    while re_hash.search(text):
        text = re_hash.sub(_lookup, text)
    text = re_pre_tag.sub(
        lambda m: re.sub('^' + m.group(1), '', m.group(0), flags=re.M),
        text)
    return text
Unhashes all hashed entites in the hashes dictionary .
12,441
def paragraph_sub(match):
    """Wrap a matched paragraph in ``<p>`` tags, converting hard line
    breaks (a space before a newline) into ``<br/>`` elements."""
    body = match.group(0).strip()
    body = re.sub(r' \n', r'\n<br/>\n', body)
    return '<p>{}</p>'.format(body)
Captures paragraphs .
12,442
def truncateGraph(graph, root_nodes):
    """Build the subgraph of *graph* reachable from *root_nodes*.

    Returns a new Graph containing the root nodes and every node
    reachable from any of them, merged together.
    """
    result = Graph()
    for root in root_nodes:
        reachable = GraphUtils.getReacheableSubgraph(graph, root)
        result = GraphUtils.joinGraphs(result, reachable)
    return result
Create a set of all nodes containing the root_nodes and all nodes reachable from them
12,443
def filterGraph ( graph , node_fnc ) : nodes = filter ( lambda l : node_fnc ( l ) , graph . nodes ( ) ) edges = { } gedges = graph . edges ( ) for u in gedges : if u not in nodes : continue for v in gedges [ u ] : if v not in nodes : continue try : edges [ u ] . append ( v ) except KeyError : edges [ u ] = [ v ] return...
Remove all nodes for which node_fnc does not hold
12,444
def listdir(self, path):
    """Yield the names of all non-hidden (non-dotfile) entries in *path*."""
    for entry in os.listdir(path):
        if entry.startswith('.'):
            continue
        yield entry
Return a list of all non dotfiles in a given directory .
12,445
def getchildren(self):
    """Return sorted absolute paths of this node's children.

    When ``self.hidden`` is set the filtered ``self.listdir`` is used,
    otherwise the raw ``os.listdir``. Returns ``None`` if the
    directory cannot be read.
    """
    try:
        if self.hidden:
            entries = self.listdir(self.name)
        else:
            entries = os.listdir(self.name)
        return [os.path.join(self.name, entry)
                for entry in sorted(entries)]
    except OSError:
        return None
Create list of absolute paths to be used to instantiate path objects for traversal based on whether or not hidden attribute is set .
12,446
def getpaths ( self ) : self . children = self . getchildren ( ) if self . children is None : return if self . paths is None : self . paths = [ Paths ( self . screen , os . path . join ( self . name , child ) , self . hidden , self . picked , self . expanded , self . sized ) for child in self . children ] return self ....
If we have children use a list comprehension to instantiate new paths objects to traverse .
12,447
def traverse(self):
    """Recursive generator that lazily unfolds the filesystem.

    Yields ``(node, depth)`` pairs: this node at depth 0, then — only
    when this node's name is in ``self.expanded`` — each descendant
    with its depth increased by one.
    """
    yield self, 0
    if self.name not in self.expanded:
        return
    for child_path in self.getpaths():
        for node, depth in child_path.traverse():
            yield node, depth + 1
Recursive generator that lazily unfolds the filesystem .
12,448
def line_line_intersect ( x , y ) : A = x [ 0 ] * y [ 1 ] - y [ 0 ] * x [ 1 ] B = x [ 2 ] * y [ 3 ] - y [ 2 ] * x [ 4 ] C = ( x [ 0 ] - x [ 1 ] ) * ( y [ 2 ] - y [ 3 ] ) - ( y [ 0 ] - y [ 1 ] ) * ( x [ 2 ] - x [ 3 ] ) Ix = ( A * ( x [ 2 ] - x [ 3 ] ) - ( x [ 0 ] - x [ 1 ] ) * B ) / C Iy = ( A * ( y [ 2 ] - y [ 3 ] ) - ...
Compute the intersection point of two lines
12,449
def pkg_data_filename(resource_name, filename=None):
    """Return the on-disk path of a resource installed with the package.

    When *filename* is given it is joined onto the resource directory.
    """
    base = pkg_resources.resource_filename(
        tripleohelper.__name__, resource_name)
    if filename is None:
        return base
    return os.path.join(base, filename)
Returns the path of a file installed along the package
12,450
def merge ( config ) : repo = config . repo active_branch = repo . active_branch if active_branch . name == "master" : error_out ( "You're already on the master branch." ) if repo . is_dirty ( ) : error_out ( 'Repo is "dirty". ({})' . format ( ", " . join ( [ repr ( x . b_path ) for x in repo . index . diff ( None ) ] ...
Merge the current branch into master .
12,451
def chord_task(*args, **kwargs):
    u"""Task decorator override that forces a ChordableDjangoBackend.

    When the caller did not supply a ChordableDjangoBackend, one is
    built from the given (or current) app before delegating to the
    standard ``task`` decorator.
    """
    backend = kwargs.get(u'backend', None)
    if not isinstance(backend, ChordableDjangoBackend):
        app = kwargs.get('app', current_app)
        kwargs[u'backend'] = ChordableDjangoBackend(app)
    return task(*args, **kwargs)
u Override of the default task decorator to specify use of this backend .
12,452
def _cleanup ( self , status , expires_multiplier = 1 ) : u expires = self . expires if isinstance ( self . expires , timedelta ) else timedelta ( seconds = self . expires ) expires = expires * expires_multiplier chords_to_delete = ChordData . objects . filter ( callback_result__date_done__lte = datetime . now ( ) - ex...
u Clean up expired records .
12,453
def on_chord_part_return ( self , task , state , result , propagate = False ) : u with transaction . atomic ( ) : chord_data = ChordData . objects . select_for_update ( ) . get ( callback_result__task_id = task . request . chord [ u'options' ] [ u'task_id' ] ) _ = TaskMeta . objects . update_or_create ( task_id = task ...
u Update the linking ChordData object and execute callback if needed .
12,454
def apply_chord ( self , header , partial_args , group_id , body , ** options ) : u callback_entry = TaskMeta . objects . create ( task_id = body . id ) chord_data = ChordData . objects . create ( callback_result = callback_entry ) for subtask in header : subtask_entry = TaskMeta . objects . create ( task_id = subtask ...
u Instantiate a linking ChordData object before executing subtasks .
12,455
def get_suitable_app(cls, given_app):
    u"""Return *given_app* itself when it already uses a
    ChordableDjangoBackend, otherwise a deep copy fitted with one."""
    if isinstance(getattr(given_app, 'backend', None),
                  ChordableDjangoBackend):
        return given_app
    clone = deepcopy(given_app)
    clone.backend = ChordableDjangoBackend(clone)
    return clone
u Return a clone of given_app with ChordableDjangoBackend if needed .
12,456
def linked_model_for_class ( self , cls , make_constants_variable = False , ** kwargs ) : constructor_args = inspect . getfullargspec ( cls ) . args attribute_tuples = self . attribute_tuples new_model = PriorModel ( cls ) for attribute_tuple in attribute_tuples : name = attribute_tuple . name if name in constructor_ar...
Create a PriorModel wrapping the specified class with attributes from this instance . Priors can be overridden using keyword arguments . Any constructor arguments of the new class for which there is no attribute associated with this class and no keyword argument are created from config .
12,457
def instance_for_arguments ( self , arguments : { Prior : float } ) : for prior , value in arguments . items ( ) : prior . assert_within_limits ( value ) model_arguments = { t . name : arguments [ t . prior ] for t in self . direct_prior_tuples } constant_arguments = { t . name : t . constant . value for t in self . di...
Create an instance of the associated class for a set of arguments
12,458
def gaussian_prior_model_for_arguments ( self , arguments ) : new_model = copy . deepcopy ( self ) model_arguments = { t . name : arguments [ t . prior ] for t in self . direct_prior_tuples } for tuple_prior_tuple in self . tuple_prior_tuples : setattr ( new_model , tuple_prior_tuple . name , tuple_prior_tuple . prior ...
Create a new instance of model mapper with a set of Gaussian priors based on tuples provided by a previous \ nonlinear search .
12,459
def load_post ( self , wp_post_id ) : path = "sites/{}/posts/{}" . format ( self . site_id , wp_post_id ) response = self . get ( path ) if response . ok and response . text : api_post = response . json ( ) self . get_ref_data_map ( bulk_mode = False ) self . load_wp_post ( api_post , bulk_mode = False ) try : post = P...
Refresh local content for a single post from the WordPress REST API . This can be called from a webhook on the WordPress side when a post is updated .
12,460
def load_categories ( self , max_pages = 30 ) : logger . info ( "loading categories" ) if self . purge_first : Category . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/categories" . format ( self . site_id ) params = { "number" : 100 } page = 1 response = self . get ( path , params ) if no...
Load all WordPress categories from the given site .
12,461
def get_new_category(self, api_category):
    """Build (without saving) a Category from WordPress API data."""
    attrs = self.api_object_data("category", api_category)
    return Category(site_id=self.site_id,
                    wp_id=api_category["ID"],
                    **attrs)
Instantiate a new Category from api data .
12,462
def load_tags ( self , max_pages = 30 ) : logger . info ( "loading tags" ) if self . purge_first : Tag . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/tags" . format ( self . site_id ) params = { "number" : 1000 } page = 1 response = self . get ( path , params ) if not response . ok : logg...
Load all WordPress tags from the given site .
12,463
def get_new_tag(self, api_tag):
    """Build (without saving) a Tag from WordPress API data."""
    attrs = self.api_object_data("tag", api_tag)
    return Tag(site_id=self.site_id,
               wp_id=api_tag["ID"],
               **attrs)
Instantiate a new Tag from api data .
12,464
def load_authors ( self , max_pages = 10 ) : logger . info ( "loading authors" ) if self . purge_first : Author . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/users" . format ( self . site_id ) params = { "number" : 100 } page = 1 response = self . get ( path , params ) if not response . ...
Load all WordPress authors from the given site .
12,465
def get_new_author(self, api_author):
    """Build (without saving) an Author from WordPress API data."""
    attrs = self.api_object_data("author", api_author)
    return Author(site_id=self.site_id,
                  wp_id=api_author["ID"],
                  **attrs)
Instantiate a new Author from api data .
12,466
def load_media ( self , max_pages = 150 ) : logger . info ( "loading media" ) if self . purge_first : logger . warning ( "purging ALL media from site %s" , self . site_id ) Media . objects . filter ( site_id = self . site_id ) . delete ( ) path = "sites/{}/media" . format ( self . site_id ) params = { "number" : 100 } ...
Load all WordPress media from the given site .
12,467
def get_new_media(self, api_media):
    """Build (without saving) a Media object from WordPress API data."""
    attrs = self.api_object_data("media", api_media)
    return Media(site_id=self.site_id,
                 wp_id=api_media["ID"],
                 **attrs)
Instantiate a new Media from api data .
12,468
def get_ref_data_map ( self , bulk_mode = True ) : if bulk_mode : self . ref_data_map = { "authors" : { a . wp_id : a for a in Author . objects . filter ( site_id = self . site_id ) } , "categories" : { c . wp_id : c for c in Category . objects . filter ( site_id = self . site_id ) } , "tags" : { t . wp_id : t for t in...
Get referential data from the local db into the self . ref_data_map dictionary . This allows for fast FK lookups when looping through posts .
12,469
def load_posts ( self , post_type = None , max_pages = 200 , status = None ) : logger . info ( "loading posts with post_type=%s" , post_type ) if self . purge_first : Post . objects . filter ( site_id = self . site_id , post_type = post_type ) . delete ( ) path = "sites/{}/posts" . format ( self . site_id ) if not post...
Load all WordPress posts of a given post_type from a site .
12,470
def set_posts_param_modified_after ( self , params , post_type , status ) : if not self . purge_first and not self . full and not self . modified_after : if status == "any" : latest = Post . objects . filter ( post_type = post_type ) . order_by ( "-modified" ) . first ( ) else : latest = Post . objects . filter ( post_...
Set modified_after date to continue where we left off if appropriate
12,471
def load_wp_post ( self , api_post , bulk_mode = True , post_categories = None , post_tags = None , post_media_attachments = None , posts = None ) : if post_categories is None : post_categories = { } if post_tags is None : post_tags = { } if post_media_attachments is None : post_media_attachments = { } if posts is None...
Load a single post from API data .
12,472
def process_post_author ( self , bulk_mode , api_author ) : if bulk_mode : author = self . ref_data_map [ "authors" ] . get ( api_author [ "ID" ] ) if author : self . update_existing_author ( author , api_author ) else : author = Author . objects . create ( site_id = self . site_id , wp_id = api_author [ "ID" ] , ** se...
Create or update an Author related to a post .
12,473
def get_or_create_author(self, api_author):
    """Look up or create an Author matching the given API data.

    Returns the ``(author, created)`` pair from the ORM call.
    """
    defaults = self.api_object_data("author", api_author)
    return Author.objects.get_or_create(site_id=self.site_id,
                                        wp_id=api_author["ID"],
                                        defaults=defaults)
Find or create an Author object given API data .
12,474
def process_post_categories ( self , bulk_mode , api_post , post_categories ) : post_categories [ api_post [ "ID" ] ] = [ ] for api_category in six . itervalues ( api_post [ "categories" ] ) : category = self . process_post_category ( bulk_mode , api_category ) if category : post_categories [ api_post [ "ID" ] ] . appe...
Create or update Categories related to a post .
12,475
def process_post_category ( self , bulk_mode , api_category ) : category = None if bulk_mode : category = self . ref_data_map [ "categories" ] . get ( api_category [ "ID" ] ) if not category : category , created = Category . objects . get_or_create ( site_id = self . site_id , wp_id = api_category [ "ID" ] , defaults =...
Create or update a Category related to a post .
12,476
def process_post_tags(self, bulk_mode, api_post, post_tags):
    """Create or update the Tags related to a post.

    Fills ``post_tags[post_id]`` with the Tag objects resolved from
    the post's API tag data, skipping unresolved entries.
    """
    post_id = api_post["ID"]
    post_tags[post_id] = []
    for api_tag in six.itervalues(api_post["tags"]):
        resolved = self.process_post_tag(bulk_mode, api_tag)
        if resolved:
            post_tags[post_id].append(resolved)
Create or update Tags related to a post .
12,477
def process_post_tag ( self , bulk_mode , api_tag ) : tag = None if bulk_mode : tag = self . ref_data_map [ "tags" ] . get ( api_tag [ "ID" ] ) if not tag : tag , created = Tag . objects . get_or_create ( site_id = self . site_id , wp_id = api_tag [ "ID" ] , defaults = self . api_object_data ( "tag" , api_tag ) ) if ta...
Create or update a Tag related to a post .
12,478
def process_post_media_attachments ( self , bulk_mode , api_post , post_media_attachments ) : post_media_attachments [ api_post [ "ID" ] ] = [ ] for api_attachment in six . itervalues ( api_post [ "attachments" ] ) : attachment = self . process_post_media_attachment ( bulk_mode , api_attachment ) if attachment : post_m...
Create or update Media objects related to a post .
12,479
def process_post_media_attachment ( self , bulk_mode , api_media_attachment ) : attachment = None if bulk_mode : attachment = self . ref_data_map [ "media" ] . get ( api_media_attachment [ "ID" ] ) if not attachment : attachment , created = self . get_or_create_media ( api_media_attachment ) if attachment and not creat...
Create or update a Media attached to a post .
12,480
def get_or_create_media(self, api_media):
    """Look up or create a Media object matching the given API data.

    Returns the ``(media, created)`` pair from the ORM call.
    """
    defaults = self.api_object_data("media", api_media)
    return Media.objects.get_or_create(site_id=self.site_id,
                                       wp_id=api_media["ID"],
                                       defaults=defaults)
Find or create a Media object given API data .
12,481
def process_existing_post ( existing_post , api_post , author , post_categories , post_tags , post_media_attachments ) : existing_post . author = author existing_post . post_date = api_post [ "date" ] existing_post . modified = api_post [ "modified" ] existing_post . title = api_post [ "title" ] existing_post . url = a...
Sync attributes for a single post from WP API data .
12,482
def process_post_many_to_many_field ( existing_post , field , related_objects ) : to_add = set ( related_objects . get ( existing_post . wp_id , set ( ) ) ) - set ( getattr ( existing_post , field ) . all ( ) ) to_remove = set ( getattr ( existing_post , field ) . all ( ) ) - set ( related_objects . get ( existing_post...
Sync data for a many - to - many field related to a post using set differences .
12,483
def bulk_create_posts ( self , posts , post_categories , post_tags , post_media_attachments ) : Post . objects . bulk_create ( posts ) for post_wp_id , categories in six . iteritems ( post_categories ) : Post . objects . get ( site_id = self . site_id , wp_id = post_wp_id ) . categories . add ( * categories ) for post_...
Actually do a db bulk creation of posts and link up the many - to - many fields
12,484
def sync_deleted_attachments ( self , api_post ) : existing_IDs = set ( Post . objects . filter ( site_id = self . site_id , post_type = "attachment" , parent__icontains = '"ID":{}' . format ( api_post [ "ID" ] ) ) . values_list ( "wp_id" , flat = True ) ) if existing_IDs : api_IDs = set ( ) path = "sites/{}/posts/" . ...
Remove Posts with post_type = attachment that have been removed from the given Post on the WordPress side .
12,485
def nextparent ( self , parent , depth ) : if depth > 1 : pdir = os . path . dirname ( self . name ) line = 0 for c , d in parent . traverse ( ) : if line > parent . curline and c . name . startswith ( pdir ) : parent . curline += 1 line += 1 else : line = - 1 for c , d in parent . traverse ( ) : if line > parent . cur...
Add lines to current line by traversing the grandparent object again and once we reach our current line counting every line that is prefixed with the parent directory .
12,486
def prevparent ( self , parent , depth ) : pdir = os . path . dirname ( self . name ) if depth > 1 : for c , d in parent . traverse ( ) : if c . name == self . name : break if c . name . startswith ( pdir ) : parent . curline -= 1 else : pdir = self . name line = - 1 for c , d in parent . traverse ( ) : if c . name == ...
Subtract lines from our curline if the name of a node is prefixed with the parent directory when traversing the grandparent object .
12,487
def token ( config , token ) : if not token : info_out ( "To generate a personal API token, go to:\n\n\t" "https://github.com/settings/tokens\n\n" "To read more about it, go to:\n\n\t" "https://help.github.com/articles/creating-an-access" "-token-for-command-line-use/\n\n" 'Remember to enable "repo" in the scopes.' ) t...
Store and fetch a GitHub access token
12,488
def log_response ( handler ) : content_type = handler . _headers . get ( 'Content-Type' , None ) headers_str = handler . _generate_headers ( ) block = 'Response Infomations:\n' + headers_str . strip ( ) if content_type and ( 'text' in content_type or 'json' in content_type ) : limit = 0 if 'LOG_RESPONSE_LINE_LIMIT' in ...
Actually, logging the response is not a server's responsibility; you should use HTTP tools like Chrome Developer Tools to analyse the response .
12,489
def log_request ( handler ) : block = 'Request Infomations:\n' + _format_headers_log ( handler . request . headers ) if handler . request . arguments : block += '+----Arguments----+\n' for k , v in handler . request . arguments . items ( ) : block += '| {0:<15} | {1:<15} \n' . format ( repr ( k ) , repr ( v ) ) app_log...
Logging the request is the opposite of logging the response; sometimes it is necessary, so feel free to enable it .
12,490
def _exception_default_handler ( self , e ) : if isinstance ( e , HTTPError ) : if e . log_message : format = "%d %s: " + e . log_message args = [ e . status_code , self . _request_summary ( ) ] + list ( e . args ) app_log . warning ( format , * args ) if e . status_code not in httplib . responses : app_log . error ( "...
This method is a copy of tornado . web . RequestHandler . _handle_request_exception
12,491
def _handle_request_exception ( self , e ) : handle_func = self . _exception_default_handler if self . EXCEPTION_HANDLERS : for excs , func_name in self . EXCEPTION_HANDLERS . items ( ) : if isinstance ( e , excs ) : handle_func = getattr ( self , func_name ) break handle_func ( e ) if not self . _finished : self . fin...
This method handle HTTPError exceptions the same as how tornado does leave other exceptions to be handled by user defined handler function maped in class attribute EXCEPTION_HANDLERS
12,492
def flush(self, *args, **kwgs):
    """Log the response (unless it is a 500) right before the final
    write buffer is flushed, then delegate to the parent handler."""
    if settings['LOG_RESPONSE'] and self._status_code != 500:
        log_response(self)
    super(BaseHandler, self).flush(*args, **kwgs)
Before RequestHandler . flush was called we got the final _write_buffer .
12,493
def write_json ( self , chunk , code = None , headers = None ) : assert chunk is not None , 'None cound not be written in write_json' self . set_header ( "Content-Type" , "application/json; charset=UTF-8" ) if isinstance ( chunk , dict ) or isinstance ( chunk , list ) : chunk = self . json_encode ( chunk ) try : chunk ...
A convenient method that binds chunk code headers together
12,494
def write_file ( self , file_path , mime_type = None ) : if not os . path . exists ( file_path ) : raise HTTPError ( 404 ) if not os . path . isfile ( file_path ) : raise HTTPError ( 403 , "%s is not a file" , file_path ) stat_result = os . stat ( file_path ) modified = datetime . datetime . fromtimestamp ( stat_result...
Copy from tornado . web . StaticFileHandler
12,495
def prepare(self):
    """Middleware-style hook run between the raw request and handling.

    Optionally logs the request, then invokes every ``prepare_<name>``
    method listed in ``self.PREPARES``, stopping early as soon as one
    of them finishes the request.
    """
    if settings['LOG_REQUEST']:
        log_request(self)
    for name in self.PREPARES:
        getattr(self, 'prepare_' + name)()
        if self._finished:
            return
Behaves like a middleware between raw request and handling process
12,496
def csv_to_dicts(file, header=None):
    """Read a CSV file into a list of dicts keyed by the header row.

    Parameters
    ----------
    file : str
        Path of the CSV file to read.
    header : list of str, optional
        Field names to use. When omitted, the first row of the file
        supplies the keys (``csv.DictReader`` default).

    Returns
    -------
    list of dict
        One dict per data row.
    """
    # newline='' is the documented way to open files for the csv
    # module; it prevents newline translation from corrupting quoted
    # fields that contain embedded line breaks.
    with open(file, newline='') as csvfile:
        return list(csv.DictReader(csvfile, fieldnames=header))
Reads a csv and returns a List of Dicts with keys given by header row .
12,497
def cli(config, configfile, verbose):
    """Command-line entry point: record the options on the config
    object and create an empty config file if none exists yet."""
    config.verbose = verbose
    config.configfile = configfile
    if not os.path.isfile(configfile):
        state.write(configfile, {})
A glorious command line tool to make your life with git GitHub and Bugzilla much easier .
12,498
def parse_uri ( self , uri = None ) : if not uri : return rdflib . term . URIRef ( self . root ) elif type ( uri ) == str : if type ( uri ) == str and not uri . startswith ( 'http' ) : return rdflib . term . URIRef ( "%s%s" % ( self . root , uri ) ) else : return rdflib . term . URIRef ( uri ) elif type ( uri ) == rdfl...
parses and cleans up possible uri inputs return instance of rdflib . term . URIRef
12,499
def create_resource(self, resource_type=None, uri=None):
    """Convenience method for creating a new resource.

    Instantiates *resource_type* with this repository and *uri*;
    raises TypeError for anything outside the supported set.
    """
    supported = (NonRDFSource, Binary, BasicContainer,
                 DirectContainer, IndirectContainer)
    if resource_type in supported:
        return resource_type(self, uri)
    raise TypeError(
        "expecting Resource type, such as BasicContainer or NonRDFSource")
Convenience method for creating a new resource