idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
51,200
def bincount(dig, weight, minlength):
    """Count occurrences in ``dig``, supporting a scalar or per-element weight.

    A scalar weight scales an unweighted bincount; a vector weight is
    handed straight to numpy's weighted bincount.
    """
    if numpy.isscalar(weight):
        # Uniform weight: scale the plain histogram instead of building
        # a full weight vector.
        counts = numpy.bincount(dig, minlength=minlength)
        return counts * weight
    return numpy.bincount(dig, weights=weight, minlength=minlength)
bincount supporting scalar and vector weight
51,201
def reset_network(message):
    """Run each configured network-restart command, then print *message*.

    Commands in ``settings.RESTART_NETWORK`` are attempted best-effort: a
    command that is missing or exits non-zero is skipped so the remaining
    commands still run.
    """
    for command in settings.RESTART_NETWORK:
        try:
            subprocess.check_call(command)
        except (subprocess.CalledProcessError, OSError):
            # Best-effort: tolerate a failing/missing command, but no longer
            # swallow unrelated exceptions (e.g. KeyboardInterrupt) the way
            # a bare ``except:`` did.
            pass
    print(message)
Resets the users network to make changes take effect
51,202
def improve():
    """Disable access to configured distractor sites via the hosts file.

    Appends a redirect entry for every distractor domain (and each
    configured sub-domain of it) between START/END marker tokens — unless
    the markers are already present — then resets the network.
    """
    with open(settings.HOSTS_FILE, "r+") as hosts_file:
        contents = hosts_file.read()
        # Only append when neither marker exists yet, so repeated calls
        # don't duplicate the managed block.
        if not settings.START_TOKEN in contents and not settings.END_TOKEN in contents:
            hosts_file.write(settings.START_TOKEN + "\n")
            # set() de-duplicates the configured distractor list.
            for site in set(settings.DISTRACTORS):
                hosts_file.write("{0}\t{1}\n".format(settings.REDIRECT_TO, site))
                for sub_domain in settings.SUB_DOMAINS:
                    hosts_file.write("{0}\t{1}.{2}\n".format(settings.REDIRECT_TO, sub_domain, site))
            hosts_file.write(settings.END_TOKEN + "\n")
    reset_network("Concentration is now improved :D!")
Disables access to websites that are defined as distractors
51,203
def lose():
    """Re-enable access to distractor sites.

    Strips the marker-delimited block written by ``improve()`` from the
    hosts file (everything between START_TOKEN and END_TOKEN inclusive)
    and resets the network.
    """
    changed = False
    with open(settings.HOSTS_FILE, "r") as hosts_file:
        new_file = []
        in_block = False
        for line in hosts_file:
            if in_block:
                # Drop lines inside the managed block until END_TOKEN.
                if line.strip() == settings.END_TOKEN:
                    in_block = False
                    changed = True
            elif line.strip() == settings.START_TOKEN:
                in_block = True
            else:
                new_file.append(line)
    if changed:
        # Rewrite only when a block was actually removed.
        with open(settings.HOSTS_FILE, "w") as hosts_file:
            hosts_file.write("".join(new_file))
    reset_network("Concentration is now lost :(.")
Enables access to websites that are defined as distractors
51,204
def take_break(minutes: hug.types.number = 5):
    """Temporarily lift the distraction block for *minutes* minutes.

    First gives the user a 60 second countdown to abort (Ctrl-C keeps
    concentration), then disables the block, counts down the break, and
    finally re-enables the block via ``improve()``.
    """
    print("")
    print("######################################### ARE YOU SURE? #####################################")
    try:
        # 60 second grace period; Ctrl-C here aborts the break entirely.
        for remaining in range(60, -1, -1):
            sys.stdout.write("\r")
            sys.stdout.write("{:2d} seconds to change your mind. Won't you prefer programming? Or a book?".format(remaining))
            sys.stdout.flush()
            time.sleep(1)
    except KeyboardInterrupt:
        print("")
        print("")
        print(":D :D :D\nGood on you! <3")
        return
    lose()
    print("")
    print("######################################### TAKING A BREAK ####################################")
    try:
        for remaining in range(minutes * 60, -1, -1):
            sys.stdout.write("\r")
            sys.stdout.write("{:2d} seconds remaining without concentration.".format(remaining))
            sys.stdout.flush()
            time.sleep(1)
    except KeyboardInterrupt:
        # Ctrl-C ends the break early; cleanup happens in ``finally``.
        pass
    finally:
        sys.stdout.write("\rEnough distraction! \n")
        print("######################################### BREAK OVER :) #####################################")
        print("")
        improve()
Enables temporarily breaking concentration
51,205
def _si(number):
    """Format *number* with base-2 SI prefixes (K = 1024), e.g. 2048 -> '2.00K'."""
    prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    # Use >= so exact powers of 1024 are promoted (1024 -> '1.00K', not
    # '1024.00'), and stop at the last prefix instead of raising IndexError
    # for astronomically large inputs.
    while number >= 1024 and len(prefixes) > 1:
        number /= 1024.0
        prefixes.pop(0)
    return '%0.2f%s' % (number, prefixes[0])
Format a number using base-2 SI prefixes (K = 1024).
51,206
def _get_url(url):
    """Retrieve *url* (streaming) via the shared HTTP session.

    Raises FetcherException on any request error or non-2xx status.
    """
    try:
        response = HTTP_SESSION.get(url, stream=True)
        response.raise_for_status()
    except requests.exceptions.RequestException as exc:
        raise FetcherException(exc)
    return response
Retrieve requested URL
51,207
def _extract_file(zip_fp, info, path):
    """Extract a single zip member and restore its unix permissions.

    The read bit is forced on so the extracted file is always accessible.
    """
    zip_fp.extract(info.filename, path=path)
    extracted = os.path.join(path, info.filename)
    # Unix permission bits live in the high 16 bits of external_attr.
    mode = (info.external_attr >> 16) | stat.S_IREAD
    os.chmod(extracted, mode)
Extract files while explicitly setting the proper permissions
51,208
def build_string(self):
    """Compose the build-flag suffix used in Taskcluster build names.

    Optional flags contribute their fragment only when enabled; either
    '-debug' or '-opt' is always appended last.
    """
    parts = []
    if self.coverage:
        parts.append('-ccov')
    if self.fuzzing:
        parts.append('-fuzzing')
    if self.asan:
        parts.append('-asan')
    if self.valgrind:
        parts.append('-valgrind')
    parts.append('-debug' if self.debug else '-opt')
    return ''.join(parts)
Taskcluster denotes builds in one of two formats, e.g. linux64-asan or linux64-asan-opt. The latter is generated first; if it fails, the caller should try the former.
51,209
def auto_name_prefix(self):
    """Return a '<platform>-' prefix for cross-platform downloads.

    Empty when the requested system/CPU match the host, so native
    downloads keep their plain names.
    """
    host_system = std_platform.system()
    raw_machine = std_platform.machine()
    host_machine = self.CPU_ALIASES.get(raw_machine, raw_machine)
    if (host_system, host_machine) == (self.system, self.machine):
        return ''
    # Map gecko platform names onto the download naming scheme.
    aliases = {
        'linux': 'linux32',
        'android-api-16': 'android-arm',
        'android-aarch64': 'android-arm64',
    }
    return aliases.get(self.gecko_platform, self.gecko_platform) + '-'
Generate platform prefix for cross - platform downloads .
51,210
def iterall(cls, build, branch, flags, platform=None):
    """Generate all candidate BuildTasks for (build, branch, flags, platform).

    *build* may be a pushdate (matched by RE_DATE), a changeset revision
    (RE_REV), 'latest', or a raw Taskcluster namespace.  Each candidate
    URL is additionally retried with '-opt' stripped, since Taskcluster
    denotes builds both with and without the '-opt' suffix.
    """
    if platform is None:
        platform = Platform()
    target_platform = platform.gecko_platform
    is_namespace = False
    if cls.RE_DATE.match(build):
        # Pushdate builds: one URL per namespace entry for that date.
        task_urls = map(''.join,
                        itertools.product(cls._pushdate_urls(build.replace('-', '.'), branch, target_platform),
                                          (flags.build_string(),)))
    elif cls.RE_REV.match(build):
        task_urls = (cls._revision_url(build.lower(), branch, target_platform) + flags.build_string(),)
    elif build == 'latest':
        namespace = 'gecko.v2.mozilla-' + branch + '.latest'
        product = 'mobile' if 'android' in target_platform else 'firefox'
        task_urls = (cls.URL_BASE + '/task/' + namespace + '.' + product + '.' + target_platform + flags.build_string(),)
    else:
        # Assume *build* is already a full Taskcluster namespace.
        task_urls = (cls.URL_BASE + '/task/' + build,)
        is_namespace = True
    for (url, try_wo_opt) in itertools.product(task_urls, (False, True)):
        if try_wo_opt:
            # Second pass: retry without '-opt' (not applicable to raw
            # namespaces or URLs that never carried the suffix).
            if '-opt' not in url or is_namespace:
                continue
            url = url.replace('-opt', '')
        try:
            data = HTTP_SESSION.get(url)
            data.raise_for_status()
        except requests.exceptions.RequestException:
            # This candidate doesn't exist; try the next one.
            continue
        obj = cls(None, None, None, _blank=True)
        obj.url = url
        obj._data = data.json()
        LOG.debug('Found archive for %s', cls._debug_str(build))
        yield obj
Generator for all possible BuildTasks with these parameters
51,211
def _pushdate_urls(cls, pushdate, branch, target_platform):
    """Yield task URLs for every namespace entry under the given push date.

    Multiple entries exist per push date; they are yielded sorted by name
    so callers can iterate until a working entry is found.  Raises
    FetcherException when the namespace listing cannot be fetched.
    """
    url_base = cls.URL_BASE + '/namespaces/gecko.v2.mozilla-' + branch + '.pushdate.' + pushdate
    try:
        base = HTTP_SESSION.post(url_base, json={})
        base.raise_for_status()
    except requests.exceptions.RequestException as exc:
        raise FetcherException(exc)
    product = 'mobile' if 'android' in target_platform else 'firefox'
    json = base.json()
    for namespace in sorted(json['namespaces'], key=lambda x: x['name']):
        yield cls.URL_BASE + '/task/' + namespace['namespace'] + '.' + product + '.' + target_platform
Multiple entries exist per push date . Iterate over all until a working entry is found
51,212
def _revision_url(cls, rev, branch, target_platform):
    """Build the Taskcluster task URL for a revision-based build."""
    product = 'mobile' if 'android' in target_platform else 'firefox'
    namespace = '.'.join(('gecko.v2.mozilla-' + branch + '.revision.' + rev,
                          product, target_platform))
    return cls.URL_BASE + '/task/' + namespace
Retrieve the URL for revision based builds
51,213
def iterall(cls, target, branch, build, flags, platform=None):
    """Yield one instance of *cls* per available build matching this type.

    *flags* is coerced into a BuildFlags tuple before being handed to
    BuildTask.iterall.
    """
    flags = BuildFlags(*flags)
    for task in BuildTask.iterall(build, branch, flags, platform):
        yield cls(target, branch, task, flags, platform)
Return an iterable for all available builds matching a particular build type
51,214
def _artifacts(self):
    """Return the task's artifacts list, memoised after the first fetch."""
    try:
        return self._memo['_artifacts']
    except KeyError:
        artifacts = _get_url(self._artifacts_url).json()['artifacts']
        self._memo['_artifacts'] = artifacts
        return artifacts
Retrieve the artifacts json object
51,215
def _artifact_base(self):
    """Return the basename shared by the build's artifacts (memoised).

    Builds are <base>.tar.bz2, info is <base>.json, shell is
    <base>.jsshell.zip, etc.  Raises FetcherException when no artifact
    name matches ``re_target``.
    """
    if '_artifact_base' not in self._memo:
        matches = [a['name'] for a in self._artifacts
                   if self.re_target.search(a['name']) is not None]
        if not matches:
            raise FetcherException('Could not find build info in artifacts')
        # First match wins, mirroring the artifact listing order.
        self._memo['_artifact_base'] = os.path.splitext(matches[0])[0]
    return self._memo['_artifact_base']
Build the artifact basename Builds are base . tar . bz2 info is base . json shell is base . jsshell . zip ...
51,216
def build_info(self):
    """Return the build's info JSON, fetching and caching it on first use."""
    try:
        return self._memo['build_info']
    except KeyError:
        info = _get_url(self.artifact_url('json')).json()
        self._memo['build_info'] = info
        return info
Return the build's info.
51,217
def moz_info(self):
    """Return the build's mozinfo JSON, cached in ``self._memo``."""
    if 'moz_info' in self._memo:
        return self._memo['moz_info']
    fetched = _get_url(self.artifact_url('mozinfo.json')).json()
    self._memo['moz_info'] = fetched
    return fetched
Return the build's mozinfo.
51,218
def _layout_for_domfuzz(self, path):
    """Rearrange an extracted build under *path* so DOMFuzz can use it.

    Creates a dist/bin link pointing at the binaries, with per-platform
    handling (macOS .app bundle, Linux symlink, Windows junction).
    Restores the original working directory on exit.
    """
    old_dir = os.getcwd()
    os.chdir(os.path.join(path))
    try:
        os.mkdir('dist')
        link_name = os.path.join('dist', 'bin')
        if self._platform.system == 'Darwin' and self._target == 'firefox':
            # macOS: binaries live inside the .app bundle; link dist/bin to
            # them and expose the symbols dir next to the binary.
            ff_loc = glob.glob('*.app/Contents/MacOS/firefox')
            assert len(ff_loc) == 1
            os.symlink(os.path.join(os.pardir, os.path.dirname(ff_loc[0])), link_name)
            os.symlink(os.path.join(os.pardir, os.pardir, os.pardir, 'symbols'),
                       os.path.join(os.path.dirname(ff_loc[0]), 'symbols'))
        elif self._platform.system == 'Linux':
            os.symlink(os.pardir, link_name)
        elif self._platform.system == 'Windows':
            # Windows can't symlink without privileges; use a junction.
            junction_path.symlink(os.curdir, link_name)
    finally:
        os.chdir(old_dir)
Update directory to work with DOMFuzz
51,219
def _write_fuzzmanagerconf(self, path):
    """Write the FuzzManager config file for the selected build under *path*.

    Records platform/product/version in [Main] and source path prefix plus
    build flags in [Metadata]; the file's location and name depend on the
    target OS.
    """
    output = configparser.RawConfigParser()
    output.add_section('Main')
    output.set('Main', 'platform', self.moz_info['processor'].replace('_', '-'))
    output.set('Main', 'product', 'mozilla-' + self._branch)
    output.set('Main', 'product_version', '%.8s-%.12s' % (self.build_id, self.changeset))
    # Normalise mozinfo OS names to FuzzManager's vocabulary.
    os_name = self.moz_info['os'].lower()
    if os_name.startswith('android'):
        output.set('Main', 'os', 'android')
    elif os_name.startswith('lin'):
        output.set('Main', 'os', 'linux')
    elif os_name.startswith('mac'):
        output.set('Main', 'os', 'macosx')
    elif os_name.startswith('win'):
        output.set('Main', 'os', 'windows')
    else:
        # Unknown OS: pass the raw mozinfo value through.
        output.set('Main', 'os', self.moz_info['os'])
    output.add_section('Metadata')
    output.set('Metadata', 'pathPrefix', self.moz_info['topsrcdir'])
    output.set('Metadata', 'buildFlags', self._flags.build_string().lstrip('-'))
    if self._platform.system == "Windows":
        fm_name = self._target + '.exe.fuzzmanagerconf'
        conf_path = os.path.join(path, 'dist', 'bin', fm_name)
    elif self._platform.system == "Android":
        conf_path = os.path.join(path, 'target.apk.fuzzmanagerconf')
    else:
        fm_name = self._target + '.fuzzmanagerconf'
        conf_path = os.path.join(path, 'dist', 'bin', fm_name)
    with open(conf_path, 'w') as conf_fp:
        output.write(conf_fp)
Write fuzzmanager config file for selected build
51,220
def extract_zip(self, suffix, path='.'):
    """Download the zip artifact for *suffix* and extract it into *path*."""
    handle, tmp_zip = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip')
    os.close(handle)
    try:
        _download_url(self.artifact_url(suffix), tmp_zip)
        LOG.info('.. extracting')
        with zipfile.ZipFile(tmp_zip) as archive:
            # Extract one member at a time so permissions can be restored.
            for member in archive.infolist():
                _extract_file(archive, member, path)
    finally:
        # Always remove the temporary download.
        os.unlink(tmp_zip)
Download and extract a zip artifact
51,221
def download_apk(self, path='.'):
    """Download the Android .apk artifact and place it at <path>/target.apk."""
    handle, tmp_apk = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
    os.close(handle)
    try:
        _download_url(self.artifact_url('apk'), tmp_apk)
        shutil.copy(tmp_apk, os.path.join(path, 'target.apk'))
    finally:
        # Always remove the temporary download.
        os.unlink(tmp_apk)
Download Android . apk
51,222
def extract_dmg(self, path='.'):
    """Download the .dmg artifact and extract the contained .app into *path*.

    On macOS the image is mounted with hdiutil and the single .app bundle
    is copied out; elsewhere extraction isn't possible, so the raw
    target.dmg is copied into *path* instead.  Temporary files are always
    cleaned up.
    """
    dmg_fd, dmg_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.dmg')
    os.close(dmg_fd)
    out_tmp = tempfile.mkdtemp(prefix='fuzzfetch-', suffix='.tmp')
    try:
        _download_url(self.artifact_url('dmg'), dmg_fn)
        if std_platform.system() == 'Darwin':
            LOG.info('.. extracting')
            subprocess.check_call(['hdiutil', 'attach', '-quiet', '-mountpoint', out_tmp, dmg_fn])
            try:
                apps = [mt for mt in os.listdir(out_tmp) if mt.endswith('app')]
                assert len(apps) == 1
                shutil.copytree(os.path.join(out_tmp, apps[0]), os.path.join(path, apps[0]), symlinks=True)
            finally:
                # Detach the image even if the copy failed.
                subprocess.check_call(['hdiutil', 'detach', '-quiet', out_tmp])
        else:
            LOG.warning('.. can\'t extract target.dmg on %s', std_platform.system())
            shutil.copy(dmg_fn, os.path.join(path, 'target.dmg'))
    finally:
        shutil.rmtree(out_tmp, onerror=onerror)
        os.unlink(dmg_fn)
Extract builds with . dmg extension
51,223
def main(cls):
    """fuzzfetch CLI entry point: resolve a build task, then download/extract it.

    Honours the DEBUG environment variable for verbose logging, supports a
    dry-run mode, and removes the output directory again if extraction fails.
    """
    log_level = logging.INFO
    log_fmt = '[%(asctime)s] %(message)s'
    if bool(os.getenv('DEBUG')):
        log_level = logging.DEBUG
        log_fmt = '%(levelname).1s %(name)s [%(asctime)s] %(message)s'
    logging.basicConfig(format=log_fmt, datefmt='%Y-%m-%d %H:%M:%S', level=log_level)
    logging.getLogger('requests').setLevel(logging.WARNING)
    obj, extract_args = cls.from_args()
    LOG.info('Identified task: %s', obj.task_url)
    LOG.info('> Task ID: %s', obj.task_id)
    LOG.info('> Rank: %s', obj.rank)
    LOG.info('> Changeset: %s', obj.changeset)
    LOG.info('> Build ID: %s', obj.build_id)
    if extract_args['dry_run']:
        return
    out = extract_args['out']
    os.mkdir(out)
    try:
        obj.extract_build(out, tests=extract_args['tests'], full_symbols=extract_args['full_symbols'])
        os.makedirs(os.path.join(out, 'download'))
        with open(os.path.join(out, 'download', 'firefox-temp.txt'), 'a') as dl_fd:
            dl_fd.write('buildID=' + obj.build_id + os.linesep)
    except:  # noqa: E722 -- clean up the partial output dir, then re-raise
        if os.path.isdir(out):
            junction_path.rmtree(out)
        raise
fuzzfetch main entry point
51,224
def sort_url_qsl(cls, raw_url, **kwargs):
    """Return *raw_url* rebuilt with its query parameters sorted.

    Extra keyword arguments are forwarded to ``sorted``.
    """
    parts = urlparse(raw_url)
    ordered_params = sorted(parse_qsl(parts.query), **kwargs)
    return cls._join_url(parts, ordered_params)
Do nothing but sort the params of url .
51,225
def clean_url(self):
    """Queue one 'qsl' task per query parameter, each dropping that parameter.

    Returns self for chaining.
    """
    parsed = urlparse(self.request['url'])
    params = parse_qsl(parsed.query)
    for candidate in params:
        # Identity comparison keeps duplicate (key, value) pairs distinct.
        remaining = [item for item in params if item is not candidate]
        trial = deepcopy(self.request)
        trial['url'] = self._join_url(parsed, remaining)
        self._add_task('qsl', candidate, trial)
    return self
Only clean the url params and return self .
51,226
def clean_cookie(self):
    """Queue one 'Cookie' task per cookie, each with that cookie removed.

    No-op (still returning self) when cookies are not necessary.
    """
    if not self.is_cookie_necessary:
        return self
    headers = self.request.get('headers', {})
    jar = SimpleCookie(headers['Cookie'])
    for name, morsel in jar.items():
        trimmed = '; '.join(m.OutputString() for m in jar.values() if m != morsel)
        trial = deepcopy(self.request)
        trial['headers']['Cookie'] = trimmed
        self._add_task('Cookie', name, trial)
    return self
Only clean the cookie from headers and return self .
51,227
def reset_new_request(self):
    """Strip everything recorded in ``self.ignore`` from ``self.new_request``.

    Removes ignorable query params, headers, cookies and (for POSTs)
    form/json body fields, then returns the slimmed-down
    ``self.new_request``.
    """
    raw_url = self.new_request['url']
    parsed_url = urlparse(raw_url)
    qsl = parse_qsl(parsed_url.query)
    # Drop ignorable query-string pairs.
    new_url = self._join_url(parsed_url, [i for i in qsl if i not in self.ignore['qsl']])
    self.new_request['url'] = new_url
    self.logger_function('ignore: %s' % self.ignore)
    for key in self.ignore['headers']:
        self.new_request['headers'].pop(key)
    if not self.new_request.get('headers'):
        self.new_request.pop('headers', None)
    # Rebuild the Cookie header minus ignorable cookies, unless the whole
    # Cookie header was already removed above.
    if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']:
        headers = self.new_request['headers']
        # Title-case header names so the lookup is case-insensitive.
        headers = {key.title(): headers[key] for key in headers}
        if 'Cookie' in headers:
            cookies = SimpleCookie(headers['Cookie'])
            new_cookie = '; '.join([i[1].OutputString() for i in cookies.items() if i[0] not in self.ignore['Cookie']])
            self.new_request['headers']['Cookie'] = new_cookie
    if self.new_request['method'] == 'post':
        data = self.new_request.get('data')
        if data:
            if isinstance(data, dict):
                # Form-encoded body: drop ignorable fields in place.
                for key in self.ignore['form_data']:
                    data.pop(key)
            if (not data) or self.ignore['total_data']:
                # Body became empty or is entirely ignorable.
                self.new_request.pop('data', None)
        if self.has_json_data and 'data' in self.new_request:
            # JSON body: decode, drop ignorable keys, re-encode.
            json_data = json.loads(data.decode(self.encoding))
            for key in self.ignore['json_data']:
                json_data.pop(key)
            self.new_request['data'] = json.dumps(json_data).encode(self.encoding)
    return self.new_request
Remove the non - sense args from the self . ignore return self . new_request
51,228
def result(self):
    """Run the whole clean: clean_all + reset_new_request.

    Lazily populates the task list, waits for the concurrent request
    checks, records which pieces are ignorable, and returns the cleaned
    ``self.new_request``.
    """
    if not self.tasks:
        self.clean_all()
    tasks_length = len(self.tasks)
    self.logger_function('%s tasks of request, will cost at least %s seconds.' % (
        tasks_length, round(self.req.interval / self.req.n * tasks_length, 2)))
    # Block until all queued requests have finished.
    self.req.x
    for task in self.tasks:
        key, value, fut = task
        # fut.x is the response, fut.cx the check result -- a piece is
        # ignorable only when the request still works without it.
        if fut.x and fut.cx:
            self.ignore[key].append(value)
    return self.reset_new_request()
Whole task clean_all + reset_new_request return self . new_request .
51,229
def main():
    """Entry point for the HelpMe command line application.

    Parses arguments, dispatches to the chosen sub-command's ``main``
    (config, list, or a helper), and prints usage when no or invalid
    arguments are given.
    """
    parser = get_parser()
    subparsers = get_subparsers(parser)

    def help(return_code=0):
        # Print a version banner plus argparse usage, then exit.
        version = helpme.__version__
        bot.custom(message='Command Line Tool v%s' % version, prefix='\n[HelpMe] ', color='CYAN')
        parser.print_help()
        sys.exit(return_code)

    if len(sys.argv) == 1:
        help()
    try:
        args, unknown = parser.parse_known_args()
    except:  # noqa: E722 -- argparse already reported the error
        sys.exit(0)
    extras = None
    if args.command in HELPME_HELPERS and len(unknown) > 0:
        extras = unknown
    if args.debug is False:
        os.environ['MESSAGELEVEL'] = "INFO"
    if args.version is True:
        print(helpme.__version__)
        sys.exit(0)
    # Late imports rebind the local ``main`` to the sub-command's entry point.
    if args.command == "config":
        from .config import main
    if args.command == "list":
        from .list import main
    if args.command in HELPME_HELPERS:
        from .help import main
    return_code = 0
    try:
        main(args, extras)
        sys.exit(return_code)
    except UnboundLocalError:
        # No sub-command matched, so the local ``main`` was never bound.
        return_code = 1
        help(return_code)
the main entry point for the HelpMe Command line application . Currently the user can request help or set config values for a particular helper .
51,230
def saveOverlayToDicomCopy(input_dcmfilelist, output_dicom_dir, overlays, crinfo, orig_shape):
    """Save segmentation overlays into a copy of the DICOM series.

    Each overlay is uncropped (in place) back to *orig_shape* using
    *crinfo*, then embedded into copies of the input DICOM files written
    under *output_dicom_dir* (created if missing).
    """
    from . import datawriter as dwriter
    if not os.path.exists(output_dicom_dir):
        os.makedirs(output_dicom_dir)
    import imtools.image_manipulation
    for key in overlays:
        # Restore the overlay to the full original image shape.
        overlays[key] = imtools.image_manipulation.uncrop(overlays[key], crinfo, orig_shape)
    dw = dwriter.DataWriter()
    dw.DataCopyWithOverlay(input_dcmfilelist, output_dicom_dir, overlays)
Save overlay to dicom .
51,231
def __get_segmentation_path(self, path):
    """Return *path* with a '_segmentation' suffix inserted before its extension."""
    root, extension = os.path.splitext(path)
    return "".join((root, "_segmentation", extension))
Create path with _segmentation suffix and keep extension .
51,232
def add_overlay_to_slice_file(self, filename, overlay, i_overlay, filename_out=None):
    """Add *overlay* (at overlay slot *i_overlay*) to an existing DICOM file.

    When *filename_out* is None the (unexpanded) input path is reused as
    the output path.
    """
    if filename_out is None:
        filename_out = filename
    source = op.expanduser(filename)
    dataset = dicom.read_file(source)
    dataset = self.encode_overlay_slice(dataset, overlay, i_overlay)
    dataset.save_as(filename_out)
Function adds overlay to existing file .
51,233
def deprecated(instructions):
    """Decorator factory flagging a function as deprecated.

    Each call to the wrapped function emits a DeprecatedWarning attributed
    to the caller's source location, then runs the original function.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Attribute the warning to the call site, not this wrapper.
            caller = inspect.currentframe().f_back
            warnings.warn_explicit(
                'Call to deprecated function {}. {}'.format(func.__name__, instructions),
                category=DeprecatedWarning,
                filename=inspect.getfile(caller.f_code),
                lineno=caller.f_lineno,
            )
            return func(*args, **kwargs)
        return wrapper
    return decorator
Flags a method as deprecated .
51,234
def log_verbose(self, message):
    """Write *message* only when the logging level is at least verbose."""
    verbose_enough = self.get_verbosity() >= Output.VERBOSITY_VERBOSE
    if verbose_enough:
        self.writeln(message)
Logs a message only when logging level is verbose .
51,235
def log_very_verbose(self, message):
    """Write *message* only when the logging level is at least very verbose."""
    noisy_enough = self.get_verbosity() >= Output.VERBOSITY_VERY_VERBOSE
    if noisy_enough:
        self.writeln(message)
Logs a message only when logging level is very verbose .
51,236
async def get_feed(config, url):
    """Fetch a feed, using the cache for conditional requests.

    Returns (current, previous, changed): the freshly-parsed feed (or the
    cached one when the server says not-modified), the cached previous
    version, and whether the content meaningfully changed.
    """
    LOGGER.debug("++WAIT: cache get feed %s", url)
    previous = config.cache.get('feed', url, schema_version=SCHEMA_VERSION) if config.cache else None
    LOGGER.debug("++DONE: cache get feed %s", url)
    # Reuse cached validators (ETag/Last-Modified) for a conditional GET.
    headers = previous.caching if previous else None
    LOGGER.debug("++WAIT: request get %s", url)
    request = await utils.retry_get(config, url, headers=headers)
    LOGGER.debug("++DONE: request get %s", url)
    if not request or not request.success:
        LOGGER.error("Could not get feed %s: %d", url, request.status if request else -1)
        return None, previous, False
    if request.cached:
        # Not modified: the cached version is still current.
        LOGGER.debug("%s: Reusing cached version", url)
        return previous, previous, False
    current = Feed(request)
    if config.cache:
        LOGGER.debug("%s: Saving to cache", url)
        LOGGER.debug("++WAIT: cache set feed %s", url)
        config.cache.set('feed', url, current)
        LOGGER.debug("++DONE: cache set feed %s", url)
    LOGGER.debug("%s: Returning new content", url)
    return current, previous, (not previous or current.digest != previous.digest or current.status != previous.status)
Get a feed
51,237
def archive_namespace(self):
    """Return the prefix bound to the RFC5005 feed-history namespace, or None."""
    try:
        namespace_items = self.feed.namespaces.items()
    except AttributeError:
        # No parsed feed / no namespace map -> not an RFC5005 feed.
        return None
    for prefix, ns_url in namespace_items:
        if ns_url == 'http://purl.org/syndication/history/1.0':
            return prefix
    return None
Returns the known namespace of the RFC5005 extension if any
51,238
def is_archive(self):
    """Return True when the parsed feed is an RFC5005 archive feed.

    Prefers the explicit fh:archive / fh:current markers when the RFC5005
    namespace is declared; otherwise falls back to link-rel heuristics
    ('current' pointing somewhere other than 'self').
    """
    prefix = self.archive_namespace
    if prefix:
        if prefix + '_archive' in self.feed.feed:
            return True
        if prefix + '_current' in self.feed.feed:
            return False
    rels = collections.defaultdict(list)
    for link in self.feed.feed.links:
        rels[link.rel].append(link.href)
    if 'current' not in rels:
        return False
    return 'self' not in rels or rels['self'] != rels['current']
Given a parsed feed returns True if this is an archive feed
51,239
async def update_websub(self, config, hub):
    """Notify a WebSub *hub* that this feed's URL has been updated.

    All failures are logged, never raised.
    """
    try:
        LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
        payload = {'hub.mode': 'publish', 'hub.url': self.url}
        request = await utils.retry_post(config, hub, data=payload)
        if request.success:
            LOGGER.info("%s: WebSub notification sent to %s", self.url, hub)
        else:
            LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub, request.status, request.text)
    except Exception as err:
        # WebSub is best-effort; swallow and log any failure.
        LOGGER.warning("WebSub %s: got %s: %s", hub, err.__class__.__name__, err)
Update WebSub hub to know about this feed
51,240
def get_tags(self, name):
    """Return the values of all stored tags whose name equals *name*."""
    return [value for tag_name, value in self._tags if tag_name == name]
Returns a list of tags .
51,241
def __clean_doc_block(self):
    """Strip comment tokens and surrounding whitespace from the raw DocBlock.

    Removes '/**' from the first line, '*/' from the last, leading '*'
    from interior lines, trims every line, and drops leading/trailing
    empty lines.  No-op on an empty comment.
    """
    if not self._comment:
        return
    # Interior lines only: first and last hold the open/close tokens.
    for i in range(1, len(self._comment) - 1):
        self._comment[i] = re.sub(r'^\s*\*', '', self._comment[i])
    self._comment[0] = re.sub(r'^\s*/\*\*', '', self._comment[0])
    self._comment[-1] = re.sub(r'\*/\s*$', '', self._comment[-1])
    for i, line in enumerate(self._comment):
        self._comment[i] = line.strip()
    self._comment = self.__remove_leading_empty_lines(self._comment)
    self._comment = self.__remove_trailing_empty_lines(self._comment)
Cleans the DocBlock from leading and trailing white space and comment tokens .
51,242
def __extract_description(self):
    """Extract the description: the lines from the start up to the first tag."""
    description_lines = []
    for line in self._comment:
        if line.startswith('@'):
            # First tag ends the description.
            break
        description_lines.append(line)
    trimmed = self.__remove_trailing_empty_lines(description_lines)
    self._description = os.linesep.join(trimmed)
Extracts the description from the DocBlock . The description start at the first line and stops at the first tag or the end of the DocBlock .
51,243
def __extract_tags(self):
    """Parse '@tag ...' sections out of the cleaned DocBlock lines.

    A tag starts at a line beginning with '@word' and collects following
    lines until an empty line; each tag is stored in ``self._tags`` as
    (name, joined_text).
    """
    tags = list()
    current = None
    for line in self._comment:
        parts = re.match(r'^@(\w+)', line)
        if parts:
            # New tag: start collecting its lines (including this one).
            current = (parts.group(1), list())
            tags.append(current)
        if current:
            if line == '':
                # Blank line terminates the current tag's text.
                current = None
            else:
                current[1].append(line)
    for tag in tags:
        self._tags.append((tag[0], os.linesep.join(tag[1])))
Extract tags from the DocBlock .
51,244
def handle(self):
    """Execute the constants command when PyStratumCommand is activated."""
    self.output = PyStratumStyle(self.input, self.output)
    config = self.input.get_argument('config_file')
    self.run_command(config)
Executes constants command when PyStratumCommand is activated .
51,245
def _format_key_name(self) -> str:
    """Return the redis hash key: 'ld:<project>:<environment>:features'."""
    return ':'.join(('ld', self.project_key, self.environment_key, 'features'))
Return formatted redis key name .
51,246
def connection_string_parser(uri: str) -> list:
    """Parse a comma-separated 'host[:port]' connection string.

    Returns a list of _RedisConnection; a missing port defaults to
    _DEFAULT_REDIS_PORT.  Raises RuntimeError on malformed entries.
    """
    parsed = []
    for chunk in uri.split(','):
        if not chunk:
            # Skip empty segments (e.g. trailing commas).
            continue
        pieces = chunk.split(':')
        if len(pieces) == 1:
            host, port = pieces[0].strip(), _DEFAULT_REDIS_PORT
        elif len(pieces) == 2:
            host, port = pieces[0].strip(), int(pieces[1])
        else:
            raise RuntimeError("Unable to parse redis connection string: {0}".format(pieces))
        parsed.append(_RedisConnection(host, port))
    return parsed
Parse Connection string to extract host and port .
51,247
def get_flag_record(self, feature_key: str) -> str:
    """Fetch a feature-flag record from the redis hash.

    Raises KeyError when the field is missing.
    """
    key_name = self._format_key_name()
    record = self.redis.hget(key_name, feature_key)
    if record is None:
        raise KeyError('Redis key: {0} not found.'.format(key_name))
    return record
Get feature flag record from redis .
51,248
def update_flag_record(self, state: str, feature_key: str) -> None:
    """Set a flag's 'on' state in redis, bumping its version.

    Exits the process with status 1 when the flag record does not exist.
    """
    key_name = self._format_key_name()
    try:
        parsed_flag = json.loads(self.get_flag_record(feature_key).decode('utf-8'))
        parsed_flag['on'] = state
        # Consumers reload only when the version increases.
        parsed_flag['version'] += 1
        updated_flag = json.dumps(parsed_flag).encode('utf-8')
    except KeyError as ex:
        LOG.error(ex)
        sys.exit(1)
    LOG.info('updating %s to %s', feature_key, state)
    self.redis.hset(key_name, feature_key, updated_flag)
Update redis record with new state .
51,249
def Asyncme(func, n=None, interval=0, default_callback=None, loop=None):
    """Wrap a coroutine function so that calling it returns a NewTask."""
    decorate = coros(n, interval, default_callback, loop)
    return decorate(func)
Wrap a coroutine function into a function that returns a NewTask.
51,250
def coros(n=None, interval=0, default_callback=None, loop=None):
    """Decorator turning a coroutine function into one that returns a NewTask."""
    return Loop(n=n, interval=interval, default_callback=default_callback, loop=loop).submitter
Decorator for wrap coro_function into the function return NewTask .
51,251
def wrap_callback(function):
    """Wrap a task callback so its result is also stored on task._callback_result."""
    @wraps(function)
    def wrapped(task):
        outcome = function(task)
        task._callback_result = outcome
        return outcome
    return wrapped
Set the callback s result as self . _callback_result .
51,252
def callback_result(self):
    """Block until the task finishes, then return the callback result.

    Falls back to the plain task result when no callbacks are registered.
    """
    if self._state == self._PENDING:
        # Still running: drive the loop until this task completes.
        self._loop.run_until_complete(self)
    return self._callback_result if self._callbacks else self.result()
Block until the task finishes, then return the callback result.
51,253
def run_coroutine_threadsafe(self, coro, loop=None, callback=None):
    """Schedule *coro* on a loop running in another (non-main) thread.

    Returns a NewFuture that mirrors the created NewTask's outcome;
    *callback* is attached to the returned future.  Raises TypeError for
    non-coroutine arguments.
    """
    if not asyncio.iscoroutine(coro):
        raise TypeError("A await in coroutines. object is required")
    loop = loop or self.loop
    future = NewFuture(callback=callback)

    def callback_func():
        # Runs inside the loop's thread: create the task there and chain
        # it to the thread-safe future.
        try:
            asyncio.futures._chain_future(NewTask(coro, loop=loop), future)
        except Exception as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(callback_func)
    return future
Be used when loop running in a single non - main thread .
51,254
def submit(self, coro, callback=None):
    """Submit *coro* to self.loop as a NewTask, without frequency control.

    Falls back to the loop's default callback when none is given; uses the
    thread-safe path when the loop runs in another thread.
    """
    chosen_callback = callback or self.default_callback
    if self.async_running:
        return self.run_coroutine_threadsafe(coro, callback=chosen_callback)
    return NewTask(coro, loop=self.loop, callback=chosen_callback)
Submit a coro as NewTask to self . loop without loop . frequncy control .
51,255
def submitter(self, f):
    """Decorator: wrap *f* with the loop's semaphore and submit calls as NewTasks."""
    limited = self._wrap_coro_function_with_sem(f)

    @wraps(limited)
    def wrapped(*args, **kwargs):
        return self.submit(limited(*args, **kwargs))

    return wrapped
Decorator to submit a coro - function as NewTask to self . loop with sem control . Use default_callback frequency of loop .
51,256
def todo_tasks(self):
    """Return the loop's tasks that are still pending."""
    pending = NewTask._PENDING
    return [task for task in self.all_tasks if task._state == pending]
Return tasks in loop which its state is pending .
51,257
def done_tasks(self):
    """Return the loop's tasks that are no longer pending."""
    pending = NewTask._PENDING
    return [task for task in self.all_tasks if task._state != pending]
Return tasks in loop which its state is not pending .
51,258
def run(self, tasks=None, timeout=None):
    """Block until all given (or all pending) tasks complete.

    Delegates to wait_all_tasks_done when the loop is already running in
    another thread.
    """
    effective_timeout = self._timeout if timeout is None else timeout
    if self.async_running or self.loop.is_running():
        return self.wait_all_tasks_done(effective_timeout)
    pending = tasks or self.todo_tasks
    return self.loop.run_until_complete(asyncio.gather(*pending, loop=self.loop))
Block run loop until all tasks completed .
51,259
def wait_all_tasks_done(self, timeout=None, delay=0.5, interval=0.1):
    """Poll until no pending tasks remain or *timeout* elapses.

    Only for loops running in a single non-main thread.  Returns
    ``all_tasks`` on success, ``done_tasks`` on timeout.
    """
    if timeout is None:
        timeout = self._timeout
    # A falsy timeout means wait forever.
    timeout = timeout or float("inf")
    started = time.time()
    time.sleep(delay)
    while True:
        if not self.todo_tasks:
            return self.all_tasks
        if time.time() - started > timeout:
            return self.done_tasks
        time.sleep(interval)
Block only be used while loop running in a single non - main thread .
51,260
def ensure_frequencies(self, frequencies):
    """Normalise *frequencies* into a {host: Frequency} dict.

    Falsy input yields {}; non-dict input raises ValueError.
    """
    if not frequencies:
        return {}
    if not isinstance(frequencies, dict):
        raise ValueError("frequencies should be dict")
    return {host: Frequency.ensure_frequency(frequencies[host]) for host in frequencies}
Ensure frequencies is dict of host - frequencies .
51,261
def set_frequency(self, host, sem=None, interval=None):
    """Create, register, and return a Frequency for *host*.

    Falls back to self.sem / self.interval when sem/interval are omitted.
    """
    effective_sem = sem or self.sem
    effective_interval = self.interval if interval is None else interval
    frequency = Frequency(effective_sem, effective_interval, host)
    self.update_frequency({host: frequency})
    return frequency
Set frequency for host with sem and interval .
51,262
def openstack_exception(func):
    """Decorator: log any exception from the wrapped coroutine and raise IaasException."""
    async def wrap(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception as exc:
            # Hide provider-specific errors behind a uniform exception type.
            logging.error(exc)
            raise IaasException
    return wrap
Openstack exceptions decorator
51,263
def initialize_openstack ( func ) : async def wrap ( self , * args , ** kwargs ) : if not hasattr ( self , 'auth' ) or not self . auth . is_token_valid ( ) : self . auth = AuthPassword ( auth_url = self . config [ 'auth_url' ] , username = self . config [ 'username' ] , password = self . config [ 'password' ] , project_name = self . config [ 'project_name' ] , user_domain_name = self . config [ 'user_domain_name' ] , project_domain_name = self . config [ 'project_domain_name' ] ) self . nova = NovaClient ( session = self . auth ) self . glance = GlanceClient ( session = self . auth ) await self . nova . init_api ( timeout = self . config . get ( 'http_timeout' , 10 ) ) await self . glance . init_api ( timeout = self . config . get ( 'http_timeout' , 10 ) ) if not hasattr ( self , 'last_init' ) or self . last_init < ( time . time ( ) - 60 ) : await self . initialize ( ) self . last_init = time . time ( ) return await func ( self , * args , ** kwargs ) return wrap
Initialize and refresh openstack connection
51,264
async def initialize ( self ) : flavors = await self . _list_flavors ( ) images = await self . _list_images ( ) self . flavors_map = bidict ( ) self . images_map = bidict ( ) self . images_details = { } for flavor in flavors : self . flavors_map . put ( flavor [ 'id' ] , flavor [ 'name' ] , on_dup_key = 'OVERWRITE' , on_dup_val = 'OVERWRITE' ) for image in images : self . images_details [ image [ 'id' ] ] = { 'name' : image [ 'name' ] , 'created_at' : image [ 'created_at' ] , 'latest' : 'latest' in image [ 'tags' ] } self . images_map . put ( image [ 'id' ] , image [ 'name' ] , on_dup_key = 'OVERWRITE' , on_dup_val = 'OVERWRITE' )
Initialize static data like images and flavores and set it as object property
51,265
def _set_label_text ( obj , text , tooltip = None , replace_all = False ) : dlab = str ( obj . text ( ) ) index_of_colon = dlab . find ( ': ' ) if index_of_colon == - 1 : index_of_colon = 0 else : index_of_colon += 2 if replace_all : index_of_colon = 0 obj . setText ( dlab [ : index_of_colon ] + '%s' % text ) if tooltip is not None : obj . setToolTip ( tooltip )
Keep text before first colon and replace the rest with new text .
51,266
def main ( self , config_filename ) : self . _read_configuration_file ( config_filename ) if self . _wrapper_class_name : self . _io . title ( 'Wrapper' ) self . __generate_wrapper_class ( ) else : self . _io . log_verbose ( 'Wrapper not enabled' ) return 0
The main of the wrapper generator . Returns 0 on success 1 if one or more errors occurred .
51,267
def __generate_wrapper_class ( self ) : routines = self . _read_routine_metadata ( ) self . _write_class_header ( ) if routines : for routine_name in sorted ( routines ) : if routines [ routine_name ] [ 'designation' ] != 'hidden' : self . _write_routine_function ( routines [ routine_name ] ) else : self . _io . error ( 'No files with stored routines found' ) self . _write_class_trailer ( ) Util . write_two_phases ( self . _wrapper_filename , self . _code , self . _io )
Generates the wrapper class .
51,268
def _read_routine_metadata ( self ) : metadata = { } if os . path . isfile ( self . _metadata_filename ) : with open ( self . _metadata_filename , 'r' ) as file : metadata = json . load ( file ) return metadata
Returns the metadata of stored routines .
51,269
def _write_class_header ( self ) : self . _write_line ( 'from {0!s} import {1!s}' . format ( self . _parent_class_namespace , self . _parent_class_name ) ) self . _write_line ( ) self . _write_line ( ) self . _write_line ( '# ' + ( '-' * 118 ) ) self . _write_line ( 'class {0!s}({1!s}):' . format ( self . _wrapper_class_name , self . _parent_class_name ) ) self . _write_line ( ' ' )
Generate a class header for stored routine wrapper .
51,270
def _write_line ( self , text = '' ) : if text : self . _code += str ( text ) + "\n" else : self . _code += "\n"
Writes a line with Python code to the generate code buffer .
51,271
def Async ( f , n = None , timeout = None ) : return threads ( n = n , timeout = timeout ) ( f )
Concise usage for pool . submit .
51,272
def get_results_generator ( future_list , timeout = None , sort_by_completed = False ) : try : if sort_by_completed : for future in as_completed ( future_list , timeout = timeout ) : yield future . x else : for future in future_list : yield future . x except TimeoutError : return
Return as a generator of tasks order by completed sequence .
51,273
def run_after_async ( seconds , func , * args , ** kwargs ) : t = Timer ( seconds , func , args , kwargs ) t . daemon = True t . start ( ) return t
Run the function after seconds asynchronously .
51,274
def async_func ( self , function ) : @ wraps ( function ) def wrapped ( * args , ** kwargs ) : return self . submit ( function , * args , ** kwargs ) return wrapped
Decorator for let a normal function return the NewFuture
51,275
def _get_cpu_count ( self ) : try : from multiprocessing import cpu_count return cpu_count ( ) except Exception as e : Config . main_logger . error ( "_get_cpu_count failed for %s" % e )
Get the cpu count .
51,276
def _invoke_callbacks ( self ) : self . task_end_time = time . time ( ) self . task_cost_time = self . task_end_time - self . task_start_time with self . _condition : for callback in self . _done_callbacks : try : result = callback ( self ) if callback in self . _user_callbacks : self . _callback_result = result except Exception as e : Config . main_logger . error ( "exception calling callback for %s" % e ) self . _condition . notify_all ( )
Record the task_end_time & task_cost_time set result for self . _callback_result .
51,277
def callback_result ( self ) : if self . _state in [ PENDING , RUNNING ] : self . x if self . _user_callbacks : return self . _callback_result else : return self . x
Block the main thead until future finish return the future . callback_result .
51,278
def close ( self , wait = False ) : self . session . close ( ) self . pool . shutdown ( wait = wait )
Close session shutdown pool .
51,279
def request ( self , method , url , callback = None , retry = 0 , ** kwargs ) : return self . pool . submit ( self . _request , method = method , url = url , retry = retry , callback = callback or self . default_callback , ** kwargs )
Similar to requests . request but return as NewFuture .
51,280
def get ( self , url , params = None , callback = None , retry = 0 , ** kwargs ) : kwargs . setdefault ( "allow_redirects" , True ) return self . request ( "get" , url = url , params = params , callback = callback , retry = retry , ** kwargs )
Similar to requests . get but return as NewFuture .
51,281
def post ( self , url , data = None , json = None , callback = None , retry = 0 , ** kwargs ) : return self . request ( "post" , url = url , data = data , json = json , callback = callback , retry = retry , ** kwargs )
Similar to requests . post but return as NewFuture .
51,282
def head ( self , url , callback = None , retry = 0 , ** kwargs ) : kwargs . setdefault ( "allow_redirects" , False ) return self . request ( "head" , url = url , callback = callback , retry = retry , ** kwargs )
Similar to requests . head but return as NewFuture .
51,283
def options ( self , url , callback = None , retry = 0 , ** kwargs ) : kwargs . setdefault ( "allow_redirects" , True ) return self . request ( "options" , url = url , callback = callback , retry = retry , ** kwargs )
Similar to requests . options but return as NewFuture .
51,284
def getLinkInfo ( self , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "getLinkInfo" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) results = self . execute ( uri , namespace , "GetInfo" , timeout = timeout ) return WanLinkInfo ( results )
Execute GetInfo action to get basic WAN link information s .
51,285
def getLinkProperties ( self , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "getLinkProperties" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) results = self . execute ( uri , namespace , "GetCommonLinkProperties" , timeout = timeout ) return WanLinkProperties ( results )
Execute GetCommonLinkProperties action to get WAN link properties .
51,286
def getADSLInfo ( self , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "getADSLInfo" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) results = self . execute ( uri , namespace , "GetInfo" , timeout = timeout ) return ADSLInfo ( results )
Execute GetInfo action to get basic ADSL information s .
51,287
def getEthernetLinkStatus ( self , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "getEthernetLinkStatus" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) results = self . execute ( uri , namespace , "GetEthernetLinkStatus" , timeout = timeout ) return results [ "NewEthernetLinkStatus" ]
Execute GetEthernetLinkStatus action to get the status of the ethernet link .
51,288
def getByteStatistic ( self , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "getByteStatistic" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) results = self . execute ( uri , namespace , "GetTotalBytesSent" , timeout = timeout ) results2 = self . execute ( uri , namespace , "GetTotalBytesReceived" , timeout = timeout ) return [ int ( results [ "NewTotalBytesSent" ] ) , int ( results2 [ "NewTotalBytesReceived" ] ) ]
Execute GetTotalBytesSent&GetTotalBytesReceived actions to get WAN statistics .
51,289
def getConnectionInfo ( self , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "getConnectionInfo" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) results = self . execute ( uri , namespace , "GetInfo" , timeout = timeout ) return ConnectionInfo ( results )
Execute GetInfo action to get WAN connection information s .
51,290
def setEnable ( self , status , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "setEnable" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) if status : setStatus = 1 else : setStatus = 0 self . execute ( uri , namespace , "SetEnable" , timeout = timeout , NewEnable = setStatus )
Set enable status for a WAN interface be careful you don t cut yourself off .
51,291
def requestConnection ( self , wanInterfaceId = 1 , timeout = 1 ) : namespace = Wan . getServiceType ( "requestConnection" ) + str ( wanInterfaceId ) uri = self . getControlURL ( namespace ) self . execute ( uri , namespace , "RequestConnection" , timeout = timeout )
Request the connection to be established
51,292
def write_config ( filename , config , mode = "w" ) : with open ( filename , mode ) as filey : config . write ( filey ) return filename
use configparser to write a config object to filename
51,293
def copyfile ( source , destination , force = True ) : if os . path . exists ( destination ) and force is True : os . remove ( destination ) shutil . copyfile ( source , destination ) return destination
copy a file from a source to its destination .
51,294
def full_inventory ( self ) : if self . _full_inventory : return self . _full_inventory resp , inventory = self . get ( 'Inventory' ) keys = [ 'host_id' , 'hostname' , 'ip_address' , 'chassis' , 'used_count' , 'current_state' , 'comment' , 'distro' , 'rel' , 'centos_version' , 'architecture' , 'node_pool' , 'console_port' , 'flavor' ] real_inventory = dict ( ) for host in inventory : real_inventory [ host [ 1 ] ] = dict ( ) for key in keys : real_inventory [ host [ 1 ] ] [ key ] = host [ keys . index ( key ) ] self . _full_inventory = real_inventory return self . _full_inventory
Returns a full inventory Some additional work required to provide consistent and consumable output . Inventory output only contains values no keys - Add the keys to the output so that it can be consumed more easily .
51,295
def self_inventory ( self ) : if self . api_key is None : return { } if self . _self_inventory : return self . _self_inventory resp , self_inventory = self . get ( 'Inventory?key=%s' % self . api_key ) real_self_inventory = dict ( ) for host in self_inventory : real_self_inventory [ host [ 0 ] ] = self . full_inventory [ host [ 0 ] ] self . _self_inventory = real_self_inventory return self . _self_inventory
Inventory output will only contain the server name and the session ID when a key is provided . Provide the same format as with the full inventory instead for consistency .
51,296
def _ssid_inventory ( self , inventory , ssid ) : matching_hosts = { } for host in inventory : if inventory [ host ] [ 'comment' ] == ssid : matching_hosts [ host ] = inventory [ host ] return matching_hosts
Filters an inventory to only return servers matching ssid
51,297
def inventory ( self , all = False , ssid = None ) : if all or self . api_key is None : if ssid is not None : return self . _ssid_inventory ( self . full_inventory , ssid ) else : return self . full_inventory else : if ssid is not None : return self . _ssid_inventory ( self . self_inventory , ssid ) else : return self . self_inventory
Returns a node inventory . If an API key is specified only the nodes provisioned by this key will be returned .
51,298
def node_get ( self , arch = None , ver = None , flavor = None , count = 1 , retry_count = 1 , retry_interval = 10 ) : if self . api_key is None : raise exceptions . ApiKeyRequired args = "key=%s" % self . api_key if arch is not None : args += "&arch=%s" % arch if ver is not None : args += "&ver=%s" % ver if flavor is not None : args += "&flavor=%s" % flavor args += "&count=%s" % count resp , body = self . get ( 'Node/get?%s' % args ) if not body : for _ in range ( retry_count ) : time . sleep ( retry_interval ) resp , body = self . get ( 'Node/get?%s' % args ) if body : break if not body : raise exceptions . NoInventory requested_hosts = dict ( ) for host in self . full_inventory : for full_host in body [ 'hosts' ] : if host in full_host : requested_hosts [ host ] = self . full_inventory [ host ] return requested_hosts , body [ 'ssid' ]
Requests specified number of nodes with the provided parameters .
51,299
def node_done ( self , ssid = None ) : if self . api_key is None : raise exceptions . ApiKeyRequired if ssid is None : raise exceptions . SsidRequired requested_hosts = dict ( ) for host in self . self_inventory : if ssid == self . self_inventory [ host ] [ 'comment' ] : requested_hosts [ host ] = self . full_inventory [ host ] args = "key={key}&ssid={ssid}" . format ( key = self . api_key , ssid = ssid ) resp , body = self . get ( 'Node/done?%s' % args ) return requested_hosts
Release the servers for the specified ssid . The API doesn t provide any kind of output try to be helpful by providing the list of servers to be released .