idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
14,500
def subscribe(self, topic=b''):
    """Subscribe the SUB socket to *topic* and return its poller stream.

    The default empty topic subscribes to all incoming messages.
    Returns the poller associated with the SUB socket, which can be
    listened to for incoming variables.
    """
    self.sockets[zmq.SUB].setsockopt(zmq.SUBSCRIBE, topic)
    poller = self.pollers[zmq.SUB]
    return poller
Subscribe to the SUB socket to listen for incoming variables. Returns a stream that can be listened to.
14,501
def download(remote_location, remotes=None, prefix="", dry_run=False):
    """Download resources from a stage server.

    Uses the S3 backend when *remote_location* is an ``s3://`` URL and
    rsync otherwise.  When *remotes* is not given, the resource list is
    derived from the project configuration.
    """
    if remotes is None:
        # NOTE(review): abs_paths presumably toggles absolute-path listing
        # for S3 locations — confirm against _resources_files.
        remotes, _ = _resources_files(abs_paths=remote_location.startswith('s3://'))
    if remote_location.startswith('s3://'):
        from .s3 import S3Backend
        backend = S3Backend(remote_location, dry_run=dry_run)
        backend.download(list_local(remotes, prefix), prefix)
    else:
        dest_root = '.'
        # -R preserves relative paths; the "/./" marker sets the rsync root.
        shell_command(['/usr/bin/rsync', '-thrRvz', '--rsync-path', '/usr/bin/rsync', '%s/./' % remote_location, dest_root], dry_run=dry_run)
Download resources from a stage server .
14,502
def upload(remote_location, remotes=None, ignores=None, static_root="/static/", prefix="", dry_run=False):
    """Upload resources to a stage server.

    Uses the S3 backend when *remote_location* is an ``s3://`` URL and
    rsync otherwise.  *ignores* patterns are translated into rsync
    ``--exclude`` options on the rsync path.
    """
    if remotes is None:
        remotes, ignores = _resources_files(abs_paths=remote_location.startswith('s3://'))
    if remote_location.startswith('s3://'):
        from deployutils.s3 import S3Backend
        backend = S3Backend(remote_location, static_root=static_root, dry_run=dry_run)
        backend.upload(list_local(remotes, prefix), prefix)
    else:
        excludes = []
        if ignores:
            for ignore in ignores:
                excludes += ['--exclude', ignore]
        shell_command(['/usr/bin/rsync'] + excludes + ['-pOthrRvz', '--rsync-path', '/usr/bin/rsync'] + remotes + [remote_location], dry_run=dry_run)
Upload resources to a stage server .
14,503
def json_dumps(obj):
    """A safe JSON dump that keeps output valid for ECMAScript consumers.

    First attempts a strict dump (``allow_nan=False``).  If the object
    contains non-finite floats, re-serializes it with every NaN and
    Infinity replaced by its string representation ("NaN", "Infinity",
    "-Infinity") so the result is still standard-compliant JSON.
    """
    # Imported locally so the fallback path works even if the module-level
    # imports only pull in the top-level ``collections`` package.
    import collections.abc
    try:
        return json.dumps(obj, indent=2, sort_keys=True, allow_nan=False)
    except ValueError:
        pass
    # Round-trip through JSON to normalize the object into plain containers.
    json_str = json.dumps(obj, indent=2, sort_keys=True, allow_nan=True)
    json_obj = json.loads(json_str)

    def do_map(obj):
        # Recursively replace non-finite numbers with their string form.
        if obj is None:
            return None
        if isinstance(obj, str):
            # BUG FIX: the original checked the Python-2-only name
            # ``basestring`` which raises NameError on Python 3.
            return obj
        if isinstance(obj, dict):
            return {key: do_map(value) for (key, value) in obj.items()}
        if isinstance(obj, collections.abc.Iterable):
            # BUG FIX: ``collections.Iterable`` was removed in Python 3.10;
            # the ABC lives in ``collections.abc``.
            return [do_map(el) for el in obj]
        if math.isnan(obj):
            return "NaN"
        if math.isinf(obj):
            return "Infinity" if obj > 0 else "-Infinity"
        return obj

    return json.dumps(do_map(json_obj), indent=2, sort_keys=True, allow_nan=False)
A safe JSON dump function that provides correct diverging numbers for a ECMAscript consumer .
14,504
def msg(message, *args, **kwargs):
    """Prints a message from the server to the log file.

    *message* is a format string; *args*/*kwargs* are passed to
    ``str.format``.  Each output line is prefixed with either the calling
    file and line (when the module-global ``long_msg`` is set) or a
    generic "[SERVER] " head.
    """
    global log_file
    if log_file is None:
        log_file = sys.stderr
    if long_msg:
        # Build a "<module>.<ext> (<line>): " prefix from the caller's frame.
        file_name, line = caller_trace()
        file_name, file_type = os.path.splitext(file_name)
        if file_name.endswith('/__init__'):
            # report the package name rather than "__init__"
            file_name = os.path.basename(os.path.dirname(file_name))
        elif file_name.endswith('/__main__'):
            file_name = "(-m) {0}".format(os.path.basename(os.path.dirname(file_name)))
        else:
            file_name = os.path.basename(file_name)
        head = '{0}{1} ({2}): '.format(file_name, file_type, line)
    else:
        head = '[SERVER] '
    # Assemble the whole message first so multi-line output is not
    # interleaved with other writers.
    out = StringIO()
    for line in message.format(*args, **kwargs).split('\n'):
        out.write('{0}{1}\n'.format(head, line))
    out.flush()
    out.seek(0)
    if _msg_stderr:
        sys.stderr.write(out.read())
        sys.stderr.flush()
    else:
        log_file.write(out.read())
        log_file.flush()
    out.close()
Prints a message from the server to the log file .
14,505
def setup_restart():
    """Sets up restart functionality that doesn't keep the first process alive.

    Must be called after loading the program but before the actual process
    starts.  In the child process it returns immediately; the call in the
    parent process never returns.
    """
    exit_code = os.environ.get('QUICK_SERVER_RESTART', None)
    # Only the outermost process (no restart marker in the environment)
    # enters the restart loop.
    if exit_code is None:
        try:
            atexit.unregister(_on_exit)
        except AttributeError:
            # Python 2 fallback: atexit has no unregister(); filter the
            # private handler list instead.
            # NOTE(review): on Python 3 this would assign an iterator, but
            # this branch is only reached where unregister() is missing.
            atexit._exithandlers = filter(lambda exit_hnd: exit_hnd[0] != _on_exit, atexit._exithandlers)
        _start_restart_loop(None, in_atexit=False)
Sets up restart functionality that doesn't keep the first process alive. The function needs to be called before the actual process starts but after loading the program. It will restart the program in a child process and immediately returns in the child process. The call in the parent process never returns. Calling this function is not necessary for using restart functionality but avoids potential errors originating from rogue threads.
14,506
def convert_argmap(self, query):
    """Convert the query string of a URL into a map.

    Keys without an ``=`` are mapped to ``True``; everything else is
    percent-decoded on both sides of the first ``=``.
    """
    result = {}
    if isinstance(query, bytes):
        query = query.decode('utf8')
    for chunk in query.split('&'):
        key, sep, value = chunk.partition('=')
        if sep:
            result[urlparse_unquote(key)] = urlparse_unquote(value)
        else:
            result[urlparse_unquote(key)] = True
    return result
Converts the query string of an URL to a map .
14,507
def convert_args(self, rem_path, args):
    """Splits the rest of a URL into its argument parts.

    The URL is assumed to start with the dynamic request prefix already
    removed.  Fills ``args['paths']``, ``args['query']``, and
    ``args['fragment']`` and returns *args*.
    """
    fragment_split = rem_path.split('#', 1)
    query_split = fragment_split[0].split('?', 1)
    segs = filter(lambda p: len(p) and p != '.',
                  os.path.normpath(query_split[0]).split('/'))
    paths = [urlparse_unquote(p) for p in segs]
    query = self.convert_argmap(query_split[1]) if len(query_split) > 1 else {}
    args['paths'] = paths
    args['query'] = query
    if len(fragment_split) > 1:
        fragment = urlparse_unquote(fragment_split[1])
        # BUG FIX: on Python 3 unquote() already returns str, so the
        # original unconditional ``.decode('utf8')`` raised AttributeError
        # for every URL containing a fragment.  Decode only when the
        # result is still bytes (Python 2), matching the pattern used
        # elsewhere in this file.
        try:
            fragment = fragment.decode('utf8')
        except AttributeError:
            pass
        args['fragment'] = fragment
    else:
        args['fragment'] = ''
    return args
Splits the rest of a URL into its argument parts . The URL is assumed to start with the dynamic request prefix already removed .
14,508
def handle_special(self, send_body, method_str):
    """Handles a dynamic request.

    If this method returns False the request is interpreted as a static
    file request.  When slow-request reporting is enabled a timer logs a
    warning if the handler takes longer than five seconds.
    """
    # Flag read by the timer callback; flipped off once handling is done.
    ongoing = True
    if self.server.report_slow_requests:
        path = self.path

        def do_report():
            # Only report if the request is still being processed.
            if not ongoing:
                return
            msg("request takes longer than expected: \"{0} {1}\"", method_str, path)

        alarm = threading.Timer(5.0, do_report)
        alarm.start()
    else:
        alarm = None
    try:
        return self._handle_special(send_body, method_str)
    finally:
        if alarm is not None:
            alarm.cancel()
        ongoing = False
Handles a dynamic request . If this method returns False the request is interpreted as static file request . Methods can be registered using the add_TYPE_METHOD_mask methods of QuickServer .
14,509
def check_cache(self, e_tag, match):
    """Checks the ETag and sends a 304 cache-match response if it matches.

    Returns True when the (empty-body) 304 response has been fully sent,
    False when the ETag does not match and normal handling should proceed.
    """
    if e_tag != match:
        return False
    self.send_response(304)
    self.send_header("ETag", e_tag)
    self.send_header("Cache-Control", "max-age={0}".format(self.server.max_age))
    self.end_headers()
    # No body was written for the 304 response.
    thread_local.size = 0
    return True
Checks the ETag and sends a cache match response if it matches .
14,510
def handle_error(self):
    """Tries to send a 500 error after encountering an exception.

    If the headers have already been sent, only logs the failure.  Errors
    that the server marks as ignorable (e.g. broken pipes during
    shutdown) are skipped entirely.
    """
    if self.server.can_ignore_error(self):
        return
    if thread_local.status_code is None:
        msg("ERROR: Cannot send error status code! " + "Header already sent!\n{0}", traceback.format_exc())
    else:
        msg("ERROR: Error while processing request:\n{0}", traceback.format_exc())
        try:
            self.send_error(500, "Internal Error")
        except Exception:
            # BUG FIX: this was a bare ``except:`` which also swallowed
            # SystemExit and KeyboardInterrupt; narrowed to Exception.
            if self.server.can_ignore_error(self):
                return
            msg("ERROR: Cannot send error status code:\n{0}", traceback.format_exc())
Tries to send a 500 error after encountering an exception.
14,511
def cross_origin_headers(self):
    """Sends cross origin (CORS) headers.

    Returns True when a non-empty Access-Control-Request-Headers header
    was echoed back (i.e. the response should not be cached), False
    otherwise.  Does nothing for same-origin requests.
    """
    if not self.is_cross_origin():
        return False
    self.send_header("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, HEAD")
    # Echo back whatever headers the client asked permission for.
    allow_headers = _getheader(self.headers, 'access-control-request-headers')
    if allow_headers is not None:
        self.send_header("Access-Control-Allow-Headers", allow_headers)
    self.send_header("Access-Control-Allow-Origin", "*")
    self.send_header("Access-Control-Allow-Credentials", "true")
    return allow_headers is not None
Sends cross origin headers .
14,512
def do_OPTIONS(self):
    """Handles an OPTIONS (CORS preflight) request."""
    # Reset the per-request bookkeeping used for logging.
    thread_local.clock_start = get_time()
    thread_local.status_code = 200
    thread_local.message = None
    thread_local.headers = []
    thread_local.end_headers = []
    thread_local.size = -1
    thread_local.method = 'OPTIONS'
    self.send_response(200)
    if self.is_cross_origin():
        no_caching = self.cross_origin_headers()
        # Allow the preflight response to be cached for 10 minutes unless
        # request-specific headers were echoed back.
        self.send_header("Access-Control-Max-Age", 0 if no_caching else 10 * 60)
    self.send_header("Content-Length", 0)
    self.end_headers()
    thread_local.size = 0
Handles an OPTIONS request .
14,513
def do_GET(self):
    """Handles a GET request.

    Dynamic handlers get a chance first; if none claims the request it
    falls through to the static file handler of
    SimpleHTTPRequestHandler.
    """
    thread_local.clock_start = get_time()
    thread_local.status_code = 200
    thread_local.message = None
    thread_local.headers = []
    thread_local.end_headers = []
    thread_local.size = -1
    thread_local.method = 'GET'
    try:
        self.cross_origin_headers()
        # A truthy return means a dynamic handler produced the response.
        if self.handle_special(True, 'GET'):
            return
        SimpleHTTPRequestHandler.do_GET(self)
    except PreventDefaultResponse as pdr:
        # A handler aborted default processing; optionally send its code.
        if pdr.code:
            self.send_error(pdr.code, pdr.msg)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        self.handle_error()
Handles a GET request .
14,514
def log_request(self, code='-', size='-'):
    """Logs the current request.

    Prefers an explicitly passed *size*; otherwise uses the size recorded
    in thread-local state by the response writers.  200/304 responses can
    be suppressed via the server's ``suppress_noise`` flag.
    """
    print_size = getattr(thread_local, 'size', -1)
    if size != '-':
        size_str = ' (%s)' % size
    elif print_size >= 0:
        size_str = self.log_size_string(print_size) + ' '
    else:
        size_str = ''
    if not self.server.suppress_noise or (code != 200 and code != 304):
        self.log_message('%s"%s" %s', size_str, self.requestline, str(code))
    # Reset the recorded size so it is not reused by the next request.
    if print_size >= 0:
        thread_local.size = -1
Logs the current request .
14,515
def _process_request(self, request, client_address):
    """Actually processes the request, always shutting it down afterwards."""
    try:
        self.finish_request(request, client_address)
    except Exception:
        self.handle_error(request, client_address)
    finally:
        # Ensure the connection is released even when handling failed.
        self.shutdown_request(request)
Actually processes the request .
14,516
def process_request(self, request, client_address):
    """Processes the request by delegating to _process_request.

    In parallel mode the work is handed to a daemon thread so the accept
    loop is not blocked; otherwise the request is handled inline.
    """
    if not self._parallel:
        self._process_request(request, client_address)
        return
    t = self._thread_factory(target=self._process_request, args=(request, client_address))
    # Daemon threads do not keep the process alive on shutdown.
    t.daemon = True
    t.start()
Processes the request by delegating to _process_request .
14,517
def add_file_patterns ( self , patterns , blacklist ) : bl = self . _pattern_black if blacklist else self . _pattern_white for pattern in patterns : bl . append ( pattern )
Adds a list of file patterns to either the black - or white - list . Note that this pattern is applied to the absolute path of the file that will be delivered . For including or excluding folders use add_folder_mask or add_folder_fallback .
14,518
def bind_path ( self , name , folder ) : if not len ( name ) or name [ 0 ] != '/' or name [ - 1 ] != '/' : raise ValueError ( "name must start and end with '/': {0}" . format ( name ) ) self . _folder_masks . insert ( 0 , ( name , folder ) )
Adds a mask that maps to a given folder relative to base_path .
14,519
def bind_path_fallback ( self , name , folder ) : if not len ( name ) or name [ 0 ] != '/' or name [ - 1 ] != '/' : raise ValueError ( "name must start and end with '/': {0}" . format ( name ) ) self . _folder_masks . append ( ( name , folder ) )
Adds a fallback for a given folder relative to base_path .
14,520
def bind_proxy ( self , name , proxy ) : if not len ( name ) or name [ 0 ] != '/' or name [ - 1 ] != '/' : raise ValueError ( "name must start and end with '/': {0}" . format ( name ) ) self . _folder_proxys . insert ( 0 , ( name , proxy ) )
Adds a mask that maps to a given proxy .
14,521
def add_cmd_method(self, name, method, argc=None, complete=None):
    """Adds a command to the command line interface loop.

    *argc* is the expected argument count (None for variable) and
    *complete* an optional tab-completion callback.  Spaces are not
    allowed in *name*; multi-word commands use ``_`` separators.
    """
    if ' ' in name:
        raise ValueError("' ' cannot be in command name {0}".format(name))
    self._cmd_methods[name] = method
    self._cmd_argc[name] = argc
    self._cmd_complete[name] = complete
Adds a command to the command line interface loop .
14,522
def _add_file_mask(self, start, method_str, method):
    """Adds a raw file mask for dynamic requests.

    Masks are kept sorted by descending prefix length so the most
    specific mask matches first.
    """
    fm = self._f_mask.get(method_str, [])
    fm.append((start, method))
    # Longest prefix first.
    fm.sort(key=lambda k: len(k[0]), reverse=True)
    self._f_mask[method_str] = fm
    self._f_argc[method_str] = None
Adds a raw file mask for dynamic requests .
14,523
def add_json_mask(self, start, method_str, json_producer):
    """Adds a handler that produces a JSON response.

    Wraps *json_producer* so its return value is serialized, wrapped in a
    Response when necessary, and served with ETag-based caching on
    HTTP/1.1.
    """
    def send_json(drh, rem_path):
        obj = json_producer(drh, rem_path)
        if not isinstance(obj, Response):
            obj = Response(obj)
        ctype = obj.get_ctype("application/json")
        code = obj.code
        obj = obj.response
        if obj is None:
            drh.send_error(404, "File not found")
            return None
        f = BytesIO()
        json_str = json_dumps(obj)
        # NOTE(review): ``unicode`` here relies on a Python 2 builtin or a
        # compatibility alias defined elsewhere in this module — confirm.
        if isinstance(json_str, (str, unicode)):
            # Normalize to bytes; the decode is a no-op guard for
            # Python 2 byte strings.
            try:
                json_str = json_str.decode('utf8')
            except AttributeError:
                pass
            json_str = json_str.encode('utf8')
        f.write(json_str)
        f.flush()
        size = f.tell()
        f.seek(0)
        if drh.request_version >= "HTTP/1.1":
            # CRC32 of the payload serves as a cheap ETag.
            e_tag = "{0:x}".format(zlib.crc32(f.read()) & 0xFFFFFFFF)
            f.seek(0)
            match = _getheader(drh.headers, 'if-none-match')
            if match is not None:
                if drh.check_cache(e_tag, match):
                    f.close()
                    return None
            drh.send_header("ETag", e_tag, end_header=True)
            drh.send_header("Cache-Control", "max-age={0}".format(self.max_age), end_header=True)
        drh.send_response(code)
        drh.send_header("Content-Type", ctype)
        drh.send_header("Content-Length", size)
        drh.end_headers()
        return f

    self._add_file_mask(start, method_str, send_json)
Adds a handler that produces a JSON response .
14,524
def add_text_mask(self, start, method_str, text_producer):
    """Adds a handler that produces a plain text response.

    Wraps *text_producer* so its return value is encoded, wrapped in a
    Response when necessary, and served with ETag-based caching on
    HTTP/1.1.
    """
    def send_text(drh, rem_path):
        text = text_producer(drh, rem_path)
        if not isinstance(text, Response):
            text = Response(text)
        ctype = text.get_ctype("text/plain")
        code = text.code
        text = text.response
        if text is None:
            drh.send_error(404, "File not found")
            return None
        f = BytesIO()
        # NOTE(review): ``unicode`` here relies on a Python 2 builtin or a
        # compatibility alias defined elsewhere in this module — confirm.
        if isinstance(text, (str, unicode)):
            # Normalize to bytes; the decode is a no-op guard for
            # Python 2 byte strings.
            try:
                text = text.decode('utf8')
            except AttributeError:
                pass
            text = text.encode('utf8')
        f.write(text)
        f.flush()
        size = f.tell()
        f.seek(0)
        if drh.request_version >= "HTTP/1.1":
            # CRC32 of the payload serves as a cheap ETag.
            e_tag = "{0:x}".format(zlib.crc32(f.read()) & 0xFFFFFFFF)
            f.seek(0)
            match = _getheader(drh.headers, 'if-none-match')
            if match is not None:
                if drh.check_cache(e_tag, match):
                    f.close()
                    return None
            drh.send_header("ETag", e_tag, end_header=True)
            drh.send_header("Cache-Control", "max-age={0}".format(self.max_age), end_header=True)
        drh.send_response(code)
        drh.send_header("Content-Type", ctype)
        drh.send_header("Content-Length", size)
        drh.end_headers()
        return f

    self._add_file_mask(start, method_str, send_text)
Adds a handler that produces a plain text response .
14,525
def add_special_file(self, mask, path, from_quick_server, ctype=None):
    """Adds a special file that might have a different actual path than its address.

    When *from_quick_server* is True the path is resolved relative to the
    quick_server package directory.
    """
    full_path = path if not from_quick_server else os.path.join(os.path.dirname(__file__), path)

    def read_file(_req, _args):
        # Read the file on every request so edits are picked up.
        with open(full_path, 'rb') as f_out:
            return Response(f_out.read(), ctype=ctype)

    self.add_text_get_mask(mask, read_file)
    self.set_file_argc(mask, 0)
Adds a special file that might have a different actual path than its address .
14,526
def mirror_file(self, path_to, path_from, from_quick_server=True):
    """Mirrors a file to a different location.

    Each time the file changes while the process is running it will be
    copied to *path_to*, overwriting the destination.  The first call
    picks the mirroring implementation (symlink preferred, polling as
    fallback); later calls reuse it.
    """
    full_path = path_from if not from_quick_server else os.path.join(os.path.dirname(__file__), path_from)
    if self._mirror is None:
        # First mirror: try symlinking, fall back to polling.
        if not self._symlink_mirror(path_to, full_path, init=True):
            self._poll_mirror(path_to, full_path, init=True)
        return
    impl = self._mirror["impl"]
    if impl == "symlink":
        self._symlink_mirror(path_to, full_path, init=False)
    elif impl == "poll":
        self._poll_mirror(path_to, full_path, init=False)
    else:
        raise ValueError("unknown mirror implementation: {0}".format(impl))
Mirrors a file to a different location . Each time the file changes while the process is running it will be copied to path_to overwriting the destination .
14,527
def link_empty_favicon_fallback(self):
    """Links the empty favicon shipped with the package as default favicon."""
    self.favicon_fallback = os.path.join(os.path.dirname(__file__), 'favicon.ico')
Links the empty favicon as default favicon .
14,528
def get_token_obj(self, token, expire=_token_default):
    """Returns or creates the object associated with the given token.

    *expire* is the lifetime in seconds (the sentinel ``_token_default``
    means "use the server default"; None means "never expire").  Expired
    tokens are purged on every call.  An already-expired *expire* value
    removes the token and returns a fresh empty dict.
    """
    if expire == _token_default:
        expire = self.get_default_token_expiration()
    now = get_time()
    until = now + expire if expire is not None else None
    with self._token_lock:
        # _token_timings is sorted by expiration; find the first entry
        # that is still valid and drop everything before it.
        first_valid = None
        for (pos, k) in enumerate(self._token_timings):
            t = self._token_map[k][0]
            if t is None or t > now:
                first_valid = pos
                break
        if first_valid is None:
            # All tokens expired.
            self._token_map = {}
            self._token_timings = []
        else:
            for k in self._token_timings[:first_valid]:
                del self._token_map[k]
            self._token_timings = self._token_timings[first_valid:]
        if until is None or until > now:
            if token not in self._token_map:
                self._token_map[token] = (until, {})
                self._token_timings.append(token)
            else:
                # Refresh the expiration but keep the stored object.
                self._token_map[token] = (until, self._token_map[token][1])
            # Never-expiring tokens (None) sort last.
            self._token_timings.sort(key=lambda k: (1 if self._token_map[k][0] is None else 0, self._token_map[k][0]))
            return self._token_map[token][1]
        else:
            # Requested lifetime already elapsed: drop the token.
            if token in self._token_map:
                self._token_timings = [k for k in self._token_timings if k != token]
                del self._token_map[token]
            return {}
Returns or creates the object associated with the given token.
14,529
def handle_cmd(self, cmd):
    """Handles a single server command.

    Multi-word commands are stored with ``_`` separators; the longest
    matching prefix of the input words is looked up and the remaining
    words are passed as arguments.  Unknown commands print completion
    candidates or an error message.
    """
    cmd = cmd.strip()
    segments = []
    for s in cmd.split():
        # '#' starts a comment; ignore the rest of the line.
        if s.startswith('#'):
            break
        segments.append(s)
    args = []
    if not len(segments):
        return
    # Try successively shorter prefixes, moving trailing words to args.
    while segments:
        cur_cmd = "_".join(segments)
        if cur_cmd in self._cmd_methods:
            argc = self._cmd_argc[cur_cmd]
            if argc is not None and len(args) != argc:
                msg('command {0} expects {1} argument(s), got {2}', " ".join(segments), argc, len(args))
                return
            self._cmd_methods[cur_cmd](args)
            return
        args.insert(0, segments.pop())
    # No command matched: offer the next word of any command sharing the
    # typed prefix as completion candidates.
    prefix = '_'.join(args) + '_'
    matches = filter(lambda cmd: cmd.startswith(prefix), self._cmd_methods.keys())
    candidates = set([])
    for m in matches:
        if len(m) <= len(prefix):
            continue
        m = m[len(prefix):]
        if '_' in m:
            m = m[:m.index('_')]
        candidates.add(m)
    if len(candidates):
        msg('command "{0}" needs more arguments:', ' '.join(args))
        for c in candidates:
            msg(' {0}', c)
    else:
        msg('command "{0}" invalid; type ' + 'help or use <TAB> for a list of commands', ' '.join(args))
Handles a single server command .
14,530
def handle_request(self):
    """Handles an HTTP request.

    Polls the listening socket in short intervals (bounded by
    ``shutdown_latency``) so the loop can notice ``self.done`` and shut
    down promptly.  The actual HTTP request is handled using a different
    thread.
    """
    timeout = self.socket.gettimeout()
    if timeout is None:
        timeout = self.timeout
    elif self.timeout is not None:
        timeout = min(timeout, self.timeout)
    ctime = get_time()
    done_req = False
    # Never wait longer than the overall timeout per select() call.
    shutdown_latency = self.shutdown_latency
    if timeout is not None:
        shutdown_latency = min(shutdown_latency, timeout) if shutdown_latency is not None else timeout
    while not (self.done or done_req) and (timeout is None or timeout == 0 or (get_time() - ctime) < timeout):
        try:
            fd_sets = select.select([self], [], [], shutdown_latency)
        except (OSError, select.error) as e:
            # Retry on EINTR (interrupted system call); re-raise otherwise.
            if e.args[0] != errno.EINTR:
                raise
            fd_sets = [[], [], []]
        for _fd in fd_sets[0]:
            done_req = True
            self._handle_request_noblock()
        if timeout == 0:
            break
    if not (self.done or done_req):
        self.handle_timeout()
Handles an HTTP request . The actual HTTP request is handled using a different thread .
14,531
def serve_forever(self):
    """Starts the server handling commands and HTTP requests.

    The server will loop until ``done`` is True or a KeyboardInterrupt is
    received; the cleanup callback (if any) runs in either case.
    """
    self.start_cmd_loop()
    try:
        while not self.done:
            self.handle_request()
    except KeyboardInterrupt:
        # Terminate the ^C line so the next log line starts cleanly.
        if log_file == sys.stderr:
            log_file.write("\n")
    finally:
        if self._clean_up_call is not None:
            self._clean_up_call()
        self.done = True
Starts the server handling commands and HTTP requests . The server will loop until done is True or a KeyboardInterrupt is received .
14,532
def can_ignore_error(self, reqhnd=None):
    """Tests if the current exception is worth reporting.

    Broken pipes and connection resets are always ignorable; a bad file
    descriptor (errno 9) is ignorable only during shutdown, in which case
    the request handler's connection is also marked closed.
    """
    value = sys.exc_info()[1]
    try:
        if isinstance(value, BrokenPipeError) or isinstance(value, ConnectionResetError):
            return True
    except NameError:
        # Python 2: these exception classes do not exist.
        pass
    if not self.done:
        return False
    if not isinstance(value, socket.error):
        return False
    # errno 9 == EBADF: the socket was closed during shutdown.
    need_close = value.errno == 9
    if need_close and reqhnd is not None:
        reqhnd.close_connection = 1
    return need_close
Tests if the error is worth reporting .
14,533
def handle_error(self, request, client_address):
    """Handle an error gracefully by logging it with request context."""
    if self.can_ignore_error():
        return
    thread = threading.current_thread()
    msg("Error in request ({0}): {1} in {2}\n{3}", client_address, repr(request), thread.name, traceback.format_exc())
Handle an error gracefully .
14,534
def _findRow(subNo, model):
    """Finds the row in *model* whose item text equals *subNo*.

    Returns None when no item matches and raises IndexError when the
    number is ambiguous (more than one match).
    """
    items = model.findItems(str(subNo))
    if len(items) == 0:
        return None
    if len(items) > 1:
        raise IndexError("Too many items with sub number %s" % subNo)
    return items[0].row()
Finds a row in a given model which has a column with a given number .
14,535
def _subtitlesAdded(self, path, subNos):
    """When subtitles are added, shift affected sync points.

    All sync points greater than or equal to a newly added subtitle
    number are incremented by the number of insertions at or before them.
    """
    def action(current, count, model, row):
        # Shift the sync point by the number of insertions before it.
        _setSubNo(current + count, model, row)

    def count(current, nos):
        # Count insertions at or before *current*; each counted insertion
        # shifts the effective position by one.
        ret = 0
        for no in nos:
            if current >= no:
                ret += 1
                current += 1
        return ret

    self._changeSubNos(path, subNos, count, action)
When subtitle is added all syncPoints greater or equal than a new subtitle are incremented .
14,536
def _subtitlesRemoved(self, path, subNos):
    """When subtitles are removed, shift or drop affected sync points.

    Sync points greater than a removed subtitle number are decremented;
    a sync point equal to a removed subtitle is removed as well.
    """
    def action(current, count, model, row):
        if count.equal > 0:
            # The subtitle this sync point refers to was removed.
            model.removeRow(row)
        else:
            _setSubNo(current - count.greater_equal, model, row)

    def count(current, nos):
        return _GtEqCount(current, nos)

    self._changeSubNos(path, subNos, count, action)
When subtitle is removed all syncPoints greater than removed subtitle are decremented . SyncPoint equal to removed subtitle is also removed .
14,537
def _get_csv_fieldnames ( csv_reader ) : fieldnames = [ ] for row in csv_reader : for col in row : field = ( col . strip ( ) . replace ( '"' , "" ) . replace ( " " , "" ) . replace ( "(" , "" ) . replace ( ")" , "" ) . lower ( ) ) fieldnames . append ( field ) if "id" in fieldnames : break else : del fieldnames [ : ] if not fieldnames : return None while True : field = fieldnames . pop ( ) if field : fieldnames . append ( field ) break suffix = 1 for index , field in enumerate ( fieldnames ) : if not field : fieldnames [ index ] = "field{}" . format ( suffix ) suffix += 1 return fieldnames
Finds fieldnames in Polarion exported csv file .
14,538
def _get_results ( csv_reader , fieldnames ) : fieldnames_count = len ( fieldnames ) results = [ ] for row in csv_reader : for col in row : if col : break else : continue record = OrderedDict ( list ( zip ( fieldnames , row ) ) ) if record . get ( "exported" ) == "yes" : continue row_len = len ( row ) if fieldnames_count > row_len : for key in fieldnames [ row_len : ] : record [ key ] = None results . append ( record ) return results
Maps data to fieldnames .
14,539
def get_imported_data(csv_file, **kwargs):
    """Reads a Polarion-exported CSV file and returns the imported data.

    Raises Dump2PolarionException when no field names or no results can
    be read from the file.
    """
    open_args = []
    open_kwargs = {}
    try:
        # Python 2: the csv module wants binary mode.
        unicode
        open_args.append("rb")
    except NameError:
        # Python 3: open as text with explicit encoding.
        open_kwargs["encoding"] = "utf-8"
    with open(os.path.expanduser(csv_file), *open_args, **open_kwargs) as input_file:
        reader = _get_csv_reader(input_file)
        fieldnames = _get_csv_fieldnames(reader)
        if not fieldnames:
            raise Dump2PolarionException("Cannot find field names in CSV file '{}'".format(csv_file))
        results = _get_results(reader, fieldnames)
        if not results:
            raise Dump2PolarionException("No results read from CSV file '{}'".format(csv_file))
        testrun = _get_testrun_from_csv(input_file, reader)
        return xunit_exporter.ImportedData(results=results, testrun=testrun)
Reads the content of the Polarion exported csv file and returns imported data .
14,540
def import_csv(csv_file, **kwargs):
    """Imports CSV data and checks that all required columns are there."""
    records = get_imported_data(csv_file, **kwargs)
    _check_required_columns(csv_file, records.results)
    return records
Imports data and checks that all required columns are there .
14,541
def load_config(filename):
    """Loads data from a config file into ``cfg`` for later get/set access.

    Resolution order: *filename* as given, then relative to the current
    working directory, then the previously configured module-global
    ``FILE``.  A missing file triggers ``file_not_found_message``.
    """
    if filename is None:
        filename = ''
    abs_filename = os.path.join(os.getcwd(), filename)
    global FILE
    if os.path.isfile(filename):
        FILE = filename
    elif os.path.isfile(abs_filename):
        FILE = abs_filename
    elif os.path.isfile(FILE):
        # Fall back to the previously loaded config file.
        pass
    else:
        # Report the most helpful of the two candidate paths.
        if os.path.dirname(filename):
            file_not_found = filename
        else:
            file_not_found = abs_filename
        file_not_found_message(file_not_found)
    init(FILE)
Load data from config file to cfg that can be accessed by get set afterwards .
14,542
def init(FILE):
    """Reads the config file into ``cfg`` and marks the module as loaded."""
    try:
        cfg.read(FILE)
        global _loaded
        _loaded = True
    except Exception:
        # BUG FIX: this was a bare ``except:`` which also swallowed
        # SystemExit and KeyboardInterrupt; narrowed to Exception.
        file_not_found_message(FILE)
Read config file
14,543
def get(section, key):
    """Returns the value of *key* in *section* of the main config file.

    The value is coerced in order of preference: float, int, boolean,
    and finally the raw string.  Loads the config on first use.
    """
    if not _loaded:
        init(FILE)
    try:
        return cfg.getfloat(section, key)
    except Exception:
        try:
            return cfg.getint(section, key)
        except Exception:
            # BUG FIX: the two inner handlers were bare ``except:`` clauses
            # which also swallowed SystemExit and KeyboardInterrupt;
            # narrowed to Exception.
            try:
                return cfg.getboolean(section, key)
            except Exception:
                return cfg.get(section, key)
returns the value of a given key of a given section of the main config file .
14,544
def to_internal_value(self, data):
    """Adds extra data to the helper_metadata field.

    Copies a top-level "session_event" entry into
    ``data["helper_metadata"]`` before delegating to the parent
    serializer.
    """
    if "session_event" in data:
        data["helper_metadata"]["session_event"] = data["session_event"]
    return super(InboundSerializer, self).to_internal_value(data)
Adds extra data to the helper_metadata field .
14,545
def acceptAlias(decoratedFunction):
    """Decorator letting methods accept an alias or original name.

    The decorated class method (its class must be an AliasBase) resolves
    its first positional argument through the instance's ``_aliases``
    map before the call.
    """
    def wrapper(self, *args, **kwargs):
        SubAssert(isinstance(self, AliasBase))
        if len(args) > 0:
            key = args[0]
            # Translate a known alias into its original name.
            if args[0] in self._aliases.keys():
                key = self._aliases[args[0]]
            return decoratedFunction(self, key, *args[1:], **kwargs)
        return decoratedFunction(self, *args, **kwargs)
    return wrapper
This function should be used as a decorator . Each class method that is decorated will be able to accept alias or original names as a first function positional parameter .
14,546
def h(values):
    """Calculates the Shannon entropy (in bits) of a count distribution."""
    probabilities = np.true_divide(values, np.sum(values))
    return -np.sum(probabilities * np.log2(probabilities))
Function calculates entropy .
14,547
def info_gain_nominal(x, y, separate_max):
    """Calculates information gain for a discrete feature.

    Returns None when the feature has fewer than three distinct values;
    otherwise returns ``(max_gain, [left_split_values, right_split_values])``.
    For many distinct values (>= 50) the split point is searched
    iteratively on a coarse-to-fine index grid.

    NOTE(review): the expressions ``len(dist) / 10`` rely on Python 2
    integer division — on Python 3 they produce floats, which breaks the
    ``range`` step and index math.  Confirm the intended interpreter.
    """
    x_vals = np.unique(x)
    if len(x_vals) < 3:
        return None
    y_dist = Counter(y)
    h_y = h(y_dist.values())
    dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max)
    # Small feature spaces are searched exhaustively; large ones on a
    # decimated grid refined over three iterations.
    indices, repeat = (range(1, len(dist)), 1) if len(dist) < 50 else (range(1, len(dist), len(dist) / 10), 3)
    interval = len(dist) / 10
    max_ig, max_i, iteration = 0, 1, 0
    while iteration < repeat:
        for i in indices:
            # Candidate split: dist[:i] vs dist[i:].
            dist0 = np.sum([el for el in dist[:i]])
            dist1 = np.sum([el for el in dist[i:]])
            coef = np.true_divide([np.sum(dist0.values()), np.sum(dist1.values())], len(y))
            ig = h_y - np.dot(coef, [h(dist0.values()), h(dist1.values())])
            if ig > max_ig:
                max_ig, max_i = ig, i
        iteration += 1
        if repeat > 1:
            # Narrow the search grid around the current best split.
            interval = int(interval * 0.5)
            if max_i in indices and interval > 0:
                middle_index = indices.index(max_i)
            else:
                break
            min_index = middle_index if middle_index == 0 else middle_index - 1
            max_index = middle_index if middle_index == len(indices) - 1 else middle_index + 1
            indices = range(indices[min_index], indices[max_index], interval)
    return float(max_ig), [splits[:max_i], splits[max_i:]]
Function calculates information gain for discrete features . If feature is continuous it is firstly discretized .
14,548
def multinomLog2(selectors):
    """Calculates the base-2 logarithm of the multinomial coefficient.

    For counts ``selectors`` this is ``log2(total! / (k1! * k2! * ...))``
    where ``total = sum(selectors)``, computed via ``lgamma`` for
    numerical stability.
    """
    ln2 = 0.69314718055994528622
    total = sum(selectors)
    log2_total_fact = math.lgamma(total + 1.0) / ln2
    log2_factors = []
    for count in selectors:
        # Fast paths: 0! = 1! = 1 and 2! = 2 have exact logs.
        if count == 0 or count == 1:
            log2_factors.append(0.0)
        elif count == 2:
            log2_factors.append(1.0)
        elif count == total:
            log2_factors.append(log2_total_fact)
        else:
            log2_factors.append(math.lgamma(count + 1.0) / ln2)
    return log2_total_fact - sum(log2_factors)
Function calculates logarithm 2 of a kind of multinom .
14,549
def calc_mdl(yx_dist, y_dist):
    """Calculates the MDL score for the given label distributions.

    *y_dist* is the overall label distribution (a Counter/dict);
    *yx_dist* is a sequence of per-feature-value label distributions.
    Returns the per-example description length saving of the split.
    """
    # Description length of the labels without the split.
    prior = multinomLog2(y_dist.values())
    prior += multinomLog2([len(y_dist.keys()) - 1, sum(y_dist.values())])
    # Description length of the labels within each feature value.
    post = 0
    for x_val in yx_dist:
        post += multinomLog2([x_val.get(c, 0) for c in y_dist.keys()])
        post += multinomLog2([len(y_dist.keys()) - 1, sum(x_val.values())])
    return (prior - post) / float(sum(y_dist.values()))
Function calculates mdl with given label distributions .
14,550
def mdl_nominal(x, y, separate_max):
    """Calculates minimum description length for a discrete feature.

    Returns None when the feature has a single value; otherwise returns
    ``(max_mdl, [left_split_values, right_split_values])`` for the best
    binary split of the ordered feature-value distributions.
    """
    x_vals = np.unique(x)
    if len(x_vals) == 1:
        return None
    y_dist = Counter(y)
    dist, splits = nominal_splits(x, y, x_vals, y_dist, separate_max)
    prior_mdl = calc_mdl(dist, y_dist)
    max_mdl, max_i = 0, 1
    # Evaluate every binary split point dist[:i] / dist[i:].
    for i in range(1, len(dist)):
        dist0_x = [el for el in dist[:i]]
        dist0_y = np.sum(dist0_x)
        post_mdl0 = calc_mdl(dist0_x, dist0_y)
        dist1_x = [el for el in dist[i:]]
        dist1_y = np.sum(dist1_x)
        post_mdl1 = calc_mdl(dist1_x, dist1_y)
        # Weight each side by its share of the examples.
        coef = np.true_divide([sum(dist0_y.values()), sum(dist1_y.values())], len(x))
        mdl_val = prior_mdl - np.dot(coef, [post_mdl0, post_mdl1])
        if mdl_val > max_mdl:
            max_mdl, max_i = mdl_val, i
    split = [splits[:max_i], splits[max_i:]]
    return (max_mdl, split)
Function calculates minimum description length for discrete features . If feature is continuous it is firstly discretized .
14,551
def url(section="postGIS", config_file=None):
    """Retrieves the URL used to connect to the database.

    The password is looked up in the system keyring first, then in the
    config file's "pw" option; as a last resort the user is prompted and
    the entered password is stored in the keyring.
    """
    from configparser import NoOptionError
    cfg.load_config(config_file)
    try:
        pw = keyring.get_password(cfg.get(section, "database"),
                                  cfg.get(section, "username"))
    except NoSectionError as e:
        print("There is no section {section} in your config file. Please "
              "choose one available section from your config file or "
              "specify a new one!".format(section=section))
        exit(-1)
    if pw is None:
        try:
            pw = cfg.get(section, "pw")
        except NoOptionError:
            # BUG FIX: this handler referenced the undefined name
            # ``option``, raising NameError whenever the "pw" option was
            # missing instead of prompting for a password.
            pw = getpass.getpass(prompt="No password available in your "
                                        "keyring for database {database}. "
                                        "\n\nEnter your password to "
                                        "store it in "
                                        "keyring:".format(database=section))
            keyring.set_password(section, cfg.get(section, "username"), pw)
        except NoSectionError:
            print("Unable to find the 'postGIS' section in oemof's config." +
                  "\nExiting.")
            exit(-1)
    return "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}".format(
        user=cfg.get(section, "username"),
        passwd=pw,
        host=cfg.get(section, "host"),
        db=cfg.get(section, "database"),
        port=int(cfg.get(section, "port")))
Retrieve the URL used to connect to the database .
14,552
def get_endpoints_using_raw_json_emission(domain):
    """Performs a raw HTTP GET against the Socrata portal for *domain*.

    This uses the first of the two ways of getting endpoint information:
    the raw ``data.json`` endpoint.  Raises for non-2xx responses.
    """
    response = requests.get("http://{0}/data.json".format(domain))
    response.raise_for_status()
    return response.json()
Implements a raw HTTP GET against the entire Socrata portal for the domain in question . This method uses the first of the two ways of getting this information the raw JSON endpoint .
14,553
def get_endpoints_using_catalog_api(domain, token):
    """Fetches all endpoint metadata for *domain* via the Socrata catalog API.

    Pages through the catalog 1000 results at a time, deduplicating by
    resource id, until a page yields (almost) no new endpoints.
    """
    headers = {"X-App-Token": token}
    uri = "http://api.us.socrata.com/api/catalog/v1?domains={0}&offset={1}&limit=1000"
    ret = []
    endpoints_thus_far = set()
    offset = 0
    while True:
        try:
            r = requests.get(uri.format(domain, offset), headers=headers)
            r.raise_for_status()
        except requests.HTTPError:
            # NOTE(review): the error message contains no "{}" placeholder,
            # so this .format(domain) is a no-op — confirm intent.
            raise requests.HTTPError("An HTTP error was raised during Socrata API ingestion.".format(domain))
        data = r.json()
        endpoints_returned = {r['resource']['id'] for r in data['results']}
        new_endpoints = endpoints_returned.difference(endpoints_thus_far)
        if len(new_endpoints) >= 999:
            # A (nearly) full page of unseen endpoints: keep paging.
            ret += data['results']
            endpoints_thus_far.update(new_endpoints)
            offset += 1000
            continue
        else:
            # Last page: keep only the results not seen before.
            ret += [r for r in data['results'] if r['resource']['id'] in new_endpoints]
            break
    return ret
Implements a raw HTTP GET against the entire Socrata portal for the domain in question . This method uses the second of the two ways of getting this information the catalog API .
14,554
def count_resources(domain, token):
    """Count resources of each type on ``domain``, excluding stories."""
    kinds = (r['resource']['type'] for r in get_resources(domain, token))
    return dict(Counter(kind for kind in kinds if kind != 'story'))
Given the domain in question generates counts for that domain of each of the different data types .
14,555
def stratify_by_features(features, n_strata, **kwargs):
    """Stratify items by k-means clustering in feature space.

    Extra keyword arguments are forwarded to ``KMeans``.
    """
    clusterer = KMeans(n_clusters=n_strata, **kwargs)
    labels = clusterer.fit_predict(X=features)
    return Strata(labels)
Stratify by clustering the items in feature space
14,556
def _heuristic_bin_width(obs):
    """Optimal histogram bin width via the Freedman-Diaconis rule.

    width = 2 * IQR * n ** (-1/3)

    Uses ``np.percentile`` instead of ``sp.percentile`` — the SciPy alias
    of the NumPy function was deprecated and later removed.
    """
    iqr = np.percentile(obs, 75) - np.percentile(obs, 25)
    n = len(obs)
    return 2 * iqr * n ** (-1 / 3)
Optimal histogram bin width based on the Freedman - Diaconis rule
14,557
def stratify_by_scores(scores, goal_n_strata='auto', method='cum_sqrt_F',
                       n_bins='auto'):
    """Stratify items by binning them on their scores.

    ``method`` is either 'equal_size' (equal-population strata over the
    score-sorted items) or 'cum_sqrt_F' (cumulative sqrt-frequency rule).
    'auto' values are derived via the Freedman-Diaconis heuristic.
    Returns a Strata instance.
    """
    available_methods = ['equal_size', 'cum_sqrt_F']
    if method not in available_methods:
        raise ValueError("method argument is invalid")
    if (method == 'cum_sqrt_F') or (goal_n_strata == 'auto'):
        # Build the score histogram needed by the cum_sqrt_F rule and/or
        # the automatic choice of the number of strata.
        if n_bins == 'auto':
            width_score = _heuristic_bin_width(scores)
            # NOTE(review): sp.ptp is a SciPy alias of numpy.ptp that newer
            # SciPy releases removed — confirm the pinned SciPy version.
            n_bins = np.ceil(sp.ptp(scores) / width_score).astype(int)
            print("Automatically setting n_bins = {}.".format(n_bins))
        counts, score_bins = np.histogram(scores, bins=n_bins)
        sqrt_counts = np.sqrt(counts)
        # csf: cumulative sum of sqrt bin frequencies (the "cum sqrt F" curve).
        csf = np.cumsum(sqrt_counts)
        if goal_n_strata == 'auto':
            width_csf = _heuristic_bin_width(csf)
            goal_n_strata = np.ceil(sp.ptp(csf) / width_csf).astype(int)
            print("Automatically setting goal_n_strata = {}.".format(goal_n_strata))
        elif method == 'cum_sqrt_F':
            # Fixed goal: split the csf curve into equal-width segments.
            width_csf = csf[-1] / goal_n_strata
    if method == 'equal_size':
        # Assign items to strata of (near-)equal population in score order;
        # the first `remainder` strata absorb one extra item each.
        sorted_ids = scores.argsort()
        n_items = len(sorted_ids)
        quotient = n_items // goal_n_strata
        remainder = n_items % goal_n_strata
        allocations = np.empty(n_items, dtype='int')
        st_pops = [quotient for i in range(goal_n_strata)]
        for i in range(remainder):
            st_pops[i] += 1
        j = 0
        for k, nk in enumerate(st_pops):
            start = j
            end = j + nk
            allocations[sorted_ids[start:end]] = k
            j = end
    if method == 'cum_sqrt_F':
        if goal_n_strata > n_bins:
            warnings.warn("goal_n_strata > n_bins. "
                          "Consider increasing n_bins.")
        # Stratum boundaries are the score-bin edges where the csf curve
        # crosses successive multiples of width_csf.
        csf_bins = [x * width_csf for x in np.arange(goal_n_strata + 1)]
        j = 0
        new_bins = []
        for (idx, value) in enumerate(csf):
            if j == (len(csf_bins) - 1) or idx == (len(csf) - 1):
                new_bins.append(score_bins[-1])
                break
            if value >= csf_bins[j]:
                new_bins.append(score_bins[idx])
                j += 1
        # Widen the outer edges slightly so min/max scores fall inside.
        new_bins[0] -= 0.01
        new_bins[-1] += 0.01
        allocations = np.digitize(scores, bins=new_bins, right=True) - 1
        # Drop empty strata and relabel the remaining ones 0..n_strata-1.
        nonempty_ids = np.unique(allocations)
        n_strata = len(nonempty_ids)
        indices = np.arange(n_strata)
        allocations = np.digitize(allocations, nonempty_ids, right=True)
        if n_strata < goal_n_strata:
            warnings.warn("Failed to create {} strata".format(goal_n_strata))
    return Strata(allocations)
Stratify by binning the items based on their scores
14,558
def auto_stratify(scores, **kwargs):
    """Generate a Strata instance from scores.

    Recognised keyword overrides: ``stratification_method``,
    ``stratification_n_strata`` and ``stratification_n_bins``.
    """
    method = kwargs.get('stratification_method', 'cum_sqrt_F')
    n_strata = kwargs.get('stratification_n_strata', 'auto')
    if 'stratification_n_bins' in kwargs:
        return stratify_by_scores(scores, n_strata, method=method,
                                  n_bins=kwargs['stratification_n_bins'])
    return stratify_by_scores(scores, n_strata, method=method)
Generate Strata instance automatically
14,559
def _sample_stratum(self, pmf=None, replace=True):
    """Draw a stratum index, optionally excluding exhausted strata.

    ``pmf`` defaults to the stratum weights; without replacement, strata
    whose items are all sampled get probability zero.
    """
    if pmf is None:
        pmf = self.weights_
    if not replace:
        exhausted = (self._n_sampled >= self.sizes_)
        if np.any(exhausted):
            # Work on a copy so the caller's pmf is left untouched.
            pmf = copy.copy(pmf)
            pmf[exhausted] = 0
            if np.sum(pmf) == 0:
                raise RuntimeError
            pmf /= np.sum(pmf)
    return np.random.choice(self.indices_, p=pmf)
Sample a stratum
14,560
def _sample_in_stratum(self, stratum_idx, replace=True):
    """Pick an item location uniformly at random from one stratum.

    Records the draw in the bookkeeping arrays and returns the item's
    global location.
    """
    if replace:
        pos = np.random.choice(self.sizes_[stratum_idx])
    else:
        # Restrict the draw to positions not sampled before.
        unseen = np.where(~self._sampled[stratum_idx])[0]
        pos = np.random.choice(unseen)
    self._sampled[stratum_idx][pos] = True
    self._n_sampled[stratum_idx] += 1
    return self.allocations_[stratum_idx][pos]
Sample an item uniformly from a stratum
14,561
def intra_mean(self, values):
    """Per-stratum mean of ``values`` (row-wise for 2-D input)."""
    if values.ndim > 1:
        means = [np.mean(values[locs, :], axis=0) for locs in self.allocations_]
    else:
        means = [np.mean(values[locs]) for locs in self.allocations_]
    return np.array(means)
Calculate the mean of a quantity within strata
14,562
def reset(self):
    """Clear all sampling bookkeeping so sampling can start afresh."""
    self._sampled = [np.zeros(size, dtype=bool) for size in self.sizes_]
    self._n_sampled = np.zeros(self.n_strata_, dtype=int)
Reset the instance to begin sampling from scratch
14,563
async def bluetooth_scan():
    """Collect bluetooth devices seen by all GH units on the network.

    For each assistant-capable unit found by the network scan, runs a
    bluetooth scan and keeps, per MAC address, the unit that sees the
    device with the strongest RSSI.  Prints the resulting mapping.
    """
    devices = {}
    async with aiohttp.ClientSession() as session:
        scanner = NetworkScan(LOOP, session)
        hosts = await scanner.scan_for_units(IPRANGE)
        for host in hosts:
            if not host['assistant_supported']:
                continue
            async with aiohttp.ClientSession() as session:
                info = DeviceInfo(LOOP, session, host['host'])
                await info.get_device_info()
                ghname = info.device_info.get('name')
            async with aiohttp.ClientSession() as session:
                bt = Bluetooth(LOOP, session, host['host'])
                await bt.scan_for_devices_multi_run()
                await bt.get_scan_result()
                for device in bt.devices:
                    mac = device['mac_address']
                    known = devices.get(mac)
                    # Keep the strongest observed signal for each MAC.
                    if not known or known['rssi'] < device['rssi']:
                        devices[mac] = {'rssi': device['rssi'],
                                        'ghunit': ghname}
    print(devices)
Get devices from all GH units on the network .
14,564
def get_queue_obj(session, queue_url, log_url):
    """Create a QueueSearch for submit verification.

    When the queue URL or the HTTP session is missing, the returned
    object is flagged to skip verification instead of failing later.
    """
    problems = []
    if not queue_url:
        problems.append("The queue url is not configured, skipping submit verification")
    if not session:
        problems.append("Missing requests session, skipping submit verification")
    for message in problems:
        logger.error(message)
    queue = QueueSearch(session=session, queue_url=queue_url, log_url=log_url)
    queue.skip = bool(problems)
    return queue
Checks that all the data that is needed for submit verification is available .
14,565
def download_queue(self, job_ids):
    """Fetch completed-job data for ``job_ids``; None on any failure."""
    if self.skip:
        return None
    ids = ",".join(str(job_id) for job_id in job_ids)
    url = "{}?jobtype=completed&jobIds={}".format(self.queue_url, ids)
    try:
        reply = self.session.get(url, headers={"Accept": "application/json"})
        # A falsy response (e.g. HTTP error status) maps to None.
        return reply.json() if reply else None
    except Exception as err:
        logger.error(err)
        return None
Downloads data of completed jobs .
14,566
def find_jobs(self, job_ids):
    """Return completed-queue jobs in ``job_ids`` that are finished."""
    if self.skip:
        return []
    json_data = self.download_queue(job_ids)
    if not json_data:
        return []
    return [
        job for job in json_data["jobs"]
        if job.get("id") in job_ids
        and job.get("status", "").lower() not in _NOT_FINISHED_STATUSES
    ]
Finds the jobs in the completed job queue .
14,567
def wait_for_jobs(self, job_ids, timeout, delay):
    """Poll the completed queue until all job IDs show up or time runs out.

    Returns the collected jobs on success; logs an error and returns
    None when the timeout expires.
    """
    if self.skip:
        return
    logger.debug("Waiting up to %d sec for completion of the job IDs %s",
                 timeout, job_ids)
    pending = set(job_ids)
    collected = []
    remaining_time = timeout
    while remaining_time > 0:
        found = self.find_jobs(pending)
        if found:
            pending.difference_update({job["id"] for job in found})
            collected.extend(found)
            if not pending:
                return collected
        time.sleep(delay)
        remaining_time -= delay
    logger.error(
        "Timed out while waiting for completion of the job IDs %s. Results not updated.",
        list(pending),
    )
Waits until the jobs appears in the completed job queue .
14,568
def _check_outcome(self, jobs):
    """Report submit results; True only when every job succeeded."""
    if self.skip:
        return False
    if not jobs:
        logger.error("Import failed!")
        return False
    # A job fails when its status is missing, empty, or not "success".
    failed = [job for job in jobs
              if (job.get("status") or "").lower() != "success"]
    for job in failed:
        logger.error("job: %s; status: %s", job.get("id"), job.get("status"))
    if len(failed) == len(jobs):
        logger.error("Import failed!")
    elif failed:
        logger.error("Some import jobs failed!")
    else:
        logger.info("Results successfully updated!")
    return not failed
Parses returned messages and checks submit outcome .
14,569
def _download_log(self, url, output_file):
    """Append the log served at ``url`` to ``output_file``.

    Retries up to five times on a falsy HTTP response; a raised request
    exception (callback returns None) aborts the retry loop.
    """
    logger.info("Saving log %s to %s", url, output_file)

    def fetch():
        try:
            return self.session.get(url)
        except Exception as err:
            logger.error(err)

    log_data = None
    for _attempt in range(5):
        log_data = fetch()
        # Truthy response: success. None: request raised, don't retry.
        if log_data or log_data is None:
            break
        time.sleep(2)
    if not (log_data and log_data.content):
        logger.error("Failed to download log file %s.", url)
        return
    with open(os.path.expanduser(output_file), "ab") as out:
        out.write(log_data.content)
Saves log returned by the message bus .
14,570
def get_logs(self, jobs, log_file=None):
    """Save logs for ``jobs`` to ``log_file``, or just log their URLs."""
    if not jobs or not self.log_url:
        return
    for job in jobs:
        job_id = job.get("id")
        url = "{}?jobId={}".format(self.log_url, job_id)
        if log_file:
            self._download_log("{}&download".format(url), log_file)
        else:
            logger.info("Submit log for job %s: %s", job_id, url)
Get log or log url of the jobs .
14,571
def submodules(self):
    """All modules reachable from this node, recursively."""
    found = list(self.modules)
    for package in self.packages:
        found.extend(package.submodules)
    return found
Property to return all sub - modules of the node recursively .
14,572
def get_target(self, target):
    """Memoised lookup of ``target`` via ``_get_target``."""
    cache = self._target_cache
    if target not in cache:
        cache[target] = self._get_target(target)
    return cache[target]
Get the result of _get_target cache it and return it .
14,573
def _get_target(self, target):
    """Resolve a dotted ``target`` path to a Package or Module.

    Returns None when nothing matches.  For targets nested more than two
    levels deep, a match on the first component alone is not specific
    enough to be returned.
    """
    depth = target.count('.') + 1
    parts = target.split('.', 1)
    head = parts[0]
    for module in self.modules:
        if head == module.name and depth < 3:
            return module
    for package in self.packages:
        if head != package.name:
            continue
        if depth == 1:
            return package
        # Try to resolve the remainder inside the matching package.
        nested = package._get_target(parts[1])
        if nested:
            return nested
        if depth < 3:
            return package
    return None
Get the Package or Module related to given target .
14,574
def build_dependencies(self):
    """Recursively build dependencies: sub-modules first, then sub-packages."""
    for module in self.modules:
        module.build_dependencies()
    for package in self.packages:
        package.build_dependencies()
Recursively build the dependencies for sub - modules and sub - packages .
14,575
def print_graph(self, format=None, output=sys.stdout, depth=0, **kwargs):
    """Render this node's dependency graph to ``output``."""
    self.as_graph(depth=depth).print(format=format, output=output, **kwargs)
Print the graph for self s nodes .
14,576
def as_graph(self, depth=0):
    """Return (and cache) the Graph for this node at ``depth``."""
    if depth not in self._graph_cache:
        self._graph_cache[depth] = Graph(self, depth=depth)
    return self._graph_cache[depth]
Create a graph with self as node cache it return it .
14,577
def as_matrix(self, depth=0):
    """Return (and cache) the Matrix for this node at ``depth``."""
    if depth not in self._matrix_cache:
        self._matrix_cache[depth] = Matrix(self, depth=depth)
    return self._matrix_cache[depth]
Create a matrix with self as node cache it return it .
14,578
def as_treemap(self):
    """Return (and cache) the TreeMap of dependencies for this node."""
    if not self._treemap_cache:
        self._treemap_cache = TreeMap(self)
    return self._treemap_cache
Return the dependencies as a TreeMap .
14,579
def root(self):
    """The top-most package containing this node (possibly the node itself)."""
    current = self
    while current.package is not None:
        current = current.package
    return current
Property to return the root of this node .
14,580
def depth(self):
    """Depth of this node in the package tree (cached; a root node is 1)."""
    if self._depth_cache is None:
        level, current = 1, self
        while current.package is not None:
            level += 1
            current = current.package
        self._depth_cache = level
    return self._depth_cache
Property to tell the depth of the node in the tree .
14,581
def absolute_name(self, depth=0):
    """Dotted name of this node, truncated to at most ``depth`` levels.

    ``depth < 1`` means "use the node's own depth" (full name).
    """
    node, level = self, self.depth
    if depth < 1:
        depth = level
    # Climb until the node sits at the requested depth.
    while level > depth and node.package is not None:
        node = node.package
        level -= 1
    parts = []
    while node is not None:
        parts.append(node.name)
        node = node.package
    return '.'.join(reversed(parts))
Return the absolute name of the node .
14,582
def color_msg(msg, color):
    "Wrap msg in the ANSI code for color (unknown colors fall back to endc)."
    start = COLORS.get(color, COLORS['endc'])
    return start + msg + COLORS['endc']
Return colored message
14,583
def gen_files(path, prefix="_"):
    "Yield absolute paths of parseable files under path (file or directory)."
    if op.isdir(path):
        for entry in listdir(path):
            full = op.join(path, entry)
            if is_parsed_file(full):
                yield op.abspath(full)
    elif is_parsed_file(path):
        yield op.abspath(path)
Return file generator
14,584
def pack(args):
    "Pack files."
    from zetalibrary.packer import Packer
    args = parse_config(args)
    for source in gen_files(args.source, prefix=args.prefix):
        Packer(source, args).pack()
Pack files .
14,585
def nonzero_monies(self):
    """Shallow copies of the underlying Money objects with non-zero amounts."""
    result = []
    for money in self._money_obs:
        if money.amount != 0:
            result.append(copy.copy(money))
    return result
Get a list of the underlying Money instances that are not zero
14,586
def index(pc):
    """Print the Pricing API index: format version, date, offered services."""
    click.echo("Format Version: {0}".format(pc.idx['formatVersion']))
    click.echo("Publication Date: {0}".format(pc.idx['publicationDate']))
    # join produces the same comma-separated list the original loop built.
    click.echo("Services Offered: {0}".format(", ".join(pc.idx['offers'])))
Show details about the Pricing API Index .
14,587
def product(pc, service, attrib, sku):
    """List a service's products in the configured region, filtered by
    the given attributes and/or SKU."""
    pc.service = service.lower()
    pc.sku = sku
    pc.add_attributes(attribs=attrib)
    for label, value in (("Service Alias", pc.service_alias),
                         ("URL", pc.service_url),
                         ("Region", pc.region),
                         ("Product Terms", pc.terms),
                         ("Filtering Attributes", pc.attributes)):
        click.echo("{0}: {1}".format(label, value))
    products = pyutu.find_products(pc)
    for sku_id in products:
        click.echo("Product SKU: {0} product: {1}".format(
            sku_id, json.dumps(products[sku_id], indent=2, sort_keys=True)))
    click.echo("Total Products Found: {0}".format(len(products)))
    click.echo("Time: {0} secs".format(time.process_time()))
Get a list of a service s products . The list will be in the given region matching the specific terms and any given attribute filters or a SKU .
14,588
def price(pc, service, attrib, sku):
    """List a service's prices in the configured region, filtered by
    the given attributes and/or SKU."""
    pc.service = service.lower()
    pc.sku = sku
    pc.add_attributes(attribs=attrib)
    for label, value in (("Service Alias", pc.service_alias),
                         ("URL", pc.service_url),
                         ("Region", pc.region),
                         ("Product Terms", pc.terms),
                         ("Filtering Attributes", pc.attributes)):
        click.echo("{0}: {1}".format(label, value))
    prices = pyutu.get_prices(pc)
    for rate_code in prices:
        click.echo("Rate Code: {0} price: {1}".format(
            rate_code, json.dumps(prices[rate_code], indent=2, sort_keys=True)))
    click.echo("Total Prices Found: {0}".format(len(prices)))
    # process_time only exists on Python >= 3.3.
    if sys.version_info >= (3, 3):
        click.echo("Time: {0} secs".format(time.process_time()))
Get a list of a service s prices . The list will be in the given region matching the specific terms and any given attribute filters or a SKU .
14,589
def map_init(interface, params):
    """Seed numpy's and Python's RNGs from ``params['seed']``; return params."""
    import random

    import numpy as np
    np.random.seed(params['seed'])
    random.seed(params['seed'])
    return params
Intialize random number generator with given seed params . seed .
14,590
def create_graph_name(suffix='', dirname=None):
    """Build a graph file name from the calling function's name."""
    tag = '-%s' % suffix if suffix else suffix
    caller = get_callers_name(level=3)
    name = '%s%s%s%s' % (__prefix, caller, tag, __suffix)
    if dirname:
        name = os.path.join(dirname, name)
    return name
Create a graph name using the name of the caller .
14,591
def save_graph(graph, suffix='', dirname=None, pdf=False):
    """Save ``graph`` under a caller-derived name, optionally also as PDF."""
    name = create_graph_name(suffix, dirname)
    graph.save(name)
    if pdf:
        graph.save_as_pdf(name)
Save a graph using caller s name .
14,592
def save_data(data, suffix='', dirname=None):
    """Save a dataset as text, named after the calling function.

    A list of sequences is transposed so each input sequence becomes a
    column of the output file.
    """
    # isinstance instead of the fragile type(data) == list comparison.
    if isinstance(data, list):
        data = np.array(data).T
    name = create_graph_name(suffix, dirname) + '.txt'
    np.savetxt(name, data)
Save a dataset using caller s name .
14,593
def read(path, savedir):
    "Read file from path, caching http:// downloads under savedir."
    if path.startswith('http://'):
        name = op.basename(path)
        save_path = op.join(savedir, name)
        if not op.exists(save_path):
            src = urllib2.urlopen(path).read()
            try:
                # Cache the download; fall back to the in-memory copy
                # when the cache location is not writable.
                # FIX: use context managers so file handles are closed.
                with open(save_path, 'w') as cache:
                    cache.write(src)
            except IOError:
                return src
        path = save_path
    with open(path, 'r') as handle:
        return handle.read()
Read file from path
14,594
def parse_imports(self, src):
    "Strip imports from source; return (stripped_src, imported_names)."
    result = []

    def child(match):
        result.append(match.group(1))
        # BUG FIX: re.sub requires the replacement callback to return a
        # string; the original returned None, which raises TypeError on
        # the first match.  Returning '' removes the import statement.
        return ''

    src = self.import_re.sub(child, src)
    return src, result
Parse imports from source .
14,595
def __get_users(self):
    """Yield the uid of every posixAccount entry found in LDAP."""
    search_filter = ['(objectclass=posixAccount)']
    for entry in self.client.search(search_filter, ['uid']):
        yield entry.uid.value
Get user list .
14,596
def by_user(config):
    """Display LDAP group membership sorted by user."""
    client = Client()
    client.prepare_connection()
    CLI.parse_membership('Groups by User', API(client).by_user())
Display LDAP group membership sorted by user .
14,597
def raw(config):
    """Dump the contents of LDAP to the console in raw format."""
    client = Client()
    client.prepare_connection()
    print(API(client).raw())
Dump the contents of LDAP to console in raw format .
14,598
def get_sql_state(self, state):
    """Return state.sql_state, creating an empty SQLStateGraph on first use."""
    if not hasattr(state, 'sql_state'):
        state.sql_state = SQLStateGraph()
    return state.sql_state
Get SQLStateGraph from state .
14,599
def get_sortkey(table):
    """Return the first (alphabetically) property of a table's WFS schema."""
    wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    schema = wfs.get_schema("pub:" + table)
    # min() over the keys equals sorted(keys)[0].
    return min(schema["properties"].keys())
Get a field to sort by