idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
17,000
def url_converter ( self , * args , ** kwargs ) : upstream_converter = super ( PatchedManifestStaticFilesStorage , self ) . url_converter ( * args , ** kwargs ) def converter ( matchobj ) : try : upstream_converter ( matchobj ) except ValueError : matched , url = matchobj . groups ( ) return matched return converter
Return the custom URL converter for the given file name .
17,001
def order_by_on_list ( objects , order_field , is_desc = False ) : if callable ( order_field ) : objects . sort ( key = order_field , reverse = is_desc ) return def order_key ( x ) : v = getattr_path ( x , order_field ) if v is None : return MIN return v objects . sort ( key = order_key , reverse = is_desc )
Utility function to sort objects django - style even for non - query set collections
17,002
def render_table ( request , table , links = None , context = None , template = 'tri_table/list.html' , blank_on_empty = False , paginate_by = 40 , page = None , paginator = None , show_hits = False , hit_label = 'Items' , post_bulk_edit = lambda table , queryset , updates : None ) : if not context : context = { } if isinstance ( table , Namespace ) : table = table ( ) assert isinstance ( table , Table ) , table table . request = request should_return , dispatch_result = handle_dispatch ( request = request , obj = table ) if should_return : return dispatch_result context [ 'bulk_form' ] = table . bulk_form context [ 'query_form' ] = table . query_form context [ 'tri_query_error' ] = table . query_error if table . bulk_form and request . method == 'POST' : if table . bulk_form . is_valid ( ) : queryset = table . bulk_queryset ( ) updates = { field . name : field . value for field in table . bulk_form . fields if field . value is not None and field . value != '' and field . attr is not None } queryset . update ( ** updates ) post_bulk_edit ( table = table , queryset = queryset , updates = updates ) return HttpResponseRedirect ( request . META [ 'HTTP_REFERER' ] ) table . context = table_context ( request , table = table , links = links , paginate_by = paginate_by , page = page , extra_context = context , paginator = paginator , show_hits = show_hits , hit_label = hit_label , ) if not table . data and blank_on_empty : return '' if table . query_form and not table . query_form . is_valid ( ) : table . data = None table . context [ 'invalid_form_message' ] = mark_safe ( '<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>' ) return render_template ( request , template , table . context )
Render a table . This automatically handles pagination sorting filtering and bulk operations .
17,003
def generate_duid ( mac ) : valid = mac and isinstance ( mac , six . string_types ) if not valid : raise ValueError ( "Invalid argument was passed" ) return "00:" + mac [ 9 : ] + ":" + mac
DUID is consisted of 10 hex numbers .
17,004
def try_value_to_bool ( value , strict_mode = True ) : if strict_mode : true_list = ( 'True' , ) false_list = ( 'False' , ) val = value else : true_list = ( 'true' , 'on' , 'yes' ) false_list = ( 'false' , 'off' , 'no' ) val = str ( value ) . lower ( ) if val in true_list : return True elif val in false_list : return False return value
Tries to convert value into boolean .
17,005
def create_network ( self , net_view_name , cidr , nameservers = None , members = None , gateway_ip = None , dhcp_trel_ip = None , network_extattrs = None ) : ipv4 = ib_utils . determine_ip_version ( cidr ) == 4 options = [ ] if nameservers : options . append ( obj . DhcpOption ( name = 'domain-name-servers' , value = "," . join ( nameservers ) ) ) if ipv4 and gateway_ip : options . append ( obj . DhcpOption ( name = 'routers' , value = gateway_ip ) ) if ipv4 and dhcp_trel_ip : options . append ( obj . DhcpOption ( name = 'dhcp-server-identifier' , num = 54 , value = dhcp_trel_ip ) ) return obj . Network . create ( self . connector , network_view = net_view_name , cidr = cidr , members = members , options = options , extattrs = network_extattrs , check_if_exists = False )
Create NIOS Network and prepare DHCP options .
17,006
def create_ip_range ( self , network_view , start_ip , end_ip , network , disable , range_extattrs ) : return obj . IPRange . create ( self . connector , network_view = network_view , start_addr = start_ip , end_addr = end_ip , cidr = network , disable = disable , extattrs = range_extattrs , check_if_exists = False )
Creates IPRange or fails if already exists .
17,007
def _parse_options ( self , options ) : attributes = ( 'host' , 'wapi_version' , 'username' , 'password' , 'ssl_verify' , 'http_request_timeout' , 'max_retries' , 'http_pool_connections' , 'http_pool_maxsize' , 'silent_ssl_warnings' , 'log_api_calls_as_info' , 'max_results' , 'paging' ) for attr in attributes : if isinstance ( options , dict ) and attr in options : setattr ( self , attr , options [ attr ] ) elif hasattr ( options , attr ) : value = getattr ( options , attr ) setattr ( self , attr , value ) elif attr in self . DEFAULT_OPTIONS : setattr ( self , attr , self . DEFAULT_OPTIONS [ attr ] ) else : msg = "WAPI config error. Option %s is not defined" % attr raise ib_ex . InfobloxConfigException ( msg = msg ) for attr in ( 'host' , 'username' , 'password' ) : if not getattr ( self , attr ) : msg = "WAPI config error. Option %s can not be blank" % attr raise ib_ex . InfobloxConfigException ( msg = msg ) self . wapi_url = "https://%s/wapi/v%s/" % ( self . host , self . wapi_version ) self . cloud_api_enabled = self . is_cloud_wapi ( self . wapi_version )
Copy needed options to self
17,008
def _parse_reply ( request ) : try : return jsonutils . loads ( request . content ) except ValueError : raise ib_ex . InfobloxConnectionError ( reason = request . content )
Tries to parse reply from NIOS .
17,009
def get_object ( self , obj_type , payload = None , return_fields = None , extattrs = None , force_proxy = False , max_results = None , paging = False ) : self . _validate_obj_type_or_die ( obj_type , obj_type_expected = False ) if max_results is None and self . max_results : max_results = self . max_results if paging is False and self . paging : paging = self . paging query_params = self . _build_query_params ( payload = payload , return_fields = return_fields , max_results = max_results , paging = paging ) proxy_flag = self . cloud_api_enabled and force_proxy ib_object = self . _handle_get_object ( obj_type , query_params , extattrs , proxy_flag ) if ib_object : return ib_object if self . cloud_api_enabled and not force_proxy : ib_object = self . _handle_get_object ( obj_type , query_params , extattrs , proxy_flag = True ) if ib_object : return ib_object return None
Retrieve a list of Infoblox objects of type obj_type
17,010
def create_object ( self , obj_type , payload , return_fields = None ) : self . _validate_obj_type_or_die ( obj_type ) query_params = self . _build_query_params ( return_fields = return_fields ) url = self . _construct_url ( obj_type , query_params ) opts = self . _get_request_options ( data = payload ) self . _log_request ( 'post' , url , opts ) if ( self . session . cookies ) : self . session . auth = None r = self . session . post ( url , ** opts ) self . _validate_authorized ( r ) if r . status_code != requests . codes . CREATED : response = utils . safe_json_load ( r . content ) already_assigned = 'is assigned to another network view' if response and already_assigned in response . get ( 'text' ) : exception = ib_ex . InfobloxMemberAlreadyAssigned else : exception = ib_ex . InfobloxCannotCreateObject raise exception ( response = response , obj_type = obj_type , content = r . content , args = payload , code = r . status_code ) return self . _parse_reply ( r )
Create an Infoblox object of type obj_type
17,011
def update_object ( self , ref , payload , return_fields = None ) : query_params = self . _build_query_params ( return_fields = return_fields ) opts = self . _get_request_options ( data = payload ) url = self . _construct_url ( ref , query_params ) self . _log_request ( 'put' , url , opts ) r = self . session . put ( url , ** opts ) self . _validate_authorized ( r ) if r . status_code != requests . codes . ok : self . _check_service_availability ( 'update' , r , ref ) raise ib_ex . InfobloxCannotUpdateObject ( response = jsonutils . loads ( r . content ) , ref = ref , content = r . content , code = r . status_code ) return self . _parse_reply ( r )
Update an Infoblox object
17,012
def delete_object ( self , ref , delete_arguments = None ) : opts = self . _get_request_options ( ) if not isinstance ( delete_arguments , dict ) : delete_arguments = { } url = self . _construct_url ( ref , query_params = delete_arguments ) self . _log_request ( 'delete' , url , opts ) r = self . session . delete ( url , ** opts ) self . _validate_authorized ( r ) if r . status_code != requests . codes . ok : self . _check_service_availability ( 'delete' , r , ref ) raise ib_ex . InfobloxCannotDeleteObject ( response = jsonutils . loads ( r . content ) , ref = ref , content = r . content , code = r . status_code ) return self . _parse_reply ( r )
Remove an Infoblox object
17,013
def _remap_fields ( cls , kwargs ) : mapped = { } for key in kwargs : if key in cls . _remap : mapped [ cls . _remap [ key ] ] = kwargs [ key ] else : mapped [ key ] = kwargs [ key ] return mapped
Map fields from kwargs into dict acceptable by NIOS
17,014
def from_dict ( cls , eas_from_nios ) : if not eas_from_nios : return return cls ( { name : cls . _process_value ( ib_utils . try_value_to_bool , eas_from_nios [ name ] [ 'value' ] ) for name in eas_from_nios } )
Converts extensible attributes from the NIOS reply .
17,015
def to_dict ( self ) : return { name : { 'value' : self . _process_value ( str , value ) } for name , value in self . _ea_dict . items ( ) if not ( value is None or value == "" or value == [ ] ) }
Converts extensible attributes into the format suitable for NIOS .
17,016
def _process_value ( func , value ) : if isinstance ( value , ( list , tuple ) ) : return [ func ( item ) for item in value ] return func ( value )
Applies processing method for value or each element in it .
17,017
def from_dict ( cls , connector , ip_dict ) : mapping = cls . _global_field_processing . copy ( ) mapping . update ( cls . _custom_field_processing ) for field in mapping : if field in ip_dict : ip_dict [ field ] = mapping [ field ] ( ip_dict [ field ] ) return cls ( connector , ** ip_dict )
Build dict fields as SubObjects if needed .
17,018
def field_to_dict ( self , field ) : value = getattr ( self , field ) if isinstance ( value , ( list , tuple ) ) : return [ self . value_to_dict ( val ) for val in value ] return self . value_to_dict ( value )
Read field value and converts to dict if possible
17,019
def to_dict ( self , search_fields = None ) : fields = self . _fields if search_fields == 'update' : fields = self . _search_for_update_fields elif search_fields == 'all' : fields = self . _all_searchable_fields elif search_fields == 'exclude' : fields = [ field for field in self . _fields if field in self . _updateable_search_fields or field not in self . _search_for_update_fields ] return { field : self . field_to_dict ( field ) for field in fields if getattr ( self , field , None ) is not None }
Builds dict without None object fields
17,020
def fetch ( self , only_ref = False ) : if self . ref : reply = self . connector . get_object ( self . ref , return_fields = self . return_fields ) if reply : self . update_from_dict ( reply ) return True search_dict = self . to_dict ( search_fields = 'update' ) return_fields = [ ] if only_ref else self . return_fields reply = self . connector . get_object ( self . infoblox_type , search_dict , return_fields = return_fields ) if reply : self . update_from_dict ( reply [ 0 ] , only_ref = only_ref ) return True return False
Fetch object from NIOS by _ref or searchfields
17,021
def _ip_setter ( self , ipaddr_name , ipaddrs_name , ips ) : if isinstance ( ips , six . string_types ) : setattr ( self , ipaddr_name , ips ) elif isinstance ( ips , ( list , tuple ) ) and isinstance ( ips [ 0 ] , IP ) : setattr ( self , ipaddr_name , ips [ 0 ] . ip ) setattr ( self , ipaddrs_name , ips ) elif isinstance ( ips , IP ) : setattr ( self , ipaddr_name , ips . ip ) setattr ( self , ipaddrs_name , [ ips ] ) elif ips is None : setattr ( self , ipaddr_name , None ) setattr ( self , ipaddrs_name , None ) else : raise ValueError ( "Invalid format of ip passed in: %s." "Should be string or list of NIOS IP objects." % ips )
Setter for ip fields
17,022
def mac ( self , mac ) : self . _mac = mac if mac : self . duid = ib_utils . generate_duid ( mac ) elif not hasattr ( self , 'duid' ) : self . duid = None
Set mac and duid fields
17,023
def render_property ( property ) : if 'type' in property and property [ 'type' ] in PROPERTY_FIELDS : fields = { } for field in PROPERTY_FIELDS [ property [ 'type' ] ] : if type ( field ) is tuple : fields [ field [ 0 ] ] = '(( .properties.{}.{} ))' . format ( property [ 'name' ] , field [ 1 ] ) else : fields [ field ] = '(( .properties.{}.{} ))' . format ( property [ 'name' ] , field ) out = { property [ 'name' ] : fields } else : if property . get ( 'is_reference' , False ) : out = { property [ 'name' ] : property [ 'default' ] } else : out = { property [ 'name' ] : '(( .properties.{}.value ))' . format ( property [ 'name' ] ) } return out
Render a property for bosh manifest according to its type .
17,024
def match ( obj , matchers = TYPES ) : buf = get_bytes ( obj ) for matcher in matchers : if matcher . match ( buf ) : return matcher return None
Matches the given input againts the available file type matchers .
17,025
def signature ( array ) : length = len ( array ) index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length return array [ : index ]
Returns the first 262 bytes of the given bytearray as part of the file header signature .
17,026
def get_bytes ( obj ) : try : obj = obj . read ( _NUM_SIGNATURE_BYTES ) except AttributeError : pass kind = type ( obj ) if kind is bytearray : return signature ( obj ) if kind is str : return get_signature_bytes ( obj ) if kind is bytes : return signature ( obj ) if kind is memoryview : return signature ( obj ) . tolist ( ) raise TypeError ( 'Unsupported type as file input: %s' % kind )
Infers the input type and reads the first 262 bytes returning a sliced bytearray .
17,027
def get_type ( mime = None , ext = None ) : for kind in types : if kind . extension is ext or kind . mime is mime : return kind return None
Returns the file type instance searching by MIME type or file extension .
17,028
def open ( self , encoding = None ) : try : if IS_GZIPPED_FILE . search ( self . _filename ) : _file = gzip . open ( self . _filename , 'rb' ) else : if encoding : _file = io . open ( self . _filename , 'r' , encoding = encoding , errors = 'replace' ) elif self . _encoding : _file = io . open ( self . _filename , 'r' , encoding = self . _encoding , errors = 'replace' ) else : _file = io . open ( self . _filename , 'r' , errors = 'replace' ) except IOError , e : self . _log_warning ( str ( e ) ) _file = None self . close ( ) return _file
Opens the file with the appropriate call
17,029
def close ( self ) : if not self . active : return self . active = False if self . _file : self . _file . close ( ) self . _sincedb_update_position ( force_update = True ) if self . _current_event : event = '\n' . join ( self . _current_event ) self . _current_event . clear ( ) self . _callback_wrapper ( [ event ] )
Closes all currently open file pointers
17,030
def _ensure_file_is_good ( self , current_time ) : if self . _last_file_mapping_update and current_time - self . _last_file_mapping_update <= self . _stat_interval : return self . _last_file_mapping_update = time . time ( ) try : st = os . stat ( self . _filename ) except EnvironmentError , err : if err . errno == errno . ENOENT : self . _log_info ( 'file removed' ) self . close ( ) return raise fid = self . get_file_id ( st ) if fid != self . _fid : self . _log_info ( 'file rotated' ) self . close ( ) elif self . _file . tell ( ) > st . st_size : if st . st_size == 0 and self . _ignore_truncate : self . _logger . info ( "[{0}] - file size is 0 {1}. " . format ( fid , self . _filename ) + "If you use another tool (i.e. logrotate) to truncate " + "the file, your application may continue to write to " + "the offset it last wrote later. In such a case, we'd " + "better do nothing here" ) return self . _log_info ( 'file truncated' ) self . _update_file ( seek_to_end = False ) elif REOPEN_FILES : self . _log_debug ( 'file reloaded (non-linux)' ) position = self . _file . tell ( ) self . _update_file ( seek_to_end = False ) if self . active : self . _file . seek ( position , os . SEEK_SET )
Every N seconds ensures that the file we are tailing is the file we expect to be tailing
17,031
def _run_pass ( self ) : while True : try : data = self . _file . read ( 4096 ) except IOError , e : if e . errno == errno . ESTALE : self . active = False return False lines = self . _buffer_extract ( data ) if not lines : if self . _current_event and time . time ( ) - self . _last_activity > 1 : event = '\n' . join ( self . _current_event ) self . _current_event . clear ( ) self . _callback_wrapper ( [ event ] ) break self . _last_activity = time . time ( ) if self . _multiline_regex_after or self . _multiline_regex_before : events = multiline_merge ( lines , self . _current_event , self . _multiline_regex_after , self . _multiline_regex_before ) else : events = lines if events : self . _callback_wrapper ( events ) if self . _sincedb_path : current_line_count = len ( lines ) self . _sincedb_update_position ( lines = current_line_count ) self . _sincedb_update_position ( )
Read lines from a file and performs a callback against them
17,032
def _sincedb_init ( self ) : if not self . _sincedb_path : return if not os . path . exists ( self . _sincedb_path ) : self . _log_debug ( 'initializing sincedb sqlite schema' ) conn = sqlite3 . connect ( self . _sincedb_path , isolation_level = None ) conn . execute ( ) conn . close ( )
Initializes the sincedb schema in an sqlite db
17,033
def _sincedb_update_position ( self , lines = 0 , force_update = False ) : if not self . _sincedb_path : return False self . _line_count = self . _line_count + lines old_count = self . _line_count_sincedb lines = self . _line_count current_time = int ( time . time ( ) ) if not force_update : if self . _last_sincedb_write and current_time - self . _last_sincedb_write <= self . _sincedb_write_interval : return False if old_count == lines : return False self . _sincedb_init ( ) self . _last_sincedb_write = current_time self . _log_debug ( 'updating sincedb to {0}' . format ( lines ) ) conn = sqlite3 . connect ( self . _sincedb_path , isolation_level = None ) cursor = conn . cursor ( ) query = 'insert or replace into sincedb (fid, filename) values (:fid, :filename);' cursor . execute ( query , { 'fid' : self . _fid , 'filename' : self . _filename } ) query = 'update sincedb set position = :position where fid = :fid and filename = :filename' cursor . execute ( query , { 'fid' : self . _fid , 'filename' : self . _filename , 'position' : lines , } ) conn . close ( ) self . _line_count_sincedb = lines return True
Retrieves the starting position from the sincedb sql db for a given file Returns a boolean representing whether or not it updated the record
17,034
def _sincedb_start_position ( self ) : if not self . _sincedb_path : return None self . _sincedb_init ( ) self . _log_debug ( 'retrieving start_position from sincedb' ) conn = sqlite3 . connect ( self . _sincedb_path , isolation_level = None ) cursor = conn . cursor ( ) cursor . execute ( 'select position from sincedb where fid = :fid and filename = :filename' , { 'fid' : self . _fid , 'filename' : self . _filename } ) start_position = None for row in cursor . fetchall ( ) : start_position , = row return start_position
Retrieves the starting position from the sincedb sql db for a given file
17,035
def _update_file ( self , seek_to_end = True ) : try : self . close ( ) self . _file = self . open ( ) except IOError : pass else : if not self . _file : return self . active = True try : st = os . stat ( self . _filename ) except EnvironmentError , err : if err . errno == errno . ENOENT : self . _log_info ( 'file removed' ) self . close ( ) fid = self . get_file_id ( st ) if not self . _fid : self . _fid = fid if fid != self . _fid : self . _log_info ( 'file rotated' ) self . close ( ) elif seek_to_end : self . _seek_to_end ( )
Open the file for tailing
17,036
def tail ( self , fname , encoding , window , position = None ) : if window <= 0 : raise ValueError ( 'invalid window %r' % window ) encodings = ENCODINGS if encoding : encodings = [ encoding ] + ENCODINGS for enc in encodings : try : f = self . open ( encoding = enc ) if f : return self . tail_read ( f , window , position = position ) return False except IOError , err : if err . errno == errno . ENOENT : return [ ] raise except UnicodeDecodeError : pass
Read last N lines from file fname .
17,037
def create_transport ( beaver_config , logger ) : transport_str = beaver_config . get ( 'transport' ) if '.' not in transport_str : module_path = 'beaver.transports.%s_transport' % transport_str . lower ( ) class_name = '%sTransport' % transport_str . title ( ) else : try : module_path , class_name = transport_str . rsplit ( '.' , 1 ) except ValueError : raise Exception ( 'Invalid transport {0}' . format ( beaver_config . get ( 'transport' ) ) ) _module = __import__ ( module_path , globals ( ) , locals ( ) , class_name , - 1 ) transport_class = getattr ( _module , class_name ) transport = transport_class ( beaver_config = beaver_config , logger = logger ) return transport
Creates and returns a transport object
17,038
def update_files ( self ) : if self . _update_time and int ( time . time ( ) ) - self . _update_time < self . _discover_interval : return self . _update_time = int ( time . time ( ) ) possible_files = [ ] files = [ ] if len ( self . _beaver_config . get ( 'globs' ) ) > 0 : extend_files = files . extend for name , exclude in self . _beaver_config . get ( 'globs' ) . items ( ) : globbed = [ os . path . realpath ( filename ) for filename in eglob ( name , exclude ) ] extend_files ( globbed ) self . _beaver_config . addglob ( name , globbed ) self . _callback ( ( "addglob" , ( name , globbed ) ) ) else : append_files = files . append for name in self . listdir ( ) : append_files ( os . path . realpath ( os . path . join ( self . _folder , name ) ) ) for absname in files : try : st = os . stat ( absname ) except EnvironmentError , err : if err . errno != errno . ENOENT : raise else : if not stat . S_ISREG ( st . st_mode ) : continue append_possible_files = possible_files . append fid = self . get_file_id ( st ) append_possible_files ( ( fid , absname ) ) new_files = [ fname for fid , fname in possible_files if fid not in self . _tails ] self . watch ( new_files )
Ensures all files are properly loaded . Detects new files file removals file rotation and truncation . On non - linux platforms it will also manually reload the file for tailing . Note that this hack is necessary because EOF is cached on BSD systems .
17,039
def close ( self , signalnum = None , frame = None ) : self . _running = False self . _log_debug ( "Closing all tail objects" ) self . _active = False for fid in self . _tails : self . _tails [ fid ] . close ( ) for n in range ( 0 , self . _number_of_consumer_processes ) : if self . _proc [ n ] is not None and self . _proc [ n ] . is_alive ( ) : self . _logger . debug ( "Terminate Process: " + str ( n ) ) self . _proc [ n ] . terminate ( ) self . _proc [ n ] . join ( )
Closes all currently open Tail objects
17,040
def expand_paths ( path ) : pr = itertools . product parts = MAGIC_BRACKETS . findall ( path ) if not path : return if not parts : return [ path ] permutations = [ [ ( p [ 0 ] , i , 1 ) for i in p [ 1 ] . split ( ',' ) ] for p in parts ] return [ _replace_all ( path , i ) for i in pr ( * permutations ) ]
When given a path with brackets expands it to return all permutations of the path with expanded brackets similar to ant .
17,041
def multiline_merge ( lines , current_event , re_after , re_before ) : events = [ ] for line in lines : if re_before and re_before . match ( line ) : current_event . append ( line ) elif re_after and current_event and re_after . match ( current_event [ - 1 ] ) : current_event . append ( line ) else : if current_event : events . append ( '\n' . join ( current_event ) ) current_event . clear ( ) current_event . append ( line ) return events
Merge multi - line events based .
17,042
def create_ssh_tunnel ( beaver_config , logger = None ) : if not beaver_config . use_ssh_tunnel ( ) : return None logger . info ( "Proxying transport using through local ssh tunnel" ) return BeaverSshTunnel ( beaver_config , logger = logger )
Returns a BeaverSshTunnel object if the current config requires us to
17,043
def poll ( self ) : if self . _subprocess is not None : self . _subprocess . poll ( ) time . sleep ( self . _beaver_config . get ( 'subprocess_poll_sleep' ) )
Poll attached subprocess until it is available
17,044
def close ( self ) : if self . _subprocess is not None : os . killpg ( self . _subprocess . pid , signal . SIGTERM ) self . _subprocess = None
Close child subprocess
17,045
def _to_unicode ( self , data , encoding , errors = 'strict' ) : if ( len ( data ) >= 4 ) and ( data [ : 2 ] == '\xfe\xff' ) and ( data [ 2 : 4 ] != '\x00\x00' ) : encoding = 'utf-16be' data = data [ 2 : ] elif ( len ( data ) >= 4 ) and ( data [ : 2 ] == '\xff\xfe' ) and ( data [ 2 : 4 ] != '\x00\x00' ) : encoding = 'utf-16le' data = data [ 2 : ] elif data [ : 3 ] == '\xef\xbb\xbf' : encoding = 'utf-8' data = data [ 3 : ] elif data [ : 4 ] == '\x00\x00\xfe\xff' : encoding = 'utf-32be' data = data [ 4 : ] elif data [ : 4 ] == '\xff\xfe\x00\x00' : encoding = 'utf-32le' data = data [ 4 : ] newdata = unicode ( data , encoding , errors ) return newdata
Given a string and its encoding decodes the string into Unicode . %encoding is a string recognized by encodings . aliases
17,046
def reconnect ( self ) : try : self . conn . close ( ) except Exception , e : self . logger . warn ( e ) self . createConnection ( ) return True
Allows reconnection from when a handled TransportException is thrown
17,047
def _check_connections ( self ) : for server in self . _servers : if self . _is_reachable ( server ) : server [ 'down_until' ] = 0 else : server [ 'down_until' ] = time . time ( ) + 5
Checks if all configured redis servers are reachable
17,048
def _is_reachable ( self , server ) : try : server [ 'redis' ] . ping ( ) return True except UserWarning : self . _logger . warn ( 'Cannot reach redis server: ' + server [ 'url' ] ) except Exception : self . _logger . warn ( 'Cannot reach redis server: ' + server [ 'url' ] ) return False
Checks if the given redis server is reachable
17,049
def invalidate ( self ) : super ( RedisTransport , self ) . invalidate ( ) for server in self . _servers : server [ 'redis' ] . connection_pool . disconnect ( ) return False
Invalidates the current transport and disconnects all redis connections
17,050
def callback ( self , filename , lines , ** kwargs ) : self . _logger . debug ( 'Redis transport called' ) timestamp = self . get_timestamp ( ** kwargs ) if kwargs . get ( 'timestamp' , False ) : del kwargs [ 'timestamp' ] namespaces = self . _beaver_config . get_field ( 'redis_namespace' , filename ) if not namespaces : namespaces = self . _namespace namespaces = namespaces . split ( "," ) self . _logger . debug ( 'Got namespaces: ' . join ( namespaces ) ) data_type = self . _data_type self . _logger . debug ( 'Got data type: ' + data_type ) server = self . _get_next_server ( ) self . _logger . debug ( 'Got redis server: ' + server [ 'url' ] ) pipeline = server [ 'redis' ] . pipeline ( transaction = False ) callback_map = { self . LIST_DATA_TYPE : pipeline . rpush , self . CHANNEL_DATA_TYPE : pipeline . publish , } callback_method = callback_map [ data_type ] for line in lines : for namespace in namespaces : callback_method ( namespace . strip ( ) , self . format ( filename , line , timestamp , ** kwargs ) ) try : pipeline . execute ( ) except redis . exceptions . RedisError , exception : self . _logger . warn ( 'Cannot push lines to redis server: ' + server [ 'url' ] ) raise TransportException ( exception )
Sends log lines to redis servers
17,051
def _get_next_server ( self ) : current_try = 0 max_tries = len ( self . _servers ) while current_try < max_tries : server_index = self . _raise_server_index ( ) server = self . _servers [ server_index ] down_until = server [ 'down_until' ] self . _logger . debug ( 'Checking server ' + str ( current_try + 1 ) + '/' + str ( max_tries ) + ': ' + server [ 'url' ] ) if down_until == 0 : self . _logger . debug ( 'Elected server: ' + server [ 'url' ] ) return server if down_until < time . time ( ) : if self . _is_reachable ( server ) : server [ 'down_until' ] = 0 self . _logger . debug ( 'Elected server: ' + server [ 'url' ] ) return server else : self . _logger . debug ( 'Server still unavailable: ' + server [ 'url' ] ) server [ 'down_until' ] = time . time ( ) + 5 current_try += 1 raise TransportException ( 'Cannot reach any redis server' )
Returns a valid redis server or raises a TransportException
17,052
def valid ( self ) : valid_servers = 0 for server in self . _servers : if server [ 'down_until' ] <= time . time ( ) : valid_servers += 1 return valid_servers > 0
Returns whether or not the transport can send data to any redis server
17,053
def format ( self , filename , line , timestamp , ** kwargs ) : line = unicode ( line . encode ( "utf-8" ) , "utf-8" , errors = "ignore" ) formatter = self . _beaver_config . get_field ( 'format' , filename ) if formatter not in self . _formatters : formatter = self . _default_formatter data = { self . _fields . get ( 'type' ) : kwargs . get ( 'type' ) , self . _fields . get ( 'tags' ) : kwargs . get ( 'tags' ) , '@timestamp' : timestamp , self . _fields . get ( 'host' ) : self . _current_host , self . _fields . get ( 'file' ) : filename , self . _fields . get ( 'message' ) : line } if self . _logstash_version == 0 : data [ '@source' ] = 'file://{0}' . format ( filename ) data [ '@fields' ] = kwargs . get ( 'fields' ) else : data [ '@version' ] = self . _logstash_version fields = kwargs . get ( 'fields' ) for key in fields : data [ key ] = fields . get ( key ) return self . _formatters [ formatter ] ( data )
Returns a formatted log line
17,054
def get_timestamp ( self , ** kwargs ) : timestamp = kwargs . get ( 'timestamp' ) if not timestamp : now = datetime . datetime . utcnow ( ) timestamp = now . strftime ( "%Y-%m-%dT%H:%M:%S" ) + ".%03d" % ( now . microsecond / 1000 ) + "Z" return timestamp
Retrieves the timestamp for a given set of data
17,055
def _make_executable ( path ) : os . chmod ( path , os . stat ( path ) . st_mode | stat . S_IXUSR | stat . S_IXGRP | stat . S_IXOTH )
Make the file at path executable .
17,056
def build_parser ( ) : parser = argparse . ArgumentParser ( description = __doc__ , formatter_class = argparse . ArgumentDefaultsHelpFormatter ) parser . add_argument ( "--in_path" , "-i" , required = True , help = "file path to input GCT(x) file" ) parser . add_argument ( "--rid" , nargs = "+" , help = "filepath to grp file or string array for including rows" ) parser . add_argument ( "--cid" , nargs = "+" , help = "filepath to grp file or string array for including cols" ) parser . add_argument ( "--exclude_rid" , "-er" , nargs = "+" , help = "filepath to grp file or string array for excluding rows" ) parser . add_argument ( "--exclude_cid" , "-ec" , nargs = "+" , help = "filepath to grp file or string array for excluding cols" ) parser . add_argument ( "--out_name" , "-o" , default = "ds_subsetted.gct" , help = "what to name the output file" ) parser . add_argument ( "--out_type" , default = "gct" , choices = [ "gct" , "gctx" ] , help = "whether to write output as GCT or GCTx" ) parser . add_argument ( "--verbose" , "-v" , action = "store_true" , default = False , help = "whether to increase the # of messages reported" ) return parser
Build argument parser .
17,057
def _read_arg ( arg ) : if arg is None : arg_out = arg else : if len ( arg ) == 1 and os . path . exists ( arg [ 0 ] ) : arg_out = grp . read ( arg [ 0 ] ) else : arg_out = arg assert isinstance ( arg_out , list ) , "arg_out must be a list." assert type ( arg_out [ 0 ] ) == str , "arg_out must be a list of strings." return arg_out
If arg is a list with 1 element that corresponds to a valid file path use set_io . grp to read the grp file . Otherwise check that arg is a list of strings .
17,058
def read ( file_path ) : actual_file_path = os . path . expanduser ( file_path ) with open ( actual_file_path , 'r' ) as f : lines = f . readlines ( ) gmt = [ ] for line_num , line in enumerate ( lines ) : fields = line . split ( '\t' ) assert len ( fields ) > 2 , ( "Each line must have at least 3 tab-delimited items. " + "line_num: {}, fields: {}" ) . format ( line_num , fields ) fields [ - 1 ] = fields [ - 1 ] . rstrip ( ) entries = fields [ 2 : ] entries = [ x for x in entries if x ] assert len ( set ( entries ) ) == len ( entries ) , ( "There should not be duplicate entries for the same set. " + "line_num: {}, entries: {}" ) . format ( line_num , entries ) line_dict = { SET_IDENTIFIER_FIELD : fields [ 0 ] , SET_DESC_FIELD : fields [ 1 ] , SET_MEMBERS_FIELD : entries } gmt . append ( line_dict ) verify_gmt_integrity ( gmt ) return gmt
Read a gmt file at the path specified by file_path .
17,059
def verify_gmt_integrity ( gmt ) : set_ids = [ d [ SET_IDENTIFIER_FIELD ] for d in gmt ] assert len ( set ( set_ids ) ) == len ( set_ids ) , ( "Set identifiers should be unique. set_ids: {}" . format ( set_ids ) )
Make sure that set ids are unique .
17,060
def write ( gmt , out_path ) : with open ( out_path , 'w' ) as f : for _ , each_dict in enumerate ( gmt ) : f . write ( each_dict [ SET_IDENTIFIER_FIELD ] + '\t' ) f . write ( each_dict [ SET_DESC_FIELD ] + '\t' ) f . write ( '\t' . join ( [ str ( entry ) for entry in each_dict [ SET_MEMBERS_FIELD ] ] ) ) f . write ( '\n' )
Write a GMT to a text file .
17,061
def parse ( gctx_file_path , convert_neg_666 = True , rid = None , cid = None , ridx = None , cidx = None , row_meta_only = False , col_meta_only = False , make_multiindex = False ) : full_path = os . path . expanduser ( gctx_file_path ) if not os . path . exists ( full_path ) : err_msg = "The given path to the gctx file cannot be found. full_path: {}" logger . error ( err_msg . format ( full_path ) ) raise Exception ( err_msg . format ( full_path ) ) logger . info ( "Reading GCTX: {}" . format ( full_path ) ) gctx_file = h5py . File ( full_path , "r" ) if row_meta_only : row_dset = gctx_file [ row_meta_group_node ] row_meta = parse_metadata_df ( "row" , row_dset , convert_neg_666 ) ( sorted_ridx , sorted_cidx ) = check_and_order_id_inputs ( rid , ridx , cid , cidx , row_meta , None ) gctx_file . close ( ) row_meta = row_meta . iloc [ sorted_ridx ] return row_meta elif col_meta_only : col_dset = gctx_file [ col_meta_group_node ] col_meta = parse_metadata_df ( "col" , col_dset , convert_neg_666 ) ( sorted_ridx , sorted_cidx ) = check_and_order_id_inputs ( rid , ridx , cid , cidx , None , col_meta ) gctx_file . close ( ) col_meta = col_meta . iloc [ sorted_cidx ] return col_meta else : row_dset = gctx_file [ row_meta_group_node ] row_meta = parse_metadata_df ( "row" , row_dset , convert_neg_666 ) col_dset = gctx_file [ col_meta_group_node ] col_meta = parse_metadata_df ( "col" , col_dset , convert_neg_666 ) ( sorted_ridx , sorted_cidx ) = check_and_order_id_inputs ( rid , ridx , cid , cidx , row_meta , col_meta ) data_dset = gctx_file [ data_node ] data_df = parse_data_df ( data_dset , sorted_ridx , sorted_cidx , row_meta , col_meta ) row_meta = row_meta . iloc [ sorted_ridx ] col_meta = col_meta . iloc [ sorted_cidx ] my_version = gctx_file . attrs [ version_node ] if type ( my_version ) == np . ndarray : my_version = my_version [ 0 ] gctx_file . close ( ) my_gctoo = GCToo . GCToo ( data_df = data_df , row_metadata_df = row_meta , col_metadata_df = col_meta , src = full_path , version = my_version , make_multiindex = make_multiindex ) return my_gctoo
Primary method of script . Reads in path to a gctx file and parses into GCToo object .
17,062
def check_id_idx_exclusivity ( id , idx ) : if ( id is not None and idx is not None ) : msg = ( "'id' and 'idx' fields can't both not be None," + " please specify subset in only one of these fields" ) logger . error ( msg ) raise Exception ( "parse_gctx.check_id_idx_exclusivity: " + msg ) elif id is not None : return ( "id" , id ) elif idx is not None : return ( "idx" , idx ) else : return ( None , [ ] )
Makes sure user didn t provide both ids and idx values to subset by .
17,063
def parse_data_df ( data_dset , ridx , cidx , row_meta , col_meta ) : if len ( ridx ) == len ( row_meta . index ) and len ( cidx ) == len ( col_meta . index ) : data_array = np . empty ( data_dset . shape , dtype = np . float32 ) data_dset . read_direct ( data_array ) data_array = data_array . transpose ( ) elif len ( ridx ) <= len ( cidx ) : first_subset = data_dset [ : , ridx ] . astype ( np . float32 ) data_array = first_subset [ cidx , : ] . transpose ( ) elif len ( cidx ) < len ( ridx ) : first_subset = data_dset [ cidx , : ] . astype ( np . float32 ) data_array = first_subset [ : , ridx ] . transpose ( ) data_df = pd . DataFrame ( data_array , index = row_meta . index [ ridx ] , columns = col_meta . index [ cidx ] ) return data_df
Parses in data_df from hdf5 subsetting if specified .
17,064
def get_column_metadata ( gctx_file_path , convert_neg_666 = True ) : full_path = os . path . expanduser ( gctx_file_path ) gctx_file = h5py . File ( full_path , "r" ) col_dset = gctx_file [ col_meta_group_node ] col_meta = parse_metadata_df ( "col" , col_dset , convert_neg_666 ) gctx_file . close ( ) return col_meta
Opens . gctx file and returns only column metadata
17,065
def get_row_metadata ( gctx_file_path , convert_neg_666 = True ) : full_path = os . path . expanduser ( gctx_file_path ) gctx_file = h5py . File ( full_path , "r" ) row_dset = gctx_file [ row_meta_group_node ] row_meta = parse_metadata_df ( "row" , row_dset , convert_neg_666 ) gctx_file . close ( ) return row_meta
Opens . gctx file and returns only row metadata
17,066
def multi_index_df_to_component_dfs ( multi_index_df , rid = "rid" , cid = "cid" ) : rids = list ( multi_index_df . index . get_level_values ( rid ) ) cids = list ( multi_index_df . columns . get_level_values ( cid ) ) if isinstance ( multi_index_df . index , pd . MultiIndex ) : if len ( multi_index_df . index . names ) > 1 : mi_df_index = multi_index_df . index . droplevel ( rid ) rhds = list ( mi_df_index . names ) row_metadata = np . array ( [ mi_df_index . get_level_values ( level ) . values for level in list ( rhds ) ] ) . T else : rhds = [ ] row_metadata = [ ] else : rhds = [ ] row_metadata = [ ] if isinstance ( multi_index_df . columns , pd . MultiIndex ) : if len ( multi_index_df . columns . names ) > 1 : mi_df_columns = multi_index_df . columns . droplevel ( cid ) chds = list ( mi_df_columns . names ) col_metadata = np . array ( [ mi_df_columns . get_level_values ( level ) . values for level in list ( chds ) ] ) . T else : chds = [ ] col_metadata = [ ] else : chds = [ ] col_metadata = [ ] row_metadata_df = pd . DataFrame . from_records ( row_metadata , index = pd . Index ( rids , name = "rid" ) , columns = pd . Index ( rhds , name = "rhd" ) ) col_metadata_df = pd . DataFrame . from_records ( col_metadata , index = pd . Index ( cids , name = "cid" ) , columns = pd . Index ( chds , name = "chd" ) ) data_df = pd . DataFrame ( multi_index_df . values , index = pd . Index ( rids , name = "rid" ) , columns = pd . Index ( cids , name = "cid" ) ) return data_df , row_metadata_df , col_metadata_df
Convert a multi - index df into 3 component dfs .
17,067
def check_df ( self , df ) : if isinstance ( df , pd . DataFrame ) : if not df . index . is_unique : repeats = df . index [ df . index . duplicated ( ) ] . values msg = "Index values must be unique but aren't. The following entries appear more than once: {}" . format ( repeats ) self . logger . error ( msg ) raise Exception ( "GCToo GCToo.check_df " + msg ) if not df . columns . is_unique : repeats = df . columns [ df . columns . duplicated ( ) ] . values msg = "Columns values must be unique but aren't. The following entries appear more than once: {}" . format ( repeats ) raise Exception ( "GCToo GCToo.check_df " + msg ) else : return True else : msg = "expected Pandas DataFrame, got something else: {} of type: {}" . format ( df , type ( df ) ) self . logger . error ( msg ) raise Exception ( "GCToo GCToo.check_df " + msg )
Verifies that df is a pandas DataFrame instance and that its index and column values are unique .
17,068
def are_genes_in_api ( my_clue_api_client , gene_symbols ) : if len ( gene_symbols ) > 0 : query_gene_symbols = gene_symbols if type ( gene_symbols ) is list else list ( gene_symbols ) query_result = my_clue_api_client . run_filter_query ( resource_name , { "where" : { "gene_symbol" : { "inq" : query_gene_symbols } } , "fields" : { "gene_symbol" : True } } ) logger . debug ( "query_result: {}" . format ( query_result ) ) r = set ( [ x [ "gene_symbol" ] for x in query_result ] ) return r else : logger . warning ( "provided gene_symbols was empty, cannot run query" ) return set ( )
determine if genes are present in the API
17,069
def write ( gctoo , out_fname , data_null = "NaN" , metadata_null = "-666" , filler_null = "-666" , data_float_format = "%.4f" ) : if not out_fname . endswith ( ".gct" ) : out_fname += ".gct" f = open ( out_fname , "w" ) dims = [ str ( gctoo . data_df . shape [ 0 ] ) , str ( gctoo . data_df . shape [ 1 ] ) , str ( gctoo . row_metadata_df . shape [ 1 ] ) , str ( gctoo . col_metadata_df . shape [ 1 ] ) ] write_version_and_dims ( VERSION , dims , f ) write_top_half ( f , gctoo . row_metadata_df , gctoo . col_metadata_df , metadata_null , filler_null ) write_bottom_half ( f , gctoo . row_metadata_df , gctoo . data_df , data_null , data_float_format , metadata_null ) f . close ( ) logger . info ( "GCT has been written to {}" . format ( out_fname ) )
Write a gctoo object to a gct file .
17,070
def write_version_and_dims ( version , dims , f ) : f . write ( ( "#" + version + "\n" ) ) f . write ( ( dims [ 0 ] + "\t" + dims [ 1 ] + "\t" + dims [ 2 ] + "\t" + dims [ 3 ] + "\n" ) )
Write first two lines of gct file .
17,071
def append_dims_and_file_extension ( fname , data_df ) : if not fname . endswith ( ".gct" ) : out_fname = '{0}_n{1}x{2}.gct' . format ( fname , data_df . shape [ 1 ] , data_df . shape [ 0 ] ) return out_fname else : basename = os . path . splitext ( fname ) [ 0 ] out_fname = '{0}_n{1}x{2}.gct' . format ( basename , data_df . shape [ 1 ] , data_df . shape [ 0 ] ) return out_fname
Append dimensions and file extension to output filename . N . B . Dimensions are cols x rows .
17,072
def robust_zscore ( mat , ctrl_mat = None , min_mad = 0.1 ) : if ctrl_mat is not None : medians = ctrl_mat . median ( axis = 1 ) median_devs = abs ( ctrl_mat . subtract ( medians , axis = 0 ) ) else : medians = mat . median ( axis = 1 ) median_devs = abs ( mat . subtract ( medians , axis = 0 ) ) sub = mat . subtract ( medians , axis = 'index' ) mads = median_devs . median ( axis = 1 ) mads = mads . clip ( lower = min_mad ) zscore_df = sub . divide ( mads * 1.4826 , axis = 'index' ) return zscore_df . round ( rounding_precision )
Robustly z - score a pandas df along the rows .
17,073
def parse ( file_path , convert_neg_666 = True , rid = None , cid = None , ridx = None , cidx = None , row_meta_only = False , col_meta_only = False , make_multiindex = False ) : if file_path . endswith ( ".gct" ) : out = parse_gct . parse ( file_path , convert_neg_666 = convert_neg_666 , rid = rid , cid = cid , ridx = ridx , cidx = cidx , row_meta_only = row_meta_only , col_meta_only = col_meta_only , make_multiindex = make_multiindex ) elif file_path . endswith ( ".gctx" ) : out = parse_gctx . parse ( file_path , convert_neg_666 = convert_neg_666 , rid = rid , cid = cid , ridx = ridx , cidx = cidx , row_meta_only = row_meta_only , col_meta_only = col_meta_only , make_multiindex = make_multiindex ) else : err_msg = "File to parse must be .gct or .gctx!" logger . error ( err_msg ) raise Exception ( err_msg ) return out
Identifies whether file_path corresponds to a . gct or . gctx file and calls the correct corresponding parse method .
17,074
def get_upper_triangle ( correlation_matrix ) : upper_triangle = correlation_matrix . where ( np . triu ( np . ones ( correlation_matrix . shape ) , k = 1 ) . astype ( np . bool ) ) upper_tri_df = upper_triangle . stack ( ) . reset_index ( level = 1 ) upper_tri_df . columns = [ 'rid' , 'corr' ] upper_tri_df . reset_index ( level = 0 , inplace = True ) upper_tri_df [ 'corr' ] = upper_tri_df [ 'corr' ] . clip ( lower = 0 ) return upper_tri_df . round ( rounding_precision )
Extract upper triangle from a square matrix . Negative values are set to 0 .
17,075
def calculate_weights ( correlation_matrix , min_wt ) : np . fill_diagonal ( correlation_matrix . values , np . nan ) correlation_matrix = correlation_matrix . clip ( lower = 0 ) raw_weights = correlation_matrix . mean ( axis = 1 ) raw_weights = raw_weights . clip ( lower = min_wt ) weights = raw_weights / sum ( raw_weights ) return raw_weights . round ( rounding_precision ) , weights . round ( rounding_precision )
Calculate a weight for each profile based on its correlation to other replicates . Negative correlations are clipped to 0 and weights are clipped to be min_wt at the least .
17,076
def agg_wt_avg ( mat , min_wt = 0.01 , corr_metric = 'spearman' ) : assert mat . shape [ 1 ] > 0 , "mat is empty! mat: {}" . format ( mat ) if mat . shape [ 1 ] == 1 : out_sig = mat upper_tri_df = None raw_weights = None weights = None else : assert corr_metric in [ "spearman" , "pearson" ] corr_mat = mat . corr ( method = corr_metric ) upper_tri_df = get_upper_triangle ( corr_mat ) raw_weights , weights = calculate_weights ( corr_mat , min_wt ) weighted_values = mat * weights out_sig = weighted_values . sum ( axis = 1 ) return out_sig , upper_tri_df , raw_weights , weights
Aggregate a set of replicate profiles into a single signature using a weighted average .
17,077
def get_file_list ( wildcard ) : files = glob . glob ( os . path . expanduser ( wildcard ) ) return files
Search for files to be concatenated . Currently very basic but could expand to be more sophisticated .
17,078
def hstack ( gctoos , remove_all_metadata_fields = False , error_report_file = None , fields_to_remove = [ ] , reset_ids = False ) : row_meta_dfs = [ ] col_meta_dfs = [ ] data_dfs = [ ] srcs = [ ] for g in gctoos : row_meta_dfs . append ( g . row_metadata_df ) col_meta_dfs . append ( g . col_metadata_df ) data_dfs . append ( g . data_df ) srcs . append ( g . src ) logger . debug ( "shapes of row_meta_dfs: {}" . format ( [ x . shape for x in row_meta_dfs ] ) ) all_row_metadata_df = assemble_common_meta ( row_meta_dfs , fields_to_remove , srcs , remove_all_metadata_fields , error_report_file ) all_col_metadata_df = assemble_concatenated_meta ( col_meta_dfs , remove_all_metadata_fields ) all_data_df = assemble_data ( data_dfs , "horiz" ) assert all_data_df . shape [ 0 ] == all_row_metadata_df . shape [ 0 ] , "Number of rows in metadata does not match number of rows in data - all_data_df.shape[0]: {} all_row_metadata_df.shape[0]: {}" . format ( all_data_df . shape [ 0 ] , all_row_metadata_df . shape [ 0 ] ) assert all_data_df . shape [ 1 ] == all_col_metadata_df . shape [ 0 ] , "Number of columns in data does not match number of columns metadata - all_data_df.shape[1]: {} all_col_metadata_df.shape[0]: {}" . format ( all_data_df . shape [ 1 ] , all_col_metadata_df . shape [ 0 ] ) if reset_ids : do_reset_ids ( all_col_metadata_df , all_data_df , "horiz" ) logger . info ( "Build GCToo of all..." ) concated = GCToo . GCToo ( row_metadata_df = all_row_metadata_df , col_metadata_df = all_col_metadata_df , data_df = all_data_df ) return concated
Horizontally concatenate gctoos .
17,079
def assemble_concatenated_meta ( concated_meta_dfs , remove_all_metadata_fields ) : if remove_all_metadata_fields : for df in concated_meta_dfs : df . drop ( df . columns , axis = 1 , inplace = True ) all_concated_meta_df = pd . concat ( concated_meta_dfs , axis = 0 ) n_rows = all_concated_meta_df . shape [ 0 ] logger . debug ( "all_concated_meta_df.shape[0]: {}" . format ( n_rows ) ) n_rows_cumulative = sum ( [ df . shape [ 0 ] for df in concated_meta_dfs ] ) assert n_rows == n_rows_cumulative all_concated_meta_df_sorted = all_concated_meta_df . sort_index ( axis = 0 ) . sort_index ( axis = 1 ) return all_concated_meta_df_sorted
Assemble the concatenated metadata dfs together . For example if horizontally concatenating the concatenated metadata dfs are the column metadata dfs . Both indices are sorted .
17,080
def assemble_data ( data_dfs , concat_direction ) : if concat_direction == "horiz" : all_data_df = pd . concat ( data_dfs , axis = 1 ) n_cols = all_data_df . shape [ 1 ] logger . debug ( "all_data_df.shape[1]: {}" . format ( n_cols ) ) n_cols_cumulative = sum ( [ df . shape [ 1 ] for df in data_dfs ] ) assert n_cols == n_cols_cumulative elif concat_direction == "vert" : all_data_df = pd . concat ( data_dfs , axis = 0 ) n_rows = all_data_df . shape [ 0 ] logger . debug ( "all_data_df.shape[0]: {}" . format ( n_rows ) ) n_rows_cumulative = sum ( [ df . shape [ 0 ] for df in data_dfs ] ) assert n_rows == n_rows_cumulative all_data_df_sorted = all_data_df . sort_index ( axis = 0 ) . sort_index ( axis = 1 ) return all_data_df_sorted
Assemble the data dfs together . Both indices are sorted .
17,081
def do_reset_ids ( concatenated_meta_df , data_df , concat_direction ) : if concat_direction == "horiz" : assert concatenated_meta_df . index . equals ( data_df . columns ) , ( "cids in concatenated_meta_df do not agree with cids in data_df." ) reset_ids_in_meta_df ( concatenated_meta_df ) data_df . columns = pd . Index ( concatenated_meta_df . index . values ) elif concat_direction == "vert" : assert concatenated_meta_df . index . equals ( data_df . index ) , ( "rids in concatenated_meta_df do not agree with rids in data_df." ) reset_ids_in_meta_df ( concatenated_meta_df ) data_df . index = pd . Index ( concatenated_meta_df . index . values )
Reset ids in concatenated metadata and data dfs to unique integers and save the old ids in a metadata column .
17,082
def reset_ids_in_meta_df ( meta_df ) : original_index_name = meta_df . index . name meta_df . index . name = "old_id" meta_df . reset_index ( inplace = True ) meta_df . index . name = original_index_name
Meta_df is modified inplace .
17,083
def subset_gctoo ( gctoo , row_bool = None , col_bool = None , rid = None , cid = None , ridx = None , cidx = None , exclude_rid = None , exclude_cid = None ) : assert sum ( [ ( rid is not None ) , ( row_bool is not None ) , ( ridx is not None ) ] ) <= 1 , ( "Only one of rid, row_bool, and ridx can be provided." ) assert sum ( [ ( cid is not None ) , ( col_bool is not None ) , ( cidx is not None ) ] ) <= 1 , ( "Only one of cid, col_bool, and cidx can be provided." ) rows_to_keep = get_rows_to_keep ( gctoo , rid , row_bool , ridx , exclude_rid ) cols_to_keep = get_cols_to_keep ( gctoo , cid , col_bool , cidx , exclude_cid ) rows_to_keep_bools = gctoo . data_df . index . isin ( rows_to_keep ) cols_to_keep_bools = gctoo . data_df . columns . isin ( cols_to_keep ) out_gctoo = GCToo . GCToo ( src = gctoo . src , version = gctoo . version , data_df = gctoo . data_df . loc [ rows_to_keep_bools , cols_to_keep_bools ] , row_metadata_df = gctoo . row_metadata_df . loc [ rows_to_keep_bools , : ] , col_metadata_df = gctoo . col_metadata_df . loc [ cols_to_keep_bools , : ] ) assert out_gctoo . data_df . size > 0 , "Subsetting yielded an empty gct!" logger . info ( ( "Initial GCToo with {} rows and {} columns subsetted down to " + "{} rows and {} columns." ) . format ( gctoo . data_df . shape [ 0 ] , gctoo . data_df . shape [ 1 ] , out_gctoo . data_df . shape [ 0 ] , out_gctoo . data_df . shape [ 1 ] ) ) return out_gctoo
Extract a subset of data from a GCToo object in a variety of ways . The order of rows and columns will be preserved .
17,084
def get_rows_to_keep ( gctoo , rid = None , row_bool = None , ridx = None , exclude_rid = None ) : if rid is not None : assert type ( rid ) == list , "rid must be a list. rid: {}" . format ( rid ) rows_to_keep = [ gctoo_row for gctoo_row in gctoo . data_df . index if gctoo_row in rid ] num_missing_rids = len ( rid ) - len ( rows_to_keep ) if num_missing_rids != 0 : logger . info ( "{} rids were not found in the GCT." . format ( num_missing_rids ) ) elif row_bool is not None : assert len ( row_bool ) == gctoo . data_df . shape [ 0 ] , ( "row_bool must have length equal to gctoo.data_df.shape[0]. " + "len(row_bool): {}, gctoo.data_df.shape[0]: {}" . format ( len ( row_bool ) , gctoo . data_df . shape [ 0 ] ) ) rows_to_keep = gctoo . data_df . index [ row_bool ] . values elif ridx is not None : assert type ( ridx [ 0 ] ) is int , ( "ridx must be a list of integers. ridx[0]: {}, " + "type(ridx[0]): {}" ) . format ( ridx [ 0 ] , type ( ridx [ 0 ] ) ) assert max ( ridx ) <= gctoo . data_df . shape [ 0 ] , ( "ridx contains an integer larger than the number of rows in " + "the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}" ) . format ( max ( ridx ) , gctoo . data_df . shape [ 0 ] ) rows_to_keep = gctoo . data_df . index [ ridx ] . values else : rows_to_keep = gctoo . data_df . index . values if exclude_rid is not None : rows_to_keep = [ row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid ] return rows_to_keep
Figure out based on the possible row inputs which rows to keep .
17,085
def get_cols_to_keep ( gctoo , cid = None , col_bool = None , cidx = None , exclude_cid = None ) : if cid is not None : assert type ( cid ) == list , "cid must be a list. cid: {}" . format ( cid ) cols_to_keep = [ gctoo_col for gctoo_col in gctoo . data_df . columns if gctoo_col in cid ] num_missing_cids = len ( cid ) - len ( cols_to_keep ) if num_missing_cids != 0 : logger . info ( "{} cids were not found in the GCT." . format ( num_missing_cids ) ) elif col_bool is not None : assert len ( col_bool ) == gctoo . data_df . shape [ 1 ] , ( "col_bool must have length equal to gctoo.data_df.shape[1]. " + "len(col_bool): {}, gctoo.data_df.shape[1]: {}" . format ( len ( col_bool ) , gctoo . data_df . shape [ 1 ] ) ) cols_to_keep = gctoo . data_df . columns [ col_bool ] . values elif cidx is not None : assert type ( cidx [ 0 ] ) is int , ( "cidx must be a list of integers. cidx[0]: {}, " + "type(cidx[0]): {}" ) . format ( cidx [ 0 ] , type ( cidx [ 0 ] ) ) assert max ( cidx ) <= gctoo . data_df . shape [ 1 ] , ( "cidx contains an integer larger than the number of columns in " + "the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}" ) . format ( max ( cidx ) , gctoo . data_df . shape [ 1 ] ) cols_to_keep = gctoo . data_df . columns [ cidx ] . values else : cols_to_keep = gctoo . data_df . columns . values if exclude_cid is not None : cols_to_keep = [ col_to_keep for col_to_keep in cols_to_keep if col_to_keep not in exclude_cid ] return cols_to_keep
Figure out based on the possible columns inputs which columns to keep .
17,086
def read ( in_path ) : assert os . path . exists ( in_path ) , "The following GRP file can't be found. in_path: {}" . format ( in_path ) with open ( in_path , "r" ) as f : lines = f . readlines ( ) grp = [ line . strip ( ) for line in lines if line and not re . match ( "^#" , line ) ] return grp
Read a grp file at the path specified by in_path .
17,087
def write ( grp , out_path ) : with open ( out_path , "w" ) as f : for x in grp : f . write ( str ( x ) + "\n" )
Write a GRP to a text file .
17,088
def make_specified_size_gctoo ( og_gctoo , num_entries , dim ) : assert dim in [ "row" , "col" ] , "dim specified must be either 'row' or 'col'" dim_index = 0 if "row" == dim else 1 assert num_entries <= og_gctoo . data_df . shape [ dim_index ] , ( "number of entries must be smaller than dimension being " "subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}" . format ( num_entries , dim , dim_index , og_gctoo . data_df . shape [ dim_index ] ) ) if dim == "col" : columns = [ x for x in og_gctoo . data_df . columns . values ] numpy . random . shuffle ( columns ) columns = columns [ 0 : num_entries ] rows = og_gctoo . data_df . index . values else : rows = [ x for x in og_gctoo . data_df . index . values ] numpy . random . shuffle ( rows ) rows = rows [ 0 : num_entries ] columns = og_gctoo . data_df . columns . values new_data_df = og_gctoo . data_df . loc [ rows , columns ] new_row_meta = og_gctoo . row_metadata_df . loc [ rows ] new_col_meta = og_gctoo . col_metadata_df . loc [ columns ] logger . debug ( "after slice - new_col_meta.shape: {} new_row_meta.shape: {}" . format ( new_col_meta . shape , new_row_meta . shape ) ) new_gctoo = GCToo . GCToo ( data_df = new_data_df , row_metadata_df = new_row_meta , col_metadata_df = new_col_meta ) return new_gctoo
Subsets a GCToo instance along either rows or columns to obtain a specified size .
17,089
def write ( gctoo_object , out_file_name , convert_back_to_neg_666 = True , gzip_compression_level = 6 , max_chunk_kb = 1024 , matrix_dtype = numpy . float32 ) : gctx_out_name = add_gctx_to_out_name ( out_file_name ) hdf5_out = h5py . File ( gctx_out_name , "w" ) write_version ( hdf5_out ) write_src ( hdf5_out , gctoo_object , gctx_out_name ) elem_per_kb = calculate_elem_per_kb ( max_chunk_kb , matrix_dtype ) chunk_size = set_data_matrix_chunk_size ( gctoo_object . data_df . shape , max_chunk_kb , elem_per_kb ) hdf5_out . create_dataset ( data_matrix_node , data = gctoo_object . data_df . transpose ( ) . values , dtype = matrix_dtype ) write_metadata ( hdf5_out , "col" , gctoo_object . col_metadata_df , convert_back_to_neg_666 , gzip_compression = gzip_compression_level ) write_metadata ( hdf5_out , "row" , gctoo_object . row_metadata_df , convert_back_to_neg_666 , gzip_compression = gzip_compression_level ) hdf5_out . close ( )
Writes a GCToo instance to specified file .
17,090
def write_src ( hdf5_out , gctoo_object , out_file_name ) : if gctoo_object . src == None : hdf5_out . attrs [ src_attr ] = out_file_name else : hdf5_out . attrs [ src_attr ] = gctoo_object . src
Writes src as attribute of gctx out file .
17,091
def calculate_elem_per_kb ( max_chunk_kb , matrix_dtype ) : if matrix_dtype == numpy . float32 : return ( max_chunk_kb * 8 ) / 32 elif matrix_dtype == numpy . float64 : return ( max_chunk_kb * 8 ) / 64 else : msg = "Invalid matrix_dtype: {}; only numpy.float32 and numpy.float64 are currently supported" . format ( matrix_dtype ) logger . error ( msg ) raise Exception ( "write_gctx.calculate_elem_per_kb " + msg )
Calculates the number of elem per kb depending on the max chunk size set .
17,092
def set_data_matrix_chunk_size ( df_shape , max_chunk_kb , elem_per_kb ) : row_chunk_size = min ( df_shape [ 0 ] , 1000 ) col_chunk_size = min ( ( ( max_chunk_kb * elem_per_kb ) // row_chunk_size ) , df_shape [ 1 ] ) return ( row_chunk_size , col_chunk_size )
Sets chunk size to use for writing data matrix . Note . Calculation used here is for compatibility with cmapM and cmapR .
17,093
def convert ( self , form ) : if not is_lazy_user ( form . instance ) : raise NotLazyError ( 'You cannot convert a non-lazy user' ) user = form . save ( ) self . filter ( user = user ) . delete ( ) converted . send ( self , user = user ) return user
Convert a lazy user to a non - lazy one . The form passed in is expected to be a ModelForm instance bound to the user to be converted .
17,094
def generate_username ( self , user_class ) : m = getattr ( user_class , 'generate_username' , None ) if m : return m ( ) else : max_length = user_class . _meta . get_field ( self . username_field ) . max_length return uuid . uuid4 ( ) . hex [ : max_length ]
Generate a new username for a user
17,095
def is_lazy_user ( user ) : if user . is_anonymous : return False backend = getattr ( user , 'backend' , None ) if backend == 'lazysignup.backends.LazySignupBackend' : return True from lazysignup . models import LazyUser return bool ( LazyUser . objects . filter ( user = user ) . count ( ) > 0 )
Return True if the passed user is a lazy user .
17,096
def add ( queue_name , payload = None , content_type = None , source = None , task_id = None , build_id = None , release_id = None , run_id = None ) : if task_id : task = WorkQueue . query . filter_by ( task_id = task_id ) . first ( ) if task : return task . task_id else : task_id = uuid . uuid4 ( ) . hex if payload and not content_type and not isinstance ( payload , basestring ) : payload = json . dumps ( payload ) content_type = 'application/json' now = datetime . datetime . utcnow ( ) task = WorkQueue ( task_id = task_id , queue_name = queue_name , eta = now , source = source , build_id = build_id , release_id = release_id , run_id = run_id , payload = payload , content_type = content_type ) db . session . add ( task ) return task . task_id
Adds a work item to a queue .
17,097
def _task_to_dict ( task ) : payload = task . payload if payload and task . content_type == 'application/json' : payload = json . loads ( payload ) return dict ( task_id = task . task_id , queue_name = task . queue_name , eta = _datetime_to_epoch_seconds ( task . eta ) , source = task . source , created = _datetime_to_epoch_seconds ( task . created ) , lease_attempts = task . lease_attempts , last_lease = _datetime_to_epoch_seconds ( task . last_lease ) , payload = payload , content_type = task . content_type )
Converts a WorkQueue to a JSON - able dictionary .
17,098
def lease ( queue_name , owner , count = 1 , timeout_seconds = 60 ) : now = datetime . datetime . utcnow ( ) query = ( WorkQueue . query . filter_by ( queue_name = queue_name , status = WorkQueue . LIVE ) . filter ( WorkQueue . eta <= now ) . order_by ( WorkQueue . eta ) . with_lockmode ( 'update' ) . limit ( count ) ) task_list = query . all ( ) if not task_list : return None next_eta = now + datetime . timedelta ( seconds = timeout_seconds ) for task in task_list : task . eta = next_eta task . lease_attempts += 1 task . last_owner = owner task . last_lease = now task . heartbeat = None task . heartbeat_number = 0 db . session . add ( task ) return [ _task_to_dict ( task ) for task in task_list ]
Leases a work item from a queue usually the oldest task available .
17,099
def _get_task_with_policy ( queue_name , task_id , owner ) : now = datetime . datetime . utcnow ( ) task = ( WorkQueue . query . filter_by ( queue_name = queue_name , task_id = task_id ) . with_lockmode ( 'update' ) . first ( ) ) if not task : raise TaskDoesNotExistError ( 'task_id=%r' % task_id ) lease_delta = now - task . eta if lease_delta > datetime . timedelta ( 0 ) : db . session . rollback ( ) raise LeaseExpiredError ( 'queue=%r, task_id=%r expired %s' % ( task . queue_name , task_id , lease_delta ) ) if task . last_owner != owner : db . session . rollback ( ) raise NotOwnerError ( 'queue=%r, task_id=%r, owner=%r' % ( task . queue_name , task_id , task . last_owner ) ) return task
Fetches the specified task and enforces ownership policy .