idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
13,200
def get_experimental_ddg_values(self, record, dataframe_record):
    """Add the mean experimental DDG value for each analysis set to the dataframe row.

    One 'Experimental_<analysis_set>' column is added per analysis set; the value
    is the set's MeanDDG, or None when no DDG details exist for that set.
    The generic 'Experimental' entry in csv_headers is replaced by the new columns.
    """
    new_idxs = []
    for analysis_set in self.get_analysis_sets(record):
        ddg_details = record['DDG'][analysis_set]
        exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
        new_idxs.append(exp_ddg_fieldname)
        dataframe_record[exp_ddg_fieldname] = None
        if ddg_details:
            dataframe_record[exp_ddg_fieldname] = ddg_details['MeanDDG']
    # Bug fix: `except ValueError, e` is Python-2-only syntax (SyntaxError on
    # Python 3); the bound exception was unused anyway.
    try:
        idx = self.csv_headers.index('Experimental')
        self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
    except ValueError:
        # 'Experimental' placeholder already replaced or never present.
        pass
Adds the mean experimental value associated with each analysis set to the dataframe row .
13,201
def compute_stability_classification(self, predicted_data, record, dataframe_record):
    """Calculate the stability classification (0/1) for each analysis set.

    Must be called after get_experimental_ddg_values, since it reads the
    per-analysis-set 'Experimental' values from the dataframe row.
    """
    new_idxs = []
    stability_classication_x_cutoff, stability_classication_y_cutoff = self.stability_classication_x_cutoff, self.stability_classication_y_cutoff
    for analysis_set in self.get_analysis_sets(record):
        ddg_details = record['DDG'][analysis_set]
        exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
        stability_classification_fieldname = BenchmarkRun.get_analysis_set_fieldname('StabilityClassification', analysis_set)
        new_idxs.append(stability_classification_fieldname)
        dataframe_record[stability_classification_fieldname] = None
        if ddg_details:
            stability_classification = None
            if dataframe_record[exp_ddg_fieldname] is not None:
                # fraction_correct on a single pair yields 0.0 or 1.0.
                stability_classification = fraction_correct([dataframe_record[exp_ddg_fieldname]], [predicted_data[self.ddg_analysis_type]], x_cutoff=stability_classication_x_cutoff, y_cutoff=stability_classication_y_cutoff)
                stability_classification = int(stability_classification)
                assert (stability_classification == 0 or stability_classification == 1)
                dataframe_record[stability_classification_fieldname] = stability_classification
    # Bug fix: Python-2-only `except ValueError, e` syntax replaced.
    try:
        idx = self.csv_headers.index('StabilityClassification')
        self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
    except ValueError:
        pass
Calculate the stability classification for the analysis cases . Must be called after get_experimental_ddg_values .
13,202
def compute_absolute_error(self, predicted_data, record, dataframe_record):
    """Calculate the absolute error |experimental - predicted| per analysis set.

    Must be called after get_experimental_ddg_values, since it reads the
    per-analysis-set 'Experimental' values from the dataframe row.
    """
    new_idxs = []
    for analysis_set in self.get_analysis_sets(record):
        ddg_details = record['DDG'][analysis_set]
        exp_ddg_fieldname = BenchmarkRun.get_analysis_set_fieldname('Experimental', analysis_set)
        absolute_error_fieldname = BenchmarkRun.get_analysis_set_fieldname('AbsoluteError', analysis_set)
        new_idxs.append(absolute_error_fieldname)
        dataframe_record[absolute_error_fieldname] = None
        if ddg_details and predicted_data[self.ddg_analysis_type] is not None:
            absolute_error = abs(dataframe_record[exp_ddg_fieldname] - predicted_data[self.ddg_analysis_type])
            dataframe_record[absolute_error_fieldname] = absolute_error
    # Bug fix: Python-2-only `except ValueError, e` syntax replaced.
    try:
        idx = self.csv_headers.index('AbsoluteError')
        self.csv_headers = self.csv_headers[:idx] + new_idxs + self.csv_headers[idx + 1:]
    except ValueError:
        pass
Calculate the absolute error for the analysis cases . Must be called after get_experimental_ddg_values .
13,203
def add_result(self, values):
    """Add a result tuple, or increment the count of an existing one.

    The key is (values['host'], values[gid], ...) for the remaining key_gids;
    the new key is also remembered as _last_idx for increase_last().
    """
    key = tuple([values['host']] + [values[gid] for gid in self.key_gids[1:]])
    if key in self.results:
        self.results[key] += 1
    else:
        self.results[key] = 1
    self._last_idx = key
Add a tuple or increment the value of an existing one in the rule results dictionary .
13,204
def increase_last(self, k):
    """Increase the most recently added result by k (no-op before any result exists)."""
    last = self._last_idx
    if last is not None:
        self.results[last] += k
Increase the last result by k .
13,205
def parse_rules(self):
    """Build the app's rule list from the config 'rules' section.

    Without CLI filters every pattern is used as-is; with filters each pattern
    is specialised once per filter group via exact_sub.

    Raises LogRaptorConfigError when the 'rules' section is missing.
    """
    try:
        rule_options = self.config.items('rules')
    except configparser.NoSectionError:
        raise LogRaptorConfigError("the app %r has no defined rules!" % self.name)

    rules = []
    for option, value in rule_options:
        pattern = value.replace('\n', '')
        if not self.args.filters:
            pattern = string.Template(pattern).safe_substitute(self.fields)
            rules.append(AppRule(option, pattern, self.args))
            continue
        for filter_group in self.args.filters:
            _pattern, filter_keys = exact_sub(pattern, filter_group)
            _pattern = string.Template(_pattern).safe_substitute(self.fields)
            if len(filter_keys) >= len(filter_group):
                rules.append(AppRule(option, _pattern, self.args, filter_keys))
            elif self._thread:
                rules.append(AppRule(option, _pattern, self.args))
    return rules
Add a set of rules to the app, dividing them between the filter rule set and the other rule sets.
13,206
def increase_last(self, k):
    """Increase the counter of the last matched rule by k (no-op when none matched yet)."""
    last_rule = self._last_rule
    if last_rule is not None:
        last_rule.increase_last(k)
Increase the counter of the last matched rule by k .
13,207
def get_sections_by_delegate_and_term(person, term, future_terms=0, include_secondaries=True, transcriptable_course='yes', delete_flag=['active']):
    """Return SectionReference objects for the given grade submission delegate and term.

    NOTE(review): delete_flag is a mutable default; it is only passed through
    (never mutated) here, but a None-default would be safer.
    """
    data = _get_sections_by_person_and_term(
        person,
        term,
        "GradeSubmissionDelegate",
        include_secondaries,
        future_terms,
        transcriptable_course,
        delete_flag,
    )
    return _json_to_sectionref(data)
Returns a list of uw_sws . models . SectionReference objects for the passed grade submission delegate and term .
13,208
def get_sections_by_curriculum_and_term(curriculum, term):
    """Return SectionReference objects for the given curriculum and term."""
    query = urlencode([
        ("curriculum_abbreviation", curriculum.label,),
        ("quarter", term.quarter.lower(),),
        ("year", term.year,),
    ])
    url = "{}?{}".format(section_res_url_prefix, query)
    return _json_to_sectionref(get_resource(url))
Returns a list of uw_sws . models . SectionReference objects for the passed curriculum and term .
13,209
def get_sections_by_building_and_term(building, term):
    """Return SectionReference objects for the given building and term."""
    query = urlencode([
        ("quarter", term.quarter.lower(),),
        ("facility_code", building,),
        ("year", term.year,),
    ])
    url = "{}?{}".format(section_res_url_prefix, query)
    return _json_to_sectionref(get_resource(url))
Returns a list of uw_sws . models . SectionReference objects for the passed building and term .
13,210
def _json_to_sectionref(data):
    """Build a list of SectionReference objects from the passed JSON data.

    The Term lookup is cached: a new Term is fetched only when year/quarter
    changes between consecutive section entries.
    """
    sections = []
    section_term = None
    for entry in data.get("Sections", []):
        if (section_term is None or
                entry["Year"] != section_term.year or
                entry["Quarter"] != section_term.quarter):
            section_term = get_term_by_year_and_quarter(entry["Year"], entry["Quarter"])
        sections.append(SectionReference(
            term=section_term,
            curriculum_abbr=entry["CurriculumAbbreviation"],
            course_number=entry["CourseNumber"],
            section_id=entry["SectionID"],
            url=entry["Href"]))
    return sections
Returns a list of SectionReference object created from the passed json data .
13,211
def get_section_by_url(url, include_instructor_not_on_time_schedule=True):
    """Return a Section object for the passed section URL.

    Raises InvalidSectionURL when the URL does not match the course pattern.
    """
    if not course_url_pattern.match(url):
        raise InvalidSectionURL(url)
    resource = get_resource(url)
    return _json_to_section(
        resource,
        include_instructor_not_on_time_schedule=include_instructor_not_on_time_schedule)
Returns a uw_sws . models . Section object for the passed section url .
13,212
def get_section_by_label(label, include_instructor_not_on_time_schedule=True):
    """Return a Section object for the passed section label."""
    validate_section_label(label)
    encoded = encode_section_label(label)
    url = "{}/{}.json".format(course_res_url_prefix, encoded)
    return get_section_by_url(url, include_instructor_not_on_time_schedule)
Returns a uw_sws . models . Section object for the passed section label .
13,213
def get_linked_sections(section, include_instructor_not_on_time_schedule=True):
    """Return the Section objects linked to the passed section."""
    # Comprehension avoids rebinding the `section` parameter, which the
    # original loop did on each iteration.
    return [get_section_by_url(url, include_instructor_not_on_time_schedule)
            for url in section.linked_section_urls]
Returns a list of uw_sws . models . Section objects representing linked sections for the passed section .
13,214
def get_joint_sections(section, include_instructor_not_on_time_schedule=True):
    """Return the Section objects jointly offered with the passed section."""
    return [get_section_by_url(url, include_instructor_not_on_time_schedule)
            for url in section.joint_section_urls]
Returns a list of uw_sws . models . Section objects representing joint sections for the passed section .
13,215
# Return a dict of SCOPe details for (pdb_id, chain_id) derived from other PDB
# chains sharing the given Pfam accession numbers; returns None when pfam_accs
# is empty or no hit survives the SCOPe-domain filter.
# NOTE(review): Python 2 only -- uses dict.iteritems() and indexes map() results
# (map(...)[0]); both raise under Python 3.
# NOTE(review): `map(set.intersection, pfam_scop_mapping.values())[0]` applies
# set.intersection to each value independently (no cross-value intersection) and
# then keeps only the first -- looks suspicious; confirm intent before porting.
# NOTE(review): `domains_to_ignore` is assigned but never used; the print()
# calls look like leftover debugging output.
def get_chain_details_by_related_pdb_chains ( self , pdb_id , chain_id , pfam_accs ) : if not pfam_accs : return None associated_pdb_chains = set ( ) pfam_api = self . get_pfam_api ( ) for pfam_acc in pfam_accs : associated_pdb_chains = associated_pdb_chains . union ( pfam_api . get_pdb_chains_from_pfam_accession_number ( pfam_acc ) ) hits = [ ] pfam_scop_mapping = { } for pdb_chain_pair in associated_pdb_chains : ass_pdb_id , ass_chain_id = pdb_chain_pair [ 0 ] , pdb_chain_pair [ 1 ] hit = self . get_chain_details ( ass_pdb_id , chain = ass_chain_id , internal_function_call = True , pfam_scop_mapping = pfam_scop_mapping ) if hit and hit . get ( 'chains' ) : assert ( len ( hit [ 'chains' ] ) == 1 ) hits . append ( hit [ 'chains' ] [ ass_chain_id ] ) allowed_scop_domains = map ( int , map ( set . intersection , pfam_scop_mapping . values ( ) ) [ 0 ] ) allowed_scop_domains = list ( set ( ( allowed_scop_domains or [ ] ) + ( self . get_sunid_for_pfam_accs ( pfam_accs ) or [ ] ) ) ) filtered_hits = [ ] print ( pfam_accs ) print ( allowed_scop_domains ) print ( '%d hits' % len ( hits ) ) for hit in hits : domains_to_ignore = [ ] for k , v in hit [ 'domains' ] . iteritems ( ) : if v [ 'sunid' ] in allowed_scop_domains : filtered_hits . append ( v ) print ( '%d filtered_hits' % len ( filtered_hits ) ) if not filtered_hits : return None d = self . get_basic_pdb_chain_information ( pdb_id , chain_id ) d . update ( self . get_common_fields ( filtered_hits ) ) d . update ( dict ( SCOPe_sources = 'Pfam + SCOPe' , SCOPe_search_fields = 'Pfam + link_pdb.pdb_chain_id' , SCOPe_trust_level = 3 ) ) for k , v in sorted ( self . levels . iteritems ( ) ) : d [ v ] = None d . update ( dict ( self . get_common_hierarchy ( filtered_hits ) ) ) return d
Returns a dict of SCOPe details. This returns Pfam-level information for a PDB chain, i.e. no details on the protein species or domain will be returned. If there are SCOPe entries for the associated Pfam accession numbers which agree, then this function returns fairly complete information.
13,216
def recall_service(self, service):
    """Roll back the containers of ``service`` after a failed deployment.

    Assumes it runs during a deployment session: previously running containers
    are restored/restarted while the freshly deployed containers and cargo are
    deleted. Raises when a previous container cannot be restarted.
    """
    if not isinstance(service, Service):
        raise TypeError("service must be of type Service.")
    logger.warning("The deployment for {0} on {1} failed starting the rollback.".format(service.alias, self.url.geturl()))

    def rollback_one(svc):
        if not isinstance(svc, Service):
            raise TypeError("service must be an instance of Service.")
        previous = self.find_previous_service_containers(svc)
        if not previous:
            return
        # Drop the newly deployed containers and their cargo before restoring.
        for name in list(svc.containers.keys()):
            del svc.containers[name]
        svc.cargo.delete()
        for name, container in previous.items():
            if container.state().get('running'):
                logger.info("is already running... Might want to investigate.", extra={'formatter': 'container', 'container': container.name})
            elif container.start():
                logger.info("is restarted and healthy.", extra={'formatter': 'container', 'container': container.name})
            else:
                logger.error("failed to start.", extra={'formatter': 'container', 'container': container.name})
                container.dump_logs()
                raise Exception("The deployment for {0} on {1} went horribly wrong".format(container.name, self.url.geturl()))

    self._service_map(service, rollback_one, descending=False)
This method assumes that it's a rollback during a deployment; behavior is undefined if it is not used during a deployment session.
13,217
def clean_up_dangling_images(self):
    """Remove every dangling image known to the current client session."""
    dangling = Image.all(client=self._client_session, filters={'dangling': True})
    # Renamed loop variables: the original shadowed the builtin `id`.
    for image_id, image in dangling.items():
        logger.info("Removing dangling image: {0}".format(image_id))
        image.delete()
Clean up all dangling images .
13,218
def offload_all_service_containers(self, service):
    """Delete every container belonging to ``service`` (and its mapped services)."""
    def drop_containers(svc):
        if not isinstance(svc, Service):
            raise TypeError("service must be an instance of Service.")
        containers = self.find_service_containers(svc)
        if not containers:
            return
        logger.info("Deleting service: {0} containers.".format(svc.name))
        for container in containers.values():
            container.delete()

    self._service_map(service, drop_containers, descending=True)
Deletes all containers related to the service .
13,219
def _container_registration(self, alias):
    """Return the first available container name for ``alias``.

    Names are '<alias>-01', '<alias>-02', ...; counts of 10 and above are not
    zero-padded ('<alias>-10', '<alias>-11', ...).
    """
    existing = Container.find_by_name(self._client_session, alias)
    count = 1
    container_name = "{0}-0{1}".format(alias, count)
    while container_name in existing:
        count += 1
        # Bug fix: the original condition was `count > 10`, so count == 10
        # produced '-010' instead of '-10'. Pad only single-digit counts.
        container_index = count if count > 9 else "0{0}".format(count)
        container_name = "{0}-{1}".format(alias, container_index)
    return container_name
Check for an available name and return that to the caller .
13,220
def teardown_databases(self, old_config, options):
    """Destroy all the non-mirror test databases.

    ``old_config`` is either a flat list of (connection, old_name, destroy)
    triples, or a pair (old_names, mirrors); mirrors are never destroyed.
    """
    if len(old_config) > 1:
        old_names, _mirrors = old_config
    else:
        old_names = old_config
    for connection, old_name, destroy in old_names:
        if destroy:
            connection.creation.destroy_test_db(old_name, options['verbosity'])
Destroys all the non - mirror databases .
13,221
def oembed(url, class_=""):
    """Create an OEmbed anchor element wrapped in Markup."""
    anchor = "<a href=\"{url}\" class=\"oembed {class_}\" ></a>".format(url=url, class_=class_)
    return Markup(anchor)
Create OEmbed link
13,222
def img_src(url, class_="", responsive=False, lazy_load=False, id_=""):
    """Create an <img> tag, optionally responsive and/or lazy-loaded.

    Relative URLs are resolved through static_url(); lazy loading moves the
    real URL into data-src and serves a 1x1 transparent PNG placeholder.
    """
    if not url.startswith("http://") and not url.startswith("https://"):
        url = static_url(url)
    data_src = ""
    if responsive:
        class_ += " responsive"
    if lazy_load:
        data_src = url
        url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNgYAAAAAMAASsJTYQAAAAASUVORK5CYII="
        class_ += " lazy"
    tag = "<img src=\"{src}\" class=\"{class_}\" id=\"{id_}\" data-src={data_src}>".format(src=url, class_=class_, id_=id_, data_src=data_src)
    return Markup(tag)
Create an image src
13,223
def give_dots_yield(R, r, r_, resolution=2 * PI / 1000, spins=50):
    """Yield Spirograph (x, y) dots one at a time, without numpy."""
    def x(theta):
        return (R - r) * math.cos(theta) + r_ * math.cos((R - r) / r * theta)

    def y(theta):
        return (R - r) * math.sin(theta) - r_ * math.sin((R - r) / r * theta)

    limit = 2 * PI * spins
    theta = 0.0
    while theta < limit:
        yield (x(theta), y(theta))
        theta += resolution
Generate Spirograph dots without numpy using yield .
13,224
def give_dots(R, r, r_, resolution=2 * PI / 1000, spins=50):
    """Generate Spirograph dots with numpy; returns the (x, y) coordinate arrays."""
    thetas = np.arange(0, 2 * PI * spins, resolution)
    Rr = R - r
    inner = Rr / r * thetas
    x = Rr * np.cos(thetas) + r_ * np.cos(inner)
    y = Rr * np.sin(thetas) - r_ * np.sin(inner)
    return x, y
Generate Spirograph dots with numpy .
13,225
def spiro_image(R, r, r_, resolution=2 * PI / 1000, spins=50, size=[32, 32]):
    """Render a Spirograph on a 500x500 canvas and resize it to ``size``.

    Returns a float array in [0, 1] (0 = curve pixel, 1 = background).

    Bug fix: R, resolution and spins were previously ignored -- hard-coded
    values 200 and 20 were passed to give_dots; they are now honoured.
    """
    x, y = give_dots(R, r, r_, resolution=resolution, spins=spins)
    xy = np.array([x, y]).T
    xy = np.array(np.around(xy), dtype=np.int64)
    # Keep only the dots inside the 500x500 canvas centred on the origin.
    xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) & (xy[:, 0] < 250) & (xy[:, 1] < 250)]
    xy = xy + 250
    img = np.ones([500, 500], dtype=np.uint8)
    img[:] = 255
    img[xy[:, 0], xy[:, 1]] = 0
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3; switch to
    # PIL.Image.resize or skimage.transform.resize when upgrading SciPy.
    img = misc.imresize(img, size)
    fimg = img / 255.0
    return fimg
Create image with given Spirograph parameters using numpy and scipy .
13,226
def html(text, lazy_images=False):
    """Render markdown ``text`` into HTML, optionally with lazy-loaded images."""
    extensions = [
        'markdown.extensions.nl2br',
        'markdown.extensions.sane_lists',
        'markdown.extensions.toc',
        'markdown.extensions.tables',
        OEmbedExtension(),
    ]
    if lazy_images:
        extensions.append(LazyImageExtension())
    return markdown.markdown(text, extensions=extensions)
To render a markdown format text into HTML .
13,227
def run(self, root):
    """Find all images under ``root`` and record their src on markdown.images."""
    self.markdown.images = []
    # Bug fix: Element.getiterator() was deprecated and removed in Python 3.9;
    # Element.iter() is the supported equivalent.
    for image in root.iter("img"):
        self.markdown.images.append(image.attrib["src"])
Find all images and append to markdown . images .
13,228
def dict_map(function, dictionary):
    """Like map() for dict values: return a new dict with the same keys and
    ``function`` applied to each value."""
    return {key: function(value) for key, value in dictionary.items()}
dict_map is much like the built-in function map. It takes a dictionary and applies a function to the values of that dictionary, returning a new dictionary with the mapped values under the original keys.
13,229
def sorted_items(d, key=__identity, reverse=False):
    """Return the items of dictionary ``d`` sorted by key(k) of each item's key."""
    def item_key(pair):
        return key(pair[0])
    return sorted(d.items(), key=item_key, reverse=reverse)
Return the items of the dictionary sorted by the keys
13,230
def invert_map(map):
    """Return the inverse mapping {value: key}.

    Raises ValueError when two keys map to the same value (information loss).
    """
    inverted = {v: k for k, v in map.items()}
    if len(inverted) != len(map):
        raise ValueError('Key conflict in inverted mapping')
    return inverted
Given a dictionary return another dictionary with keys and values switched . If any of the values resolve to the same key raises a ValueError .
13,231
def matching_key_for(self, key):
    """Return the actual stored key equal to ``key``; raise KeyError if absent."""
    for stored_key in self.keys():
        if stored_key == key:
            return stored_key
    raise KeyError(key)
Given a key, return the actual key stored in self that matches. Raise KeyError if the key isn't found.
13,232
def backup(backup_filename=None):
    """Backup a Cozy into a tgz archive.

    When no filename is given a timestamped archive is created under
    BACKUPS_PATH (creating the directory, mode 0o700, if needed); an existing
    target file aborts the backup.
    """
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
    if not backup_filename:
        if not os.path.isdir(BACKUPS_PATH):
            # Bug fix: Python-2 print statements and the py2 octal literal 0700
            # (SyntaxError on Python 3) replaced with print() and 0o700.
            print('Need to create {}'.format(BACKUPS_PATH))
            os.makedirs(BACKUPS_PATH, 0o700)
        backup_filename = '{backups_path}/cozy-{timestamp}.tgz'.format(backups_path=BACKUPS_PATH, timestamp=timestamp)
    elif os.path.exists(backup_filename):
        print('Backup file already exists: {}'.format(backup_filename))
        return
    couchdb_path = _get_couchdb_path()
    cmd = 'tar cvzf {backup_filename}'
    cmd += ' --exclude stack.token'
    cmd += ' --exclude couchdb.login'
    cmd += ' --exclude self-hosting.json'
    cmd += ' /etc/cozy /usr/local/var/cozy {couchdb_path}/cozy.couch'
    cmd = cmd.format(backup_filename=backup_filename, couchdb_path=couchdb_path)
    helpers.cmd_exec(cmd, show_output=True)
    print('Backup file: {}'.format(backup_filename))
Backup a Cozy
13,233
def get_config():
    """Prepare and return the MigrationsConfig built from the app settings.

    Raises when any required setting resolves to a falsy value.
    """
    from boiler.migrations.config import MigrationsConfig

    # Renamed from `map`, which shadowed the builtin.
    option_names = dict(
        path='MIGRATIONS_PATH',
        db_url='SQLALCHEMY_DATABASE_URI',
        metadata='SQLAlchemy metadata',
    )
    app = bootstrap.get_app()
    params = dict(
        path=app.config.get(option_names['path'], 'migrations'),
        db_url=app.config.get(option_names['db_url']),
        metadata=db.metadata,
    )
    for param, value in params.items():
        if not value:
            msg = 'Configuration error: [{}] is undefined'
            raise Exception(msg.format(option_names[param]))
    return MigrationsConfig(**params)
Prepare and return the alembic config. These configurations used to live in the alembic config initialiser, but that was just tight coupling. Ideally we should move that to userspace and find a way to pass these into alembic commands.
13,234
def init():
    """Initialize a new migrations directory, reporting alembic errors in red."""
    try:
        cfg = get_config()
        print(cfg.dir)
        alembic_command.init(cfg, cfg.dir, 'project')
    except CommandError as e:
        click.echo(red(str(e)))
Initialize new migrations directory
13,235
def revision(revision, path, branch_label, splice, head, sql, autogenerate, message):
    """Create a new revision file."""
    cfg = get_config()
    alembic_command.revision(
        config=cfg,
        rev_id=revision,
        version_path=path,
        branch_label=branch_label,
        splice=splice,
        head=head,
        sql=sql,
        autogenerate=autogenerate,
        message=message,
    )
Create new revision file
13,236
def merge(revision, branch_label, message, list_revisions=''):
    """Merge two revisions together by creating a new revision file."""
    cfg = get_config()
    alembic_command.merge(
        config=cfg,
        revisions=list_revisions,
        message=message,
        branch_label=branch_label,
        rev_id=revision,
    )
Merge two revision together create new revision file
13,237
def up(tag, sql, revision):
    """Upgrade the database to the given revision."""
    cfg = get_config()
    alembic_command.upgrade(config=cfg, revision=revision, sql=sql, tag=tag)
Upgrade to revision
13,238
def down(tag, sql, revision):
    """Downgrade the database to the given revision."""
    cfg = get_config()
    alembic_command.downgrade(config=cfg, revision=revision, sql=sql, tag=tag)
Downgrade to revision
13,239
def history(verbose, range):
    """List revision changesets chronologically (optionally limited to a range)."""
    cfg = get_config()
    alembic_command.history(config=cfg, rev_range=range, verbose=verbose)
List revision changesets chronologically
13,240
def heads(resolve, verbose):
    """Show the available revision heads."""
    cfg = get_config()
    alembic_command.heads(config=cfg, verbose=verbose, resolve_dependencies=resolve)
Show available heads
13,241
def stamp(revision, sql, tag):
    """Stamp the database at the given revision without running migrations."""
    cfg = get_config()
    alembic_command.stamp(config=cfg, revision=revision, sql=sql, tag=tag)
Stamp db to given revision without migrating
13,242
# Retrieve the record matching `values` (pruned to the model's schema columns
# minus missing/updatable/variable ones) or, when absent and not read_only,
# create it. Returns the instance; returns None only when no record matched and
# read_only suppressed creation. Raises on unexpected fields, multiple matches,
# or missing required fields at creation time.
# NOTE(review): the single-line formatting makes branch nesting ambiguous (e.g.
# which branch owns each `return instance`); left byte-identical rather than
# risk a wrong re-indentation.
# NOTE(review): mutable default arguments ([]) -- not mutated here (fieldnames
# is a fresh list), but None-defaults would be safer.
def get_or_create_in_transaction ( tsession , model , values , missing_columns = [ ] , variable_columns = [ ] , updatable_columns = [ ] , only_use_supplied_columns = False , read_only = False ) : values = copy . deepcopy ( values ) fieldnames = [ c . name for c in list ( sqlalchemy_inspect ( model ) . columns ) ] for c in missing_columns : fieldnames . remove ( c ) for c in updatable_columns : fieldnames . remove ( c ) for c in variable_columns : if c in fieldnames : fieldnames . remove ( c ) if only_use_supplied_columns : fieldnames = sorted ( set ( fieldnames ) . intersection ( set ( values . keys ( ) ) ) ) else : unexpected_fields = set ( values . keys ( ) ) . difference ( set ( fieldnames ) ) . difference ( set ( variable_columns ) ) . difference ( set ( updatable_columns ) ) if unexpected_fields : raise Exception ( "The fields '{0}' were passed but not found in the schema for table {1}." . format ( "', '" . join ( sorted ( unexpected_fields ) ) , model . __dict__ [ '__tablename__' ] ) ) pruned_values = { } for k in set ( values . keys ( ) ) . intersection ( set ( fieldnames ) ) : v = values [ k ] pruned_values [ k ] = v instance = tsession . query ( model ) . filter_by ( ** pruned_values ) if instance . count ( ) > 1 : raise Exception ( 'Multiple records were found with the search criteria.' ) instance = instance . first ( ) if instance : if read_only == False : for c in updatable_columns : setattr ( instance , c , values [ c ] ) tsession . flush ( ) return instance else : if read_only == False : if sorted ( pruned_values . keys ( ) ) != sorted ( fieldnames ) : raise Exception ( 'Some required fields are missing: {0}. Either supply these fields or add them to the missing_columns list.' . format ( set ( fieldnames ) . difference ( pruned_values . keys ( ) ) ) ) instance = model ( ** pruned_values ) tsession . add ( instance ) tsession . flush ( ) return instance return None
Uses the SQLAlchemy model to retrieve an existing record based on the supplied field values or if there is no existing record to create a new database record .
13,243
def get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns=[], variable_columns=[], updatable_columns=[], only_use_supplied_columns=False, read_only=False):
    """Profiling shim around get_or_create_in_transaction.

    Swap call sites to this wrapper when profiling to attribute the time spent
    in get_or_create_in_transaction to individual callers.
    """
    return get_or_create_in_transaction(
        tsession,
        model,
        values,
        missing_columns=missing_columns,
        variable_columns=variable_columns,
        updatable_columns=updatable_columns,
        only_use_supplied_columns=only_use_supplied_columns,
        read_only=read_only,
    )
This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API . Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies .
13,244
def get_weight(self, rule):
    """Return the weight stored for ``rule``, or None when the rule is not registered.

    Raises TypeError when ``rule`` is not a Rule/RuleLeaf subclass instance.
    """
    if not issubclass(rule.__class__, (Rule, RuleLeaf)):
        raise TypeError("Rule to get weight ({}) is not subclass "
                        "of {} or {}.".format(rule, Rule, RuleLeaf))
    # Bug fix: the original used a bare `except:` which swallowed every error
    # (even KeyboardInterrupt); only a missing rule should map to None.
    try:
        ind = self._R.index(rule)
    except ValueError:
        return None
    return self._W[ind]
Get weight for rule .
13,245
def evaluate(self, artifact):
    """Evaluate ``artifact`` with the agent's current rules and weights.

    Returns (weighted average score, None); (0.0, None) when there are no
    rules or the absolute weights sum to zero.
    """
    if len(self.R) == 0:
        return 0.0, None
    score = 0
    weight_total = 0.0
    for i, rule in enumerate(self.R):
        score += rule(artifact) * self.W[i]
        weight_total += abs(self.W[i])
    if weight_total == 0.0:
        return 0.0, None
    return score / weight_total, None
Evaluate artifact with the agent's current rules and weights.
13,246
def _init_dates(self):
    """Initialize epoch_start/epoch_finish and their formatted datetime strings.

    Reads the earliest and latest epochs from the Result table; does nothing
    when no transactions were recorded.
    """
    if self.total_transactions == 0:
        return None
    self.epoch_start = Result.select(Result.epoch).order_by(Result.epoch.asc()).limit(1).get().epoch
    self.epoch_finish = Result.select(Result.epoch).order_by(Result.epoch.desc()).limit(1).get().epoch
    fmt = '%Y-%m-%d %H:%M:%S'
    self.start_datetime = time.strftime(fmt, time.localtime(self.epoch_start))
    self.finish_datetime = time.strftime(fmt, time.localtime(self.epoch_finish))
Initialize all dates properties
13,247
def _init_dataframes(self):
    """Build the main results dataframe and one processed dataframe per custom timer."""
    df = pd.read_sql_query("SELECT elapsed, epoch, scriptrun_time, custom_timers FROM result ORDER BY epoch ASC", db.get_conn())
    self._get_all_timers(df)
    self.main_results = self._get_processed_dataframe(df)
    for timer_name, samples in self._timers_values.items():
        timer_df = pd.DataFrame(samples, columns=['epoch', 'scriptrun_time'])
        timer_df.index = pd.to_datetime(timer_df['epoch'], unit='s')
        self.timers_results[timer_name] = self._get_processed_dataframe(timer_df)
    # The raw per-timer samples are no longer needed once processed.
    del self._timers_values
Initialise the main dataframe for the results and the custom timers dataframes
13,248
def _get_all_timers ( self , dataframe ) : s = dataframe [ 'custom_timers' ] . apply ( json . loads ) s . index = dataframe [ 'epoch' ] for index , value in s . iteritems ( ) : if not value : continue for key , value in six . iteritems ( value ) : self . _timers_values [ key ] . append ( ( index , value ) ) self . total_timers += 1 del dataframe [ 'custom_timers' ] del s
Get all timers and set them in the _timers_values property
13,249
def _get_processed_dataframe ( self , dataframe ) : dataframe . index = pd . to_datetime ( dataframe [ 'epoch' ] , unit = 's' , utc = True ) del dataframe [ 'epoch' ] summary = dataframe . describe ( percentiles = [ .80 , .90 , .95 ] ) . transpose ( ) . loc [ 'scriptrun_time' ] df_grp = dataframe . groupby ( pd . TimeGrouper ( '{}S' . format ( self . interval ) ) ) df_final = df_grp . apply ( lambda x : x . describe ( percentiles = [ .80 , .90 , .95 ] ) [ 'scriptrun_time' ] ) return { "raw" : dataframe . round ( 2 ) , "compiled" : df_final . round ( 2 ) , "summary" : summary . round ( 2 ) }
Generate required dataframe for results from raw dataframe
13,250
def _init_turrets(self):
    """Load every turret from the database into self.turrets as dicts."""
    self.turrets.extend(turret.to_dict() for turret in Turret.select())
Setup data from database
13,251
def compile_results(self):
    """Compile all results for the current test run.

    Builds the dataframes first, then derives the transaction count from the
    raw rows (it is needed by _init_dates).
    """
    self._init_dataframes()
    self.total_transactions = len(self.main_results['raw'])
    self._init_dates()
Compile all results for the current test
13,252
def centroid(X):
    """Return the centroid (column-wise mean) of the points in matrix X."""
    return np.sum(X, axis=0) / len(X)
Calculate the centroid from a matrix X
13,253
def is_valid(self, instance):
    """Return True when validating ``instance`` produces no errors.

    A formset validation yields a list of per-form error dicts; any truthy
    entry means invalid.
    """
    errors = self.errors(instance)
    if isinstance(errors, list):
        return not any(errors)
    return not bool(errors)
Return True if no errors are raised when validating instance .
13,254
def _validate ( self , data ) : errors = { } if not self . _enabled : return errors for field in self . validators : field_errors = [ ] for validator in self . validators [ field ] : try : validator ( data . get ( field , None ) ) except ValidationError as e : field_errors += e . messages if field_errors : errors [ field ] = ErrorList ( field_errors ) return errors
Helper to run validators on the field data .
13,255
def errors(self, instance):
    """Run all field validators against ``instance`` and return the errors.

    Accepts a plain dict, a bound/unbound Form, a FormSet (returns a list of
    per-form error dicts, skipping forms marked for deletion) or a Model
    instance; returns None for any other type.
    """
    if isinstance(instance, dict):
        return self._validate(instance)
    elif isinstance(instance, forms.BaseForm):
        if instance.is_bound and instance.is_valid():
            return self._validate(instance.cleaned_data)
        data = dict([(f, instance.initial.get(f, instance[f].value())) for f in self.validators])
        return self._validate(data)
    elif isinstance(instance, formsets.BaseFormSet):
        if instance.can_delete:
            validate_forms = [form for form in instance.initial_forms
                              if not instance._should_delete_form(form)]
            validate_forms += [form for form in instance.extra_forms
                               if (form.has_changed() and not instance._should_delete_form(form))]
        else:
            validate_forms = instance.initial_forms + [form for form in instance.extra_forms if form.has_changed()]
        return [self.errors(f) for f in validate_forms]
    elif isinstance(instance, models.Model):
        return self._validate(dict([(f, getattr(instance, f)) for f in self.validators]))
Run all field validators and return a dict of errors .
13,256
def queryset_formatter(queryset):
    """Render a queryset of admin objects as a Markup list of admin links."""
    links = ['<a href="{}">{}</a>'.format(u.get_admin_url(_external=True), u) for u in queryset]
    return Markup(base_list_formatter(None, links,))
This is used for custom detail fields returning a QuerySet of admin objects .
13,257
def qs_field(model_class, field, filters=None, formatter=queryset_formatter, manager_name='objects'):
    """Build an admin detail-field callable backed by a filtered queryset.

    The returned closure filters ``model_class``'s manager by ``field`` set to
    the current model instance and renders the result via ``formatter``.
    """
    if filters is None:
        filters = {}

    def render(view, context, _model, name):
        filters[field] = _model
        manager = getattr(model_class, manager_name)
        return formatter(manager(**filters))

    return render
Show computed fields based on QuerySet s .
13,258
def _get_pdb_id ( self , elem , ** kwargs ) : id = elem . attrib [ 'ID' ] if self . restrict_to_transmembrane_proteins : tmp = elem . attrib [ 'TMP' ] assert ( tmp == 'no' or tmp == 'yes' or tmp == 'not' ) if tmp == 'yes' : self . ids [ id ] = PDBTM . _get_tm_type ( elem ) else : self . ids [ id ] = self . ids . get ( id , 0 ) + 1
If self . restrict_to_transmembrane_proteins is False then this adds all ids to self . ids . Otherwise only transmembrane protein ids are added .
13,259
def get_xml(self, pdb_id):
    """Return the XML entry for ``pdb_id`` if present, else None."""
    self.tmp_string = None
    context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
    try:
        # _get_xml stores the match in self.tmp_string and raises EarlyOut to
        # stop parsing as soon as the entry is found.
        fast_iter(context, self._get_xml, pdb_id=pdb_id.upper())
    except EarlyOut:
        pass
    return self.tmp_string
Returns the XML for pdb_id if the tag exists .
13,260
def multimatch(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
    """Iterate relationships matching a pattern, with multiple options per component.

    Each of origin/rel/target may be a single value or a set of
    alternatives; attrs entries must match exactly.  Yields 4-tuples
    (with the attribute dict copied), optionally prefixed with the
    relationship index when include_ids is True.

    NOTE(review): the method raises NotImplementedError immediately, so
    everything below the raise is currently dead/unreachable scaffolding.
    """
    raise NotImplementedError
    # --- unreachable below ---
    origin = origin if origin is None or isinstance(origin, set) else set([origin])
    rel = rel if rel is None or isinstance(rel, set) else set([rel])
    target = target if target is None or isinstance(target, set) else set([target])
    for index, curr_rel in enumerate(self._relationships):
        matches = True
        if origin and curr_rel[ORIGIN] not in origin:
            matches = False
        if rel and curr_rel[RELATIONSHIP] not in rel:
            matches = False
        if target and curr_rel[TARGET] not in target:
            matches = False
        if attrs:
            for k, v in attrs.items():
                if k not in curr_rel[ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:
                    matches = False
        if matches:
            if include_ids:
                yield index, (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
            else:
                yield (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
    return
Iterator over relationship IDs that match a pattern of components with multiple options provided for each component
13,261
def add(self, origin, rel, target, attrs=None):
    """Add one relationship (origin, rel, target, attrs) to the model.

    Relationships are stored in a MongoDB collection keyed by origin;
    each origin document holds a list of rel entries, each carrying its
    target instances.

    :raises ValueError: if origin or rel is falsy.
    """
    if not origin:
        raise ValueError('Relationship origin cannot be null')
    if not rel:
        raise ValueError('Relationship ID cannot be null')
    attrs = attrs or {}
    origin_item = self._db_coll.find_one({'origin': origin})
    # Identifiers are abbreviated before storage.
    rel = self._abbreviate(rel)
    target = self._abbreviate(target)
    rel_info = {'rid': rel, 'instances': [[target, attrs]]}
    if origin_item is None:
        self._db_coll.insert_one({'origin': origin, 'rels': [rel_info], })
    else:
        # NOTE(review): always appends a new rel entry, even when one
        # with the same rid exists -- confirm duplicates are intended.
        origin_item['rels'].append(rel_info)
        self._db_coll.replace_one({'origin': origin}, origin_item)
    return
Add one relationship to the model
13,262
def remove(self, index):
    """Delete one or more relationships by index from the extent.

    NOTE(review): raises NotImplementedError immediately; the logic
    below is currently unreachable.
    """
    raise NotImplementedError
    # --- unreachable below ---
    if hasattr(index, '__iter__'):
        ind = set(index)
    else:
        ind = [index]
    self._relationships = [r for i, r in enumerate(self._relationships) if i not in ind]
Delete one or more relationship by index from the extent
13,263
async def act(self):
    """Agent's main loop step: create a new spirograph artifact.

    Learns from the environment, invents a candidate, evaluates it
    against the agent's own threshold, and either memorizes/promotes it
    or (when jump == 'random') jumps to a random point in argument space.
    """
    self.age += 1
    self.added_last = False
    self.learn_from_domain(method=self.env_learning_method, amount=self.env_learning_amount)
    artifact = self.invent(self.search_width)
    args = artifact.framings[self.name]['args']
    val = artifact.evals[self.name]
    self._log(logging.DEBUG, "Created spirograph with args={}, val={}".format(args, val))
    self.spiro_args = args
    self.arg_history.append(self.spiro_args)
    self.add_artifact(artifact)
    if val >= self._own_threshold:
        # Passed self-criticism: learn it and publish as a candidate.
        artifact.self_criticism = 'pass'
        self.learn(artifact, self.teaching_iterations)
        self.add_candidate(artifact)
        self.added_last = True
    elif self.jump == 'random':
        # Rejected: escape the current region with a random jump.
        largs = self.spiro_args
        self.spiro_args = np.random.uniform(-199, 199, self.spiro_args.shape)
        self._log(logging.DEBUG, "Jumped from {} to {}".format(largs, self.spiro_args))
    self.save_images(artifact)
Agent's main method to create new spirographs.
13,264
def learn_from_domain(self, method='random', amount=10):
    """Train the agent's SOM on artifacts already in the environment.

    :param method: 'none' (skip), or a string containing 'random' and/or
        'closest' selecting which artifacts to learn from.
    :param amount: number of artifacts to use per strategy.
    """
    if method == 'none':
        return
    arts = self.env.artifacts
    if len(arts) == 0:
        return
    if 'random' in method:
        sample_size = min(len(arts), amount)
        for art in np.random.choice(arts, sample_size, replace=False):
            self.learn(art, self.teaching_iterations)
    if 'closest' in method:
        # Rank all artifacts by Euclidean distance of their creation
        # args to this agent's current args, then learn the nearest.
        ranked = []
        for art in arts:
            creator_args = art.framings[art.creator]['args']
            dist = np.sqrt(np.sum(np.square(creator_args - self.spiro_args)))
            ranked.append((dist, art))
        ranked.sort(key=operator.itemgetter(0))
        for dist, art in ranked[:amount]:
            self.learn(art, self.teaching_iterations)
Learn SOM from artifacts introduced to the environment .
13,265
def plot_distances(self, mean_dist, distances, indeces):
    """Plot min distances of generated spirographs w.r.t. earlier ones.

    Draws per-iteration minimum distances, a horizontal mean line, and a
    degree-2 polynomial fit, then saves the figure into the logger's
    folder (or shows it interactively when no logger is configured).

    :param mean_dist: mean of *distances*, drawn as a reference line.
    :param distances: min distance to any preceding artifact, per sample.
    :param indeces: x-axis positions (iterations) for the samples.
    """
    from matplotlib import pyplot as plt
    x = np.arange(len(distances))
    y = [mean_dist for i in x]
    fig, ax = plt.subplots()
    data_line = ax.plot(indeces, distances, label='Min Distance to previous', marker='.', color='black', linestyle="")
    mean_line = ax.plot(indeces, y, label='Mean', linestyle='--', color='green')
    if len(distances) > 0:
        # Overlay a quadratic least-squares fit of the distances.
        z = np.poly1d(np.polyfit(x, distances, 2))
        f = [z(i) for i in x]
        mean_line = ax.plot(indeces, f, label='Fitted', linestyle='-', color='red')
    legend = ax.legend(loc='upper right', prop={'size': 8})
    # Encode the agent's hyperparameters into the output file name.
    agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN".format(self.sanitized_name(), self.age, self.env_learning_method, self.env_learning_amount, self.env_learn_on_add, self.stmem.length, self._novelty_threshold, self._own_threshold, self.jump, self.search_width, self.move_radius)
    ax.set_title("{} min distances: env_learn={} {}".format(self.name, self.env_learning_method, self.env_learning_amount))
    ax.set_ylabel('min distance to preceding artifact')
    ax.set_xlabel('iteration')
    if self.logger is not None:
        imname = os.path.join(self.logger.folder, '{}_dists.png'.format(agent_vars))
        plt.savefig(imname)
        plt.close()
    else:
        plt.show()
Plot distances of the generated spirographs w.r.t. the previously generated spirographs.
13,266
def get_qtls_from_mapqtl_data(matrix, threshold, inputfile):
    """Extract the QTLs found by MapQTL from its parsed output matrix.

    Assumes at most one QTL per linkage group: within each linkage group
    (column 1), the row with the highest LOD score (column 4) is kept,
    and it is reported only when its LOD exceeds *threshold*.

    :param matrix: parsed MapQTL rows; matrix[0] is the header row.
    :param threshold: minimum LOD score for a peak to count as a QTL.
    :param inputfile: file name of the form '...)_<trait>.mqo'; the
        trait name is extracted and written into column 0 of each QTL.
    :return: list of QTL rows (at most one per linkage group).
    """
    trait_name = inputfile.split(')_', 1)[1].split('.mqo')[0]
    qtls = []
    qtl = None
    for entry in matrix[1:]:
        if qtl is None:
            qtl = entry
        if qtl[1] != entry[1]:
            # Linkage group changed: flush the best row of the group.
            if float(qtl[4]) > float(threshold):
                qtl[0] = trait_name
                qtls.append(qtl)
            qtl = entry
        # Empty LOD cells are treated as 0.
        if entry[4] == '':
            entry[4] = 0
        if qtl[4] == '':
            qtl[4] = 0
        # Track the row with the highest LOD in the current group.
        if float(entry[4]) > float(qtl[4]):
            qtl = entry
    # Flush the final group.  NOTE(review): raises if matrix has no
    # data rows (qtl stays None) -- confirm callers guarantee data.
    if float(qtl[4]) > float(threshold):
        qtl[0] = trait_name
        if qtl not in qtls:
            qtls.append(qtl)
    return qtls
Extract the QTLs found by MapQTL by reading its file. This assumes that there is only one QTL per linkage group.
13,267
def get_files(cls, folder, session_id=''):
    """Return the MapQTL output files ('Session <id>*.mqo') under *folder*.

    :param folder: directory to walk; yields an empty list when None or
        not a directory.
    :param session_id: restrict to files of this session; '' (or None,
        coerced to '') matches every session.
    :return: list of matching file paths.
    """
    matches = []
    if folder is None or not os.path.isdir(folder):
        return matches
    if session_id is None:
        session_id = ''
    wanted_prefix = 'Session %s' % session_id
    for root, _dirs, files in os.walk(folder):
        for name in files:
            if name.startswith(wanted_prefix) and name.endswith('.mqo'):
                matches.append(os.path.join(root, name))
    return matches
Retrieve the list of files the plugin can work on. This list is found based on the file names, file extensions, or even by actually reading the file. If a session identifier is specified, the returned list is restricted to files with this session identifier in their name.
13,268
def get_session_identifiers(cls, folder=None, inputfile=None):
    """Return the list of session identifiers found in *folder*.

    Session ids are taken from file names of the form
    'Session <id> ... .mqo'.  *inputfile* is accepted for interface
    compatibility but not used by this implementation.
    """
    sessions = []
    if folder is None or not os.path.isdir(folder):
        return sessions
    for root, _dirs, files in os.walk(folder):
        for name in files:
            if name.startswith('Session ') and name.endswith('.mqo'):
                ident = name.split()[1]
                if ident not in sessions:
                    sessions.append(ident)
    return sessions
Retrieve the list of session identifiers contained in the data on the folder .
13,269
def parse_range(s, range_separator='-'):
    """Parse a comma-separated list of indices and ranges into an explicit list.

    e.g. '1-3,5' -> [1, 2, 3, 5].  Ranges are inclusive at both ends.

    Originally written by Laurens Kraal 2014.  Rewritten because the old
    reduce/map version concatenated `range` objects with `+`, which
    fails on Python 3 where range() no longer returns a list.

    :param s: string such as '1,4-6,9'.
    :param range_separator: character separating range bounds.
    :return: list of ints.
    """
    values = []
    for token in s.split(','):
        if range_separator in token:
            start, end = token.split(range_separator)
            values.extend(range(int(start), int(end) + 1))
        else:
            values.append(int(token))
    return values
Parses the string s which contains indices and ranges and returns the explicit list of integers defined by s . Written by Laurens Kraal 2014 .
13,270
def merge_range_pairs(prs):
    """Merge a list of (start, end) range pairs into sorted, merged pairs.

    Pairs that overlap or are directly adjacent (gap of exactly 1) are
    merged.  Each input pair is sorted internally first, so endpoint
    order does not matter.

    :param prs: iterable of 2-element range pairs.
    :return: sorted list of merged [start, end] lists.
    """
    new_prs = []
    sprs = [sorted(p) for p in prs]
    sprs = sorted(sprs)
    merged = False  # NOTE(review): never read after initialization
    x = 0
    while x < len(sprs):
        newx = x + 1
        new_pair = list(sprs[x])
        for y in range(x + 1, len(sprs)):
            # Absorb a later pair when it starts inside, or immediately
            # after, the accumulated pair.
            if new_pair[0] <= sprs[y][0] - 1 <= new_pair[1]:
                new_pair[0] = min(new_pair[0], sprs[y][0])
                new_pair[1] = max(new_pair[1], sprs[y][1])
                newx = y + 1
        if new_pair not in new_prs:
            new_prs.append(new_pair)
        x = newx
    return new_prs
Takes in a list of pairs specifying ranges and returns a sorted list of merged sorted ranges .
13,271
def split_pdb_residue(s):
    """Split a PDB residue id into its numeric part and insertion code.

    '123'  -> (123, ' ')   (blank insertion code)
    '123A' -> (123, 'A')

    The numeric component is now always returned as an int; the original
    returned it as a string when an insertion code was present, making
    the two branches inconsistent.

    :raises ValueError: if s is not digits optionally followed by a
        single insertion-code character.
    """
    if s.isdigit():
        return (int(s), ' ')
    if not s[:-1].isdigit():
        # Explicit error instead of a bare assert (stripped under -O).
        raise ValueError('Malformed PDB residue id: %r' % (s,))
    return (int(s[:-1]), s[-1])
Splits a PDB residue into the numeric and insertion code components .
13,272
def do_chunked_gzip(infh, outfh, filename):
    """Gzip *infh* into *outfh* in chunks, displaying a progress bar.

    Memory-friendly: reads GZIP_CHUNK_SIZE bytes at a time instead of
    loading the whole file.  The input handle is reopened if already
    closed, otherwise rewound to the start.

    :param infh: source file handle (text mode).
    :param outfh: destination file object receiving the gzip stream.
    :param filename: label used in the progress message.
    """
    import gzip
    gzfh = gzip.GzipFile('rawlogs', mode='wb', fileobj=outfh)
    if infh.closed:
        infh = open(infh.name, 'r')
    else:
        infh.seek(0)
    readsize = 0
    sys.stdout.write('Gzipping {0}: '.format(filename))
    if os.stat(infh.name).st_size:
        infh.seek(0)
        progressbar = ProgressBar(sys.stdout, os.stat(infh.name).st_size, "bytes gzipped")
    while True:
        chunk = infh.read(GZIP_CHUNK_SIZE)
        if not chunk:
            break
        if sys.version_info[0] >= 3:
            # gzip expects bytes on Python 3; input was read as text.
            gzfh.write(bytes(chunk, "utf-8"))
        else:
            gzfh.write(chunk)
        readsize += len(chunk)
        # NOTE(review): progressbar is only bound when the stat'd size
        # was non-zero; a non-empty read after a zero stat would raise.
        progressbar.redraw(readsize)
    gzfh.close()
A memory - friendly way of compressing the data .
13,273
def mail_message(smtp_server, message, from_address, rcpt_addresses):
    """Send *message* through a local mailer command or an SMTP server.

    :param smtp_server: absolute path to a sendmail-style binary
        (starts with '/'), or an SMTP host name.
    :param message: full message text.
    :param from_address: envelope sender address.
    :param rcpt_addresses: envelope recipient addresses.
    """
    if smtp_server[0] == '/':
        # Pipe the message into the local mailer command.
        pipe = os.popen(smtp_server, 'w')
        pipe.write(message)
        pipe.close()
    else:
        import smtplib
        server = smtplib.SMTP(smtp_server)
        server.sendmail(from_address, rcpt_addresses, message)
        server.quit()
Send mail using smtp .
13,274
def get_value_unit(value, unit, prefix):
    """Return a human-readable (value, unit) pair converted toward *prefix*.

    The unit string may already carry a binary prefix ('K', 'M', 'G',
    'T'); the value is rescaled by factors of 1024 toward the requested
    prefix.  When scaling up, the value is rounded to the nearest
    integer, and conversion stops early while the value is still below
    10240 so the result keeps a readable magnitude.

    :param value: numeric value expressed with the current prefix.
    :param unit: unit string, possibly prefixed (e.g. 'KB').
    :param prefix: desired prefix ('' for none).
    :return: (converted value, prefixed unit string).
    """
    prefixes = ('', 'K', 'M', 'G', 'T')
    if len(unit):
        if unit[:1] in prefixes:
            # Split the existing prefix off the unit.
            valprefix = unit[0]
            unit = unit[1:]
        else:
            valprefix = ''
    else:
        valprefix = ''
    while valprefix != prefix:
        uidx = prefixes.index(valprefix)
        if uidx > prefixes.index(prefix):
            # Current prefix is larger than requested: scale down.
            value *= 1024
            valprefix = prefixes[uidx - 1]
        else:
            # Scaling up: stop early if the value is already small.
            if value < 10240:
                return value, '{0}{1}'.format(valprefix, unit)
            value = int(round(value / 1024.0))
            valprefix = prefixes[uidx + 1]
    return value, '{0}{1}'.format(valprefix, unit)
Return a human-readable value with unit specification. Try to transform the unit prefix to the one passed as parameter. When transforming to a higher prefix, apply nearest-integer rounding.
13,275
def get_fmt_results(results, limit=5, sep='::', fmt=None):
    """Format a result dict as a list of 'key(count)' strings, best first.

    Tuple keys are joined with *sep* and followed by their count in
    parentheses.  Once *limit* entries have been emitted, remaining keys
    with count <= 1 are collapsed into a final '[N more skipped]' entry.
    If *fmt* is given, it is applied (via str.format) to the odd-indexed
    elements of each key and to the skipped-marker string.

    :return: list of formatted strings.
    """
    result_list = []
    for key in sorted(results, key=lambda x: results[x], reverse=True):
        if len(result_list) >= limit and results[key] <= 1:
            break
        if fmt is not None:
            fmtkey = []
            for i in range(len(key)):
                if i % 2 == 1:
                    fmtkey.append(fmt.format(key[i]))
                else:
                    fmtkey.append(key[i])
            result_list.append(u'{0}({1})'.format(sep.join(fmtkey), results[key]))
        else:
            result_list.append(u'{0}({1})'.format(sep.join(key), results[key]))
    else:
        # for/else: loop completed without break, nothing was skipped.
        return result_list
    # Reached only via break: report how many entries were skipped.
    if fmt is not None:
        result_list.append(fmt.format(u'[%d more skipped]' % (len(results) - len(result_list))))
    else:
        result_list.append(u'[%d more skipped]' % (len(results) - len(result_list)))
    return result_list
Return a list of formatted strings representation on a result dictionary . The elements of the key are divided by a separator string . The result is appended after the key between parentheses . Apply a format transformation to odd elements of the key if a fmt parameter is passed .
13,276
def safe_expand(template, mapping):
    """Repeatedly apply string.Template safe substitution until stable.

    Mapping values may themselves contain placeholders, as long as the
    references eventually bottom out.

    :raises ValueError: if the mapping is circular (no fixed point
        within len(mapping) + 1 passes).
    """
    for _pass in range(len(mapping) + 1):
        expanded = string.Template(template).safe_substitute(mapping)
        if expanded == template:
            return template
        template = expanded
    raise ValueError("circular mapping provided!")
Safe string template expansion . Raises an error if the provided substitution mapping has circularities .
13,277
def protected_property(func):
    """Method decorator: property preferring the protected attribute.

    The returned property first looks up ``self._<name>``; when that
    attribute is missing it falls back to calling the wrapped method.

    :raises ValueError: if applied to an already-protected method
        (name starting with '_').
    """
    if func.__name__.startswith('_'):
        raise ValueError("%r: Cannot decorate a protected method!" % func)

    @property
    @wraps(func)
    def proxy_wrapper(self):
        attr_name = '_%s' % func.__name__
        try:
            return getattr(self, attr_name)
        except AttributeError:
            pass
        return func(self)
    return proxy_wrapper
Class method decorator that creates a property that returns the protected attribute or the value returned by the wrapped method if the protected attribute is not defined .
13,278
def open_resource(source):
    """Open *source* for binary reading, accepting paths, URLs, or file-likes.

    Tries, in order:
      1. open(source, 'rb') as a filesystem path;
      2. on IOError/OSError, urlopen(source) as a URL (wrapped with
         closing() when the response is not a context manager);
      3. on TypeError (source is not path-like), return source itself if
         it already looks like a readable file object.

    :raises IOError/OSError: if source is a string but neither a
        readable path nor a valid URL.
    """
    try:
        return open(source, mode='rb')
    except (IOError, OSError) as err:
        try:
            resource = urlopen(source)
        except ValueError:
            # Not a valid URL either; fall through and re-raise the
            # original filesystem error.
            pass
        else:
            resource.name = resource.url
            if hasattr(resource, '__enter__'):
                return resource
            else:
                return closing(resource)
        raise err
    except TypeError:
        # Possibly already an open file-like object.
        if hasattr(source, 'read') and hasattr(source, 'readlines'):
            return source
        raise
Opens a resource in binary reading mode. Wraps the resource with a context manager when it doesn't have one.
13,279
def load(stream, fmt='lha'):
    """Load a parameter file in DSixTools SLHA-like, JSON, or YAML format.

    :param stream: open file object, or (for 'json'/'yaml') a string.
    :param fmt: 'lha', 'json', or 'yaml'.
    :return: the parsed data structure (None for an unknown fmt).
    """
    if fmt == 'lha':
        return pylha.load(stream)
    elif fmt == 'json':
        if isinstance(stream, str):
            return json.loads(stream)
        else:
            return json.load(stream)
    elif fmt == 'yaml':
        # safe_load: parameter files need no arbitrary Python object
        # construction, and yaml.load without an explicit Loader is
        # unsafe (and rejected by modern PyYAML).
        return yaml.safe_load(stream)
Load a parameter file in DSixTools SLHA - like format or its JSON or YAML representation .
13,280
def sm_lha2dict(lha):
    """Convert a pylha dict from a DSixTools SM input file into SM values.

    Reads gauge couplings (g, gp, gs), scalar parameters (Lambda, m2),
    the 3x3 Yukawa matrices Gu/Gd/Ge (with optional imaginary parts from
    the IM* blocks), and the theta angles (defaulting to 0 when the
    THETA block is absent).

    :param lha: nested dict as returned by pylha.load.
    :return: OrderedDict of SM parameters.
    """
    d = OrderedDict()
    v = dict(lha['BLOCK']['GAUGE']['values'])
    d['g'] = v[1]
    d['gp'] = v[2]
    d['gs'] = v[3]
    v = dict(lha['BLOCK']['SCALAR']['values'])
    d['Lambda'] = v[1]
    d['m2'] = v[2]
    d['Gu'] = lha2matrix(lha['BLOCK']['GU']['values'], (3, 3))
    if 'IMGU' in lha['BLOCK']:
        # Imaginary parts are stored in a separate, optional block.
        d['Gu'] = d['Gu'] + 1j * lha2matrix(lha['BLOCK']['IMGU']['values'], (3, 3))
    d['Gd'] = lha2matrix(lha['BLOCK']['GD']['values'], (3, 3))
    if 'IMGD' in lha['BLOCK']:
        d['Gd'] = d['Gd'] + 1j * lha2matrix(lha['BLOCK']['IMGD']['values'], (3, 3))
    d['Ge'] = lha2matrix(lha['BLOCK']['GE']['values'], (3, 3))
    if 'IMGE' in lha['BLOCK']:
        d['Ge'] = d['Ge'] + 1j * lha2matrix(lha['BLOCK']['IMGE']['values'], (3, 3))
    if 'THETA' in lha['BLOCK']:
        v = dict(lha['BLOCK']['THETA']['values'])
        d['Theta'] = v.get(1, 0)
        d['Thetap'] = v.get(2, 0)
        d['Thetas'] = v.get(3, 0)
    else:
        d['Theta'] = 0
        d['Thetap'] = 0
        d['Thetas'] = 0
    return d
Convert a dictionary returned by pylha from a DSixTools SM input file into a dictionary of SM values .
13,281
def sm_dict2lha(d):
    """Convert a dict of SM parameters into a pylha-compatible block dict.

    Real and imaginary parts of the Yukawa matrices go into separate
    (IM-prefixed) blocks, as expected by the DSixTools SM output format.
    """
    blocks = OrderedDict()
    blocks['GAUGE'] = {'values': [[1, d['g'].real], [2, d['gp'].real], [3, d['gs'].real]]}
    blocks['SCALAR'] = {'values': [[1, d['Lambda'].real], [2, d['m2'].real]]}
    for key in ('Gu', 'Gd', 'Ge'):
        blocks[key.upper()] = {'values': matrix2lha(d[key].real)}
        blocks['IM' + key.upper()] = {'values': matrix2lha(d[key].imag)}
    blocks['THETA'] = {'values': [[1, d['Theta'].real], [2, d['Thetap'].real], [3, d['Thetas'].real]]}
    return {'BLOCK': blocks}
Convert a dictionary of SM parameters into a dictionary that pylha can convert into a DSixTools SM output file.
13,282
def wc_lha2dict(lha):
    """Convert a pylha dict from a DSixTools WC input file into Wilson coefficients.

    Scalar (0-fermion) coefficients are addressed via WC_dict_0f; two-
    and four-fermion coefficients are read as 3x3 / 3x3x3x3 arrays, with
    optional imaginary parts from the IM-prefixed blocks.  Missing
    blocks default to zero.
    """
    C = OrderedDict()
    # Scalar coefficients.
    for k, (block, i) in WC_dict_0f.items():
        try:
            C[k] = dict(lha['BLOCK'][block]['values'])[i]
        except KeyError:
            C[k] = 0
    # Two-fermion coefficients: real 3x3, plus optional imaginary part.
    for k in definitions.WC_keys_2f:
        try:
            C[k] = lha2matrix(lha['BLOCK']['WC' + k.upper()]['values'], (3, 3)).real
        except KeyError:
            C[k] = np.zeros((3, 3))
        try:
            C[k] = C[k] + 1j * lha2matrix(lha['BLOCK']['IMWC' + k.upper()]['values'], (3, 3))
        except KeyError:
            # No imaginary block: coefficient stays real.
            pass
    # Four-fermion coefficients: 3x3x3x3 tensors.
    for k in definitions.WC_keys_4f:
        try:
            C[k] = lha2matrix(lha['BLOCK']['WC' + k.upper()]['values'], (3, 3, 3, 3))
        except KeyError:
            C[k] = np.zeros((3, 3, 3, 3))
        try:
            C[k] = C[k] + 1j * lha2matrix(lha['BLOCK']['IMWC' + k.upper()]['values'], (3, 3, 3, 3))
        except KeyError:
            pass
    return C
Convert a dictionary returned by pylha from a DSixTools WC input file into a dictionary of Wilson coefficients .
13,283
def compute_rmsd_by_matrix(dataframe_1, dataframe_2, use_assertion=False):
    """Compute the RMSD between two equally-shaped pandas dataframes.

    :param use_assertion: when True, assert that the row indices of the
        two dataframes match exactly.
    :return: Frobenius norm of the difference divided by sqrt(#rows).
    """
    if use_assertion:
        assert list(dataframe_1.index) == list(dataframe_2.index)
    row_count = dataframe_1.shape[0]
    difference = dataframe_1 - dataframe_2
    return numpy.linalg.norm(difference) / numpy.sqrt(row_count)
Computes the RMSD of two pandas dataframes . The dataframes are expected to be of equal dimensions and use_assertion can be set to assert that the row indices match .
13,284
def jinja_extensions_feature(app):
    """Register the app's custom Jinja globals and filters.

    Installs moment.js helpers, date/humanize filters, and the asset /
    dev_proxy template functions on the app's Jinja environment.
    """
    env = app.jinja_env
    env.globals['momentjs'] = MomentJsFilters
    for filter_source in (MomentJsFilters(), DateFilters(), HumanizeFilters()):
        env.filters.update(filter_source.get_filters())
    env.globals.update(dict(asset=functions.asset, dev_proxy=functions.dev_proxy))
Enables custom templating extensions
13,285
def log(msg, level=0):
    """Log *msg* to the console.

    :param level: 0 -> INFO on stdout; any other value -> ERROR on
        stderr, rendered in red.
    """
    red = '\033[91m'
    endc = '\033[0m'
    config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'stdout': {'format': '[%(levelname)s]: %(asctime)s - %(message)s', 'datefmt': '%x %X'},
            'stderr': {'format': red + '[%(levelname)s]: %(asctime)s - %(message)s' + endc, 'datefmt': '%x %X'},
        },
        'handlers': {
            'stdout': {'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'stdout'},
            'stderr': {'class': 'logging.StreamHandler', 'level': 'ERROR', 'formatter': 'stderr'},
        },
        'loggers': {
            'info': {'handlers': ['stdout'], 'level': 'INFO', 'propagate': True},
            'error': {'handlers': ['stderr'], 'level': 'ERROR', 'propagate': False},
        },
    }
    # NOTE: reconfigures logging on every call (kept for parity with
    # the original behaviour).
    dictConfig(config)
    logger_name, log_level = ('info', 20) if level == 0 else ('error', 40)
    logging.getLogger(logger_name).log(log_level, msg)
Logs a message to the console with optional level paramater
13,286
def insert(self, part):
    """Insert *part* into this assembly via the module-level client ``c``.

    :param part: object exposing .params (dict) and .uri (with as_dict()).
    :return: the client response from create_assembly_instance.
    """
    # Param values are serialized to strings for the API call.
    params = {k: str(v) for k, v in part.params.items()}
    res = c.create_assembly_instance(self.uri.as_dict(), part.uri.as_dict(), params)
    return res
Insert a part into this assembly .
13,287
def parse_http_scheme(uri):
    """Return the http(s) scheme prefix of *uri*, defaulting to 'http://'.

    Matches 'http://' or 'https://' case-insensitively at the start of
    the string; any other (or missing) scheme yields 'http://'.
    """
    match = re.match(r'^(?:http)s?://', uri, flags=re.IGNORECASE)
    if match:
        return match.group(0)
    return 'http://'
Match an http scheme; if no match is found, http will be assumed.
13,288
def parse_stream(response):
    """Relay a docker-py stream to stdout, dispatching on payload type.

    Chunks are decoded from bytes and parsed as JSON with normalized
    keys; 'progress', 'error', 'status', and 'stream' payloads get
    dedicated rendering, anything unparseable is written verbatim.

    :param response: iterable of raw chunks from docker-py.
    :return: list of the successfully parsed (normalized) payload dicts.
    """
    stream_data = []
    stream = stdout
    for data in response:
        if data:
            try:
                data = data.decode('utf-8')
            except AttributeError as e:
                # Already a str (no .decode): log and pass through raw.
                logger.exception("Unable to parse stream, Attribute Error Raised: {0}".format(e))
                stream.write(data)
                continue
            try:
                normalized_data = normalize_keys(json.loads(data))
            except ValueError:
                # Not JSON: emit verbatim.
                stream.write(data)
                continue
            except TypeError:
                stream.write(data)
                continue
            if 'progress' in normalized_data:
                stream_data.append(normalized_data)
                _display_progress(normalized_data, stream)
            elif 'error' in normalized_data:
                _display_error(normalized_data, stream)
            elif 'status' in normalized_data:
                stream_data.append(normalized_data)
                _display_status(normalized_data, stream)
            elif 'stream' in normalized_data:
                stream_data.append(normalized_data)
                _display_stream(normalized_data, stream)
            else:
                stream.write(data)
            stream.flush()
    return stream_data
take stream from docker - py lib and display it to the user .
13,289
def normalize_keys(suspect, snake_case=True):
    """Recursively normalize all string keys of a dict to lowercase/snake_case.

    Uses the module-level first_cap_re / all_cap_re regexes to split
    CamelCase before lowercasing.  Nested dicts, and dicts contained in
    list values, are normalized as well.  Non-string keys are skipped.

    :param suspect: dict to normalize (mutated and returned).
    :param snake_case: when True convert CamelCase to snake_case,
        otherwise just lowercase the keys.
    :raises TypeError: if suspect is not a dict.
    """
    if not isinstance(suspect, dict):
        raise TypeError('you must pass a dict.')
    for key in list(suspect):
        if not isinstance(key, six.string_types):
            continue
        if snake_case:
            s1 = first_cap_re.sub(r'\1_\2', key)
            new_key = all_cap_re.sub(r'\1_\2', s1).lower()
        else:
            new_key = key.lower()
        value = suspect.pop(key)
        if isinstance(value, dict):
            suspect[new_key] = normalize_keys(value, snake_case)
        elif isinstance(value, list):
            # Normalize any dicts nested inside list values (in place).
            for i in range(0, len(value)):
                if isinstance(value[i], dict):
                    normalize_keys(value[i], snake_case)
            suspect[new_key] = value
        else:
            suspect[new_key] = value
    return suspect
take a dict and turn all of its type string keys into snake_case
13,290
def _display_status ( normalized_data , stream ) : if 'Pull complete' in normalized_data [ 'status' ] or 'Download complete' in normalized_data [ 'status' ] : stream . write ( "\n" ) if 'id' in normalized_data : stream . write ( "%s - " % normalized_data [ 'id' ] ) stream . write ( "{0}\n" . format ( normalized_data [ 'status' ] ) )
print status message from docker - py stream .
13,291
def _display_stream ( normalized_data , stream ) : try : stream . write ( normalized_data [ 'stream' ] ) except UnicodeEncodeError : stream . write ( normalized_data [ 'stream' ] . encode ( "utf-8" ) )
print stream message from docker - py stream .
13,292
def version(self) -> str:
    """Return the version number reported by ``adb version``.

    Parses the last whitespace-separated token of the first output line
    (e.g. '1.0.41' from 'Android Debug Bridge version 1.0.41').
    """
    output, _ = self._execute('version')
    first_line = output.splitlines()[0]
    return first_line.split()[-1]
Show the version number of Android Debug Bridge .
13,293
def get_state(self) -> str:
    """Return the device state: offline | bootloader | device.

    :raises DeviceConnectionException: when adb reports an error on
        stderr (e.g. no device connected).
    """
    output, error = self._execute('get-state')
    if error:
        detail = error.split(':', 1)[-1].strip()
        raise DeviceConnectionException(detail)
    return output.strip()
offline | bootloader | device
13,294
def acme_init():
    """Create the ACME (Let's Encrypt) account key and intermediate cert.

    Generates a 4096-bit RSA account key with openssl and downloads the
    intermediate certificate, unless the files already exist.  Both are
    made world-readable and owned by root.

    NOTE: Python 2 code (print statements, 0444 octal literals).
    """
    acme_private_key = ACME_PRIVATE_KEY
    acme_intermediate_cert = ACME_INTERMEDIATE_CERT
    acme_intermediate_cert_url = ACME_INTERMEDIATE_CERT_URL
    if not os.path.isfile(acme_private_key):
        print 'Create {}'.format(acme_private_key)
        cmd = 'openssl genrsa 4096 > {acme_private_key}'.format(acme_private_key=acme_private_key)
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, close_fds=True)
        p.communicate()
        helpers.file_rights(acme_private_key, mode=0444, uid=0, gid=0)
    else:
        print 'Already exist: {}'.format(acme_private_key)
    if not os.path.isfile(acme_intermediate_cert):
        print 'Create {}'.format(acme_intermediate_cert)
        cmd = 'wget -O - {acme_intermediate_cert_url} > {acme_intermediate_cert}'
        cmd = cmd.format(acme_intermediate_cert_url=acme_intermediate_cert_url, acme_intermediate_cert=acme_intermediate_cert)
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, close_fds=True)
        p.communicate()
        helpers.file_rights(acme_intermediate_cert, mode=0444, uid=0, gid=0)
    else:
        print 'Already exist: {}'.format(acme_intermediate_cert)
Init acme key
13,295
def acme_sign_certificate(common_name, size=DEFAULT_KEY_SIZE):
    """Sign a certificate for *common_name* with acme_tiny (Let's Encrypt).

    Generates the key and CSR, signs the certificate, and installs a
    monthly cron job that renews certificates.

    :param common_name: domain the certificate is issued for.
    :param size: RSA key size in bits.
    """
    private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name)
    certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name)
    certificate_request_path = '{}/{}.csr'.format(CERTIFICATES_PATH, common_name)
    signed_cert = '{certificates_path}/{common_name}-signed.crt'.format(certificates_path=CERTIFICATES_PATH, common_name=common_name)
    generate_certificate(common_name, size)
    # Build the CSR from the freshly generated private key.
    cmd = 'openssl req -new -sha256 -key {private_key_path}'
    cmd += ' -subj "/CN={common_name}" -out {certificate_request_path}'
    cmd = cmd.format(private_key_path=private_key_path, common_name=common_name, certificate_request_path=certificate_request_path)
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, close_fds=True)
    p.communicate()
    _internal_sign_certificate(certificate_path, certificate_request_path, signed_cert)
    # Ensure a monthly renewal cron job exists and is executable.
    cron = "/etc/cron.monthly/acme-renew"
    if not os.path.exists(cron):
        with open(cron, "w") as file:
            file.write("#!/bin/bash\ncozy_management renew_certificates\n")
        st = os.stat(cron)
        os.chmod(cron, st.st_mode | S_IXUSR)
Sign certificate with acme_tiny for let s encrypt
13,296
def acme_renew_certificates():
    """Renew Let's Encrypt certificates that expire within 30 days.

    Walks every CSR in CERTIFICATES_PATH, checks the matching
    certificate's expiry date, and re-signs those with less than 30
    days of validity remaining.

    NOTE: Python 2 code (print statements).
    """
    for csr in glob(os.path.join(CERTIFICATES_PATH, '*.csr')):
        common_name = os.path.basename(csr)
        common_name = os.path.splitext(common_name)[0]
        certificate_path = "{}.crt".format(common_name)
        certificate_path = os.path.join(CERTIFICATES_PATH, certificate_path)
        with open(certificate_path) as file:
            crt = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, file.read())
        # ASN.1 GENERALIZEDTIME -> datetime for the expiry comparison.
        expiration = crt.get_notAfter()
        expiration = _parse_asn1_generalized_date(expiration)
        remaining = expiration - datetime.utcnow()
        if remaining > timedelta(days=30):
            print "No need to renew {} ({})".format(certificate_path, remaining)
            continue
        print "Renewing {} ({})".format(certificate_path, remaining)
        certificate_request_path = "{}.csr".format(common_name)
        certificate_request_path = os.path.join(CERTIFICATES_PATH, certificate_request_path)
        signed_cert = "{}-signed.crt".format(common_name)
        signed_cert = os.path.join(CERTIFICATES_PATH, signed_cert)
        _internal_sign_certificate(certificate_path, certificate_request_path, signed_cert)
Renew certificates with acme_tiny for let s encrypt
13,297
def get_crt_common_name(certificate_path=OLD_CERTIFICATE_PATH):
    """Return the CN (commonName) from a PEM certificate file.

    :param certificate_path: path of the certificate to inspect.
    :return: the certificate's commonName, or None when the file cannot
        be opened.
    """
    try:
        # `with` ensures the handle is closed; the original leaked it.
        with open(certificate_path) as certificate_file:
            crt = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate_file.read())
        return crt.get_subject().commonName
    except IOError:
        return None
Get CN from certificate
13,298
def normalize_cert_dir():
    """Migrate the old flat certificate layout to the new per-CN layout.

    Creates the cozy config/certificate/ACME directories when missing,
    moves the legacy certificate and key to <CN>.crt / <CN>.key under
    CERTIFICATES_PATH, and refreshes the 'current' symlinks.

    NOTE: Python 2 code (print statements, octal literals).
    """
    current_cn = get_crt_common_name()
    if not os.path.isdir(COZY_CONFIG_PATH):
        print 'Need to create {}'.format(COZY_CONFIG_PATH)
        os.mkdir(COZY_CONFIG_PATH, 0755)
    if not os.path.isdir(CERTIFICATES_PATH):
        print 'Need to create {}'.format(CERTIFICATES_PATH)
        os.mkdir(CERTIFICATES_PATH, 0755)
    if not os.path.isdir(ACME_PRIVATE_PATH):
        print 'Need to create {}'.format(ACME_PRIVATE_PATH)
        os.mkdir(ACME_PRIVATE_PATH, 0700)
    # Move the legacy files only when they are real files, not symlinks.
    if os.path.isfile(OLD_CERTIFICATE_PATH) and not os.path.islink(OLD_CERTIFICATE_PATH):
        target = '{}/{}.crt'.format(CERTIFICATES_PATH, current_cn)
        print 'Move {} to {}'.format(CERTIFICATES_PATH, target)
        os.rename(OLD_CERTIFICATE_PATH, target)
    else:
        print 'Nothing to do for {}'.format(OLD_CERTIFICATE_PATH)
    if os.path.isfile(OLD_PRIVATE_KEY_PATH) and not os.path.islink(OLD_PRIVATE_KEY_PATH):
        target = '{}/{}.key'.format(CERTIFICATES_PATH, current_cn)
        print 'Move {} to {}'.format(OLD_PRIVATE_KEY_PATH, target)
        os.rename(OLD_PRIVATE_KEY_PATH, target)
    else:
        print 'Nothing to do for {}'.format(OLD_PRIVATE_KEY_PATH)
    if current_cn:
        make_links(current_cn)
Put old cerfificate form to new one
13,299
def clean_links():
    """Remove the nginx 'current' certificate and key symlinks, if present.

    NOTE: Python 2 code (print statements).
    """
    if os.path.isfile(CURRENT_CERTIFICATE_PATH):
        print 'Delete symlink {}'.format(CURRENT_CERTIFICATE_PATH)
        os.remove(CURRENT_CERTIFICATE_PATH)
    if os.path.isfile(CURRENT_PRIVATE_KEY_PATH):
        print 'Delete symlink {}'.format(CURRENT_PRIVATE_KEY_PATH)
        os.remove(CURRENT_PRIVATE_KEY_PATH)
Clean symlink for nginx