idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
12,300
def read_by(cls, removed=False, **kwargs):
    """filter_by-style query helper that applies soft-delete logic.

    Unless ``removed`` is True, only rows whose ``time_removed`` column
    is 0 (i.e. not soft-deleted) are matched. If your query conditions
    require expressions, use ``read`` instead.
    """
    filters = dict(kwargs)
    if not removed:
        # Restrict to rows that have not been soft-deleted.
        filters['time_removed'] = 0
    return cls.query.filter_by(**filters)
filter_by query helper that handles soft delete logic . If your query conditions require expressions use read .
12,301
def read(cls, *criteria, **kwargs):
    """filter-style query helper that applies soft-delete logic.

    Pass ``removed=True`` to include soft-deleted rows. If your query
    conditions do not require expressions, consider ``read_by``.
    """
    include_removed = kwargs.get('removed', False)
    if include_removed:
        return cls.query.filter(*criteria)
    # Default: exclude rows flagged with a non-zero time_removed.
    return cls.query.filter(cls.time_removed == 0, *criteria)
filter query helper that handles soft delete logic . If your query conditions do not require expressions consider using read_by .
12,302
def delete(self, session, commit=True, soft=True):
    """Delete a row from the DB.

    By default this is a *soft* delete: the row is kept and its
    ``time_removed`` column is stamped with the DB-side unix timestamp
    (``unix_timestamp()`` is MySQL-specific -- TODO confirm backend).
    With ``soft=False`` the row is physically deleted via the session.
    The change is committed immediately unless ``commit=False``.
    """
    if soft:
        # Mark removal time instead of deleting the row.
        self.time_removed = sqlalchemy.func.unix_timestamp()
    else:
        session.delete(self)
    if commit:
        session.commit()
Delete a row from the DB .
12,303
def walk_paths(self, base: Optional[pathlib.PurePath] = pathlib.PurePath()) -> Iterator[pathlib.PurePath]:
    """Recursively traverse all paths inside this entity, including the
    entity itself, each prefixed with *base*.

    Abstract: concrete entity types must override this. Note the default
    for *base* is an (immutable) empty ``PurePath``, not ``None``.
    """
    raise NotImplementedError()
Recursively traverse all paths inside this entity including the entity itself .
12,304
def _walk_paths(self, base: pathlib.PurePath) -> Iterator[pathlib.PurePath]:
    """Internal helper for walking paths.

    Exists so the name of the root entity can be excluded from the walk
    by overriders; the default simply delegates to ``walk_paths``.
    """
    return self.walk_paths(base)
Internal helper for walking paths . This is required to exclude the name of the root entity from the walk .
12,305
def from_path(cls, path: pathlib.Path) -> 'Entity':
    """Create an entity from a local path.

    Dispatches to ``File.from_path`` for regular files and to
    ``Directory.from_path`` for everything else (assumes non-files are
    directories -- TODO confirm behavior for symlinks/special files).
    """
    if path.is_file():
        return File.from_path(path)
    return Directory.from_path(path)
Create an entity from a local path .
12,306
def _md5 ( path : pathlib . PurePath ) : hash_ = hashlib . md5 ( ) with open ( path , 'rb' ) as f : for chunk in iter ( lambda : f . read ( 4096 ) , b'' ) : hash_ . update ( chunk ) return hash_ . hexdigest ( )
Calculate the MD5 checksum of a file .
12,307
def from_path(cls, path: pathlib.Path) -> 'File':
    """Create a file entity from a file path.

    The entity records the file name, size in bytes, and MD5 checksum.

    Raises:
        ValueError: if *path* does not point to a regular file.
    """
    if not path.is_file():
        raise ValueError('Path does not point to a file')
    return File(path.name, path.stat().st_size, cls._md5(path))
Create a file entity from a file path .
12,308
def from_path(cls, path: pathlib.Path) -> 'Directory':
    """Create a directory entity from a directory path.

    Children are built recursively via ``Entity.from_path`` and keyed
    by their file name.

    Raises:
        ValueError: if *path* does not point to a directory.
    """
    if not path.is_dir():
        raise ValueError('Path does not point to a directory')
    return Directory(path.name,
                     {entity.name: Entity.from_path(entity)
                      for entity in path.iterdir()})
Create a directory entity from a directory path .
12,309
def best_result(self):
    """The best result of the grid search.

    Returns the result with the highest ``figure_of_merit`` (the output
    of the non-linear search), or ``None`` when there are no results.
    On ties the earliest result is kept, matching a strict
    greater-than scan.
    """
    return max(self.results, key=lambda r: r.figure_of_merit, default=None)
The best result of the grid search, i.e. the result output by the non-linear search that had the highest maximum figure of merit.
12,310
def make_lists(self, grid_priors):
    """Produce the grid of points to search.

    Returns a list of lists of floats; each inner list holds one value
    per dimension (one per grid prior) for a single grid-search step.
    Delegates to ``optimizer.make_lists`` with this search's
    ``hyper_step_size``; ``centre_steps=False`` presumably anchors steps
    at the lower edge rather than centring them -- TODO confirm against
    optimizer.make_lists.
    """
    return optimizer.make_lists(len(grid_priors),
                                step_size=self.hyper_step_size,
                                centre_steps=False)
Produces a list of lists of floats where each list of floats represents the values in each dimension for one step of the grid search .
12,311
def fit(self, analysis, grid_priors):
    """Fit an analysis with a set of grid priors.

    The grid priors (priors of this instance's model mapper) are
    replaced by uniform priors for each step of the grid search; one
    optimizer run is performed per grid point and a running results
    file is (re)written after every step.

    Returns a ``GridSearchResult`` holding all per-step results and the
    grid point value lists.

    NOTE(review): ``list(set(grid_priors))`` de-duplicates but makes the
    prior/column ordering depend on set iteration order -- confirm this
    is intended.
    """
    grid_priors = list(set(grid_priors))
    results = []
    lists = self.make_lists(grid_priors)
    # Header row: one column per prior name plus the figure of merit.
    results_list = [list(map(self.variable.name_for_prior, grid_priors)) + ["figure_of_merit"]]

    def write_results():
        # Rewrite the full results table; floats formatted to 2 dp.
        with open("{}/results".format(self.phase_output_path), "w+") as f:
            f.write("\n".join(map(lambda ls: ", ".join(
                map(lambda value: "{:.2f}".format(value) if isinstance(value, float) else str(value), ls)),
                results_list)))

    for values in lists:
        arguments = self.make_arguments(values, grid_priors)
        model_mapper = self.variable.mapper_from_partial_prior_arguments(arguments)
        # Build a unique output path segment from the prior limits.
        labels = []
        for prior in arguments.values():
            labels.append("{}_{:.2f}_{:.2f}".format(
                model_mapper.name_for_prior(prior), prior.lower_limit, prior.upper_limit))
        name_path = "{}{}/{}".format(self.phase_name, self.phase_tag, "_".join(labels))
        optimizer_instance = self.optimizer_instance(model_mapper, name_path)
        optimizer_instance.constant = self.constant
        result = optimizer_instance.fit(analysis)
        results.append(result)
        results_list.append([*[prior.lower_limit for prior in arguments.values()],
                             result.figure_of_merit])
        # Persist after every step so partial progress survives a crash.
        write_results()
    return GridSearchResult(results, lists)
Fit an analysis with a set of grid priors . The grid priors are priors associated with the model mapper of this instance that are replaced by uniform priors for each step of the grid search .
12,312
def portTryReduce(root: LNode, port: LPort):
    """If the majority of this port's children connect to the same remote
    port, remove those child connections and connect this (parent) port
    to that remote port instead.

    Children are reduced bottom-up first; the reduction is only applied
    when at least half of the connected children (and more than one of
    two) target the same remote port.
    """
    if not port.children:
        return
    # Reduce grandchildren first (bottom-up).
    for p in port.children:
        portTryReduce(root, p)
    target_nodes = {}
    ch_cnt = countDirectlyConnected(port, target_nodes)
    if not target_nodes:
        # No directly-connected children: nothing to reduce.
        return
    # Pick the remote port with the most child connections.
    new_target, children_edge_to_destroy = max(target_nodes.items(),
                                               key=lambda x: len(x[1]))
    cnt = len(children_edge_to_destroy)
    if cnt < ch_cnt / 2 or cnt == 1 and ch_cnt == 2:
        # No clear majority -- keep the individual child connections.
        return
    children_to_destroy = set()
    on_target_children_to_destroy = set()
    for child, edge in children_edge_to_destroy:
        if child.direction == PortType.OUTPUT:
            target_ch = edge.dsts
        elif child.direction == PortType.INPUT:
            target_ch = edge.srcs
        else:
            raise ValueError(child.direction)
        if len(target_ch) != 1:
            raise NotImplementedError("multiple connected nodes", target_ch)
        target_ch = target_ch[0]
        try:
            assert target_ch.parent is new_target, (target_ch, target_ch.parent, new_target)
        except AssertionError:
            # Debug aid before re-raising: dump the mismatching endpoints.
            print('Wrong target:\n', edge.src, "\n", edge.dst, "\n",
                  target_ch.parent, "\n", new_target)
            raise
        # Detach the child-level endpoint from the edge.
        if child.direction == PortType.OUTPUT:
            edge.removeTarget(target_ch)
        elif child.direction == PortType.INPUT:
            edge.removeTarget(child)
        # Drop edges/ports that became fully disconnected.
        if not edge.srcs or not edge.dsts:
            edge.remove()
        if not target_ch.incomingEdges and not target_ch.outgoingEdges:
            on_target_children_to_destroy.add(target_ch)
        if not child.incomingEdges and not child.outgoingEdges:
            children_to_destroy.add(child)
    # Remove the now-orphaned child ports on both sides.
    port.children = [ch for ch in port.children if ch not in children_to_destroy]
    new_target.children = [ch for ch in new_target.children
                           if ch not in on_target_children_to_destroy]
    # Replace the destroyed child edges with a single parent-level edge.
    if port.direction == PortType.OUTPUT:
        root.addEdge(port, new_target)
    elif port.direction == PortType.INPUT:
        root.addEdge(new_target, port)
    else:
        raise NotImplementedError(port.direction)
Check whether the majority of children are connected to the same port; if so, reduce the children and connect this port instead of the children.
12,313
def resolveSharedConnections(root: LNode):
    """Walk all ports of all nodes and, where possible, collapse
    sub-interface connections into a single parent-interface connection.

    Child nodes are fully resolved (depth-first) before the ports of the
    nodes directly under *root* are reduced.
    """
    nodes = root.children
    # Resolve inside every child node first.
    for node in nodes:
        resolveSharedConnections(node)
    # Then try to reduce each port at this level.
    for node in nodes:
        for port in node.iterPorts():
            portTryReduce(root, port)
Walk all ports on all nodes and, where possible, group sub-interface connections into a single parent-interface connection.
12,314
def countDirectlyConnected(port: LPort, result: dict) -> int:
    """Count how many (leaf) ports are directly connected to other nodes.

    For ports with children the counts are summed recursively. A leaf
    port counts as 1 only when it has exactly one connected endpoint
    whose name matches and whose remote parent is a port (not an
    ``LNode``); in that case ``(port, edge)`` is also appended to
    ``result[remote_parent]`` so callers can group candidates by target.
    Returns 0 otherwise.
    """
    inEdges = port.incomingEdges
    outEdges = port.outgoingEdges
    if port.children:
        # Non-leaf: sum over children.
        ch_cnt = 0
        for ch in port.children:
            ch_cnt += countDirectlyConnected(ch, result)
        return ch_cnt
    elif not inEdges and not outEdges:
        # Unconnected leaf.
        if port.direction == PortType.INPUT:
            if port.originObj is not None:
                assert not port.originObj.src.drivers, port.originObj
            else:
                print("Warning", port, "not connected")
        return 0
    else:
        # Leaf with edges: require exactly one connected endpoint total.
        connectedElemCnt = 0
        for e in inEdges:
            connectedElemCnt += len(e.srcs)
            if connectedElemCnt > 1:
                return 0
        for e in outEdges:
            connectedElemCnt += len(e.dsts)
            if connectedElemCnt > 1:
                return 0
        if connectedElemCnt != 1:
            return 0
        if inEdges:
            e = inEdges[0]
        else:
            e = outEdges[0]
        # Endpoint names must match for the connection to be groupable.
        if e.srcs[0].name != e.dsts[0].name:
            return 0
        # Find the parent of the remote endpoint.
        if e.srcs[0] is port:
            p = e.dsts[0].parent
        else:
            p = e.srcs[0].parent
        # Only group when the remote parent is a port, not a node.
        if not isinstance(p, LNode):
            connections = result.get(p, [])
            connections.append((port, e))
            result[p] = connections
        return 1
Count how many ports are directly connected to other nodes
12,315
def deploy(self, image_name, ip, flavor='m1.small'):
    """Create the node: allocate a provision port, boot an OpenStack
    instance on it, attach security groups, then stop the instance.

    The instance is left stopped; the provision port id and MAC address
    are stored on ``self`` for later use (e.g. PXE boot).

    Raises:
        Exception: if the instance could not be built.
    """
    # Create a neutron port on the provision network with the fixed IP.
    body_value = {
        "port": {
            "admin_state_up": True,
            "name": self.name + '_provision',
            "network_id": os_utils.get_network_id(self.nova_api, 'provision_bob'),
            'fixed_ips': [{'ip_address': ip}]
        }
    }
    response = self.neutron.create_port(body=body_value)
    self._provision_port_id = response['port']['id']
    self.mac = response['port']['mac_address']
    # Resolve the ids needed to boot the instance.
    image_id_to_boot_from = os_utils.get_image_id(self.nova_api, image_name)
    flavor_id = os_utils.get_flavor_id(self.nova_api, flavor)
    keypair_id = os_utils.get_keypair_id(self.nova_api, self._keypair)
    nics = [{'port-id': self._provision_port_id}]
    self._os_instance = os_provisioner.build_openstack_instance(
        self.nova_api,
        self.name,
        image_id_to_boot_from,
        flavor_id,
        keypair_id,
        nics)
    if not self._os_instance:
        LOG.error("deployment has failed")
        raise Exception()
    # Attach the provision group plus any user-supplied groups.
    os_provisioner.add_provision_security_group(self.nova_api)
    os_utils.add_security_groups(self._os_instance, ['provision'])
    os_utils.add_security_groups(self._os_instance, self._security_groups)
    LOG.info("add security groups '%s'" % self._security_groups)
    LOG.info("instance '%s' ready to use" % self.name)
    # Leave the node powered off until it is actually needed.
    self._os_instance.stop()
Create the node .
12,316
def pxe_netboot(self, filename):
    """Specify which file iPXE should load during the netboot.

    Updates the node's provision port with DHCP options pointing the
    boot file at an HTTP server and the TFTP/server address at
    192.0.2.240.

    NOTE(review): 'ip_version' is the int 4 for the first option but the
    string '4' for the other two -- confirm whether neutron accepts both.
    """
    new_port = {
        'extra_dhcp_opts': [
            {'opt_name': 'bootfile-name',
             'opt_value': 'http://192.0.2.240:8088/' + filename,
             'ip_version': 4, },
            {'opt_name': 'tftp-server',
             'opt_value': '192.0.2.240',
             'ip_version': '4'},
            {'opt_name': 'server-ip-address',
             'opt_value': '192.0.2.240',
             'ip_version': '4'}
        ]
    }
    self.neutron.update_port(self._provision_port_id, {'port': new_port})
Specify which file ipxe should load during the netboot .
12,317
def initialize(self, size=2):
    """Populate the node pool.

    Creates *size* baremetal node objects, deploys them concurrently
    (up to 5 at a time), registers each with the BMC and records an
    instackenv entry per node, then stops the BMC SSH connections.
    """
    for i in range(0, size):
        self.nodes.append(Baremetal(self.nova_api, self.neutron,
                                    self._keypair, self._key_filename,
                                    self._security_groups,
                                    name='baremetal_%d' % i))
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        # Fan out the deployments; each node gets the next provision IP.
        for bm_node in self.nodes:
            future = executor.submit(bm_node.deploy, 'ipxe.usb',
                                     '192.0.2.%d' % self._idx,
                                     flavor='m1.large')
            self._idx += 1
            bm_node._future = future
        # Wait for each deployment, then register it with the BMC.
        # (Reconstructed from collapsed source: whether this loop sits
        # inside or after the `with` is equivalent, since exiting the
        # executor joins all futures anyway.)
        for bm_node in self.nodes:
            bm_node._future.result()
            pm_addr = self.bmc.register_host(bm_node.name)
            self.instackenv.append({
                "pm_type": "pxe_ipmitool",
                "mac": [bm_node.mac],
                "cpu": "4",
                "memory": "8196",
                "disk": "80",
                "arch": "x86_64",
                "pm_user": "admin",
                "pm_password": "password",
                "pm_addr": pm_addr})
    self.bmc.ssh_pool.stop_all()
Populate the node pool.
12,318
def create_bmc(self, os_username, os_password, os_project_id, os_auth_url):
    """Deploy the BMC machine.

    Boots an OvbBmc instance (Fedora 23 image at 192.0.2.254) using this
    pool's keypair/security groups and the given OpenStack credentials.
    Returns the OvbBmc object.
    """
    bmc = ovb_bmc.OvbBmc(
        nova_api=self.nova_api,
        neutron=self.neutron,
        keypair=self._keypair,
        key_filename=self._key_filename,
        security_groups=self._security_groups,
        image_name='Fedora 23 x86_64',
        ip='192.0.2.254',
        os_username=os_username,
        os_password=os_password,
        os_project_id=os_project_id,
        os_auth_url=os_auth_url)
    return bmc
Deploy the BMC machine .
12,319
def untlxml2py(untl_filename):
    """Parse a UNTL XML file (path or file object) into a pyuntl element tree.

    Uses streaming ``iterparse``: a pyuntl element is pushed on 'start'
    events and popped/attached to its parent on 'end' events. Returns
    the root element once the stack empties.

    Raises:
        PyuntlException: if an element tag is not in the UNTL dispatch.
    """
    parent_stack = []
    for event, element in iterparse(untl_filename, events=('start', 'end')):
        # Strip the XML namespace from the tag, when present.
        if NAMESPACE_REGEX.search(element.tag, 0):
            element_tag = NAMESPACE_REGEX.search(element.tag, 0).group(1)
        else:
            element_tag = element.tag
        if element_tag in PYUNTL_DISPATCH:
            if event == 'start':
                parent_stack.append(PYUNTL_DISPATCH[element_tag]())
            elif event == 'end':
                child = parent_stack.pop()
                if element.text is not None:
                    content = element.text.strip()
                    if content != '':
                        # Note: sets the raw (unstripped) text.
                        child.set_content(element.text)
                if element.get('qualifier', False):
                    child.set_qualifier(element.get('qualifier'))
                if len(parent_stack) > 0:
                    parent_stack[-1].add_child(child)
                else:
                    # Stack empty: this is the root element.
                    return child
        else:
            raise PyuntlException('Element "%s" not in UNTL dispatch.' % (element_tag))
Parse a UNTL XML file object into a pyuntl element tree .
12,320
def untldict2py(untl_dict):
    """Convert a UNTL dictionary into a Python (pyuntl) object tree.

    Each entry maps an element name to a list of dicts with optional
    'qualifier' and 'content' keys; dict-valued content becomes child
    elements.

    NOTE(review): if an entry has neither content nor qualifier,
    ``untl_element`` is never assigned and the append below would raise
    ``NameError`` (or reuse the previous loop value) -- confirm inputs
    always carry one of the two.
    """
    untl_root = PYUNTL_DISPATCH['metadata']()
    untl_py_list = []
    for element_name, element_list in untl_dict.items():
        for element_dict in element_list:
            qualifier = element_dict.get('qualifier', None)
            content = element_dict.get('content', None)
            child_list = []
            if isinstance(content, dict):
                # Dict content: each key/value pair becomes a child element.
                for key, value in content.items():
                    child_list.append(PYUNTL_DISPATCH[key](content=value),)
                if qualifier is not None:
                    untl_element = PYUNTL_DISPATCH[element_name](qualifier=qualifier)
                else:
                    untl_element = PYUNTL_DISPATCH[element_name]()
                for child in child_list:
                    untl_element.add_child(child)
            elif content is not None and qualifier is not None:
                untl_element = PYUNTL_DISPATCH[element_name](qualifier=qualifier,
                                                            content=content,)
            elif qualifier is not None:
                untl_element = PYUNTL_DISPATCH[element_name](qualifier=qualifier,)
            elif content is not None:
                untl_element = PYUNTL_DISPATCH[element_name](content=content,)
            elif len(child_list) > 0:
                untl_element = PYUNTL_DISPATCH[element_name]()
            untl_py_list.append(untl_element)
    for untl_element in untl_py_list:
        untl_root.add_child(untl_element)
    return untl_root
Convert a UNTL dictionary into a Python object .
12,321
def post2pydict(post, ignore_list):
    """Convert UNTL form POST data to a Python dictionary.

    Form keys look like ``<element>-<attribute>``; values are parallel
    lists (one entry per repeated element instance). Keys in
    *ignore_list* are skipped. Returns
    ``root_element.create_element_dict()``.

    NOTE(review): uses the Python 2 ``unicode`` builtin -- this function
    is not Python 3 compatible as written.

    Raises:
        PyuntlException: if the per-attribute value lists for an element
            do not all have the same length.
    """
    root_element = PYUNTL_DISPATCH['metadata']()
    untl_form_dict = {}
    form_post = dict(post.copy())
    # Group posted values by element tag: tag -> ((attribute, values), ...).
    for key, value_list in form_post.items():
        if key not in ignore_list:
            (element_tag, element_attribute) = key.split('-', 1)
            if element_tag not in untl_form_dict:
                untl_form_dict[element_tag] = ()
            untl_form_dict[element_tag] += (element_attribute, value_list),
    for element_tag, attribute_tuple in untl_form_dict.items():
        attribute_count = len(attribute_tuple)
        value_count = len(attribute_tuple[0][1])
        # All attributes of an element must have the same number of values.
        for i in range(0, attribute_count):
            if not len(attribute_tuple[i][1]) == value_count:
                raise PyuntlException('Field values did not match up '
                                      'numerically for %s' % (element_tag))
        # Build one element per value index.
        for i in range(0, value_count):
            untl_element = None
            content = ''
            qualifier = ''
            child_list = []
            for j in range(0, attribute_count):
                if attribute_tuple[j][0] == 'content':
                    content = unicode(attribute_tuple[j][1][i])
                elif attribute_tuple[j][0] == 'qualifier':
                    qualifier = attribute_tuple[j][1][i]
                else:
                    if attribute_tuple[j][1][i] != '':
                        child_tag = attribute_tuple[j][0]
                        # PARENT_FORM attributes act as the qualifier;
                        # everything else becomes a child element.
                        if child_tag in PARENT_FORM:
                            qualifier = attribute_tuple[j][1][i]
                        else:
                            child_list.append(
                                PYUNTL_DISPATCH[attribute_tuple[j][0]](
                                    content=attribute_tuple[j][1][i]))
            if content != '' and qualifier != '':
                untl_element = PYUNTL_DISPATCH[element_tag](content=content,
                                                           qualifier=qualifier)
            elif content != '':
                untl_element = PYUNTL_DISPATCH[element_tag](content=content)
            elif qualifier != '':
                untl_element = PYUNTL_DISPATCH[element_tag](qualifier=qualifier)
            elif len(child_list) > 0:
                untl_element = PYUNTL_DISPATCH[element_tag]()
            if len(child_list) > 0 and untl_element is not None:
                for child in child_list:
                    untl_element.add_child(child)
            if untl_element is not None:
                root_element.add_child(untl_element)
    return root_element.create_element_dict()
Convert the UNTL posted data to a Python dictionary .
12,322
def untlpy2dcpy(untl_elements, **kwargs):
    """Convert the UNTL elements structure into a DC structure.

    Keyword args: ``ark``/``domain_name``/``scheme`` (to emit permalink
    and ark identifiers), ``resolve_values``/``resolve_urls`` (vocab
    resolution; triggers a vocabulary fetch unless
    ``verbose_vocabularies`` is supplied). Coverage elements qualified
    'sDate'/'eDate' are held back and merged into a single range
    coverage element at the end.
    """
    sDate = None
    eDate = None
    ark = kwargs.get('ark', None)
    domain_name = kwargs.get('domain_name', None)
    scheme = kwargs.get('scheme', 'http')
    resolve_values = kwargs.get('resolve_values', None)
    resolve_urls = kwargs.get('resolve_urls', None)
    verbose_vocabularies = kwargs.get('verbose_vocabularies', None)
    if resolve_values or resolve_urls:
        # Use the caller-provided vocabularies or fetch them.
        if verbose_vocabularies:
            vocab_data = verbose_vocabularies
        else:
            vocab_data = retrieve_vocab()
    else:
        vocab_data = None
    dc_root = DC_CONVERSION_DISPATCH['dc']()
    for element in untl_elements.children:
        if element.tag in DC_CONVERSION_DISPATCH:
            if element.children:
                dc_element = DC_CONVERSION_DISPATCH[element.tag](
                    qualifier=element.qualifier,
                    children=element.children,
                    resolve_values=resolve_values,
                    resolve_urls=resolve_urls,
                    vocab_data=vocab_data,
                )
            else:
                dc_element = DC_CONVERSION_DISPATCH[element.tag](
                    qualifier=element.qualifier,
                    content=element.content,
                    resolve_values=resolve_values,
                    resolve_urls=resolve_urls,
                    vocab_data=vocab_data,
                )
            if element.tag == 'coverage':
                # Defer date-range endpoints; other coverage passes through.
                if element.qualifier == 'sDate':
                    sDate = dc_element
                elif element.qualifier == 'eDate':
                    eDate = dc_element
                else:
                    dc_root.add_child(dc_element)
            elif dc_element:
                dc_root.add_child(dc_element)
    if ark and domain_name:
        # Emit both a permalink identifier and a plain ark identifier.
        permalink_identifier = DC_CONVERSION_DISPATCH['identifier'](
            qualifier='permalink',
            domain_name=domain_name,
            ark=ark,
            scheme=scheme)
        dc_root.add_child(permalink_identifier)
        ark_identifier = DC_CONVERSION_DISPATCH['identifier'](
            qualifier='ark',
            content=ark,
        )
        dc_root.add_child(ark_identifier)
    if sDate and eDate:
        # Merge the two endpoints into a single "start-end" coverage.
        dc_element = DC_CONVERSION_DISPATCH['coverage'](
            content='%s-%s' % (sDate.content, eDate.content),
        )
        dc_root.add_child(dc_element)
    elif sDate:
        dc_root.add_child(sDate)
    elif eDate:
        dc_root.add_child(eDate)
    return dc_root
Convert the UNTL elements structure into a DC structure .
12,323
def untlpy2highwirepy(untl_elements, **kwargs):
    """Convert a UNTL Python object to a list of highwire elements.

    Only the first publisher and the first creation date are kept; for
    titles the official title wins over any earlier non-official title.
    The chosen title is appended last.

    (Reconstructed from collapsed source; the nesting of the
    publisher/creation appends is the reading that avoids duplicate
    appends -- TODO confirm against upstream.)
    """
    highwire_list = []
    title = None
    publisher = None
    creation = None
    escape = kwargs.get('escape', False)
    for element in untl_elements.children:
        if element.tag in HIGHWIRE_CONVERSION_DISPATCH:
            highwire_element = HIGHWIRE_CONVERSION_DISPATCH[element.tag](
                qualifier=element.qualifier,
                content=element.content,
                children=element.children,
                escape=escape,
            )
            if highwire_element:
                if element.tag == 'title':
                    # Prefer the official title; otherwise first title seen.
                    if element.qualifier != 'officialtitle' and not title:
                        title = highwire_element
                    elif element.qualifier == 'officialtitle':
                        title = highwire_element
                elif element.tag == 'publisher':
                    # Only the first publisher is kept.
                    if not publisher:
                        publisher = highwire_element
                        highwire_list.append(publisher)
                elif element.tag == 'date':
                    # Only the first non-empty creation date is kept.
                    if not creation and element.qualifier == 'creation':
                        if highwire_element.content:
                            creation = highwire_element
                        if creation:
                            highwire_list.append(creation)
                elif highwire_element.content:
                    highwire_list.append(highwire_element)
    if title:
        highwire_list.append(title)
    return highwire_list
Convert a UNTL Python object to a highwire Python object .
12,324
def untlpydict2dcformatteddict(untl_dict, **kwargs):
    """Convert a UNTL data dictionary to a formatted DC data dictionary.

    Pipeline: UNTL dict -> pyuntl tree (``untldict2py``) -> DC tree
    (``untlpy2dcpy``) -> formatted DC dict (``dcpy2formatteddcdict``).
    Keyword args are forwarded to ``untlpy2dcpy``.
    """
    ark = kwargs.get('ark', None)
    domain_name = kwargs.get('domain_name', None)
    scheme = kwargs.get('scheme', 'http')
    resolve_values = kwargs.get('resolve_values', None)
    resolve_urls = kwargs.get('resolve_urls', None)
    verbose_vocabularies = kwargs.get('verbose_vocabularies', None)
    untl_py = untldict2py(untl_dict)
    dc_py = untlpy2dcpy(untl_py,
                        ark=ark,
                        domain_name=domain_name,
                        resolve_values=resolve_values,
                        resolve_urls=resolve_urls,
                        verbose_vocabularies=verbose_vocabularies,
                        scheme=scheme)
    return dcpy2formatteddcdict(dc_py)
Convert a UNTL data dictionary to a formatted DC data dictionary .
12,325
def formatted_dc_dict(dc_dict):
    """Flatten the DC data dictionary in place.

    Each value list of ``{'content': ...}`` dicts is replaced by a plain
    list of the content values. The (mutated) input dict is returned.
    """
    for key in dc_dict:
        dc_dict[key] = [entry['content'] for entry in dc_dict[key]]
    return dc_dict
Change the formatting of the DC data dictionary .
12,326
def generate_dc_xml(dc_dict):
    """Generate a DC XML string.

    Serializes *dc_dict* as OAI-DC XML: the ``oai_dc`` namespace for the
    root, the ``dc`` namespace for elements, the standard OAI-DC schema
    location, and element ordering from ``DC_ORDER``.
    """
    root_namespace = '{%s}' % DC_NAMESPACES['oai_dc']
    elements_namespace = '{%s}' % DC_NAMESPACES['dc']
    schema_location = ('http://www.openarchives.org/OAI/2.0/oai_dc/ '
                       'http://www.openarchives.org/OAI/2.0/oai_dc.xsd')
    root_attributes = {
        '{%s}schemaLocation' % XSI: schema_location,
    }
    return pydict2xmlstring(dc_dict,
                            ordering=DC_ORDER,
                            root_label='dc',
                            root_namespace=root_namespace,
                            elements_namespace=elements_namespace,
                            namespace_map=DC_NAMESPACES,
                            root_attributes=root_attributes,)
Generate a DC XML string .
12,327
def generate_dc_json(dc_dict):
    """Generate DC JSON data.

    The DC dict is flattened via ``formatted_dc_dict`` (note: that call
    mutates *dc_dict* in place) and serialized as pretty-printed,
    key-sorted JSON.
    """
    return json.dumps(formatted_dc_dict(dc_dict), sort_keys=True, indent=4)
Generate DC JSON data .
12,328
def highwirepy2dict(highwire_elements):
    """Convert a list of highwire elements into a dictionary.

    The result maps each element name to a list of
    ``{'content': ...}`` dicts, preserving input order.
    """
    highwire_dict = {}
    for element in highwire_elements:
        entries = highwire_dict.setdefault(element.name, [])
        entries.append({'content': element.content})
    return highwire_dict
Convert a list of highwire elements into a dictionary .
12,329
def generate_highwire_json(highwire_elements):
    """Convert highwire elements into a JSON string.

    Elements are grouped by name via ``highwirepy2dict`` and dumped as
    pretty-printed, key-sorted JSON.
    """
    highwire_dict = highwirepy2dict(highwire_elements)
    return json.dumps(highwire_dict, sort_keys=True, indent=4)
Convert highwire elements into a JSON structure .
12,330
def dcdict2rdfpy(dc_dict):
    """Convert a DC dictionary into an RDF graph (rdflib ConjunctiveGraph).

    The subject URI is derived from the first identifier whose content
    starts with 'ark: ark:' (rewritten to an 'info:ark' URI); otherwise
    an empty URIRef is used. Values that look like bare URLs become
    URIRefs, everything else becomes Literals.
    """
    ark_prefix = 'ark: ark:'
    uri = URIRef('')
    rdf_py = ConjunctiveGraph()
    DC = Namespace('http://purl.org/dc/elements/1.1/')
    # Locate the ark-based subject URI (last matching identifier wins).
    for element_value in dc_dict['identifier']:
        if element_value['content'].startswith(ark_prefix):
            uri = URIRef(element_value['content'].replace(ark_prefix, 'info:ark'))
    rdf_py.bind('dc', DC)
    for element_name in DC_ORDER:
        element_value_list = dc_dict.get(element_name, [])
        for element_value in element_value_list:
            # Heuristic: space-free values containing 'http' are URLs.
            if ('http' in element_value['content'] and
                    ' ' not in element_value['content']):
                rdf_py.add((uri, DC[element_name],
                            URIRef(element_value['content'])))
            else:
                rdf_py.add((uri, DC[element_name],
                            Literal(element_value['content'])))
    return rdf_py
Convert a DC dictionary into an RDF Python object .
12,331
def add_empty_fields(untl_dict):
    """Add empty values for UNTL fields that don't have values.

    For every element in ``UNTL_XML_ORDER`` missing from *untl_dict*,
    instantiate the dispatch class with progressively fewer constructor
    arguments (content+qualifier, content only, none) to discover its
    signature, then insert a matching empty entry. Elements with
    contained children get an empty dict content with one '' entry per
    child. Mutates and returns *untl_dict*.

    NOTE(review): the bare ``except:`` clauses also swallow
    SystemExit/KeyboardInterrupt; narrowing to ``except Exception``
    (or TypeError) would be safer -- left unchanged here.

    Raises:
        PyuntlException: if the element cannot be instantiated at all.
    """
    for element in UNTL_XML_ORDER:
        if element not in untl_dict:
            # Probe the constructor signature, widest first.
            try:
                py_object = PYUNTL_DISPATCH[element](
                    content='',
                    qualifier='',
                )
            except:
                try:
                    py_object = PYUNTL_DISPATCH[element](content='')
                except:
                    try:
                        py_object = PYUNTL_DISPATCH[element]()
                    except:
                        raise PyuntlException('Could not add empty element field.')
                    else:
                        untl_dict[element] = [{'content': {}}]
                else:
                    if not py_object.contained_children:
                        untl_dict[element] = [{'content': ''}]
                    else:
                        untl_dict[element] = [{'content': {}}]
            else:
                if not py_object.contained_children:
                    untl_dict[element] = [{'content': '', 'qualifier': ''}]
                else:
                    untl_dict[element] = [{'content': {}, 'qualifier': ''}]
            # Give every contained child an empty content entry.
            for child in py_object.contained_children:
                untl_dict[element][0].setdefault('content', {})
                untl_dict[element][0]['content'][child] = ''
    return untl_dict
Add empty values for UNTL fields that don't have values.
12,332
def add_empty_etd_ms_fields(etd_ms_dict):
    """Add empty values for ETD_MS fields that don't have values.

    Mirrors ``add_empty_fields`` but for the ETD_MS dispatch/order, with
    extra ``if py_object`` guards since some dispatch entries may be
    falsy. Mutates and returns *etd_ms_dict*.

    NOTE(review): bare ``except:`` clauses as in ``add_empty_fields`` --
    they also catch SystemExit/KeyboardInterrupt; left unchanged here.

    Raises:
        PyuntlException: if the element cannot be instantiated at all.
    """
    for element in ETD_MS_ORDER:
        if element not in etd_ms_dict:
            # Probe the constructor signature, widest first.
            try:
                py_object = ETD_MS_CONVERSION_DISPATCH[element](
                    content='',
                    qualifier='',
                )
            except:
                try:
                    py_object = ETD_MS_CONVERSION_DISPATCH[element](content='')
                except:
                    try:
                        py_object = ETD_MS_CONVERSION_DISPATCH[element]()
                    except:
                        raise PyuntlException('Could not add empty element field.')
                    else:
                        etd_ms_dict[element] = [{'content': {}}]
                else:
                    if not py_object.contained_children:
                        etd_ms_dict[element] = [{'content': ''}]
                    else:
                        etd_ms_dict[element] = [{'content': {}}]
            else:
                if py_object:
                    if not py_object.contained_children:
                        etd_ms_dict[element] = [{'content': '', 'qualifier': ''}]
                    else:
                        etd_ms_dict[element] = [{'content': {}, 'qualifier': ''}]
            # Give every contained child an empty content entry.
            if py_object:
                for child in py_object.contained_children:
                    etd_ms_dict[element][0].setdefault('content', {})
                    etd_ms_dict[element][0]['content'][child] = ''
    return etd_ms_dict
Add empty values for ETD_MS fields that don't have values.
12,333
def find_untl_errors(untl_dict, **kwargs):
    """Flag UNTL elements that require a qualifier.

    Every element name in ``REQUIRES_QUALIFIER`` that appears in
    *untl_dict* is recorded in the error dict as 'no_qualifier'; with
    ``fix_errors=True`` an empty qualifier is added in place where one
    is missing. Returns ``{'untl_dict': ..., 'error_dict': ...}``.
    """
    fix_errors = kwargs.get('fix_errors', False)
    error_dict = {}
    for name in REQUIRES_QUALIFIER:
        for entry in untl_dict.get(name, []):
            error_dict[name] = 'no_qualifier'
            if fix_errors:
                entry.setdefault('qualifier', '')
    return {
        'untl_dict': untl_dict,
        'error_dict': error_dict,
    }
Add empty required qualifiers to create valid UNTL .
12,334
def untlpy2etd_ms(untl_elements, **kwargs):
    """Convert the UNTL elements structure into an ETD_MS structure.

    Degree qualifiers (name/level/discipline/grantor) are collected and
    emitted as one composite degree element at the end; only the first
    creation date is kept; a meta/ark element yields an ark identifier
    unless an identifier already points at digital.library.unt.edu.

    NOTE(review): this is Python-2-era code (``dict.iteritems``).
    ``del child`` inside the date loop only unbinds the loop variable --
    it does NOT remove the child from ``etd_ms_root.children``; likely a
    latent bug upstream. Nesting reconstructed from collapsed source --
    confirm against upstream pyuntl before relying on edge cases.
    """
    degree_children = {}
    date_exists = False
    seen_creation = False
    etd_ms_root = ETD_MS_CONVERSION_DISPATCH['thesis']()
    for element in untl_elements.children:
        etd_ms_element = None
        if element.tag in ETD_MS_CONVERSION_DISPATCH:
            if element.children:
                etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag](
                    qualifier=element.qualifier,
                    children=element.children,
                )
            elif element.tag == 'degree':
                # Collect degree parts; emitted as one element at the end.
                if element.qualifier in ['name', 'level', 'discipline', 'grantor']:
                    degree_children[element.qualifier] = element.content
            elif element.tag == 'date':
                if element.qualifier == 'creation':
                    for child in etd_ms_root.children:
                        if child.tag == 'date':
                            del child
                    if not seen_creation:
                        date_exists = False
                    seen_creation = True
                    if not date_exists:
                        etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag](
                            qualifier=element.qualifier,
                            content=element.content,
                        )
                        date_exists = True
            elif element.tag not in ['date', 'degree']:
                etd_ms_element = ETD_MS_CONVERSION_DISPATCH[element.tag](
                    qualifier=element.qualifier,
                    content=element.content,
                )
            if etd_ms_element:
                etd_ms_root.add_child(etd_ms_element)
        if element.tag == 'meta':
            # Emit an ark identifier only if no UNT permalink identifier
            # is already present.
            ark = False
            for i in etd_ms_root.children:
                if i.tag == 'identifier' and i.content.startswith(
                        'http://digital.library.unt.edu/'):
                    ark = True
            if not ark:
                ark = False
                if element.qualifier == 'ark':
                    ark = element.content
                if ark is not None:
                    ark_identifier = ETD_MS_CONVERSION_DISPATCH['identifier'](
                        ark=ark,
                    )
                    etd_ms_root.add_child(ark_identifier)
    if degree_children:
        degree_element = ETD_MS_CONVERSION_DISPATCH['degree']()
        degree_child_element = None
        for k, v in degree_children.iteritems():
            degree_child_element = ETD_MS_DEGREE_DISPATCH[k](
                content=v,
            )
            if degree_child_element:
                degree_element.add_child(degree_child_element)
        etd_ms_root.add_child(degree_element)
    return etd_ms_root
Convert the UNTL elements structure into an ETD_MS structure .
12,335
def etd_ms_dict2xmlfile(filename, metadata_dict):
    """Create an ETD MS XML file from a metadata dictionary.

    The XML is produced by ``generate_etd_ms_xml`` and written UTF-8
    encoded to *filename*.

    Raises:
        MetadataGeneratorException: if generating or writing the XML fails.
    """
    try:
        # ``with`` guarantees the handle is closed even if the write
        # fails part-way (the original leaked the handle on error).
        with open(filename, 'w') as f:
            f.write(generate_etd_ms_xml(metadata_dict).encode("utf-8"))
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate.
        raise MetadataGeneratorException(
            'Failed to create an XML file. Filename: %s' % (filename))
Create an ETD MS XML file .
12,336
def signal_to_noise_map(self):
    """The signal-to-noise map of the fitted data and noise map.

    Computed as ``data / noise_map`` with negative values clipped to
    zero.
    """
    snr_map = np.divide(self.data, self.noise_map)
    return np.where(snr_map < 0, 0, snr_map)
The signal - to - noise_map of the data and noise - map which are fitted .
12,337
def structure(cls):
    """Get the part structure as a DNA regex pattern.

    Builds the pattern from the cutter's recognition site (and its
    reverse complement), wrapping the upstream/downstream signatures in
    capture groups; the group order is swapped for vectors versus
    modules, with 'N*' filling the insert region.

    Raises:
        NotImplementedError: if ``cls.signature`` is not defined.
        RuntimeError: if *cls* is neither an AbstractModule nor an
            AbstractVector subclass.
    """
    if cls.signature is NotImplemented:
        raise NotImplementedError("no signature defined")
    up = cls.cutter.elucidate()
    down = str(Seq(up).reverse_complement())
    ovhg = cls.cutter.ovhgseq
    upsig, downsig = cls.signature
    # The cut-site markers differ for 5' vs 3' overhang enzymes.
    if cls.cutter.is_5overhang():
        upsite = "^{}_".format(ovhg)
        downsite = "_{}^".format(Seq(ovhg).reverse_complement())
    else:
        upsite = "_{}^".format(ovhg)
        downsite = "^{}_".format(Seq(ovhg).reverse_complement())
    if issubclass(cls, AbstractModule):
        return "".join([
            up.replace(upsite, "({})(".format(upsig)),
            "N*",
            down.replace(downsite, ")({})".format(downsig)),
        ])
    elif issubclass(cls, AbstractVector):
        # Vectors read in the opposite orientation: swap up/down.
        return "".join([
            down.replace(downsite, "({})(".format(downsig)),
            "N*",
            up.replace(upsite, ")({})".format(upsig)),
        ])
    else:
        raise RuntimeError("Part must be either a module or a vector!")
Get the part structure as a DNA regex pattern .
12,338
def characterize(cls, record):
    """Load *record* into a concrete subclass of this type.

    Each direct subclass (plus this class itself when it is not
    abstract) is instantiated with *record*; the first instance whose
    ``is_valid()`` is true is returned.

    Raises:
        RuntimeError: when no candidate class accepts the record.
    """
    candidates = list(cls.__subclasses__())
    if not isabstract(cls):
        # The class itself is a valid candidate when concrete.
        candidates.append(cls)
    for candidate in candidates:
        instance = candidate(record)
        if instance.is_valid():
            return instance
    raise RuntimeError("could not find the type for '{}'".format(record.id))
Load the record in a concrete subclass of this type .
12,339
def global_request(self, kind, data=None, wait=True):
    """Make a global request to the remote host.

    These are normally extensions to the SSH2 protocol. The request
    message is sent with ``want_reply`` set to *wait*; when *wait* is
    true the call blocks (polling every 0.1 s) until the completion
    event fires or the transport becomes inactive, then returns
    ``self.global_response`` (or ``None`` on inactivity / no-wait).
    """
    if wait:
        self.completion_event = threading.Event()
    m = Message()
    m.add_byte(cMSG_GLOBAL_REQUEST)
    m.add_string(kind)
    m.add_boolean(wait)
    if data is not None:
        m.add(*data)
    self._log(DEBUG, 'Sending global request "%s"' % kind)
    self._send_user_message(m)
    if not wait:
        return None
    while True:
        # Poll so we can notice the transport dying mid-wait.
        self.completion_event.wait(0.1)
        if not self.active:
            return None
        if self.completion_event.isSet():
            break
    return self.global_response
Make a global request to the remote host . These are normally extensions to the SSH2 protocol .
12,340
def _activate_inbound(self):
    """Switch on newly negotiated encryption parameters for inbound traffic.

    The IV/cipher-key/MAC-key letters ('A'..'F') select which side's
    keys to derive: in server mode the client-to-server keys are used
    for inbound, otherwise the server-to-client keys. Inbound
    compression is enabled unless the delayed zlib@openssh.com method
    is negotiated and authentication has not completed yet.
    """
    block_size = self._cipher_info[self.remote_cipher]['block-size']
    if self.server_mode:
        IV_in = self._compute_key('A', block_size)
        key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
    else:
        IV_in = self._compute_key('B', block_size)
        key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
    engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
    mac_size = self._mac_info[self.remote_mac]['size']
    mac_engine = self._mac_info[self.remote_mac]['class']
    if self.server_mode:
        mac_key = self._compute_key('E', mac_engine().digest_size)
    else:
        mac_key = self._compute_key('F', mac_engine().digest_size)
    self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
    compress_in = self._compression_info[self.remote_compression][1]
    # zlib@openssh.com is "delayed" compression: only after auth.
    if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
        self._log(DEBUG, 'Switching on inbound compression ...')
        self.packetizer.set_inbound_compressor(compress_in())
switch on newly negotiated encryption parameters for inbound traffic
12,341
def enable_user(self, user):
    """Ensure an SSH client for `user` exists in the pool.

    For 'root', probe first: some cloud images replace root's
    authorized_keys line with a "Please login as the user ..." banner.
    In that case log in as the image user, strip the banner prefix so the
    key line starts at 'ssh-rsa', then register the root client.
    """
    if user in self.ssh_pool._ssh_clients:
        return
    if user == 'root':
        _root_ssh_client = ssh.SshClient(
            hostname=self.hostname,
            user='root',
            key_filename=self._key_filename,
            via_ip=self.via_ip)
        _root_ssh_client.start()
        result, _ = _root_ssh_client.run('uname -a')
        image_user = None
        # The banner text identifies which image user to fall back to.
        if 'Please login as the user "cloud-user"' in result:
            image_user = 'cloud-user'
            _root_ssh_client.stop()
        elif 'Please login as the user "fedora" rather than the user "root"' in result:
            image_user = 'fedora'
            _root_ssh_client.stop()
        elif 'Please login as the user "centos" rather than the user "root"' in result:
            image_user = 'centos'
            _root_ssh_client.stop()
        if image_user:
            self.enable_user(image_user)
            LOG.info('enabling the root user')
            # Drop everything before 'ssh-rsa' in root's authorized_keys.
            _cmd = "sudo sed -i 's,.*ssh-rsa,ssh-rsa,' /root/.ssh/authorized_keys"
            self.ssh_pool.run(image_user, _cmd)
            _root_ssh_client.start()
            self.ssh_pool.add_ssh_client('root', _root_ssh_client)
            return
        # NOTE(review): when root login worked directly (no banner), the
        # probe client is neither pooled nor stopped before a fresh client
        # is built below -- looks like a connection leak; confirm upstream.
    self.ssh_pool.build_ssh_client(
        hostname=self.hostname,
        user=user,
        key_filename=self._key_filename,
        via_ip=self.via_ip)
Ensure an SSH client for the given user exists on the remote host, enabling root access via the cloud image user when direct root login is disabled.
12,342
def send_file(self, local_path, remote_path, user='root', unix_mode=None):
    """Upload a local file to the remote host as the given user."""
    # Make sure the target account is usable before any transfer.
    self.enable_user(user)
    pool = self.ssh_pool
    return pool.send_file(user, local_path, remote_path, unix_mode=unix_mode)
Upload a local file on the remote host .
12,343
def send_dir(self, local_path, remote_path, user='root'):
    """Upload a whole directory to the remote host as the given user."""
    # The account must be enabled before the pool can use it.
    self.enable_user(user)
    pool = self.ssh_pool
    return pool.send_dir(user, local_path, remote_path)
Upload a directory on the remote host .
12,344
def create_file(self, path, content, mode='w', user='root'):
    """Create a file with the given content on the remote host."""
    # Enable the account first so the pool has a client for it.
    self.enable_user(user)
    pool = self.ssh_pool
    return pool.create_file(user, path, content, mode)
Create a file on the remote host .
12,345
def yum_install(self, packages, ignore_error=False):
    """Install the given packages via yum, retrying up to five times."""
    command = 'yum install -y --quiet ' + ' '.join(packages)
    return self.run(command, ignore_error=ignore_error, retry=5)
Install some packages on the remote host .
12,346
def rhsm_register(self, rhsm):
    """Register the host on the RHSM.

    :param dict rhsm: 'login' is required; 'password' falls back to the
        RHN_PW environment variable; an optional 'pool_id' selects the
        subscription pool, otherwise auto-attach is used.
    """
    login = rhsm.get('login')
    password = rhsm.get('password', os.environ.get('RHN_PW'))
    pool_id = rhsm.get('pool_id')
    # Best-effort removal; ignored when the cert is absent.
    self.run('rm /etc/pki/product/69.pem', ignore_error=True)
    # custom_log keeps the real password out of the command log.
    custom_log = 'subscription-manager register --username %s --password *******' % login
    self.run(
        'subscription-manager register --username %s --password "%s"' % (login, password),
        success_status=(0, 64),
        custom_log=custom_log,
        retry=3)
    if pool_id:
        self.run('subscription-manager attach --pool %s' % pool_id)
    else:
        self.run('subscription-manager attach --auto')
    self.rhsm_active = True
Register the host on the RHSM .
12,347
def enable_repositories(self, repositories):
    """Enable a list of repositories.

    Entry types handled: 'rhsm_channel' (collected, then enabled via
    subscription-manager when RHSM is active), 'yum_repo' (written as a
    repo file) and 'package' (installed via yum).
    """
    # Collect new channels without duplicating already-known ones.
    for r in repositories:
        if r['type'] != 'rhsm_channel':
            continue
        if r['name'] not in self.rhsm_channels:
            self.rhsm_channels.append(r['name'])
    if self.rhsm_active:
        # Disable everything first, then re-enable only our channels.
        subscription_cmd = "subscription-manager repos '--disable=*' --enable=" + ' --enable='.join(self.rhsm_channels)
        self.run(subscription_cmd)
    repo_files = [r for r in repositories if r['type'] == 'yum_repo']
    for repo_file in repo_files:
        self.create_file(repo_file['dest'], repo_file['content'])
    packages = [r['name'] for r in repositories if r['type'] == 'package']
    if packages:
        self.yum_install(packages)
Enable a list of RHSM repositories .
12,348
def create_stack_user(self):
    """Create the 'stack' user with passwordless sudo and SSH key access."""
    # Status 9 means the user already exists -- not an error here.
    self.run('adduser -m stack', success_status=(0, 9))
    self.create_file('/etc/sudoers.d/stack', 'stack ALL=(root) NOPASSWD:ALL\n')
    for command in (
            'mkdir -p /home/stack/.ssh',
            'cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys',
            'chown -R stack:stack /home/stack/.ssh',
            'chmod 700 /home/stack/.ssh',
            'chmod 600 /home/stack/.ssh/authorized_keys'):
        self.run(command)
    self.ssh_pool.build_ssh_client(
        self.hostname, 'stack', self._key_filename, self.via_ip)
Create the stack user on the machine .
12,349
def fetch_image(self, path, dest, user='root'):
    """Download an image to `dest` on the remote host unless already there."""
    command = 'test -f %s || curl -L -s -o %s %s' % (dest, dest, path)
    self.run(command, user=user, ignore_error=True)
Store in the user home directory an image from a remote location .
12,350
def clean_system(self):
    """Remove cloud-init/NetworkManager and switch to the network service."""
    # Each command tolerates the status it returns when already done.
    for command, ok_statuses in (
            ('systemctl disable NetworkManager', (0, 1)),
            ('systemctl stop NetworkManager', (0, 5)),
            ('pkill -9 dhclient', (0, 1))):
        self.run(command, success_status=ok_statuses)
    self.yum_remove(['cloud-init', 'NetworkManager'])
    self.run('systemctl enable network')
    self.run('systemctl restart network')
Clean up unnecessary packages from the system .
12,351
def yum_update(self, allow_reboot=False):
    """Run a full yum update on the system.

    :param bool allow_reboot: when True and the running kernel is not the
        newest installed one, make the newest kernel the default, reboot,
        and drop all cached SSH connections.
    """
    self.run('yum clean all')
    # Only meaningful on RHSM-registered hosts; ignore failures elsewhere.
    self.run('test -f /usr/bin/subscription-manager && subscription-manager repos --list-enabled', ignore_error=True)
    self.run('yum repolist')
    self.run('yum update -y --quiet', retry=3)
    if allow_reboot:
        self.run('grubby --set-default $(ls /boot/vmlinuz-*.x86_64|tail -1)')
        default_kernel = self.run('grubby --default-kernel')[0].rstrip()
        cur_kernel = self.run('uname -r')[0].rstrip()
        if cur_kernel not in default_kernel:
            # The reboot drops the connection, hence ignore_error and the
            # pool flush afterwards.
            self.run('reboot', ignore_error=True)
            self.ssh_pool.stop_all()
Do a yum update on the system .
12,352
def get_by_range(model_cls, *args, **kwargs):
    """Get an ordered list of models for the specified time range.

    The earliest returned model will usually carry a timestamp before
    ``start_timestamp`` so the caller gets values covering the full range.

    :raises InvalidTimestampRange: if start_timestamp > end_timestamp
    """
    start_timestamp = kwargs.get('start_timestamp')
    end_timestamp = kwargs.get('end_timestamp')
    if (start_timestamp is not None) and (end_timestamp is not None) and (start_timestamp > end_timestamp):
        raise InvalidTimestampRange
    # Only the upper bound is applied in the query; the lower bound is cut
    # below so the record straddling start_timestamp is kept.
    models = model_cls.read_time_range(*args, end_timestamp=end_timestamp).order_by(model_cls.time_order)
    if start_timestamp is not None:
        index = 0
        # time_order appears to sort newest-first (it stores the negated
        # timestamp); stop at the first record at or before the start and
        # keep everything up to and including it.
        for index, model in enumerate(models, start=1):
            if model.timestamp <= start_timestamp:
                break
        models = models[:index]
    return models
Get ordered list of models for the specified time range . The timestamp on the earliest model will likely occur before start_timestamp . This is to ensure that we return the models for the entire range .
12,353
def read_time_range(cls, *args, **kwargs):
    """Build a read() query restricted to a timestamp range.

    Uses time_dsc_index; time_order holds the negated timestamp, which is
    why the comparison operators look flipped.
    """
    criteria = list(args)
    start_timestamp = kwargs.get('start_timestamp')
    end_timestamp = kwargs.get('end_timestamp')
    if start_timestamp is not None:
        criteria.append(cls.time_order <= -start_timestamp)
    if end_timestamp is not None:
        criteria.append(cls.time_order >= -end_timestamp)
    return cls.read(*criteria)
Get all records within a given time range. Uses time_dsc_index.
12,354
def add_data(self, data, metadata=None):
    """Register one or more parameter sets.

    :param data: 1D array (one set) or 2D array (one set per row); a
        column-oriented 2D array is transposed automatically.
    :param metadata: one metadata object per set (a list/tuple when more
        than one set is given)
    :return: the new id for a single set, else the list of new ids
    :raises ValueError: when the value count does not match the grid or
        the metadata does not match the data
    """
    subdata = np.atleast_2d(data)
    # Accept column-oriented input by transposing when that matches.
    if subdata.shape[1] != self.grid.nr_of_elements:
        if subdata.shape[0] == self.grid.nr_of_elements:
            subdata = subdata.T
        else:
            # raise a specific exception type instead of bare Exception
            raise ValueError(
                'Number of values does not match the number of '
                'elements in the grid')
    nr_sets = subdata.shape[0]
    if metadata is not None:
        if nr_sets > 1:
            if (not isinstance(metadata, (list, tuple)) or
                    len(metadata) != nr_sets):
                raise ValueError('metadata does not fit the provided data')
        else:
            metadata = [metadata]
    else:
        metadata = [None] * nr_sets
    return_ids = []
    for dataset, meta in zip(subdata, metadata):
        cid = self._get_next_index()
        self.parsets[cid] = dataset
        self.metadata[cid] = meta
        return_ids.append(cid)
    return return_ids[0] if len(return_ids) == 1 else return_ids
Add data to the parameter set
12,355
def load_model_from_file(self, filename):
    """Load a single parameter set (one value per line) from a text file."""
    assert os.path.isfile(filename)
    values = np.loadtxt(filename).squeeze()
    # Exactly one parameter set is expected in the file.
    assert len(values.shape) == 1
    return self.add_data(values)
Load one parameter set from a file which contains one value per line
12,356
def load_from_sens_file(self, filename):
    """Load real and imaginary parts from a CRMod sens.dat file."""
    raw = np.loadtxt(filename, skiprows=1)
    # Columns 2 and 3 hold the real and imaginary sensitivities.
    pid_real = self.add_data(raw[:, 2])
    pid_imag = self.add_data(raw[:, 3])
    return pid_real, pid_imag
Load real and imaginary parts from a sens . dat file generated by CRMod
12,357
def save_to_rho_file(self, filename, cid_mag, cid_pha=None):
    """Write magnitude/phase parameter sets in CRMod rho.dat format.

    Phase defaults to zeros when cid_pha is None.
    """
    mag = self.parsets[cid_mag]
    pha = np.zeros(mag.shape) if cid_pha is None else self.parsets[cid_pha]
    with open(filename, 'wb') as fid:
        # First line: number of grid elements.
        fid.write(bytes('{0}\n'.format(self.grid.nr_of_elements), 'utf-8'))
        np.savetxt(fid, np.vstack((mag, pha)).T, fmt='%f %f')
Save one or two parameter sets in the rho . dat forward model format
12,358
def _clean_pid(self, pid):
    """Unwrap a single-element list/tuple; return anything else unchanged."""
    if isinstance(pid, (list, tuple)) and len(pid) == 1:
        return pid[0]
    return pid
If pid is a number, don't do anything. If pid is a list with one entry, strip the list and return the number. If pid contains more than one entry, do nothing.
12,359
def modify_area(self, pid, xmin, xmax, zmin, zmax, value):
    """Assign `value` to every parameter of set `pid` inside the rectangle."""
    # Build the rectangle corner-by-corner and delegate to modify_polygon.
    corners = ((xmin, zmax), (xmax, zmax), (xmax, zmin), (xmin, zmin))
    self.modify_polygon(pid, shapgeo.Polygon(corners), value)
Modify the given dataset in the rectangular area given by the parameters and assign all parameters inside this area the given value .
12,360
def extract_points(self, pid, points):
    """Nearest-neighbour sample of parameter set `pid` at the given points.

    Cell values are looked up via the nearest element centroid.
    """
    centroids = self.grid.get_element_centroids()
    interpolator = spi.NearestNDInterpolator(centroids, self.parsets[pid])
    return interpolator(points)
Extract values at certain points in the grid from a given parameter set . Cells are selected by interpolating the centroids of the cells towards the line using a nearest scheme .
12,361
def extract_along_line(self, pid, xy0, xy1, N=10):
    """Sample parameter set `pid` at N equally spaced points on a line.

    :return: array with rows (x, y, value)
    """
    assert N >= 2
    p0 = np.array(xy0).squeeze()
    p1 = np.array(xy1).squeeze()
    assert p0.size == 2
    assert p1.size == 2
    xs = np.linspace(p0[0], p1[0], N)
    ys = np.linspace(p0[1], p1[1], N)
    points = [(x, y) for x, y in zip(xs, ys)]
    values = self.extract_points(pid, points)
    return np.hstack((points, values[:, np.newaxis]))
Extract parameter values along a given line .
12,362
def extract_polygon_area(self, pid, polygon_points):
    """Return indices and values of cells whose centroid lies in the polygon."""
    polygon = shapgeo.Polygon(polygon_points)
    centroids = self.grid.get_element_centroids()
    inside = []
    for nr, point in enumerate(centroids):
        # strictly-within test, matching shapely semantics
        if shapgeo.Point(point).within(polygon):
            inside.append(nr)
    return np.array(inside), self.parsets[pid][inside]
Extract all data points whose element centroid lies within the given polygon .
12,363
def rotate_point(xorigin, yorigin, x, y, angle):
    """Rotate the point (x, y) by `angle` (radians) around (xorigin, yorigin).

    :return: (rotx, roty) coordinates relative to the rotation origin
    """
    # BUG FIX: the original computed the y-term from (x - yorigin),
    # mixing the x coordinate with the y origin; both terms must use the
    # offsets (x - xorigin) and (y - yorigin).
    dx = x - xorigin
    dy = y - yorigin
    rotx = dx * np.cos(angle) - dy * np.sin(angle)
    roty = dx * np.sin(angle) + dy * np.cos(angle)
    return rotx, roty
Rotate the given point by angle
12,364
def get_R_mod(options, rho0):
    """Compute synthetic measurements over a homogeneous half-space.

    :param options: object providing .elem_file, .elec_file and
        .config_file paths
    :param rho0: resistivity magnitude assigned to the whole model
    :return: first column of the modeled measurements
    """
    tomodir = tdManager.tdMan(
        elem_file=options.elem_file,
        elec_file=options.elec_file,
        config_file=options.config_file,
    )
    tomodir.add_homogeneous_model(magnitude=rho0)
    Z = tomodir.measurements()[:, 0]
    return Z
Compute synthetic measurements over a homogeneous half - space
12,365
def make_and_return_path_from_path_and_folder_names(path, folder_names):
    """Create nested folders under `path` and return the inner-most path.

    Each entry of `folder_names` becomes one directory level; the returned
    path keeps a trailing '/'. Existing directories are left untouched.
    """
    for folder_name in folder_names:
        path += folder_name + '/'
        # exist_ok replaces the try/except FileExistsError idiom and avoids
        # the race between an existence check and the creation.
        os.makedirs(path, exist_ok=True)
    return path
For a given path, create a directory structure composed of a set of folders and return the path to the inner-most folder.
12,366
def register_host(self, bm_instance):
    """Register an existing nova VM as a virtual baremetal node.

    Creates a dedicated BMC subnet/port, configures the matching NIC and
    policy routing on this host, then installs and starts a systemd unit
    running openstack-bmc for the instance.  Returns the BMC IP.

    NOTE(review): several `content = ...` assignments below use `content`
    as a template before it holds any file template text; the original
    file most likely defined triple-quoted template strings that were lost
    in this copy -- confirm against upstream before relying on this code.
    """
    bmc_ip = '10.130.%d.100' % (self._bmc_range_start + self._nic_cpt)
    bmc_net = '10.130.%d.0' % (self._bmc_range_start + self._nic_cpt)
    bmc_gw = '10.130.%d.1' % (self._bmc_range_start + self._nic_cpt)
    # NICs eth0/eth1 are presumably reserved; BMC NICs start at eth2.
    device = 'eth%d' % (2 + self._nic_cpt)
    body_create_subnet = {
        'subnets': [{
            'name': 'bmc_' + device,
            'cidr': bmc_net + '/24',
            'ip_version': 4,
            'network_id': self._bmc_net['id']}]}
    subnet_id = self.neutron.create_subnet(body=body_create_subnet)['subnets'][0]['id']
    self.attach_subnet_to_router(subnet_id)
    self.os_instance.interface_attach(None, self._bmc_net['id'], bmc_ip)
    content = self.create_file(
        '/etc/sysconfig/network-scripts/ifcfg-%s' % device,
        content=content.format(device=device, bmc_ip=bmc_ip, bmc_gw=bmc_gw))
    content = self.create_file(
        '/etc/sysconfig/network-scripts/route-%s' % device,
        content=content.format(bmc_gw=bmc_gw))
    self.run('ifup %s' % device)
    # Per-BMC routing table so replies leave via the right NIC.
    self.run('ip rule add from %s table %d' % (bmc_ip, self._nic_cpt + 2))
    self.run('ip route add default via %s dev %s table %d' % (bmc_gw, device, self._nic_cpt + 2))
    content = unit = 'openstack-bmc-%d.service' % self._nic_cpt
    self.create_file(
        '/usr/lib/systemd/system/%s' % unit,
        content.format(
            os_username=self.os_username,
            os_password=protect_password(self.os_password),
            os_project_id=self.os_project_id,
            os_auth_url=self.os_auth_url,
            bm_instance=bm_instance,
            bmc_ip=bmc_ip))
    self.run('systemctl enable %s' % unit)
    self.run('systemctl start %s' % unit)
    self._nic_cpt += 1
    return bmc_ip
Register an existing nova VM .
12,367
def Godeps ( self ) : dict = [ ] for package in sorted ( self . _packages . keys ( ) ) : dict . append ( { "ImportPath" : str ( package ) , "Rev" : str ( self . _packages [ package ] ) } ) return dict
Return the snapshot in Godeps . json form
12,368
def GLOGFILE(self):
    """Return the snapshot in GLOGFILE form: one 'package revision' per line."""
    return "\n".join(
        "%s %s" % (str(pkg), str(self._packages[pkg]))
        for pkg in sorted(self._packages.keys())
    )
Return the snapshot in GLOGFILE form
12,369
def Glide(self):
    """Return the snapshot in glide.lock (YAML) form.

    Packages are grouped into import-path prefixes; subpackage names are
    emitted relative to their prefix.
    """
    snapshot = {
        "hash": "???",
        "updated": str(datetime.datetime.now(tz=pytz.utc).isoformat()),
        "imports": [],
    }
    decomposer = ImportPathsDecomposerBuilder().buildLocalDecomposer()
    decomposer.decompose(self._packages.keys())
    classes = decomposer.classes()
    for ipp in classes:
        dep = {
            "name": ipp,
            "version": str(self._packages[classes[ipp][0]]),
        }
        if len(classes[ipp]) > 1 or classes[ipp][0] != ipp:
            # BUG FIX: on Python 3, map() returns a lazy iterator which
            # yaml.dump serializes as an opaque object instead of a list;
            # materialize the subpackage names explicitly.
            dep["subpackages"] = [l[len(ipp) + 1:] for l in classes[ipp]]
        snapshot["imports"].append(dep)
    return yaml.dump(snapshot, default_flow_style=False)
Return the snapshot in glide . lock form
12,370
def render(self,
           trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],
           batch: Optional[int] = None) -> None:
    """Render the simulated state-action trajectories for Navigation.

    Only the first batch entry of every fluent is plotted; `batch` is
    accepted for interface compatibility but not used here.
    """
    non_fluents, initial_state, states, actions, interms, rewards = trajectories
    non_fluents = dict(non_fluents)
    # Keep only the first batch entry of each fluent.
    states = dict((name, fluent[0]) for name, fluent in states)
    actions = dict((name, fluent[0]) for name, fluent in actions)
    rewards = rewards[0]
    idx = self._compiler.rddl.domain.state_fluent_ordering.index('location/1')
    start = initial_state[idx][0]
    g = non_fluents['GOAL/1']
    path = states['location/1']
    deltas = actions['move/1']
    centers = non_fluents['DECELERATION_ZONE_CENTER/2']
    decays = non_fluents['DECELERATION_ZONE_DECAY/1']
    # One (x, y, decay) triple per deceleration zone.
    zones = [(x, y, d) for (x, y), d in zip(centers, decays)]
    self._ax1 = plt.gca()
    self._render_state_space()
    self._render_start_and_goal_positions(start, g)
    self._render_deceleration_zones(zones)
    self._render_state_action_trajectory(start, path, deltas)
    plt.title('Navigation', fontweight='bold')
    plt.legend(loc='lower right')
    plt.show()
Render the simulated state - action trajectories for Navigation domain .
12,371
def persistent_timer(func):
    """Decorator that times a phase's execution across process restarts.

    The start time is persisted to ``<phase_output_path>/.start_time`` on
    first run; if the process is stopped and restarted, timing resumes
    from the saved value.  The total is logged and written to an
    ``execution_time`` file next to it.
    """
    @functools.wraps(func)
    def timed_function(optimizer_instance, *args, **kwargs):
        start_time_path = "{}/.start_time".format(optimizer_instance.phase_output_path)
        try:
            with open(start_time_path) as f:
                start = float(f.read())
        except FileNotFoundError:
            # First run: record the start time for later resumption.
            start = time.time()
            with open(start_time_path, "w+") as f:
                f.write(str(start))
        result = func(optimizer_instance, *args, **kwargs)
        execution_time = str(dt.timedelta(seconds=time.time() - start))
        logger.info("{} took {} to run".format(optimizer_instance.phase_name, execution_time))
        with open("{}/execution_time".format(optimizer_instance.phase_output_path), "w+") as f:
            f.write(execution_time)
        return result
    return timed_function
Times the execution of a function . If the process is stopped and restarted then timing is continued using saved files .
12,372
def backup_path(self) -> str:
    """The path to the backed-up optimizer folder inside the workspace."""
    phase_root = "{}/{}".format(conf.instance.output_path, self.phase_path)
    return "{}/{}{}/optimizer_backup".format(
        phase_root, self.phase_name, self.phase_tag)
The path to the backed up optimizer folder .
12,373
def backup(self):
    """Mirror the sym-linked optimizer folder into the workspace backup folder."""
    # Drop any stale backup first; a missing backup is not an error.
    try:
        shutil.rmtree(self.backup_path)
    except FileNotFoundError:
        pass
    # copytree requires the destination not to exist, hence the rmtree.
    try:
        shutil.copytree(self.opt_path, self.backup_path)
    except shutil.Error as error:
        logger.exception(error)
Copy files from the sym - linked optimizer folder to the backup folder in the workspace .
12,374
def restore(self):
    """Copy files from the backup folder back into the optimizer folder."""
    if not os.path.exists(self.backup_path):
        # Nothing was backed up yet; silently skip.
        return
    for backed_up_file in glob.glob(self.backup_path + "/*"):
        shutil.copy(backed_up_file, self.path)
Copy files from the backup folder to the sym - linked optimizer folder .
12,375
def config(self, attribute_name, attribute_type=str):
    """Look up this optimizer's non_linear.ini field by key and value type."""
    # The config section is named after the concrete optimizer class.
    section = self.__class__.__name__
    return self.named_config.get(section, attribute_name, attribute_type)
Get a config field from this optimizer s section in non_linear . ini by a key and value type .
12,376
def weighted_sample_instance_from_weighted_samples(self, index):
    """Build a model instance plus its weight and likelihood for a sample."""
    model, weight, likelihood = \
        self.weighted_sample_model_from_weighted_samples(index)
    # Cache the raw model vector for later inspection.
    self._weighted_sample_model = model
    instance = self.variable.instance_from_physical_vector(model)
    return instance, weight, likelihood
Setup a model instance of a weighted sample including its weight and likelihood .
12,377
def weighted_sample_model_from_weighted_samples(self, index):
    """Return (model, weight, -0.5 * loglike) for the sample at `index`."""
    pdf = self.pdf
    model = list(pdf.samples[index])
    # Likelihood convention: -0.5 times the stored log-likelihood value.
    return model, pdf.weights[index], -0.5 * pdf.loglikes[index]
From a weighted sample return the model weight and likelihood hood .
12,378
def compare_digest(a, b):
    """Compare two hash digests in constant time.

    :return: True when the digests are equal
    """
    # hmac.compare_digest (Python 2.7.7+/3.3+) is the stdlib constant-time
    # comparison; it replaces the hand-rolled per-version helpers and the
    # runtime version dispatch.
    import hmac
    return hmac.compare_digest(a, b)
Compare two hash digests.
12,379
def _render_trajectories(self,
                         trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array]) -> None:
    """Print the first batch of simulated trajectories (no-op unless verbose)."""
    if self._verbose:
        non_fluents, initial_state, states, actions, interms, rewards = trajectories
        shape = states[0][1].shape
        batch_size, horizon, = shape[0], shape[1]
        # Keep only the first batch entry of every fluent.
        states = [(s[0], s[1][0]) for s in states]
        interms = [(f[0], f[1][0]) for f in interms]
        actions = [(a[0], a[1][0]) for a in actions]
        rewards = np.reshape(rewards, [batch_size, horizon])[0]
        self._render_batch(non_fluents, states, actions, interms, rewards)
Prints the first batch of simulated trajectories .
12,380
def _render_batch(self,
                  non_fluents: NonFluents,
                  states: Fluents,
                  actions: Fluents,
                  interms: Fluents,
                  rewards: np.array,
                  horizon: Optional[int] = None) -> None:
    """Print non-fluents, then per-timestep fluents and rewards.

    `horizon` defaults to the length of the first state fluent.
    """
    if horizon is None:
        horizon = len(states[0][1])
    self._render_round_init(horizon, non_fluents)
    for t in range(horizon):
        # Slice every fluent at timestep t before rendering.
        s = [(s[0], s[1][t]) for s in states]
        f = [(f[0], f[1][t]) for f in interms]
        a = [(a[0], a[1][t]) for a in actions]
        r = rewards[t]
        self._render_timestep(t, s, a, f, r)
    self._render_round_end(rewards)
Prints non_fluents states actions interms and rewards for given horizon .
12,381
def _render_timestep(self,
                     t: int,
                     s: Fluents,
                     a: Fluents,
                     f: Fluents,
                     r: np.float32) -> None:
    """Print actions, interms, states and reward for timestep `t`."""
    print("============================")
    print("TIME = {}".format(t))
    print("============================")
    fluent_variables = self._compiler.rddl.action_fluent_variables
    self._render_fluent_timestep('action', a, fluent_variables)
    fluent_variables = self._compiler.rddl.interm_fluent_variables
    self._render_fluent_timestep('interms', f, fluent_variables)
    fluent_variables = self._compiler.rddl.state_fluent_variables
    self._render_fluent_timestep('states', s, fluent_variables)
    self._render_reward(r)
Prints fluents and rewards for the given timestep t .
12,382
def _render_fluent_timestep(self,
                            fluent_type: str,
                            fluents: Sequence[Tuple[str, np.array]],
                            fluent_variables: Sequence[Tuple[str, List[str]]]) -> None:
    """Print each fluent of `fluent_type` as instantiated variable = value lines."""
    for (name, fluent), (_, variables) in zip(fluents, fluent_variables):
        print(name)
        # Flatten so multi-dimensional fluents pair up with their variables.
        for variable, value in zip(variables, fluent.flatten()):
            print('- {}: {} = {}'.format(fluent_type, variable, value))
        print()
Prints fluents of given fluent_type as list of instantiated variables with corresponding values .
12,383
def _render_reward(self, r: np.float32) -> None:
    """Print the reward value with four decimal places, then a blank line."""
    print("reward = {:.4f}".format(float(r)))
    print()
Prints reward r .
12,384
def _render_round_init(self, horizon: int, non_fluents: NonFluents) -> None:
    """Print the round-init banner, the horizon and the non-fluent values."""
    banner = '*********************************************************'
    print(banner)
    print('>>> ROUND INIT, horizon = {}'.format(horizon))
    print(banner)
    self._render_fluent_timestep(
        'non-fluents', non_fluents, self._compiler.rddl.non_fluent_variables)
Prints round init information about horizon and non_fluents .
12,385
def _render_round_end(self, rewards: np.array) -> None:
    """Print the round-end banner, total objective value and reward list."""
    banner = "*********************************************************"
    print(banner)
    print(">>> ROUND END")
    print(banner)
    print("==> Objective value = {}".format(np.sum(rewards)))
    print("==> rewards = {}".format(list(rewards)))
    print()
Prints round end information about rewards .
12,386
def _truncate_to_field(model, field_name, value):
    """Shorten `value` to fit the model field, eliding the middle with '...'.

    The result is exactly max_length characters when truncation happens.
    """
    max_length = model._meta.get_field(field_name).max_length
    if len(value) <= max_length:
        return value
    sep = '...'
    midpoint = max_length // 2
    tail_length = max_length - midpoint
    head = value[:midpoint]
    tail = value[len(value) - tail_length + len(sep):]
    return sep.join([head, tail])
Shorten data to fit in the specified model field .
12,387
def on_failure(self, exc, task_id, args, kwargs, einfo):
    """Persist a FailedTask record when the task fails.

    Skips creation when an unresolved record for the same task_id already
    exists, then delegates to the parent failure handler.
    """
    if not FailedTask.objects.filter(task_id=task_id, datetime_resolved=None).exists():
        FailedTask.objects.create(
            # Truncated so the values fit their model fields.
            task_name=_truncate_to_field(FailedTask, 'task_name', self.name),
            task_id=task_id,
            args=args,
            kwargs=kwargs,
            exc=_truncate_to_field(FailedTask, 'exc', repr(exc)),
        )
    super(PersistOnFailureTask, self).on_failure(exc, task_id, args, kwargs, einfo)
If the task fails persist a record of the task .
12,388
def render(self,
           trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],
           batch: Optional[int] = None) -> None:
    """Render the simulated trajectories for the given batch.

    Abstract hook: concrete visualizers must override this method.
    """
    raise NotImplementedError
Renders the simulated trajectories for the given batch .
12,389
def distribution(self, limit=1024):
    """Build the distribution of distinct values of this column.

    :param int limit: maximum number of distinct values to fetch
    :return: DataFrame with columns value, cnt, r (rank) and fraction,
        also cached on self._distribution
    """
    res = self._qexec(
        "%s, count(*) as __cnt" % self.name(),
        group="%s" % self.name(),
        order="__cnt DESC LIMIT %d" % limit)
    dist = []
    cnt = self._table.size()
    for i, r in enumerate(res):
        # Append rank and the fraction of the total row count.
        dist.append(list(r) + [i, r[1] / float(cnt)])
    self._distribution = pd.DataFrame(dist, columns=["value", "cnt", "r", "fraction"])
    self._distribution.index = self._distribution.r
    return self._distribution
Build the distribution of distinct values
12,390
def parse(self, name):
    """Parse a distribution name string.

    :return: self, with self._signature set on success
    :raises ValueError: when the name is not recognized
    """
    stripped = name.strip()
    match = self._parseFedora(stripped)
    if not match:
        raise ValueError("Distribution name '%s' not recognized" % stripped)
    self._signature = DistributionNameSignature("Fedora", match.group(1))
    return self
Parse distribution string
12,391
def get_token(url: str, scopes: str, credentials_dir: str) -> dict:
    """Get access-token info for the 'lizzy' token.

    :param url: token service endpoint
    :param scopes: OAuth scope requested for the token
    :param credentials_dir: directory holding the token credentials
    :return: the token info dict managed by the `tokens` library
    """
    tokens.configure(url=url, dir=credentials_dir)
    tokens.manage('lizzy', [scopes])
    tokens.start()
    return tokens.get('lizzy')
Get access token info .
12,392
def config(config, fork_name="", origin_name=""):
    """Set (or display) various configuration options.

    Writes FORK_NAME / ORIGIN_NAME to the config file when given; with no
    option set, prints the currently configured fork-name.
    """
    state = read(config.configfile)
    any_set = False
    if fork_name:
        update(config.configfile, {"FORK_NAME": fork_name})
        success_out("fork-name set to: {}".format(fork_name))
        any_set = True
    if origin_name:
        update(config.configfile, {"ORIGIN_NAME": origin_name})
        success_out("origin-name set to: {}".format(origin_name))
        any_set = True
    if not any_set:
        info_out("Fork-name: {}".format(state["FORK_NAME"]))
Setting various configuration options
12,393
def set_area_to_sip_signature(self, xmin, xmax, zmin, zmax, spectrum):
    """Parameterize a rectangular area of every tomodir with one SIP spectrum.

    :param spectrum: sip_response/sip_response2 whose frequencies must
        match self.frequencies exactly
    """
    assert isinstance(spectrum, (sip_response, sip_response2))
    assert np.all(self.frequencies == spectrum.frequencies)
    for frequency, rmag, rpha in zip(self.frequencies, spectrum.rmag, spectrum.rpha):
        td = self.tds[frequency]
        # Write magnitude and phase into the forward-model parameter sets.
        pidm, pidp = td.a['forward_model']
        td.parman.modify_area(pidm, xmin, xmax, zmin, zmax, rmag)
        td.parman.modify_area(pidp, xmin, xmax, zmin, zmax, rpha)
Parameterize the eit instance by supplying one SIP spectrum and the area to apply to .
12,394
def add_homogeneous_model(self, magnitude, phase=0, frequency=None):
    """Add homogeneous models to one or all tomodirs.

    The returned parameter ids are registered as forward models under
    self.a['forward_rmag'] / self.a['forward_rpha'].
    """
    if frequency is None:
        frequencies = self.frequencies
    else:
        assert isinstance(frequency, Number)
        frequencies = [frequency]
    for freq in frequencies:
        pidm, pidp = self.tds[freq].add_homogeneous_model(magnitude, phase)
        self.a['forward_rmag'][freq] = pidm
        self.a['forward_rpha'][freq] = pidp
Add homogeneous models to one or all tomodirs . Register those as forward models
12,395
def apply_crtomo_cfg(self):
    """Propagate a copy of the global crtomo_cfg to every frequency tomodir."""
    for frequency in sorted(self.tds.keys()):
        # Each tomodir gets its own copy so later edits stay independent.
        self.tds[frequency].crtomo_cfg = self.crtomo_cfg.copy()
Set the global crtomo_cfg for all frequencies
12,396
def apply_noise_models(self):
    """Propagate the global noise_model to every frequency tomodir."""
    for frequency in sorted(self.tds.keys()):
        self.tds[frequency].noise_model = self.noise_model
Set the global noise_model for all frequencies
12,397
def load_inversion_results(self, sipdir):
    """Load rmag/rpha/cre/cim inversion results from an sEIT directory.

    Parameter ids are stored per frequency under self.a.

    NOTE(review): the membership guard checks ``self.assigments`` --
    presumably a typo for the assignments container actually written to
    (``self.a``); confirm before changing.
    """
    frequency_file = sipdir + os.sep + 'frequencies.dat'
    frequencies = np.loadtxt(frequency_file)
    self._init_frequencies(frequencies)
    for nr, (frequency_key, item) in enumerate(sorted(self.tds.items())):
        for label in ('rmag', 'rpha', 'cre', 'cim'):
            if label not in self.assigments:
                self.a[label] = {}
        # Per-frequency inversion subdirectory, e.g. invmod/00_0.001000/
        tdir = sipdir + os.sep + 'invmod' + os.sep + '{:02}_{:.6f}'.format(nr, frequency_key) + os.sep
        # The last file of each sorted glob is the final iteration.
        rmag_file = sorted(glob(tdir + 'inv/*.mag'))[-1]
        rmag_data = np.loadtxt(rmag_file, skiprows=1)[:, 2]
        pid_rmag = item.parman.add_data(rmag_data)
        self.a['rmag'][frequency_key] = pid_rmag
        rpha_file = sorted(glob(tdir + 'inv/*.pha'))[-1]
        rpha_data = np.loadtxt(rpha_file, skiprows=1)[:, 2]
        pid_rpha = item.parman.add_data(rpha_data)
        self.a['rpha'][frequency_key] = pid_rpha
        # The .sig file carries real/imaginary conductivity in two columns.
        sigma_file = sorted(glob(tdir + 'inv/*.sig'))[-1]
        sigma_data = np.loadtxt(sigma_file, skiprows=1)
        pid_cre = item.parman.add_data(sigma_data[:, 0])
        pid_cim = item.parman.add_data(sigma_data[:, 1])
        self.a['cre'][frequency_key] = pid_cre
        self.a['cim'][frequency_key] = pid_cim
Given an sEIT inversion directory load inversion results and store the corresponding parameter ids in self . assignments
12,398
def plot_forward_models(self, maglim=None, phalim=None, **kwargs):
    """Create grid plots of the forward models (magnitude and phase).

    :param maglim: (min, max) colorbar limits for magnitude, or None
    :param phalim: (min, max) colorbar limits for phase, or None
    :return: dict with keys 'rmag'/'rpha', each {'fig': .., 'axes': ..}
    """
    return_dict = {}
    N = len(self.frequencies)
    # At most four plots per row.
    nrx = min(N, 4)
    nrz = int(np.ceil(N / nrx))
    for index, key, limits in zip((0, 1), ('rmag', 'rpha'), (maglim, phalim)):
        if limits is None:
            cbmin = None
            cbmax = None
        else:
            cbmin = limits[0]
            cbmax = limits[1]
        fig, axes = plt.subplots(
            nrz, nrx,
            figsize=(16 / 2.54, nrz * 3 / 2.54),
            sharex=True,
            sharey=True,
        )
        # Hide all axes first; only those with a frequency are re-enabled.
        for ax in axes.flat:
            ax.set_visible(False)
        for ax, frequency in zip(axes.flat, self.frequencies):
            ax.set_visible(True)
            td = self.tds[frequency]
            pids = td.a['forward_model']
            td.plot.plot_elements_to_ax(
                pids[index],
                ax=ax,
                plot_colorbar=True,
                cbposition='horizontal',
                cbmin=cbmin,
                cbmax=cbmax,
                **kwargs)
        # Only the outermost axes keep their labels.
        for ax in axes[0:-1, :].flat:
            ax.set_xlabel('')
        for ax in axes[:, 1:].flat:
            ax.set_ylabel('')
        fig.tight_layout()
        return_dict[key] = {
            'fig': fig,
            'axes': axes,
        }
    return return_dict
Create plots of the forward models
12,399
def add_to_configs ( self , configs ) : for f , td in self . tds . items ( ) : td . configs . add_to_configs ( configs )
Add configurations to all tomodirs