idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
12,700
def login(config, api_key=""):
    """Validate a Bugzilla API key against /rest/whoami and store it on success.

    Prompts for the key when not supplied. On a 200 response without an
    "error" payload the key is persisted to the config file; otherwise an
    error is reported via ``error_out``.
    """
    if not api_key:
        info_out(
            "If you don't have an API Key, go to:\n"
            "https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey\n"
        )
        api_key = getpass.getpass("API Key: ")
    url = urllib.parse.urljoin(config.bugzilla_url, "/rest/whoami")
    assert url.startswith("https://"), url
    response = requests.get(url, params={"api_key": api_key})
    if response.status_code == 200:
        payload = response.json()  # parse the body once instead of twice
        if payload.get("error"):
            error_out("Failed - {}".format(payload))
        else:
            update(
                config.configfile,
                {"BUGZILLA": {
                    "bugzilla_url": config.bugzilla_url,
                    "api_key": api_key,
                }},
            )
            success_out("Yay! It worked!")
    else:
        error_out("Failed - {} ({})".format(response.status_code, response.json()))
Store your Bugzilla API Key
12,701
def logout(config):
    """Remove and forget any stored Bugzilla credentials."""
    state = read(config.configfile)
    if not state.get("BUGZILLA"):
        error_out("No stored Bugzilla credentials")
    else:
        remove(config.configfile, "BUGZILLA")
        success_out("Forgotten")
Remove and forget your Bugzilla credentials
12,702
def get_hypergeometric_stats(N, indices):
    """Calculate hypergeometric p-values and fold enrichments for all cutoffs.

    Parameters
    ----------
    N : int
        Total number of ranked items.
    indices : np.ndarray, dtype uint16
        0-based ranks of the "hit" items (assumed sorted ascending — TODO confirm).

    Returns
    -------
    pvals, folds : np.ndarray (float64), each of length N + 1, indexed by cutoff n.
    """
    assert isinstance(N, (int, np.integer))
    assert isinstance(indices, np.ndarray) and \
        np.issubdtype(indices.dtype, np.uint16)
    K = indices.size
    pvals = np.empty(N + 1, dtype=np.float64)
    folds = np.empty(N + 1, dtype=np.float64)
    # cutoff 0: empty prefix
    pvals[0] = 1.0
    folds[0] = 1.0
    n = 0  # current cutoff
    k = 0  # hits seen so far
    p = 1.0  # running hypergeometric PMF value, updated incrementally
    while n < N:
        if k < K and indices[k] == n:
            # next ranked item is a hit: PMF update for k -> k + 1
            p *= (float((n + 1) * (K - k)) /
                  float((N - n) * (k + 1)))
            k += 1
        else:
            # next ranked item is a miss: PMF update keeping k fixed
            p *= (float((n + 1) * (N - K - n + k)) /
                  float((N - n) * (n - k + 1)))
        n += 1
        pvals[n] = get_hgp(p, k, N, K, n)  # tail p-value helper defined elsewhere
        folds[n] = k / (K * (n / float(N)))  # observed / expected hits at cutoff n
    return pvals, folds
Calculates hypergeom . p - values and fold enrichments for all cutoffs .
12,703
def parse(self, prefix):
    """Parse an import path into provider/project/repository parts.

    Strips any http(s) scheme, rewrites known custom import paths via
    detectCustomImportPaths, then delegates to _parsePrefix and stores the
    resulting signature and prefix on the instance. Returns self (fluent).
    """
    self._prefix = ""
    # drop the scheme; only the host/path part is analysed
    url = re.sub(r'http://', '', prefix)
    url = re.sub(r'https://', '', url)
    custom_prefix = self.detectCustomImportPaths(url)
    if custom_prefix != {}:
        url = custom_prefix["provider_prefix"]
    info = self._parsePrefix(url)
    self._signature = info["signature"]
    self._prefix = info["prefix"]
    return self
Parse import path into provider project repository and other recognizable parts
12,704
def detectKnownRepo(self, url):
    """Map an import-path prefix onto its known provider constant.

    Returns UNKNOWN when no known provider prefix matches.
    """
    known_providers = (
        ('github.com', GITHUB),
        ('code.google.com/p', GOOGLECODE),
        ('golang.org/x', GOLANGORG),
        ('gopkg.in', GOPKG),
        ('bitbucket.org', BITBUCKET),
        ('google.golang.org', GOOGLEGOLANGORG),
    )
    for prefix, provider in known_providers:
        if url.startswith(prefix):
            return provider
    return UNKNOWN
For given import path detect provider .
12,705
def get_qualifier_dict(vocabularies, qualifier_vocab):
    """Return the qualifier vocabulary dict for a form element.

    Raises UNTLFormException when the vocabulary is not present.
    """
    vocab = vocabularies.get(qualifier_vocab)  # single lookup instead of two
    if vocab is None:
        raise UNTLFormException(
            'Could not retrieve qualifier vocabulary "%s" for the form.'
            % (qualifier_vocab)
        )
    return vocab
Get the qualifier dictionary based on the element's qualifier vocabulary.
12,706
def get_content_dict(vocabularies, content_vocab):
    """Return the content vocabulary dict for a form element.

    Raises UNTLFormException when the vocabulary is not present.
    """
    vocab = vocabularies.get(content_vocab)  # single lookup instead of two
    if vocab is None:
        raise UNTLFormException(
            'Could not retrieve content vocabulary "%s" for the form.'
            % (content_vocab)
        )
    return vocab
Get the content dictionary based on the element's content vocabulary.
12,707
def get_group_usage_link(self):
    """Return the usage link of the group's first element, or None when absent."""
    leading = self.group_list[0]
    return getattr(leading.form, 'usage_link', None)
Get the usage link for the group element .
12,708
def get_adjustable_form(self, element_dispatch):
    """Build the adjustable form by invoking each dispatch callable per key."""
    return {field: make_field() for field, make_field in element_dispatch.items()}
Create an adjustable form from an element dispatch table .
12,709
def set_coverage_placeName(self):
    """Determine the form properties for the placeName coverage field.

    Pre-fills the field from the Solr location facet when a usable Solr
    response is available; otherwise the value list is empty.
    """
    # both the response object and its .response payload must be non-error
    if (self.solr_response and self.solr_response != 'error'
            and self.solr_response.response != 'error'):
        location_list = self.solr_response.get_location_list_facet().facet_list
    else:
        location_list = []
    form_dict = {
        'view_type': 'prefill',
        'value_json': json.dumps(location_list, ensure_ascii=False),
        'value_py': location_list,
    }
    return form_dict
Determine the properties for the placeName coverage field .
12,710
def get_meta_attributes(self, **kwargs):
    """Set the form attributes for the meta field based on its qualifier.

    Only superusers may edit recordStatus/system meta records; the 'hidden'
    qualifier renders as a radio toggle; everything else is read-only.
    """
    superuser = kwargs.get('superuser', False)
    qualifier = self.untl_object.qualifier
    if qualifier in ('recordStatus', 'system'):
        if superuser:
            self.editable = True
            self.repeatable = True
        else:
            self.editable = False
            self.view_type = 'qualified-input'
    elif qualifier == 'hidden':
        self.label = 'Object Hidden'
        self.view_type = 'radio'
    else:
        self.editable = False
        self.view_type = 'qualified-input'
Determine the form attributes for the meta field .
12,711
def _bit_mismatch(int1: int, int2: int) -> int:
    """Return the index of the first (lowest) differing bit, or -1 when equal.

    Only bits up to max(bit_length) of the two values are inspected.
    """
    delta = int1 ^ int2
    for position in range(max(int1.bit_length(), int2.bit_length())):
        if (delta >> position) & 1:
            return position
    return -1
Returns the index of the first different bit or - 1 if the values are the same .
12,712
def searchRootOfTree(reducibleChildren: Set[LNode], nodeFromTree: LNode):
    """Walk a tree of nodes upward to its root.

    Follows the single outgoing edge on each node's east side until a node
    has no outgoing edge, or its successor is not in ``reducibleChildren``.
    """
    while True:
        out_e = nodeFromTree.east[0].outgoingEdges
        if not out_e:
            # no successor at all: this node is the root
            return nodeFromTree
        nextNode = out_e[0].dsts[0].parentNode
        if nextNode in reducibleChildren:
            nodeFromTree = nextNode
        else:
            # successor is outside the reducible set: stop here
            return nodeFromTree
Walk tree of nodes to root
12,713
def collectNodesInTree(treeRoot: LNode, reducibleChildren: Set[LNode]):
    """Collect the nodes that will be reduced, and the tree's input edges.

    Depth-first walk from ``treeRoot`` over west-side incoming edges.
    Returns (reducedNodes, inputEdges) where inputEdges are
    (node, port, edge) triples at the tree's boundary.
    """
    inputEdges = []
    reducedNodes = []
    reducedNodesSet = set()
    nodeStack = []
    nodeStack.append((treeRoot, None, None))
    while nodeStack:
        node, p, e = nodeStack.pop()
        if node in reducibleChildren and node not in reducedNodesSet:
            reducedNodes.append(node)
            reducedNodesSet.add(node)
            # continue the walk through every edge on the west side
            for _p in node.west:
                for _e in _p.iterEdges():
                    nodeStack.append((_e.srcs[0].parentNode, _p, _e))
        else:
            # boundary node: record the edge by which the tree is entered
            inputEdges.append((node, p, e))
    return reducedNodes, inputEdges
Collect nodes which will be reduced and input nodes of tree for tree of nodes .
12,714
def __initLock(self):
    """Initialise the busy-lock state used while the projector processes a request."""
    self._timer = 0
    self._operation = False
    self._isLocked = False
Init lock for sending request to projector when it is busy .
12,715
def __setLock(self, command):
    """Engage the busy lock, recording which class of operation is in flight."""
    if command in (TURN_ON, TURN_OFF):
        operation = command
    elif command in INV_SOURCES:
        operation = SOURCE
    else:
        operation = ALL
    self._operation = operation
    self._isLocked = True
    self._timer = time.time()
Set lock on requests .
12,716
def __unLock(self):
    """Release the busy lock and reset its bookkeeping."""
    self._isLocked = False
    self._timer = 0
    self._operation = False
Unlock sending requests to projector .
12,717
def __checkLock(self):
    """Return True while the lock is held; auto-release after the per-operation timeout."""
    if not self._isLocked:
        return False
    if (time.time() - self._timer) > TIMEOUT_TIMES[self._operation]:
        self.__unLock()
        return False
    return True
Lock checking .
12,718
async def get_property(self, command):
    """Get a property state from the device.

    Returns BUSY while the projector is locked or replies without the
    expected payload, False when the request itself fails.
    """
    _LOGGER.debug("Getting property %s", command)
    if self.__checkLock():
        return BUSY
    timeout = self.__get_timeout(command)
    response = await self.send_request(
        timeout=timeout, params=EPSON_KEY_COMMANDS[command], type='json_query')
    if not response:
        return False
    try:
        return response['projector']['feature']['reply']
    except KeyError:
        # reply did not have the expected JSON shape
        return BUSY
Get property state from device .
12,719
async def send_command(self, command):
    """Send a command to the Epson projector.

    Returns False while the device is still busy with a previous request;
    otherwise locks the device and forwards the command.
    """
    _LOGGER.debug("Sending command to projector %s", command)
    if self.__checkLock():
        return False
    self.__setLock(command)
    response = await self.send_request(
        timeout=self.__get_timeout(command),
        params=EPSON_KEY_COMMANDS[command],
        type='directsend', command=command)
    return response
Send command to Epson .
12,720
async def send_request(self, params, timeout, type='json_query', command=False):
    """Send an HTTP request to the Epson projector.

    Returns the parsed JSON for 'json_query' requests, the raw response
    otherwise, or False on HTTP/client errors.
    """
    try:
        with async_timeout.timeout(timeout):
            url = '{url}{type}'.format(url=self._http_url, type=type)
            async with self.websession.get(
                    url=url, params=params, headers=self._headers) as response:
                if response.status != HTTP_OK:
                    _LOGGER.warning("Error message %d from Epson.", response.status)
                    return False
                # the first successful TURN_ON clears the powering-on flag
                if command == TURN_ON and self._powering_on:
                    self._powering_on = False
                if type == 'json_query':
                    return await response.json()
                return response
    except (aiohttp.ClientError, aiohttp.ClientConnectionError):
        _LOGGER.error("Error request")
        return False
Send request to Epson .
12,721
def remove_instances_by_prefix(nova_api, prefix):
    """Delete every Nova instance whose name starts with ``prefix``."""
    matching = (s for s in nova_api.servers.list() if s.name.startswith(prefix))
    for server in matching:
        LOG.info("Remove instance '%s'" % server.name)
        server.delete()
Remove all the instances on which their name start by a prefix .
12,722
def purge_existing_ovb(nova_api, neutron):
    """Purge any trace of an existing OVB deployment from the tenant.

    Removes bmc/undercloud/baremetal instances, detaches and deletes the
    bmc router, then retries (up to 5 times) deleting provision ports,
    subnets and networks while Neutron reports conflicts.
    """
    LOG.info('Cleaning up OVB environment from the tenant.')
    for server in nova_api.servers.list():
        if server.name in ('bmc', 'undercloud'):
            server.delete()
        if server.name.startswith('baremetal_'):
            server.delete()
    for router in neutron.list_routers().get('routers'):
        if router['name'] not in ('router', 'bmc_router'):
            continue
        for subnet in neutron.list_subnets().get('subnets'):
            if not (subnet['name'].startswith('bmc_eth')
                    or subnet['name'] == 'rdo-m-subnet'):
                continue
            try:
                neutron.remove_interface_router(
                    router['id'], {'subnet_id': subnet['id']})
            except neutronclient.common.exceptions.NotFound:
                pass
    try:
        bmc_router = neutron.list_routers(name='bmc_router').get('routers')[0]
        for port in neutron.list_ports(device_id=bmc_router['id'])['ports']:
            if port.get('device_owner') == 'network:router_gateway':
                continue
            # NOTE(review): 'id' uses router['id'] left over from the loop
            # above while the interface is removed from bmc_router — confirm
            # this is intended and not a leaked loop variable.
            info = {
                'id': router['id'],
                'port_id': port['id'],
                'tenant_id': bmc_router.get('tenant_id'),
            }
            neutron.remove_interface_router(bmc_router['id'], info)
        neutron.delete_router(bmc_router['id'])
    except IndexError:
        # no bmc_router found
        pass
    for _ in range(0, 5):
        try:
            for port in neutron.list_ports()['ports']:
                if port['name'].endswith('_provision'):
                    neutron.delete_port(port['id'])
            for net in neutron.list_networks().get('networks'):
                if not net['name'].startswith('provision_'):
                    continue
                for port in neutron.list_ports(network_id=net['id'])['ports']:
                    if port.get('device_owner') == 'network:router_interface':
                        continue
                    try:
                        neutron.delete_port(port['id'])
                    except neutronclient.common.exceptions.PortNotFoundClient:
                        pass
                for subnet in neutron.list_subnets(network_id=net['id'])['subnets']:
                    neutron.delete_subnet(subnet['id'])
                neutron.delete_network(net['id'])
        except neutronclient.common.exceptions.Conflict:
            # ports still in use somewhere; wait and retry
            LOG.debug('waiting for all the ports to be freed...')
            time.sleep(5)
        else:
            return
Purge any trace of an existing OVB deployment .
12,723
def initialize_network(neutron):
    """Initialize an OVB network called provision_bob.

    Creates the network, a /24 subnet (rdo-m-subnet) with metadata host
    route, DNS and an allocation pool, then attaches it to the existing
    'router'.
    """
    body_sample = {"network": {"name": 'provision_bob', "admin_state_up": True, }}
    netw = neutron.create_network(body=body_sample)['network']
    body_create_subnet = {
        'subnets': [{
            'name': 'rdo-m-subnet',
            'cidr': '192.0.2.0/24',
            'ip_version': 4,
            'network_id': netw['id'],
            # metadata service reached through the BMC at .240
            'host_routes': [{
                'destination': '169.254.169.254/32',
                'nexthop': '192.0.2.240'}],
            'gateway_ip': '192.0.2.1',
            'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
            'allocation_pools': [{'start': '192.0.2.30', 'end': '192.0.2.199'}]}]}
    response = neutron.create_subnet(body=body_create_subnet)
    subnet_id = response['subnets'][0]['id']
    router = neutron.list_routers(name='router').get('routers')[0]
    response = neutron.add_interface_router(router['id'], {'subnet_id': subnet_id})
Initialize an OVB network called provision_bob .
12,724
def description_director(**kwargs):
    """Choose the description element class based on the qualifier.

    A 'physical' description maps to DCFormat; anything else to DCDescription.
    """
    qualifier = kwargs.get('qualifier')
    chosen_class = DCFormat if qualifier == 'physical' else DCDescription
    return chosen_class(qualifier=qualifier, content=kwargs.get('content'))
Direct which class should be used based on the director qualifier .
12,725
def add_child(self, child):
    """Append a child element after validating it against contained_children.

    Falsy children are silently ignored; an invalid tag raises
    DC_StructureException.
    """
    if not child:
        return
    if child.tag not in self.contained_children:
        raise DC_StructureException(
            'Invalid child "%s" for parent "%s"' % (child.tag, self.tag)
        )
    self.children.append(child)
This adds a child object to the current one . It will check the contained_children list to make sure that the object is allowable and throw an exception if not .
12,726
def determine_vocab(self, qualifier):
    """Resolve the controlled vocabulary for this element/qualifier pair.

    VOCAB_INDEX maps tags either to a vocabulary directly or to a
    per-qualifier dict (keyed by the string 'None' for no qualifier).
    """
    entry = VOCAB_INDEX.get(self.tag, None)
    if isinstance(entry, dict):
        lookup_key = 'None' if qualifier is None else qualifier
        return entry.get(lookup_key, None)
    if entry is not None:
        return entry
    return None
Determine the vocab from the qualifier .
12,727
def resolver(self, vocab_data, attribute):
    """Look up ``attribute`` for the current content term in the given vocabulary.

    Falls back to the raw content value when the term is not found.
    """
    for term in vocab_data.get(self.content_vocab, []):
        if term['name'] == self.content:
            return term[attribute]
    return self.content
Pull the requested attribute based on the given vocabulary and content .
12,728
def check_separator(self, data):
    """Guess which separator the tabular input ``data`` uses.

    Splits the data into rows, then accepts the first candidate whose field
    count is identical across rows — the last, possibly truncated, row is
    excluded from the comparison. Returns (separator_regex, fields_in_row_0)
    or None when nothing fits.
    """
    rows = data.split("\n")
    for candidate in (r'\t', r';', r',', r'|', r'\s+'):
        counts = [len(re.split(candidate, row)) for row in rows]
        if sum(counts) == len(rows):
            # every row produced exactly one field: separator never occurs
            continue
        spread = sum(abs(counts[i] - counts[i + 1])
                     for i in range(len(counts[1:-1])))
        if spread == 0:
            return candidate, counts[0]
    return None
This method evaluates a list of separators on the input data to determine which one is correct. It first splits the input by newline, then checks that the field count produced by each separator is equal for every row except the last, which may be incomplete due to the limited input data.
12,729
def head(self, file_path):
    """Read only the beginning of a file via the head handler.

    Delegates to the client's _find_items with a processor that calls
    _handle_head, returning the first truthy item. (The tail_only/append
    lambda defaults are never overridden here — presumably kept to match the
    processor signature expected by _find_items; confirm against the client.)
    """
    processor = lambda path, node, tail_only=True, append=False: \
        self._handle_head(path, node)
    for item in self._client._find_items(
            [file_path], processor, include_toplevel=True,
            include_children=False, recurse=False):
        if item:
            return item
Only read the first packets that arrive; try to cap the read at 1024 kB.
12,730
def packageExists(self, package):
    """Return True when the package index query for ``package`` succeeds.

    NOTE(review): an HTTP 200 with zero matches still yields True — confirm
    this is the intended semantics of "exists".
    """
    response = requests.get("%s/packages" % self.base_url,
                            params={"pattern": package})
    return response.status_code == requests.codes.ok
Check if the package already exists
12,731
def getGolangPackages(self):
    """Fetch metadata for all golang-* packages across all result pages.

    Returns a dict keyed by package name; each entry is augmented with its
    branch information, fetched in batches of MAX_LEN.
    """
    packages = {}
    url = "%s/packages" % self.base_url
    params = {"pattern": "golang-*", "limit": 200}
    response = requests.get(url, params=params)
    if response.status_code != requests.codes.ok:
        return {}
    data = response.json()
    for package in data["packages"]:
        packages[package["name"]] = self._processPackageData(package)
    # remaining pages; failed pages are skipped rather than aborting
    for page in range(2, data["page_total"] + 1):
        params = {"pattern": "golang-*", "limit": 200, "page": page}
        response = requests.get(url, params=params)
        if response.status_code != requests.codes.ok:
            continue
        data = response.json()
        for package in data["packages"]:
            packages[package["name"]] = self._processPackageData(package)
    MAX_LEN = 30
    # list() is required: dict views cannot be sliced (TypeError on Python 3)
    package_names = list(packages.keys())
    packages_total = len(package_names)
    packages_counter = 0
    logger.info("%s packages to process" % packages_total)
    for i in range(0, packages_total, MAX_LEN):
        sublist = package_names[i:i + MAX_LEN]
        branches = self._getPackageBranches(sublist)
        for package in sublist:
            packages[package]["branches"] = branches[package]
        packages_counter = packages_counter + len(branches)
        logger.info("%s/%s packages processed"
                    % (packages_counter, packages_total))
    return packages
Get a list of all golang packages for all available branches
12,732
def onClose(self, wasClean):
    """Shut down the service when the connection to crossbar is lost.

    Cancels all pending asyncio tasks and stops the event loop.
    """
    # fixed log message: the stray '%' was a broken format placeholder
    self.log.error('lost connection to crossbar on session %s'
                   % str(self.session_id))
    # NOTE(review): asyncio.Task.all_tasks() was removed in Python 3.9;
    # migrate to asyncio.all_tasks() when the runtime is confirmed >= 3.7.
    for task in asyncio.Task.all_tasks():
        task.cancel()
    asyncio.get_event_loop().stop()
Disconnect when connection to message broker is lost
12,733
def onUserError(self, fail, message):
    """Log both the failure object and its accompanying message."""
    for detail in (fail, message):
        self.log.error(detail)
Handle user errors
12,734
async def show_sessions(self):
    """Log details for every session currently attached to the realm.

    Lists session IDs via wamp.session.list, then fetches and logs each
    session's details.
    """
    res = await self.call("wamp.session.list")
    for session_id in res:
        session = await self.call("wamp.session.get", session_id)
        self.log.info(session)
Returns an object with a lists of the session IDs for all sessions currently attached to the realm
12,735
async def lookup_session(self, topic_name):
    """Look up and log the subscription for ``topic_name`` via the WAMP meta API."""
    res = await self.call("wamp.subscription.lookup", topic_name)
    self.log.info(res)
Attempts to find the session id for a given topic
12,736
def setup_runner(self):
    """Build the ApplicationRunner for this service.

    Uses the configured transport host, the fixed realm 'realm1', and passes
    the service config and handlers through the runner's ``extra`` payload.
    """
    runner = ApplicationRunner(
        url=self.config['transport_host'],
        realm=u'realm1',
        extra={
            'config': self.config,
            'handlers': self.handlers,
        })
    return runner
Setup instance of runner var
12,737
def reconnect(self):
    """Reconnect to crossbar, retrying until max_reconnect_retries is reached.

    Each attempt waits 5 s, probes the transport host, then waits another
    10 s before running the component so crossbar can finish initialising.
    Exits the process on retry exhaustion or Ctrl-C.
    """
    connect_attempt = 0
    max_retries = self.config['max_reconnect_retries']
    logging.info('attempting to reconnect to crossbar')
    runner = self.setup_runner()
    while True:
        if connect_attempt == max_retries:
            logging.info('max retries reached; stopping service')
            sys.exit(1)
        # make sure a usable event loop exists before retrying
        self.check_event_loop()
        try:
            logging.info('waiting 5 seconds')
            time.sleep(5)
            if self.check_transport_host():
                logging.info('waiting 10 seconds to ensure that crossbar has initialized before reconnecting')
                time.sleep(10)
                runner.run(Component)
            else:
                logging.error('crossbar host port 8080 not available...')
        except RuntimeError as error:
            logging.error(error)
        except ConnectionRefusedError as error:
            logging.error(error)
        except ConnectionError as error:
            logging.error(error)
        except KeyboardInterrupt:
            logging.info('User initiated shutdown')
            loop = asyncio.get_event_loop()
            loop.stop()
            sys.exit(1)
        connect_attempt += 1
Handle reconnect logic if connection to crossbar is lost
12,738
def reduceUselessAssignments(root: LNode):
    """Remove assignment nodes that are plain pass-throughs.

    Recurses into children first. An Assignment node with no indexing and a
    single west port is replaced by direct edges from its sources to its
    destinations; hidden RtlSignal sources are left untouched.
    """
    for n in root.children:
        if n.children:
            reduceUselessAssignments(n)
    do_update = False
    for n in root.children:
        if isinstance(n.originObj, Assignment) and \
                not n.originObj.indexes and len(n.west) == 1:
            src = n.originObj.src
            if isinstance(src, RtlSignalBase) and src.hidden:
                # hidden signals keep their assignment node
                continue
            if not do_update:
                # lazily snapshot the children the first time we remove one
                nodes = set(root.children)
                do_update = True
            nodes.remove(n)
            srcPorts = []
            dstPorts = []
            edgesToRemove = []
            inP = getSinglePort(n.west)
            outP = getSinglePort(n.east)
            for e in inP.incomingEdges:
                sPort = e.src
                srcPorts.append((sPort, e.originObj))
                edgesToRemove.append(e)
            for e in outP.outgoingEdges:
                dPort = e.dst
                dstPorts.append(dPort)
                edgesToRemove.append(e)
            for e in edgesToRemove:
                e.remove()
            # reconnect every source directly to every destination
            for srcPort, originObj in srcPorts:
                for dstPort in dstPorts:
                    root.addEdge(srcPort, dstPort, originObj=originObj)
    if do_update:
        root.children = list(nodes)
Remove assignments if it is only a direct connection and can be replaced with direct link
12,739
def _constructTypeQualifiedName(self, type, full=False):
    """Construct the (fully) qualified name for a parsed type node.

    Identifiers return their definition, pointers recurse into the pointee,
    selectors return "prefix.item" when ``full`` else just the item.
    Raises ValueError for other type kinds.
    """
    t = type["type"]
    if t == TYPE_IDENT:
        return type["def"]
    elif t == TYPE_POINTER:
        # NOTE(review): ``full`` is not propagated into the recursive call,
        # so a pointer-to-selector always yields the short name — confirm.
        return self._constructTypeQualifiedName(type["def"])
    elif t == TYPE_SELECTOR:
        if full:
            return "%s.%s" % (type["prefix"], type["item"])
        else:
            return type["item"]
    else:
        raise ValueError("Type %s can not be used for FQN" % t)
For given type construct its full qualified name .
12,740
def crop_to_bounding_box(image, offset_height, offset_width, target_height,
                         target_width, dynamic_shape=False):
    """Crop a 3-D image tensor to the specified bounding box.

    When ``dynamic_shape`` is False the offsets and targets are validated
    against the statically known dimensions; otherwise validation is skipped
    and shapes are resolved at run time.
    """
    image = ops.convert_to_tensor(image, name='image')
    _Check3DImage(image, require_static=(not dynamic_shape))
    height, width, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)
    if not dynamic_shape:
        if offset_width < 0:
            raise ValueError('offset_width must be >= 0.')
        if offset_height < 0:
            raise ValueError('offset_height must be >= 0.')
        if width < (target_width + offset_width):
            raise ValueError('width must be >= target + offset.')
        if height < (target_height + offset_height):
            raise ValueError('height must be >= target + offset.')
    # -1 keeps the full depth dimension
    cropped = array_ops.slice(
        image,
        array_ops.pack([offset_height, offset_width, 0]),
        array_ops.pack([target_height, target_width, -1]))
    return cropped
Crops an image to a specified bounding box .
12,741
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width, dynamic_shape=False):
    """Pad a 3-D image tensor with zeros to the specified height and width.

    The image is placed at (offset_height, offset_width) inside the target
    canvas. Static-shape inputs are validated; with ``dynamic_shape`` the
    checks are deferred to run time.
    """
    image = ops.convert_to_tensor(image, name='image')
    _Check3DImage(image, require_static=(not dynamic_shape))
    height, width, depth = _ImageDimensions(image, dynamic_shape=dynamic_shape)
    after_padding_width = target_width - offset_width - width
    after_padding_height = target_height - offset_height - height
    if not dynamic_shape:
        if target_width < width:
            raise ValueError('target_width must be >= width')
        if target_height < height:
            raise ValueError('target_height must be >= height')
        if after_padding_width < 0:
            raise ValueError('target_width not possible given '
                             'offset_width and image width')
        if after_padding_height < 0:
            raise ValueError('target_height not possible given '
                             'offset_height and image height')
    # skip the pad op entirely when no padding is needed and shapes are static
    if (dynamic_shape or offset_width or offset_height or
            after_padding_width or after_padding_height):
        paddings = array_ops.reshape(
            array_ops.pack([offset_height, after_padding_height,
                            offset_width, after_padding_width,
                            0, 0]),
            [3, 2])
        padded = array_ops.pad(image, paddings)
        if not dynamic_shape:
            padded.set_shape([target_height, target_width, depth])
    else:
        padded = image
    return padded
Pad image with zeros to the specified height and width .
12,742
def determine_completeness(py_untl):
    """Score a py_untl record's metadata completeness in [0.0, 1.0].

    Each scored attribute contributes its weight when present with a
    non-default, non-placeholder value; 'meta' only counts for the 'system'
    qualifier. Returns earned weight divided by total weight.
    """
    completeness_dict = {
        'title': {'present': False, 'weight': 10},
        'description': {'present': False, 'weight': 1},
        'language': {'present': False, 'weight': 1},
        'collection': {'present': False, 'weight': 10},
        'institution': {'present': False, 'weight': 10},
        'resourceType': {'present': False, 'weight': 5},
        'format': {'present': False, 'weight': 1},
        'subject': {'present': False, 'weight': 1},
        'meta': {'present': False, 'weight': 20},
    }
    total_points = sum(item['weight'] for item in completeness_dict.values())
    py_untl_object_score = 0.0
    for i in py_untl.children:
        if i.tag in PYUNTL_COMPLETENESS_SCORED_ATTRIBUTES and i.content:
            content = i.content.lower()
            match = bool(DEFAULT_VALUE_REGEX.search(content))
            # placeholder/default values do not count towards completeness
            if content not in COMMON_DEFAULT_ATTRIBUTE_VALUES and not match:
                if i.tag == 'meta':
                    if i.qualifier == 'system':
                        completeness_dict[i.tag]['present'] = True
                else:
                    completeness_dict[i.tag]['present'] = True
    # .items() replaces the Python-2-only .iteritems() (valid on both 2 and 3)
    for k, v in completeness_dict.items():
        if v['present']:
            py_untl_object_score += completeness_dict[k]['weight']
    completeness = py_untl_object_score / total_points
    return completeness
Take a Python untl and calculate the completeness .
12,743
def init_app(self, app, config_prefix=None):
    """Read JIRA settings from the Flask app config and initialise the client.

    Registers this instance under ``app.extensions`` (keyed by the lowercase
    config prefix) and tolerates an initial ConnectionError when the
    ``<PREFIX>_IGNORE_INITIAL_CONNECTION_FAILURE`` flag is set.
    """
    self.kill_session = self.original_kill_session
    config_prefix = (config_prefix or 'JIRA').rstrip('_').upper()
    if not hasattr(app, 'extensions'):
        app.extensions = dict()
    if config_prefix.lower() in app.extensions:
        raise ValueError('Already registered config prefix {0!r}.'.format(config_prefix))
    app.extensions[config_prefix.lower()] = _JIRAState(self, app)
    args = read_config(app.config, config_prefix)
    try:
        super(JIRA, self).__init__(**args)
    except ConnectionError:
        # opt-in tolerance for a JIRA server that is down at startup
        if not app.config.get('{0}_IGNORE_INITIAL_CONNECTION_FAILURE'.format(config_prefix)):
            raise
        LOG.exception('Ignoring ConnectionError.')
Actual method to read JIRA settings from app configuration and initialize the JIRA instance .
12,744
def zip_dict(a: Dict[str, A], b: Dict[str, B]) -> Dict[str, Tuple[Optional[A], Optional[B]]]:
    """Combine two dicts by key into (a_value, b_value) pairs.

    Keys present in only one dict get None for the missing side.
    """
    combined = {}
    for key in a.keys() | b.keys():
        combined[key] = (a.get(key), b.get(key))
    return combined
Combine the values within two dictionaries by key .
12,745
def flattenPort(port: LPort):
    """Yield ``port`` followed by all descendants depth-first, detaching children.

    A port's child list is cleared only after its children were visited.
    """
    yield port
    kids = port.children
    if kids:
        for child in kids:
            yield from flattenPort(child)
        kids.clear()
Flatten hierarchical ports
12,746
def _flattenPortsSide(side: List[LNode]) -> List[LNode]:
    """Return the side's ports with every hierarchical port expanded in place."""
    return [flat for port in side for flat in flattenPort(port)]
Flatten hierarchical ports on node side
12,747
def flattenPorts(root: LNode):
    """Flatten hierarchical ports on all four sides of every child of ``root``."""
    for child in root.children:
        for side in ('west', 'east', 'north', 'south'):
            setattr(child, side, _flattenPortsSide(getattr(child, side)))
Flatten ports to simplify layout generation
12,748
def set_missing_defaults(self):
    """Fill in default configuration values for any keys the user omitted."""
    defaults = {
        'pub_options': {'acknowledge': True, 'retain': True},
        'sub_options': {'get_retained': False},
        'subscribed_topics': None,
        'replay_events': False,
        'max_reconnect_retries': 10,
    }
    for key, value in defaults.items():
        if key not in self.config:
            self.config[key] = value
Ensure that minimal configuration is setup and set defaults for missing values
12,749
def config_sanity_check(self):
    """Validate minimal required configuration; raise EventifyConfigError when incomplete."""
    config = self.config
    has_publish_topic = ('publish_topic' in config
                         and 'topic' in config['publish_topic'])
    if 'name' not in config or not has_publish_topic:
        raise EventifyConfigError()
Base configuration sanity checks
12,750
def load_config(self):
    """Load the service's JSON configuration file.

    Raises EventifyConfigError when the configured file does not exist.
    """
    logger.debug('loading config file: %s', self.config_file)
    if not os.path.exists(self.config_file):
        logger.error('configuration file is required for eventify')
        logger.error('unable to load configuration for service')
        raise EventifyConfigError(
            'Configuration is required! Missing: %s' % self.config_file)
    with open(self.config_file) as file_handle:
        return json.load(file_handle)
Load configuration for the service
12,751
def check_event_loop():
    """Ensure a usable asyncio event loop exists, replacing a closed one.

    NOTE(review): asyncio.get_event_loop() is deprecated outside a running
    loop on modern Python — confirm the target runtime before relying on it.
    """
    loop = asyncio.get_event_loop()
    if loop.is_closed():
        asyncio.set_event_loop(asyncio.new_event_loop())
Check if event loop is closed and create a new event loop
12,752
def is_tomodir(subdirectories):
    """Return True when ``subdirectories`` contains every directory of a tomodir.

    A tomodir must provide the exe, config, rho, mod and inv subdirectories.
    """
    required = ('exe', 'config', 'rho', 'mod', 'inv')
    # all() replaces a flag variable that shadowed the function's own name
    return all(subdir in subdirectories for subdir in required)
provided with the subdirectories of a given directory check if this is a tomodir
12,753
def check_if_needs_modeling(tomodir):
    """Check whether CRMod still needs to run in the given tomodir.

    Modeling is needed only when all input files exist and no volt.dat has
    been produced yet.
    """
    print('check for modeling', tomodir)
    required_files = (
        'config' + os.sep + 'config.dat',
        'rho' + os.sep + 'rho.dat',
        'grid' + os.sep + 'elem.dat',
        'grid' + os.sep + 'elec.dat',
        'exe' + os.sep + 'crmod.cfg',
    )
    not_allowed = ('mod' + os.sep + 'volt.dat', )
    needs_modeling = True
    for blocker in not_allowed:
        if os.path.isfile(tomodir + os.sep + blocker):
            needs_modeling = False
    for required in required_files:
        full_file = tomodir + os.sep + required
        if not os.path.isfile(full_file):
            print('does not exist: ', full_file)
            needs_modeling = False
    return needs_modeling
Check if we need to run CRMod in a given tomodir.
12,754
def check_if_needs_inversion(tomodir):
    """Check whether CRTomo still needs to run in the given tomodir.

    Inversion is needed when the grid/config inputs exist, measured (or
    modelable) voltages are available, and no finished inv.ctr is present.
    """
    required_files = (
        'grid' + os.sep + 'elem.dat',
        'grid' + os.sep + 'elec.dat',
        'exe' + os.sep + 'crtomo.cfg',
    )
    needs_inversion = True
    for filename in required_files:
        if not os.path.isfile(tomodir + os.sep + filename):
            needs_inversion = False
    if not os.path.isfile(tomodir + os.sep + 'mod' + os.sep + 'volt.dat'):
        # no measured voltages; only usable if we can still model them
        if not check_if_needs_modeling(tomodir):
            print('no volt.dat and no modeling possible')
            needs_inversion = False
    inv_ctr_file = tomodir + os.sep + 'inv' + os.sep + 'inv.ctr'
    if os.path.isfile(inv_ctr_file):
        # 'with' closes the handle even on read errors (was left open before)
        with open(inv_ctr_file, 'r') as fid:
            inv_lines = fid.readlines()
        print('inv_lines', inv_lines[-1])
        if inv_lines[-1].startswith('***finished***'):
            needs_inversion = False
    return needs_inversion
Check if we need to run CRTomo in a given tomodir.
12,755
def add_boundary(self, p1, p2, btype):
    """Register a boundary line between p1 and p2 with the given boundary type."""
    line_index = self.add_line(p1, p2, self.char_lengths['boundary'])
    self.BoundaryIndices.append(line_index)
    self.Boundaries.append((p1, p2, btype))
Add a boundary line
12,756
def add_line(self, p1, p2, char_length):
    """Append a line between two points (creating point ids as needed).

    Returns the new line's 1-based index.
    """
    start_id = self.get_point_id(p1, char_length)
    end_id = self.get_point_id(p2, char_length)
    self.Lines.append((start_id, end_id))
    return len(self.Lines)
Add a line to the list . Check if the nodes already exist and add them if not .
12,757
def is_in(self, search_list, pair):
    """Return the index of ``pair`` in ``search_list`` (element-wise equality), else -1."""
    for position, candidate in enumerate(search_list):
        if np.all(candidate == pair):
            return position
    return -1
If pair is in search_list return the index . Otherwise return - 1
12,758
def read_electrodes(self, electrodes):
    """Register electrode positions, reusing existing point ids where possible."""
    # the enumerate() index was unused; iterate the positions directly
    for electrode in electrodes:
        index = self.get_point_id(electrode, self.char_lengths['electrode'])
        self.Electrodes.append(index)
Read in electrodes check if points already exist
12,759
def write_electrodes(self, filename):
    """Write electrode X Y coordinates, one pair per line, to ``filename``.

    A context manager guarantees the file handle is closed even when a
    write fails (the handle was previously left open on error).
    """
    with open(filename, 'w') as fid:
        for point_index in self.Electrodes:
            fid.write('{0} {1}\n'.format(self.Points[point_index][0],
                                         self.Points[point_index][1]))
Write X Y coordinates of electrodes
12,760
def write_boundaries(self, filename):
    """Write boundary lines as "X1 Y1 X2 Y2 TYPE" rows to ``filename``.

    The leftover debug print() was removed, and a context manager now
    guarantees the file handle is closed.
    """
    with open(filename, 'w') as fid:
        for boundary in self.Boundaries:
            fid.write('{0} {1} {2} {3} {4}\n'.format(
                boundary[0][0], boundary[0][1],
                boundary[1][0], boundary[1][1],
                boundary[2]))
Write boundary lines X1 Y1 X2 Y2 TYPE to file
12,761
def read_char_lengths(self, filename, electrode_filename):
    """Read characteristic mesh lengths from ``filename`` into self.char_lengths.

    The file holds either 4 values (electrode, boundary, extra_line,
    extra_node) or 1 value used for all four; a missing file yields all
    ones. A negative electrode length is interpreted as a fraction of the
    minimal electrode distance read from ``electrode_filename``.
    """
    if os.path.isfile(filename):
        data = np.atleast_1d(np.loadtxt(filename))
        if data.size == 4:
            characteristic_length = data
            if characteristic_length[0] < 0:
                try:
                    elec_positions = np.loadtxt(electrode_filename)
                except Exception:
                    # was a bare except:; keep the IOError contract but do
                    # not swallow KeyboardInterrupt/SystemExit any more
                    raise IOError(
                        'The was an error opening the electrode file')
                import scipy.spatial.distance
                distances = scipy.spatial.distance.pdist(elec_positions)
                characteristic_length[0] = min(distances) * np.abs(
                    characteristic_length[0])
                if characteristic_length[0] == 0:
                    raise Exception(
                        'Error computing electrode ' +
                        'distances (got a minimal distance of zero')
        else:
            characteristic_length = np.ones(4) * data[0]
    else:
        characteristic_length = np.ones(4)
    if np.any(characteristic_length <= 0):
        raise Exception('No negative characteristic lengths allowed ' +
                        '(except for electrode length')
    self.char_lengths = {}
    for key, item in zip(
            ('electrode', 'boundary', 'extra_line', 'extra_node'),
            characteristic_length):
        self.char_lengths[key] = item
Read characteristic lengths from the given file .
12,762
def write_points(self, fid):
    """Write all grid points as GMSH Point() commands to the open
    file object *fid*.

    GMSH point indices are 1-based, hence the offset on the counter.
    """
    template = 'Point({0}) = {{{1}, {2}, 0, {3}}};\n'
    for nr, point in enumerate(self.Points, start=1):
        fid.write(template.format(nr, point[0], point[1],
                                  self.Charlengths[nr - 1]))
Write the grid points to the GMSH - command file .
12,763
def get_output(cls, response: 'requests.Response') -> str:
    """Extract the senza CLI output embedded in *response*'s headers.

    The agent stores the output in the ``X-Lizzy-Output`` header with
    newlines escaped as the literal two characters ``\\n``; this
    unescapes them and prefixes every line with ``[AGENT] ``.

    The annotation is quoted so the function can be defined even when
    ``requests`` is not importable (behavior unchanged).
    """
    raw = response.headers['X-Lizzy-Output']
    # the header carries literal backslash-n sequences, not real newlines
    raw = raw.replace('\\n', '\n')
    return '\n'.join('[AGENT] {}'.format(line) for line in raw.splitlines())
Extracts the senza cli output from the response
12,764
def new_stack(self, keep_stacks: int, new_traffic: int,
              senza_yaml: dict, stack_version: str,
              disable_rollback: bool, parameters: List[str],
              region: Optional[str], dry_run: bool,
              tags: List[str]) -> (Dict[str, str], str):
    """Request creation of a new stack from the Lizzy agent.

    Returns a tuple of (decoded JSON response body, agent CLI output
    extracted via get_output).  Raises on HTTP error status via
    raise_for_status().
    """
    header = make_header(self.access_token)
    data = {'senza_yaml': yaml.dump(senza_yaml),
            'stack_version': stack_version,
            'disable_rollback': disable_rollback,
            'dry_run': dry_run,
            'keep_stacks': keep_stacks,
            'new_traffic': new_traffic,
            'parameters': parameters,
            'tags': tags}
    if region:
        # region is optional and only sent when explicitly given
        data['region'] = region
    # NOTE(review): verify=False disables TLS certificate verification --
    # confirm this is intentional for the agent endpoint
    request = self.stacks_url.post(json=data, headers=header, verify=False)
    request.raise_for_status()
    return request.json(), self.get_output(request)
Requests a new stack .
12,765
def pot_ana(r, rho):
    """Analytical potential of a unit point current source (I = 1 A) on
    the surface of a homogeneous half-space of resistivity *rho*,
    evaluated at distance *r*:  phi = I / (2 * pi * sigma * r).
    """
    current = 1.0
    conductivity = 1.0 / rho
    return np.divide(current, 2.0 * np.pi * conductivity * r)
Return the analytical potential in distance r over a homogeneous half - space
12,766
def compute_potentials_analytical_hs(grid, configs_raw, rho):
    """For each configuration, compute the superposed analytical
    potential of its current dipole (electrodes at config positions 0
    and 1) over a homogeneous half-space of resistivity *rho*,
    evaluated at all grid nodes.

    Returns a list with one per-node potential array per configuration.
    """
    potentials = []
    nodes_sorted = grid.nodes['sorted']
    # NOTE(review): nodes_raw is also taken from the 'sorted' nodes --
    # confirm this aliasing is intentional (and not grid.nodes['raw'])
    nodes_raw = grid.nodes['sorted']
    for config in configs_raw:
        print('potential configs', config)
        # positive current electrode
        e1_node = grid.get_electrode_node(config[0])
        print('e1_node', e1_node)
        electrode1 = nodes_sorted[e1_node][1:3]
        # distance of every node to electrode 1 (columns 1, 2 hold the
        # coordinates; column 0 presumably a node id -- TODO confirm)
        r1 = np.sqrt((nodes_raw[:, 1] - electrode1[0]) ** 2 +
                     (nodes_raw[:, 2] - electrode1[1]) ** 2)
        # negative current electrode
        e2_node = grid.get_electrode_node(config[1])
        print('e2_node', e2_node)
        electrode2 = nodes_sorted[e2_node][1:3]
        r2 = np.sqrt((nodes_raw[:, 1] - electrode2[0]) ** 2 +
                     (nodes_raw[:, 2] - electrode2[1]) ** 2)
        # superposition of source (+) and sink (-) potentials
        pot1 = pot_ana(r1, rho)
        pot2 = -pot_ana(r2, rho)
        pot12 = pot1 + pot2
        potentials.append(pot12)
    return potentials
Compute the potential superpositions of each current dipole in the configurations using the provided resistivity
12,767
def compute_voltages(grid, configs_raw, potentials_raw):
    """For each four-point configuration, compute the voltage between
    the two potential electrodes from the corresponding potential
    distribution.

    Parameters
    ----------
    grid : object exposing get_electrode_node(electrode) -> node index
    configs_raw : iterable of 4-sequences; entries 2 and 3 are the
        measuring electrode pair
    potentials_raw : iterable of per-node potential sequences, one per
        configuration

    Returns
    -------
    list of voltages, one per configuration.

    The debug print statements of the original were removed.
    """
    voltages = []
    for config, potentials in zip(configs_raw, potentials_raw):
        e3_node = grid.get_electrode_node(config[2])
        e4_node = grid.get_electrode_node(config[3])
        voltages.append(potentials[e3_node] - potentials[e4_node])
    return voltages
Given a list of potential distribution and corresponding four - point spreads compute the voltages
12,768
def vcf_writer(parser, keep, extract, args):
    """Write genotype data in VCF format.

    Parameters
    ----------
    parser : genotype parser providing get_samples() and iter_genotypes()
    keep : sample ids to keep (selection delegated to _get_sample_select)
    extract : marker names to extract, or None for all markers
    args : namespace with .output ("-" means stdout) and .maf

    Dosages go to the DS field; the GT hard call is the rounded dosage
    mapped through _VCF_GT_MAP; missing genotypes are written as ./.:.

    Fix vs. original: sys.stdout is no longer closed in the finally
    clause when output is "-" (closing it broke any later stdout use).
    """
    use_stdout = args.output == "-"
    output = sys.stdout if use_stdout else open(args.output, "w")
    try:
        samples = np.array(parser.get_samples(), dtype=str)
        k = _get_sample_select(samples=samples, keep=keep)
        output.write(_VCF_HEADER.format(
            date=datetime.today().strftime("%Y%m%d"),
            version=__version__,
            samples="\t".join(samples[k]),
        ))
        generator = _get_generator(parser=parser, extract=extract, keep=k,
                                   check_maf=args.maf)
        nb_extracted = 0
        for data in generator:
            genotypes = data.genotypes
            # allele frequency from the mean dosage, ignoring missing
            af = np.nanmean(genotypes) / 2
            print(data.variant.chrom, data.variant.pos, data.variant.name,
                  data.reference, data.coded, ".", "PASS",
                  "AF={}".format(af), "GT:DS",
                  sep="\t", end="", file=output)
            for geno in genotypes:
                if np.isnan(geno):
                    output.write("\t./.:.")
                else:
                    rounded_geno = int(round(geno, 0))
                    output.write("\t{}:{}".format(
                        _VCF_GT_MAP[rounded_geno], geno,
                    ))
            output.write("\n")
            nb_extracted += 1
        if nb_extracted == 0:
            logger.warning("No markers matched the extract list")
    finally:
        if not use_stdout:
            output.close()
Writes the data in VCF format .
12,769
def csv_writer(parser, keep, extract, args):
    """Write genotype data in CSV format, one row per sample per marker.

    Columns: sample_id, variant_id, chromosome, position, reference,
    coded, dosage, hard_call.  Missing genotypes produce empty dosage
    and hard_call fields.

    Fix vs. original: sys.stdout is no longer closed in the finally
    clause when args.output is "-" (closing it broke later stdout use).
    """
    use_stdout = args.output == "-"
    output = sys.stdout if use_stdout else open(args.output, "w")
    try:
        samples = np.array(parser.get_samples(), dtype=str)
        k = _get_sample_select(samples=samples, keep=keep)
        print("sample_id", "variant_id", "chromosome", "position",
              "reference", "coded", "dosage", "hard_call",
              sep=",", file=output)
        generator = _get_generator(parser=parser, extract=extract, keep=k,
                                   check_maf=args.maf)
        nb_extracted = 0
        for data in generator:
            genotypes = data.genotypes
            # map a rounded dosage (0/1/2) to an allele-pair string
            hard_call_mapping = {
                0: "{ref}/{ref}".format(ref=data.reference),
                1: "{ref}/{alt}".format(ref=data.reference, alt=data.coded),
                2: "{alt}/{alt}".format(alt=data.coded),
            }
            for sample, geno in zip(samples[k], genotypes):
                if np.isnan(geno):
                    geno = ""
                    hard_coded = ""
                else:
                    hard_coded = hard_call_mapping[int(round(geno, 0))]
                print(sample, data.variant.name, data.variant.chrom,
                      data.variant.pos, data.reference, data.coded,
                      geno, hard_coded, sep=",", file=output)
            nb_extracted += 1
        if nb_extracted == 0:
            logger.warning("No markers matched the extract list")
    finally:
        if not use_stdout:
            output.close()
Writes the data in CSV format .
12,770
def _get_generator(parser, extract, keep, check_maf):
    """Yield genotype records from *parser*, restricted to the markers
    in *extract* (when given) and to the samples selected by the index
    *keep*; re-codes to the minor allele when *check_maf* is set.
    """
    if extract is not None:
        # wrap the parser so only the requested markers are iterated
        parser = Extractor(parser, names=extract)
    for data in parser.iter_genotypes():
        data.genotypes = data.genotypes[keep]
        if check_maf:
            # make sure the coded allele is the minor one
            data.code_minor()
        yield data
Generates the data ( with extract markers and keep if required .
12,771
def bitterness(self, ibu_method, early_og, batch_size):
    """Calculate hop bitterness (IBU) with the chosen method.

    Parameters
    ----------
    ibu_method : "tinseth" or "rager"
    early_og : gravity at the start of the boil
    batch_size : batch volume

    Uses self.alpha (alpha acid percentage), self.amount, self.time
    (boil minutes) and self.utilization_factor().

    Raises
    ------
    Exception when *ibu_method* is not recognised.
    """
    if ibu_method == "tinseth":
        # Tinseth: bigness factor * boil-time factor * alpha-acid
        # concentration * utilization correction
        bitterness = 1.65 * math.pow(0.000125, early_og - 1.0) * ((1 - math.pow(math.e, -0.04 * self.time)) / 4.15) * ((self.alpha / 100.0 * self.amount * 1000000) / batch_size) * self.utilization_factor()
    elif ibu_method == "rager":
        utilization = 18.11 + 13.86 * math.tanh((self.time - 31.32) / 18.27)
        # gravity adjustment only kicks in above 1.050
        adjustment = max(0, (early_og - 1.050) / 0.2)
        bitterness = self.amount * 100 * utilization * self.utilization_factor() * self.alpha / (batch_size * (1 + adjustment))
    else:
        raise Exception("Unknown IBU method %s!" % ibu_method)
    return bitterness
Calculate bitterness based on chosen method
12,772
def _check_error ( response ) : if ( not response . ok ) or ( response . status_code != 200 ) : raise Exception ( response . json ( ) [ 'error' ] + ': ' + response . json ( ) [ 'error_description' ] )
Raises an exception if the Spark Cloud returned an error .
12,773
def _login(self, username, password):
    """Log in to the Spark Cloud and return an OAuth access token.

    Uses the resource-owner password grant with the public
    ('spark', 'spark') client credentials; raises via _check_error on
    a failed response.
    """
    data = {'username': username, 'password': password,
            'grant_type': 'password'}
    r = self.spark_api.oauth.token.POST(auth=('spark', 'spark'), data=data,
                                        timeout=self.timeout)
    self._check_error(r)
    return r.json()['access_token']
Proceed to login to the Spark Cloud and returns an access token .
12,774
def devices(self):
    """Create a dict mapping device name -> Device for all devices
    known to the user account.

    Detailed info (functions, variables, status) is fetched only for
    devices reported as connected; other devices keep None defaults.
    """
    params = {'access_token': self.access_token}
    r = self.spark_api.GET(params=params, timeout=self.timeout)
    self._check_error(r)
    json_list = r.json()
    devices_dict = {}
    if json_list:
        # collect every field seen across devices so the generated
        # namedtuple-backed Device class covers them all
        allKeys = {'functions', 'variables', 'api', 'requires_deep_update',
                   'status'}
        for device_json in json_list:
            allKeys.update(device_json.keys())
        Device = _BaseDevice.make_device_class(self, allKeys,
                                               timeout=self.timeout)
        for d in json_list:
            if d["connected"]:
                # only connected devices can be queried for details
                info = self._get_device_info(d['id'])
                d['functions'] = info.get('functions')
                d['variables'] = info.get('variables')
                d['api'] = self.spark_api(d['id'])
                d['requires_deep_update'] = d.get('requires_deep_update',
                                                  False)
                d['status'] = info.get('status')
            # fill any missing fields with None (list comprehension used
            # only for its side effect)
            [d.setdefault(key, None) for key in allKeys]
            devices_dict[d['name']] = Device(**d)
    return devices_dict
Create a dictionary of devices known to the user account .
12,775
def _get_device_info ( self , device_id ) : params = { 'access_token' : self . access_token } r = self . spark_api ( device_id ) . GET ( params = params , timeout = 30 ) self . _check_error ( r ) return r . json ( )
Queries the Spark Cloud for detailed information about a device .
12,776
def make_device_class(spark_cloud, entries, timeout=30):
    """Return a dynamic Device class based on what a GET device list
    from the Spark Cloud returns.

    *spark_cloud* is the calling SparkCloud instance; *entries* is the
    collection of fields the Spark Cloud API returned.  The class is a
    namedtuple over those fields (plus mandatory extras) extended with
    _BaseDevice behaviour; spark_cloud and timeout are attached as
    class attributes.
    """
    # deduplicate while guaranteeing the mandatory fields are present
    attrs = list(
        set(
            list(entries) +
            ['requires_deep_update', 'functions', 'variables', 'api',
             'status']
        )
    )
    return type(
        'Device',
        (_BaseDevice, namedtuple('Device', attrs)),
        {'__slots__': (), 'spark_cloud': spark_cloud, 'timeout': timeout}
    )
Returns a dynamic Device class based on what a GET device list from the Spark Cloud returns . spark_cloud parameter should be the caller instance of SparkCloud . entries parameter should be the list of fields the Spark Cloud API is returning .
12,777
def report_metric(metric_name: str, value: int, fail_silently: bool = True):
    """Try to report a metric, swallowing all errors unless
    *fail_silently* is False.

    No-op when the optional `metricz` dependency is missing.  The lizzy
    instance name is derived from the configured lizzy URL and attached
    as a tag together with the client version.
    """
    if metricz is None:
        # metrics support is optional; silently skip when unavailable
        return
    configuration = Configuration()
    try:
        lizzy_domain = urlparse(configuration.lizzy_url).netloc
        lizzy_name, _ = lizzy_domain.split('.', 1)
    except Exception:
        lizzy_name = 'UNKNOWN'
    tags = {'version': VERSION, 'lizzy': lizzy_name}
    try:
        # the writer is created with fail_silently=False so errors reach
        # the except clause below, where the caller's flag decides
        writer = metricz.MetricWriter(url=configuration.token_url,
                                      directory=configuration.credentials_dir,
                                      fail_silently=False)
        writer.write_metric(metric_name, value, tags, timeout=10)
    except Exception:
        if not fail_silently:
            raise
Tries to report a metric ignoring all errors
12,778
def get_form_bound_field(form, field_name):
    """Return the bound field of *form* for the given field name."""
    unbound = form.fields[field_name]
    return unbound.get_bound_field(form, field_name)
Intends to get the bound field from the form regarding the field name
12,779
def read(self, module_name):
    """Read the config file for *module_name* into self.parser.

    Only the last dotted component of the name is used; the file is
    expected at "<self.path>/<leaf>.ini".
    """
    leaf = module_name.rsplit(".", 1)[-1]
    self.parser.read("{}/{}.ini".format(self.path, leaf))
Read a particular config file
12,780
def get_for_nearest_ancestor(self, cls, attribute_name):
    """Find a prior configuration value for *attribute_name* on *cls*
    or the nearest of its ancestors.

    Walks the class family (cls first, then its parents, per family())
    and returns the first configured value found.

    Raises
    ------
    exc.PriorException when neither the class nor any ancestor defines
    the attribute.
    """
    for family_cls in family(cls):
        if self.has(family_cls.__module__, family_cls.__name__,
                    attribute_name):
            return self.get(family_cls.__module__, family_cls.__name__,
                            attribute_name)
    # the ini file is named after the class's defining module
    ini_filename = cls.__module__.split(".")[-1]
    raise exc.PriorException(
        "The prior config at {}/{} does not contain {} in {} or any of its parents".format(
            self.path, ini_filename, attribute_name, cls.__name__))
Find a prior with the attribute analysis_path from the config for this class or one of its ancestors
12,781
def fib(number: int) -> int:
    """Return the *number*-th Fibonacci number (fib(0)=0, fib(1)=1).

    Iterative implementation, O(n) time and O(1) space instead of the
    exponential naive double recursion.  Inputs below 2 (including
    negatives) are returned unchanged, matching the original base case.
    """
    if number < 2:
        return number
    prev, cur = 0, 1
    for _ in range(number - 1):
        prev, cur = cur, prev + cur
    return cur
Simple Fibonacci function .
12,782
def add_data(self, data):
    """Add one or more per-node data sets to the node value store.

    *data* is coerced to 2D; each row is one dataset with one value per
    grid node.  If the column count does not match the grid's node
    count but the row count does, the array is transposed.

    Returns the new dataset id (single dataset) or a list of ids.

    Raises
    ------
    Exception when neither axis matches the number of grid nodes.
    """
    subdata = np.atleast_2d(data)
    if subdata.shape[1] != self.grid.nr_of_nodes:
        if subdata.shape[0] == self.grid.nr_of_nodes:
            # data was provided column-wise; transpose to row-wise
            subdata = subdata.T
        else:
            raise Exception(
                'Number of values does not match the number of ' +
                'nodes in the grid {0} grid nodes vs {1} data'.format(
                    self.grid.nr_of_nodes,
                    subdata.shape,
                ))
    return_ids = []
    for dataset in subdata:
        cid = self._get_next_index()
        # store a copy so callers cannot mutate the stored data
        self.nodevals[cid] = dataset.copy()
        return_ids.append(cid)
    if len(return_ids) == 1:
        return return_ids[0]
    else:
        return return_ids
Add data to the node value sets
12,783
def instance(cls, *args, **kwgs):
    """Return the one shared instance of *cls*, creating it on first
    use with the given arguments (singleton accessor).

    Subsequent calls ignore their arguments and return the cached
    instance stored on the class.
    """
    try:
        return cls._instance
    except AttributeError:
        cls._instance = cls(*args, **kwgs)
        return cls._instance
Will be the only instance
12,784
def configure_logger(logger, filename, folder, log_level):
    """Attach console (and optionally file) handlers to *logger*.

    When *folder* is None no file handler is created; otherwise the log
    file is "<folder>/<filename>".  Both handlers and the logger itself
    are set to *log_level*, and both share the same format.
    """
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    handlers = [logging.StreamHandler()]
    if folder is not None:
        # file handler goes first, matching original attachment order
        handlers.insert(0, logging.FileHandler(os.path.join(folder, filename)))
    for handler in handlers:
        handler.setFormatter(formatter)
        handler.setLevel(log_level)
        logger.addHandler(handler)
    logger.setLevel(log_level)
Configure logging behvior for the simulations .
12,785
def _nargs(f) -> Optional[int]:
    """Number of positional arguments of *f*.

    Function instances carry the count in their nargs attribute; plain
    callables are inspected.  Returns None when *f* accepts *args.
    """
    if isinstance(f, Function):
        return f.nargs
    spec = inspect.getfullargspec(f)
    return None if spec.varargs is not None else len(spec.args)
number of positional arguments values . Dynamically computed from the arguments attribute .
12,786
def _ndefs(f):
    """Number of default values for the positional or keyword
    parameters of *f* (0 when there are none).

    Function instances carry the count in their ndefs attribute.
    """
    if isinstance(f, Function):
        return f.ndefs
    defaults = inspect.getfullargspec(f).defaults
    return 0 if defaults is None else len(defaults)
number of any default values for positional or keyword parameters
12,787
def singledispatch(*, nargs=None, nouts=None, ndefs=None):
    """Decorator factory wrapping a function into a
    SingleDispatchFunction while preserving its metadata via
    functools.wraps.
    """
    def wrapper(f):
        dispatcher = SingleDispatchFunction(f, nargs=nargs, nouts=nouts,
                                            ndefs=ndefs)
        return wraps(f)(dispatcher)
    return wrapper
singledispatch decorate of both functools . singledispatch and func
12,788
def multidispatch(*, nargs=None, nouts=None):
    """Decorator factory wrapping a function into a
    MultiDispatchFunction while preserving its metadata via
    functools.wraps.
    """
    def wrapper(f):
        dispatcher = MultiDispatchFunction(f, nargs=nargs, nouts=nouts)
        return wraps(f)(dispatcher)
    return wrapper
multidispatch decorate of both functools . singledispatch and func
12,789
def flip(f: Callable) -> Function:
    """Return a WrappedFunction with the first two positional arguments
    of *f* swapped; arity metadata (nargs/nouts/ndefs) is copied from
    *f*.
    """
    def flipped(*args, **kwargs):
        return f(args[1], args[0], *args[2:], **kwargs)
    return WrappedFunction(flipped, nargs=nargs(f), nouts=nouts(f),
                           ndefs=ndefs(f))
flip order of first two arguments to function .
12,790
def tagfunc(nargs=None, ndefs=None, nouts=None):
    """Decorator factory wrapping a function into a FunctionWithTag
    while preserving its metadata via functools.wraps.
    """
    def wrapper(f):
        tagged = FunctionWithTag(f, nargs=nargs, nouts=nouts, ndefs=ndefs)
        return wraps(f)(tagged)
    return wrapper
decorate of tagged function
12,791
def fmap(self, f: 'WrappedFunction') -> 'WrappedFunction':
    """Function map: return a WrappedFunction computing
    self(f(*args, **kwargs)).

    *f* is coerced to a WrappedFunction when necessary; the composition
    takes f's nargs and keeps self's nouts.
    """
    inner = f if isinstance(f, WrappedFunction) else WrappedFunction(f)
    def composed(*args, **kwargs):
        return self(inner(*args, **kwargs))
    return WrappedFunction(composed, nargs=inner.nargs, nouts=self.nouts)
function map for Wrapped Function . A forced transfermation to WrappedFunction would be applied . async def
12,792
def parse_atoms(self):
    """Parse all PDBx:atom_site tags and build per-chain ATOM -> SEQRES
    sequence maps (stored in self.atom_to_seqres_sequence_maps).

    All ATOM lines are parsed even though only one per residue is
    needed; parsing them all sanity-checks that the ATOM records within
    one residue are consistent with each other.

    NOTE(review): uses dict.iteritems and types.IntType -- this code is
    Python 2 only.
    """
    atom_site_header_tag = self.main_tag.getElementsByTagName(
        "PDBx:atom_siteCategory")
    assert (len(atom_site_header_tag) == 1)
    atom_site_header_tag = atom_site_header_tag[0]
    atom_site_tags = atom_site_header_tag.getElementsByTagName(
        "PDBx:atom_site")
    residue_map = {}
    residues_read = {}
    int_type = types.IntType
    for t in atom_site_tags:
        r, seqres, ResidueAA, Residue3AA = PDBML_slow.parse_atom_site(
            t, self.modified_residues)
        if r:
            # skip known-harmless ACE residues in whitelisted structures
            if not (self.pdb_id in cases_with_ACE_residues_we_can_ignore
                    and Residue3AA == 'ACE'):
                full_residue_id = str(r)
                if residues_read.get(full_residue_id):
                    # residue seen before: entries must agree
                    assert (residues_read[full_residue_id]
                            == (r.ResidueAA, seqres))
                else:
                    residues_read[full_residue_id] = (r.ResidueAA, seqres)
                residue_map[r.Chain] = residue_map.get(r.Chain, {})
                assert (type(seqres) == int_type)
                residue_map[r.Chain][str(r)] = seqres
    atom_to_seqres_sequence_maps = {}
    for chain_id, atom_seqres_mapping in residue_map.iteritems():
        atom_to_seqres_sequence_maps[chain_id] = SequenceMap.from_dict(
            atom_seqres_mapping)
    self.atom_to_seqres_sequence_maps = atom_to_seqres_sequence_maps
All ATOM lines are parsed even though only one per residue needs to be parsed . The reason for parsing all the lines is just to sanity - checks that the ATOMs within one residue are consistent with each other .
12,793
def parse_atom_site(self, name, attributes):
    """Handle the attributes of an atom-site subtag (SAX start event).

    Most atom subtags carry no attributes; the ones handled here mark
    nil values (xsi:nil) for the insertion code and the chain id on
    self.current_atom_site.
    """
    if name == "PDBx:pdbx_PDB_ins_code":
        # a nil flag must not already be set for this atom site
        assert (not (self.current_atom_site.ATOMResidueiCodeIsNull))
        if attributes.get('xsi:nil') == 'true':
            self.current_atom_site.ATOMResidueiCodeIsNull = True
    if name == "PDBx:auth_asym_id":
        assert (not (self.current_atom_site.PDBChainIDIsNull))
        if attributes.get('xsi:nil') == 'true':
            self.current_atom_site.PDBChainIDIsNull = True
Parse the atom tag attributes . Most atom tags do not have attributes .
12,794
def parse_atom_tag_data(self, name, tag_content):
    """Handle the character data of one atom-site subtag (SAX end
    event).

    Accumulates fields on self.current_atom_site; when the closing
    PDBx:atom_site tag is reached, validates the record and, for ATOM
    records, registers the residue in the chain -> residue -> seqres
    map.  HETATM records are ignored entirely.
    """
    current_atom_site = self.current_atom_site
    if current_atom_site.IsHETATM:
        # heteroatoms take no part in the ATOM -> SEQRES mapping
        return
    elif name == 'PDBx:atom_site':
        # end of one atom record: validate and convert
        self._BLOCK = None
        current_atom_site = self.current_atom_site
        current_atom_site.validate()
        if current_atom_site.IsATOM:
            r, seqres, ResidueAA, Residue3AA = \
                current_atom_site.convert_to_residue(self.modified_residues)
            if r:
                # skip known-harmless ACE residues in whitelisted structures
                if not (self.pdb_id in cases_with_ACE_residues_we_can_ignore
                        and Residue3AA == 'ACE'):
                    full_residue_id = str(r)
                    if self._residues_read.get(full_residue_id):
                        # residue seen before: entries must agree
                        assert (self._residues_read[full_residue_id]
                                == (r.ResidueAA, seqres))
                    else:
                        self._residues_read[full_residue_id] = \
                            (r.ResidueAA, seqres)
                    self._residue_map[r.Chain] = \
                        self._residue_map.get(r.Chain, {})
                    assert (type(seqres) == int_type)
                    self._residue_map[r.Chain][str(r)] = seqres
    elif name == 'PDBx:group_PDB':
        if tag_content == 'ATOM':
            current_atom_site.IsATOM = True
        elif tag_content == 'HETATM':
            current_atom_site.IsHETATM = True
        else:
            raise Exception("PDBx:group_PDB was expected to be 'ATOM' or 'HETATM'. '%s' read instead." % tag_content)
    elif name == 'PDBx:auth_asym_id':
        assert (not (current_atom_site.PDBChainID))
        current_atom_site.PDBChainID = tag_content
        if not tag_content:
            # empty chain id: only allowed when flagged nil earlier
            assert (current_atom_site.PDBChainIDIsNull)
            if self.pdb_id.upper() == '2MBP':
                # special case: 2MBP's unnamed chain is called A
                current_atom_site.PDBChainID = 'A'
            else:
                current_atom_site.PDBChainID = ' '
    elif name == 'PDBx:auth_seq_id':
        assert (not (current_atom_site.ATOMResidueID))
        current_atom_site.ATOMResidueID = int(tag_content)
    elif name == "PDBx:pdbx_PDB_ins_code":
        if current_atom_site.ATOMResidueiCodeIsNull:
            # nil insertion code must come with empty content
            assert (len(tag_content) == 0)
        else:
            assert (current_atom_site.ATOMResidueiCode == ' ')
            current_atom_site.ATOMResidueiCode = tag_content
    elif name == "PDBx:auth_comp_id":
        assert (not (current_atom_site.ATOMResidueAA))
        current_atom_site.ATOMResidueAA = tag_content
    elif name == "PDBx:label_seq_id":
        assert (not (current_atom_site.SEQRESIndex))
        current_atom_site.SEQRESIndex = int(tag_content)
    elif name == "PDBx:label_comp_id":
        assert (not (current_atom_site.ATOMSeqresResidueAA))
        current_atom_site.ATOMSeqresResidueAA = tag_content
Parse the atom tag data .
12,795
def create_atom_data(self):
    """Convert the accumulated atom-site record into residue data.

    HETATM records are skipped (an all-None 4-tuple is returned); ATOM
    records are converted via convert_to_residue.  Raises when the
    record is neither.
    """
    site = self.current_atom_site
    if site.IsHETATM:
        return None, None, None, None
    if site.IsATOM:
        return site.convert_to_residue(self.modified_residues)
    raise Exception('current_atom_site')
The atom site work is split into two parts . This function type - converts the tags .
12,796
def import_source(module, path, pass_errors=False):
    """Import a module given the full path of its source file.

    Returns the imported module, or None when importing fails.  When
    *pass_errors* is False a failure traceback is printed (previously
    the parameter was accepted but ignored).

    Uses importlib instead of the deprecated/removed ``imp`` module;
    like imp.load_source, the module is registered in sys.modules.
    """
    import importlib.util
    import sys
    import traceback
    try:
        spec = importlib.util.spec_from_file_location(module, path)
        m = importlib.util.module_from_spec(spec)
        # imp.load_source also registered the module in sys.modules
        sys.modules[module] = m
        spec.loader.exec_module(m)
        return m
    except Exception:
        # keep sys.modules clean on failure
        sys.modules.pop(module, None)
        if not pass_errors:
            print(traceback.format_exc())
        return None
Function imports a module given full path
12,797
def import_module(module, pass_errors=False):
    """Import a module given its (possibly dotted) name.

    Returns the leaf module, or None on failure.  ImportErrors are
    silenced when *pass_errors* is True; otherwise the failure
    traceback is printed.

    Fix vs. original: the original indexed frm[1], which raised
    IndexError for undotted names, so e.g. import_module('json')
    incorrectly returned None.  An empty fromlist returns the top-level
    module and a non-empty one the leaf, so parts[1:] handles both.
    """
    parts = module.split('.')
    try:
        return __import__(module, fromlist=parts[1:])
    except ImportError:
        if pass_errors:
            return None
        print(traceback.format_exc())
        return None
    except Exception:
        print(traceback.format_exc())
        return None
Function imports a module given module name
12,798
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy the contents of directory *src* into *dst*.

    *dst* is created when missing; top-level files already present in
    *dst* are overwritten by same-named files from *src*.

    NOTE(review): ANY exception during the copy -- not only an existing
    destination sub-folder -- is re-raised as FolderExistsError with a
    "Folder already exists" message, which can mislead; consider
    narrowing the except clause.
    """
    if not os.path.exists(dst):
        os.mkdir(dst)
    try:
        for item in os.listdir(src):
            s = os.path.join(src, item)
            d = os.path.join(dst, item)
            if os.path.isdir(s):
                # shutil.copytree requires d to not exist yet
                shutil.copytree(s, d, symlinks, ignore)
            else:
                shutil.copy2(s, d)
    except Exception as e:
        raise FolderExistsError("Folder already exists in %s" % dst)
Function recursively copies from directory to directory .
12,799
def empty(key, dict):
    """Return True when *key* is missing from *dict* or maps to a falsy
    value, False otherwise.

    The parameter keeps its original name `dict` for backward
    compatibility even though it shadows the builtin.  dict.get(key)
    is falsy both for a missing key and for a falsy value, replacing
    the original two-step membership + truthiness check (which also
    scanned .keys() needlessly).
    """
    return not dict.get(key)
Function determines if the dict key exists or it is empty