idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
49,400 | def add ( self , layer , items ) : for k in items . iterkeys ( ) : if k in self . model [ layer ] : raise Exception ( 'item %s is already in layer %s' % ( k , layer ) ) self . model [ layer ] . update ( items ) for k , v in items . iteritems ( ) : getattr ( self , layer ) . add ( k , v [ 'module' ] , v . get ( 'package' ) ) | Add items in model . |
49,401 | def delete ( self , layer , items ) : items = _listify ( items ) layer_obj = self . edit ( layer , dict . fromkeys ( items ) , delete = True ) for k in items : if k in layer_obj . layer : layer_obj . delete ( k ) else : raise AttributeError ( 'item %s missing from layer %s' % ( k , layer ) ) | Delete items in model . |
49,402 | def save ( self , modelfile , layer = None ) : if layer : obj = { layer : self . model [ layer ] } else : obj = self . model with open ( modelfile , 'w' ) as fp : json . dump ( obj , fp , indent = 2 , sort_keys = True ) | Save a model file . |
49,403 | def command ( self , cmd , progress_hook = None , * args , ** kwargs ) : cmds = cmd . split ( None , 1 ) sim_names = cmds [ 1 : ] if not sim_names : sim_names = self . cmd_layer . reg . iterkeys ( ) for sim_name in sim_names : sim_cmd = getattr ( self . cmd_layer . reg [ sim_name ] , cmd ) sim_cmd ( self , progress_hook = progress_hook , * args , ** kwargs ) | Execute a model command . |
49,404 | def login ( self , password ) : if self . logged_in : raise RuntimeError ( "User already logged in!" ) params = { "name" : self . nick , "password" : password } resp = self . conn . make_api_call ( "login" , params ) if "error" in resp : raise RuntimeError ( f"Login failed: {resp['error'].get('message') or resp['error']}" ) self . session = resp [ "session" ] self . conn . make_call ( "useSession" , self . session ) self . conn . cookies . update ( { "session" : self . session } ) self . logged_in = True return True | Attempts to log in as the current user with given password |
49,405 | def login_transplant ( self , other ) : if not other . logged_in : raise ValueError ( "Other room is not logged in" ) cookie = other . session if not cookie : raise ValueError ( "Other room has no cookie" ) self . conn . cookies . update ( { "session" : cookie } ) self . session = cookie self . logged_in = True return True | Attempts to carry over the login state from another room |
49,406 | def logout ( self ) : if not self . logged_in : raise RuntimeError ( "User is not logged in" ) if self . conn . connected : params = { "room" : self . conn . room . room_id } resp = self . conn . make_api_call ( "logout" , params ) if not resp . get ( "success" , False ) : raise RuntimeError ( f"Logout unsuccessful: " f"{resp['error'].get('message') or resp['error']}" ) self . conn . make_call ( "logout" , params ) self . conn . cookies . pop ( "session" ) self . logged_in = False | Logs your user out |
49,407 | def register ( self , password ) : if len ( password ) < 8 : raise ValueError ( "Password must be at least 8 characters." ) params = { "name" : self . nick , "password" : password } resp = self . conn . make_api_call ( "register" , params ) if "error" in resp : raise RuntimeError ( f"{resp['error'].get('message') or resp['error']}" ) self . conn . make_call ( "useSession" , resp [ "session" ] ) self . conn . cookies . update ( { "session" : resp [ "session" ] } ) self . logged_in = True | Registers the current user with the given password . |
49,408 | def __verify_username ( self , username ) : if len ( username ) > self . __max_length or len ( username ) < 3 : raise ValueError ( f"Username must be between 3 and {self.__max_length} characters." ) if any ( c not in string . ascii_letters + string . digits for c in username ) : raise ValueError ( "Usernames can only contain alphanumeric characters." ) | Raises an exception if the given username is not valid . |
49,409 | def update ( self , value , timestamp = None ) : if timestamp is None : timestamp = self . current_time_in_fractional_seconds ( ) self . rescale_if_needed ( ) priority = self . weight ( timestamp - self . start_time ) / random . random ( ) self . values [ priority ] = value if len ( self . values ) > self . size : self . values . remove_min ( ) | Add a value to the reservoir . |
49,410 | def get_objects ( classpath , calling_classpath = "" ) : module_name , class_name = classpath . rsplit ( '.' , 1 ) module = importlib . import_module ( module_name , calling_classpath ) try : klass = getattr ( module , class_name ) except AttributeError : raise AttributeError ( "module {} has no attribute {} parsing {}" . format ( module . __name__ , class_name , classpath ) ) return module , klass | given a classpath like foo . bar . Baz return module foo . bar and class Baz objects |
49,411 | def make_dict ( fields , fields_kwargs ) : ret = { } if fields : ret . update ( fields ) if fields_kwargs : ret . update ( fields_kwargs ) return ret | lot s of methods take a dict or kwargs this combines those |
49,412 | def write_line ( self , line , count = 1 ) : self . write ( line ) self . write_newlines ( count ) | writes the line and count newlines after the line |
49,413 | def add ( self , key , val , priority = None ) : if key in self . item_finder : self . remove ( key ) else : if self . full ( ) : raise OverflowError ( "Queue is full" ) if priority is None : priority = next ( self . counter ) item = [ priority , key , val ] self . item_finder [ key ] = item heapq . heappush ( self . pq , item ) | add a value to the queue with priority using the key to know uniqueness |
49,414 | def remove ( self , key ) : item = self . item_finder . pop ( key ) item [ - 1 ] = None self . removed_count += 1 | remove the value found at key from the queue |
49,415 | def full ( self ) : if not self . size : return False return len ( self . pq ) == ( self . size + self . removed_count ) | Return True if the queue is full |
49,416 | def check_photometry_categorize ( x , y , levels , tags = None ) : x = numpy . asarray ( x ) y = numpy . asarray ( y ) ys = y . copy ( ) ys . sort ( ) m = ys [ len ( ys ) // 2 : ] . mean ( ) y /= m m = 1.0 s = ys [ len ( ys ) // 2 : ] . std ( ) result = [ ] if tags is None : tags = list ( six . moves . range ( len ( levels ) + 1 ) ) for l , t in zip ( levels , tags ) : indc = y < l if indc . any ( ) : x1 = x [ indc ] y1 = y [ indc ] result . append ( ( x1 , y1 , t ) ) x = x [ ~ indc ] y = y [ ~ indc ] else : result . append ( ( x , y , tags [ - 1 ] ) ) return result , ( m , s ) | Put every point in its category . |
49,417 | def client_authentication ( self , request , auth = None , ** kwargs ) : return verify_client ( self . endpoint_context , request , auth ) | Do client authentication |
49,418 | def construct ( self , response_args , request , ** kwargs ) : response_args = self . do_pre_construct ( response_args , request , ** kwargs ) response = self . response_cls ( ** response_args ) return self . do_post_construct ( response , request , ** kwargs ) | Construct the response |
49,419 | def inputs ( form_args ) : element = [ ] for name , value in form_args . items ( ) : element . append ( '<input type="hidden" name="{}" value="{}"/>' . format ( name , value ) ) return "\n" . join ( element ) | Creates list of input elements |
49,420 | def verify_uri ( endpoint_context , request , uri_type , client_id = None ) : try : _cid = request [ "client_id" ] except KeyError : _cid = client_id if not _cid : logger . error ( 'No client id found' ) raise UnknownClient ( 'No client_id provided' ) _redirect_uri = unquote ( request [ uri_type ] ) part = urlparse ( _redirect_uri ) if part . fragment : raise URIError ( "Contains fragment" ) ( _base , _query ) = splitquery ( _redirect_uri ) if _query : _query = parse_qs ( _query ) match = False try : values = endpoint_context . cdb [ _cid ] [ '{}s' . format ( uri_type ) ] except KeyError : raise ValueError ( 'No registered {}' . format ( uri_type ) ) else : for regbase , rquery in values : if _base == regbase : if rquery : if not _query : raise ValueError ( 'Missing query part' ) for key , vals in rquery . items ( ) : if key not in _query : raise ValueError ( '"{}" not in query part' . format ( key ) ) for val in vals : if val not in _query [ key ] : raise ValueError ( '{}={} value not in query part' . format ( key , val ) ) if _query : if not rquery : raise ValueError ( 'No registered query part' ) for key , vals in _query . items ( ) : if key not in rquery : raise ValueError ( '"{}" extra in query part' . format ( key ) ) for val in vals : if val not in rquery [ key ] : raise ValueError ( 'Extra {}={} value in query part' . format ( key , val ) ) match = True break if not match : raise RedirectURIError ( "Doesn't match any registered uris" ) | A redirect URI MUST NOT contain a fragment MAY contain query component |
49,421 | def get_uri ( endpoint_context , request , uri_type ) : if uri_type in request : verify_uri ( endpoint_context , request , uri_type ) uri = request [ uri_type ] else : try : _specs = endpoint_context . cdb [ str ( request [ "client_id" ] ) ] [ "{}s" . format ( uri_type ) ] except KeyError : raise ParameterError ( "Missing {} and none registered" . format ( uri_type ) ) else : if len ( _specs ) > 1 : raise ParameterError ( "Missing {} and more than one registered" . format ( uri_type ) ) else : uri = join_query ( * _specs [ 0 ] ) return uri | verify that the redirect URI is reasonable |
49,422 | def post_authentication ( self , user , request , sid , ** kwargs ) : response_info = { } try : permission = self . endpoint_context . authz ( user , client_id = request [ 'client_id' ] ) except ToOld as err : return self . error_response ( response_info , 'access_denied' , 'Authentication to old {}' . format ( err . args ) ) except Exception as err : return self . error_response ( response_info , 'access_denied' , '{}' . format ( err . args ) ) else : try : self . endpoint_context . sdb . update ( sid , permission = permission ) except Exception as err : return self . error_response ( response_info , 'server_error' , '{}' . format ( err . args ) ) logger . debug ( "response type: %s" % request [ "response_type" ] ) if self . endpoint_context . sdb . is_session_revoked ( sid ) : return self . error_response ( response_info , "access_denied" , "Session is revoked" ) response_info = create_authn_response ( self , request , sid ) try : redirect_uri = get_uri ( self . endpoint_context , request , 'redirect_uri' ) except ( RedirectURIError , ParameterError ) as err : return self . error_response ( response_info , 'invalid_request' , '{}' . format ( err . args ) ) else : response_info [ 'return_uri' ] = redirect_uri _cookie = new_cookie ( self . endpoint_context , sub = user , sid = sid , state = request [ 'state' ] , client_id = request [ 'client_id' ] , cookie_name = self . endpoint_context . cookie_name [ 'session' ] ) if "response_mode" in request : try : response_info = self . response_mode ( request , ** response_info ) except InvalidRequest as err : return self . error_response ( response_info , 'invalid_request' , '{}' . format ( err . args ) ) response_info [ 'cookie' ] = [ _cookie ] return response_info | Things that are done after a successful authentication . |
49,423 | def authz_part2 ( self , user , authn_event , request , ** kwargs ) : sid = setup_session ( self . endpoint_context , request , user , authn_event = authn_event ) try : resp_info = self . post_authentication ( user , request , sid , ** kwargs ) except Exception as err : return self . error_response ( { } , 'server_error' , err ) if "check_session_iframe" in self . endpoint_context . provider_info : ec = self . endpoint_context salt = rndstr ( ) if ec . sdb . is_session_revoked ( sid ) : pass else : authn_event = ec . sdb . get_authentication_event ( sid ) _state = json . dumps ( { 'authn_time' : authn_event [ 'authn_time' ] } ) session_cookie = ec . cookie_dealer . create_cookie ( json . dumps ( _state ) , typ = "session" , cookie_name = ec . cookie_name [ 'session_management' ] ) opbs = session_cookie [ ec . cookie_name [ 'session_management' ] ] _session_state = compute_session_state ( opbs . value , salt , request [ "client_id" ] , resp_info [ 'return_uri' ] ) if 'cookie' in resp_info : if isinstance ( resp_info [ 'cookie' ] , list ) : resp_info [ 'cookie' ] . append ( session_cookie ) else : append_cookie ( resp_info [ 'cookie' ] , session_cookie ) else : resp_info [ 'cookie' ] = session_cookie resp_info [ 'response_args' ] [ 'session_state' ] = _session_state resp_info [ 'response_args' ] [ 'iss' ] = self . endpoint_context . issuer resp_info [ 'response_args' ] [ 'client_id' ] = request [ 'client_id' ] return resp_info | After the authentication this is where you should end up |
49,424 | def process_request ( self , request_info = None , ** kwargs ) : if isinstance ( request_info , AuthorizationErrorResponse ) : return request_info _cid = request_info [ "client_id" ] cinfo = self . endpoint_context . cdb [ _cid ] try : cookie = kwargs [ 'cookie' ] except KeyError : cookie = '' else : del kwargs [ 'cookie' ] if proposed_user ( request_info ) : kwargs [ 'req_user' ] = proposed_user ( request_info ) else : try : _login_hint = request_info [ 'login_hint' ] except KeyError : pass else : if self . endpoint_context . login_hint_lookup : kwargs [ 'req_user' ] = self . endpoint_context . login_hint_lookup [ _login_hint ] info = self . setup_auth ( request_info , request_info [ "redirect_uri" ] , cinfo , cookie , ** kwargs ) if 'error' in info : return info try : _function = info [ 'function' ] except KeyError : logger . debug ( "- authenticated -" ) logger . debug ( "AREQ keys: %s" % request_info . keys ( ) ) res = self . authz_part2 ( info [ 'user' ] , info [ 'authn_event' ] , request_info , cookie = cookie ) return res else : try : return { 'http_response' : _function ( ** info [ 'args' ] ) , 'return_uri' : request_info [ "redirect_uri" ] } except Exception as err : logger . exception ( err ) return { 'http_response' : 'Internal error: {}' . format ( err ) } | The AuthorizationRequest endpoint |
49,425 | def setup_session ( endpoint_context , areq , uid , client_id = '' , acr = '' , salt = 'salt' , authn_event = None ) : if authn_event is None and acr : authn_event = AuthnEvent ( uid = uid , salt = salt , authn_info = acr , authn_time = time . time ( ) ) if not client_id : client_id = areq [ 'client_id' ] sid = endpoint_context . sdb . create_authz_session ( authn_event , areq , client_id = client_id , uid = uid ) endpoint_context . sdb . do_sub ( sid , uid , '' ) return sid | Setting up a user session |
49,426 | def update ( self , sid , ** kwargs ) : item = self [ sid ] for attribute , value in kwargs . items ( ) : item [ attribute ] = value self [ sid ] = item | Add attribute value assertion to a special session |
49,427 | def update_by_token ( self , token , ** kwargs ) : _sid = self . handler . sid ( token ) return self . update ( _sid , ** kwargs ) | Updated the session info . Any type of known token can be used |
49,428 | def replace_token ( self , sid , sinfo , token_type ) : try : refresh_token = self . handler [ token_type ] ( sid , sinfo = sinfo ) except KeyError : pass else : try : self . handler [ token_type ] . black_list ( sinfo [ token_type ] ) except KeyError : pass sinfo [ token_type ] = refresh_token return sinfo | Replace an old refresh_token with a new one |
49,429 | def refresh_token ( self , token , new_refresh = False ) : try : _tinfo = self . handler [ 'refresh_token' ] . info ( token ) except KeyError : return False if is_expired ( int ( _tinfo [ 'exp' ] ) ) or _tinfo [ 'black_listed' ] : raise ExpiredToken ( ) _sid = _tinfo [ 'sid' ] session_info = self [ _sid ] session_info = self . replace_token ( _sid , session_info , 'access_token' ) session_info [ "token_type" ] = self . handler [ 'access_token' ] . token_type if new_refresh : session_info = self . replace_token ( _sid , session_info , 'refresh_token' ) self [ _sid ] = session_info return session_info | Issue a new access token using a valid refresh token |
49,430 | def is_token_valid ( self , token ) : try : _tinfo = self . handler . info ( token ) except KeyError : return False if is_expired ( int ( _tinfo [ 'exp' ] ) ) or _tinfo [ 'black_listed' ] : return False session_info = self [ _tinfo [ 'sid' ] ] if session_info [ "oauth_state" ] == "authz" : if _tinfo [ 'handler' ] != self . handler [ 'code' ] : return False elif session_info [ "oauth_state" ] == "token" : if _tinfo [ 'handler' ] != self . handler [ 'access_token' ] : return False return True | Checks validity of a given token |
49,431 | def revoke_token ( self , token , token_type = '' ) : if token_type : self . handler [ token_type ] . black_list ( token ) else : self . handler . black_list ( token ) | Revokes access token |
49,432 | def revoke_session ( self , sid = '' , token = '' ) : if not sid : if token : sid = self . handler . sid ( token ) else : raise ValueError ( 'Need one of "sid" or "token"' ) for typ in [ 'access_token' , 'refresh_token' , 'code' ] : try : self . revoke_token ( self [ sid ] [ typ ] , typ ) except KeyError : pass self . update ( sid , revoked = True ) | Mark session as revoked but also explicitly revoke all issued tokens |
49,433 | def pick_auth ( endpoint_context , areq , all = False ) : acrs = [ ] try : if len ( endpoint_context . authn_broker ) == 1 : return endpoint_context . authn_broker . default ( ) if "acr_values" in areq : if not isinstance ( areq [ "acr_values" ] , list ) : areq [ "acr_values" ] = [ areq [ "acr_values" ] ] acrs = areq [ "acr_values" ] else : try : acrs = areq [ "claims" ] [ "id_token" ] [ "acr" ] [ "values" ] except KeyError : try : _ith = areq [ verified_claim_name ( "id_token_hint" ) ] except KeyError : try : _hint = areq [ 'login_hint' ] except KeyError : pass else : if endpoint_context . login_hint2acrs : acrs = endpoint_context . login_hint2acrs ( _hint ) else : try : acrs = [ _ith [ 'acr' ] ] except KeyError : pass if not acrs : return endpoint_context . authn_broker . default ( ) for acr in acrs : res = endpoint_context . authn_broker . pick ( acr ) logger . debug ( "Picked AuthN broker for ACR %s: %s" % ( str ( acr ) , str ( res ) ) ) if res : if all : return res else : return res [ 0 ] except KeyError as exc : logger . debug ( "An error occurred while picking the authN broker: %s" % str ( exc ) ) return None | Pick authentication method |
49,434 | def get_method ( self , cls_name ) : for id , spec in self . db . items ( ) : if spec [ "method" ] . __class__ . __name__ == cls_name : yield spec [ "method" ] | Generator that returns all registered authenticators based on a specific authentication class . |
49,435 | def pick ( self , acr = None ) : if acr is None : return self . db . values ( ) else : return self . _pick_by_class_ref ( acr ) | Given the authentication context find zero or more authn methods that could be used . |
49,436 | def update_config ( self ) : filter = self . config [ 'FILTER_MASK' ] rows = len ( filter ) cols = len ( filter [ 0 ] ) filter_f = __builtin__ . open ( self . config [ 'FILTER_NAME' ] , 'w' ) filter_f . write ( "CONV NORM\n" ) filter_f . write ( "# %dx%d Generated from sextractor.py module.\n" % ( rows , cols ) ) for row in filter : filter_f . write ( " " . join ( map ( repr , row ) ) ) filter_f . write ( "\n" ) filter_f . close ( ) parameters_f = __builtin__ . open ( self . config [ 'PARAMETERS_NAME' ] , 'w' ) for parameter in self . config [ 'PARAMETERS_LIST' ] : print ( parameter , file = parameters_f ) parameters_f . close ( ) nnw_f = __builtin__ . open ( self . config [ 'STARNNW_NAME' ] , 'w' ) nnw_f . write ( nnw_config ) nnw_f . close ( ) main_f = __builtin__ . open ( self . config [ 'CONFIG_FILE' ] , 'w' ) for key in self . config . keys ( ) : if ( key in SExtractor . _SE_config_special_keys ) : continue if ( key == "PHOT_AUTOPARAMS" ) : value = " " . join ( map ( str , self . config [ key ] ) ) else : value = str ( self . config [ key ] ) print ( ( "%-16s %-16s # %s" % ( key , value , SExtractor . _SE_config [ key ] [ 'comment' ] ) ) , file = main_f ) main_f . close ( ) | Update the configuration files according to the current in - memory SExtractor configuration . |
49,437 | def run ( self , file , updateconfig = True , clean = False , path = None ) : if updateconfig : self . update_config ( ) self . program , self . version = self . setup ( path ) commandline = ( self . program + " -c " + self . config [ 'CONFIG_FILE' ] + " " + file ) rcode = os . system ( commandline ) if ( rcode ) : raise SExtractorException ( "SExtractor command [%s] failed." % commandline ) if clean : self . clean ( ) | Run SExtractor . |
49,438 | def offsets_from_wcs ( frames , pixref ) : result = numpy . zeros ( ( len ( frames ) , pixref . shape [ 1 ] ) ) with frames [ 0 ] . open ( ) as hdulist : wcsh = wcs . WCS ( hdulist [ 0 ] . header ) skyref = wcsh . wcs_pix2world ( pixref , 1 ) for idx , frame in enumerate ( frames [ 1 : ] ) : with frame . open ( ) as hdulist : wcsh = wcs . WCS ( hdulist [ 0 ] . header ) pixval = wcsh . wcs_world2pix ( skyref , 1 ) result [ idx + 1 ] = - ( pixval [ 0 ] - pixref [ 0 ] ) return result | Compute offsets between frames using WCS information . |
49,439 | def _check_deployed_nodes ( nodes ) : deployed = [ ] undeployed = [ ] cmd = "! (mount | grep -E '^/dev/[[:alpha:]]+2 on / ')" deployed_check = get_execo_remote ( cmd , nodes , DEFAULT_CONN_PARAMS ) for p in deployed_check . processes : p . nolog_exit_code = True p . nolog_timeout = True p . nolog_error = True p . timeout = 10 deployed_check . run ( ) for p in deployed_check . processes : if p . ok : deployed . append ( p . host . address ) else : undeployed . append ( p . host . address ) return deployed , undeployed | This is borrowed from execo . |
49,440 | def get_networks ( self ) : networks = self . c_resources [ "networks" ] result = [ ] for net in networks : _c_network = net . get ( "_c_network" ) if _c_network is None : continue roles = utils . get_roles_as_list ( net ) result . append ( ( roles , _c_network ) ) return result | Get the networks assoiated with the resource description . |
49,441 | def get_roles ( self ) : machines = self . c_resources [ "machines" ] result = { } for desc in machines : roles = utils . get_roles_as_list ( desc ) hosts = self . _denormalize ( desc ) for role in roles : result . setdefault ( role , [ ] ) result [ role ] . extend ( hosts ) return result | Get the roles associated with the hosts . |
49,442 | async def start_client ( self , sock : anyio . abc . SocketStream , addr , path : str , headers : Optional [ List ] = None , subprotocols : Optional [ List [ str ] ] = None ) : self . _sock = sock self . _connection = WSConnection ( ConnectionType . CLIENT ) if headers is None : headers = [ ] if subprotocols is None : subprotocols = [ ] data = self . _connection . send ( Request ( host = addr [ 0 ] , target = path , extra_headers = headers , subprotocols = subprotocols ) ) await self . _sock . send_all ( data ) assert self . _scope is None self . _scope = True try : event = await self . _next_event ( ) if not isinstance ( event , AcceptConnection ) : raise ConnectionError ( "Failed to establish a connection" , event ) return event finally : self . _scope = None | Start a client WS connection on this socket . |
49,443 | async def start_server ( self , sock : anyio . abc . SocketStream , filter = None ) : assert self . _scope is None self . _scope = True self . _sock = sock self . _connection = WSConnection ( ConnectionType . SERVER ) try : event = await self . _next_event ( ) if not isinstance ( event , Request ) : raise ConnectionError ( "Failed to establish a connection" , event ) msg = None if filter is not None : msg = await filter ( event ) if not msg : msg = RejectConnection ( ) elif msg is True : msg = None elif isinstance ( msg , str ) : msg = AcceptConnection ( subprotocol = msg ) if not msg : msg = AcceptConnection ( subprotocol = event . subprotocols [ 0 ] ) data = self . _connection . send ( msg ) await self . _sock . send_all ( data ) if not isinstance ( msg , AcceptConnection ) : raise ConnectionError ( "Not accepted" , msg ) finally : self . _scope = None | Start a server WS connection on this socket . |
49,444 | async def _next_event ( self ) : while True : for event in self . _connection . events ( ) : if isinstance ( event , Message ) : if event . message_finished : return self . _wrap_data ( self . _gather_buffers ( event ) ) self . _buffer ( event ) break else : return event data = await self . _sock . receive_some ( 4096 ) if not data : return CloseConnection ( code = 500 , reason = "Socket closed" ) self . _connection . receive_data ( data ) | Gets the next event . |
49,445 | async def close ( self , code : int = 1006 , reason : str = "Connection closed" ) : if self . _closed : return self . _closed = True if self . _scope is not None : await self . _scope . cancel ( ) data = self . _connection . send ( CloseConnection ( code = code , reason = reason ) ) await self . _sock . send_all ( data ) await self . _sock . close ( ) | Closes the websocket . |
49,446 | async def send ( self , data : Union [ bytes , str ] , final : bool = True ) : MsgType = TextMessage if isinstance ( data , str ) else BytesMessage data = MsgType ( data = data , message_finished = final ) data = self . _connection . send ( event = data ) await self . _sock . send_all ( data ) | Sends some data down the connection . |
49,447 | def _buffer ( self , event : Message ) : if isinstance ( event , BytesMessage ) : self . _byte_buffer . write ( event . data ) elif isinstance ( event , TextMessage ) : self . _string_buffer . write ( event . data ) | Buffers an event if applicable . |
49,448 | def _gather_buffers ( self , event : Message ) : if isinstance ( event , BytesMessage ) : buf = self . _byte_buffer else : buf = self . _string_buffer buf . write ( event . data ) buf . seek ( 0 ) data = buf . read ( ) buf . seek ( 0 ) buf . truncate ( ) return data | Gathers all the data from a buffer . |
49,449 | def _wrap_data ( data : Union [ str , bytes ] ) : MsgType = TextMessage if isinstance ( data , str ) else BytesMessage return MsgType ( data = data , frame_finished = True , message_finished = True ) | Wraps data into the right event . |
49,450 | def get_field_SQL ( self , field_name , field ) : field_type = "" is_pk = field . options . get ( 'pk' , False ) if issubclass ( field . type , bool ) : field_type = 'BOOLEAN' elif issubclass ( field . type , long ) : if is_pk : field_type = 'INTEGER PRIMARY KEY' else : field_type = 'BIGINT' elif issubclass ( field . type , int ) : field_type = 'INTEGER' if is_pk : field_type += ' PRIMARY KEY' elif issubclass ( field . type , basestring ) : fo = field . options if field . is_ref ( ) : ref_s = field . schema fo = ref_s . pk . options if 'size' in fo : field_type = 'CHARACTER({})' . format ( fo [ 'size' ] ) elif 'max_size' in fo : field_type = 'VARCHAR({})' . format ( fo [ 'max_size' ] ) else : field_type = 'TEXT' if fo . get ( 'ignore_case' , False ) : field_type += ' COLLATE NOCASE' if is_pk : field_type += ' PRIMARY KEY' elif issubclass ( field . type , datetime . datetime ) : field_type = 'TIMESTAMP' elif issubclass ( field . type , datetime . date ) : field_type = 'DATE' elif issubclass ( field . type , float ) : field_type = 'REAL' size = field . options . get ( 'size' , field . options . get ( 'max_size' , 0 ) ) if size > 6 : field_type = 'DOUBLE PRECISION' elif issubclass ( field . type , decimal . Decimal ) : field_type = 'NUMERIC' elif issubclass ( field . type , bytearray ) : field_type = 'BLOB' else : raise ValueError ( 'unknown python type: {}' . format ( field . type . __name__ ) ) if field . required : field_type += ' NOT NULL' else : field_type += ' NULL' if not is_pk : if field . is_ref ( ) : ref_s = field . schema if field . required : field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE' . format ( ref_s , ref_s . pk . name ) else : field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL' . format ( ref_s , ref_s . pk . name ) return '{} {}' . format ( self . _normalize_name ( field_name ) , field_type ) | returns the SQL for a given field with full type information |
49,451 | def _get_fields ( self , table_name , ** kwargs ) : ret = { } query_str = 'PRAGMA table_info({})' . format ( self . _normalize_table_name ( table_name ) ) fields = self . _query ( query_str , ** kwargs ) query_str = 'PRAGMA foreign_key_list({})' . format ( self . _normalize_table_name ( table_name ) ) fks = { f [ "from" ] : f for f in self . _query ( query_str , ** kwargs ) } pg_types = { "INTEGER" : int , "BIGINT" : long , "DOUBLE PRECISION" : float , "FLOAT" : float , "REAL" : float , "NUMERIC" : decimal . Decimal , "BOOLEAN" : bool , "DATE" : datetime . date , "TIMESTAMP" : datetime . datetime , "CHARACTER" : str , "VARCHAR" : str , "TEXT" : str , "BLOB" : bytearray , } for row in fields : field = { "name" : row [ "name" ] , "field_required" : bool ( row [ "notnull" ] ) or bool ( row [ "pk" ] ) , "pk" : bool ( row [ "pk" ] ) , } for tname , ty in pg_types . items ( ) : if row [ "type" ] . startswith ( tname ) : field [ "field_type" ] = ty break if field [ "pk" ] and field [ "field_type" ] is int : field [ "field_type" ] = long if row [ "name" ] in fks : field [ "schema_table_name" ] = fks [ row [ "name" ] ] [ "table" ] field [ "ref_table_name" ] = fks [ row [ "name" ] ] [ "table" ] ret [ field [ "name" ] ] = field return ret | return all the fields for the given table |
49,452 | def _normalize_date_SQL ( self , field_name , field_kwargs , symbol ) : fstrs = [ ] k_opts = { 'day' : "CAST(strftime('%d', {}) AS integer)" , 'hour' : "CAST(strftime('%H', {}) AS integer)" , 'doy' : "CAST(strftime('%j', {}) AS integer)" , 'julian_day' : "strftime('%J', {})" , 'month' : "CAST(strftime('%m', {}) AS integer)" , 'minute' : "CAST(strftime('%M', {}) AS integer)" , 'dow' : "CAST(strftime('%w', {}) AS integer)" , 'week' : "CAST(strftime('%W', {}) AS integer)" , 'year' : "CAST(strftime('%Y', {}) AS integer)" } for k , v in field_kwargs . items ( ) : fstrs . append ( [ k_opts [ k ] . format ( self . _normalize_name ( field_name ) ) , self . val_placeholder , v ] ) return fstrs | allow extracting information from date |
49,453 | def _normalize_sort_SQL ( self , field_name , field_vals , sort_dir_str ) : fvi = None if sort_dir_str == 'ASC' : fvi = ( t for t in enumerate ( field_vals ) ) else : fvi = ( t for t in enumerate ( reversed ( field_vals ) ) ) query_sort_str = [ ' CASE {}' . format ( self . _normalize_name ( field_name ) ) ] query_args = [ ] for i , v in fvi : query_sort_str . append ( ' WHEN {} THEN {}' . format ( self . val_placeholder , i ) ) query_args . append ( v ) query_sort_str . append ( ' END' ) query_sort_str = "\n" . join ( query_sort_str ) return query_sort_str , query_args | allow sorting by a set of values |
49,454 | def register ( self , new_calc , * args , ** kwargs ) : kwargs . update ( zip ( self . meta_names , args ) ) if isinstance ( kwargs [ 'dependencies' ] , basestring ) : kwargs [ 'dependencies' ] = [ kwargs [ 'dependencies' ] ] super ( CalcRegistry , self ) . register ( new_calc , ** kwargs ) | Register calculations and meta data . |
49,455 | def slits_to_ds9_reg ( ds9reg , slits ) : ds9reg . write ( '# Region file format: DS9 version 4.1\n' ) ds9reg . write ( 'global color=green dashlist=8 3 width=1 font="helvetica 10 ' 'normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 ' 'move=1 delete=1 include=1 source=1\n' ) ds9reg . write ( 'physical\n' ) for idx , slit in enumerate ( slits , 1 ) : xpos1 , y2 , xpos2 , y2 , xpos2 , y1 , xpos1 , y1 = slit xc = 0.5 * ( xpos1 + xpos2 ) + 1 yc = 0.5 * ( y1 + y2 ) + 1 xd = ( xpos2 - xpos1 ) yd = ( y2 - y1 ) ds9reg . write ( 'box({0},{1},{2},{3},0)\n' . format ( xc , yc , xd , yd ) ) ds9reg . write ( '# text({0},{1}) color=red text={{{2}}}\n' . format ( xpos1 - 5 , yc , idx ) ) ds9reg . write ( '# text({0},{1}) color=red text={{{2}}}\n' . format ( xpos2 + 5 , yc , idx + EMIR_NBARS ) ) | Transform fiber traces to ds9 - region format . |
49,456 | def id_maker ( obj ) : dtfmt = '%Y%m%d-%H%M%S' return '%s-%s' % ( obj . __class__ . __name__ , datetime . now ( ) . strftime ( dtfmt ) ) | Makes an ID from the object s class name and the datetime now in ISO format . |
49,457 | def register ( self , sim , * args , ** kwargs ) : kwargs . update ( zip ( self . meta_names , args ) ) super ( SimRegistry , self ) . register ( sim , ** kwargs ) | register simulation and metadata . |
49,458 | def check_data ( self , data ) : data_objs = { data_src : data . objects . get ( data_src ) for data_src in data . layer } self . _is_data_loaded = all ( data_objs . values ( ) ) return data_objs | Check if data loaded for all sources in data layer . |
49,459 | def initialize ( self , calc_reg ) : self . _isinitialized = True self . calc_order = topological_sort ( calc_reg . dependencies ) | Initialize the simulation . Organize calculations by dependency . |
49,460 | def index_iterator ( self ) : idx = 0 while idx < self . number_intervals : new_idx = yield idx idx += 1 if new_idx : idx = new_idx - 1 | Generator that resumes from same index or restarts from sent index . |
49,461 | def save_ds9 ( output , filename ) : ds9_file = open ( filename , 'wt' ) ds9_file . write ( output ) ds9_file . close ( ) | Save ds9 region output info filename . |
49,462 | def save_four_ds9 ( rectwv_coeff , debugplot = 0 ) : for limits , rectified , suffix in zip ( [ 'frontiers' , 'frontiers' , 'boundaries' , 'boundaries' ] , [ False , True , False , True ] , [ 'rawimage' , 'rectified' , 'rawimage' , 'rectified' ] ) : output = rectwv_coeff_to_ds9 ( rectwv_coeff = rectwv_coeff , limits = limits , rectified = rectified ) filename = 'ds9_' + limits + '_' + suffix + '.reg' if abs ( debugplot ) >= 10 : print ( '>>> Saving: ' , filename ) save_ds9 ( output , filename ) | Save the 4 possible ds9 region files . |
49,463 | def save_spectral_lines_ds9 ( rectwv_coeff , debugplot = 0 ) : for spectral_lines , rectified , suffix in zip ( [ 'arc' , 'arc' , 'oh' , 'oh' ] , [ False , True , False , True ] , [ 'rawimage' , 'rectified' , 'rawimage' , 'rectified' ] ) : output = spectral_lines_to_ds9 ( rectwv_coeff = rectwv_coeff , spectral_lines = spectral_lines , rectified = rectified ) filename = 'ds9_' + spectral_lines + '_' + suffix + '.reg' if abs ( debugplot ) >= 10 : print ( '>>> Saving: ' , filename ) save_ds9 ( output , filename ) | Save expected location of arc and OH airglow to ds9 region files . |
49,464 | def create_providerinfo ( self , capabilities ) : _pinfo = self . package_capabilities ( ) not_supported = { } for key , val in capabilities . items ( ) : try : allowed = _pinfo [ key ] except KeyError : _pinfo [ key ] = val else : if isinstance ( allowed , bool ) : if allowed is False : if val is True : not_supported [ key ] = True else : _pinfo [ key ] = val elif isinstance ( allowed , str ) : if val != allowed : not_supported [ key ] = val elif isinstance ( allowed , list ) : if isinstance ( val , str ) : sv = { val } else : try : sv = set ( val ) except TypeError : if key == 'response_types_supported' : sv = set ( ) for v in val : v . sort ( ) sv . add ( ' ' . join ( v ) ) else : raise else : sv = set ( ) for v in val : vs = v . split ( ' ' ) vs . sort ( ) sv . add ( ' ' . join ( vs ) ) sa = set ( allowed ) if ( sv & sa ) == sv : _pinfo [ key ] = list ( sv ) else : not_supported [ key ] = list ( sv - sa ) if not_supported : _msg = "Server doesn't support the following features: {}" . format ( not_supported ) logger . error ( _msg ) raise ConfigurationError ( _msg ) if self . jwks_uri and self . keyjar : _pinfo [ "jwks_uri" ] = self . jwks_uri for name , instance in self . endpoint . items ( ) : if name not in [ 'webfinger' , 'provider_info' ] : _pinfo [ '{}_endpoint' . format ( name ) ] = instance . full_path return _pinfo | Dynamically create the provider info response |
49,465 | def _write_utf8 ( write , value ) : write ( 'h' , len ( value ) ) write . io . write ( value . encode ( 'utf-8' ) ) | Writes a length - prefixed UTF - 8 string . |
49,466 | def read ( cls , read , has_name = True ) : name = cls . _read_utf8 ( read ) if has_name else None if cls is TAG_Compound : final = { } while True : tag = read ( 'b' , 1 ) [ 0 ] if tag == 0 : break tmp = _tags [ tag ] . read ( read ) final [ tmp . name ] = tmp return cls ( final , name = name ) elif cls is TAG_List : tag_type , length = read ( 'bi' , 5 ) tag_read = _tags [ tag_type ] . read return cls ( _tags [ tag_type ] , [ tag_read ( read , has_name = False ) for x in range ( 0 , length ) ] , name = name ) elif cls is TAG_String : value = cls . _read_utf8 ( read ) return cls ( value , name = name ) elif cls is TAG_Byte_Array : length = read ( 'i' , 4 ) [ 0 ] return cls ( read ( '{0}b' . format ( length ) , length ) , name = name ) elif cls is TAG_Int_Array : length = read ( 'i' , 4 ) [ 0 ] return cls ( read ( '{0}i' . format ( length ) , length * 4 ) , name = name ) elif cls is TAG_Long_Array : length = read ( 'i' , 4 ) [ 0 ] return cls ( read ( '{0}q' . format ( length ) , length * 8 ) , name = name ) elif cls is TAG_Byte : return cls ( read ( 'b' , 1 ) [ 0 ] , name = name ) elif cls is TAG_Short : return cls ( read ( 'h' , 2 ) [ 0 ] , name = name ) elif cls is TAG_Int : return cls ( read ( 'i' , 4 ) [ 0 ] , name = name ) elif cls is TAG_Long : return cls ( read ( 'q' , 8 ) [ 0 ] , name = name ) elif cls is TAG_Float : return cls ( read ( 'f' , 4 ) [ 0 ] , name = name ) elif cls is TAG_Double : return cls ( read ( 'd' , 8 ) [ 0 ] , name = name ) elif cls is TAG_End : return cls ( read ( '2b' , 2 ) [ 0 ] , name = name ) | Read the tag in using the reader rd . If has_name is False skip reading the tag name . |
49,467 | def pretty ( self , indent = 0 , indent_str = ' ' ) : return '{0}{1}({2!r}): {3!r}' . format ( indent_str * indent , self . __class__ . __name__ , self . name , self . value ) | Pretty - print a tag in the same general style as Markus s example output . |
49,468 | def factory ( ec , code = None , token = None , refresh = None , ** kwargs ) : TTYPE = { 'code' : 'A' , 'token' : 'T' , 'refresh' : 'R' } args = { } if code : args [ 'code_handler' ] = init_token_handler ( ec , code , TTYPE [ 'code' ] ) if token : args [ 'access_token_handler' ] = init_token_handler ( ec , token , TTYPE [ 'token' ] ) if refresh : args [ 'refresh_token_handler' ] = init_token_handler ( ec , token , TTYPE [ 'refresh' ] ) return TokenHandler ( ** args ) | Create a token handler |
49,469 | def info ( self , token ) : _res = dict ( zip ( [ '_id' , 'type' , 'sid' , 'exp' ] , self . split_token ( token ) ) ) if _res [ 'type' ] != self . type : raise WrongTokenType ( _res [ 'type' ] ) else : _res [ 'handler' ] = self _res [ 'black_listed' ] = self . is_black_listed ( token ) return _res | Return token information . |
49,470 | def process_wildcard ( fractions ) : wildcard_zs = set ( ) total_fraction = 0.0 for z , fraction in fractions . items ( ) : if fraction == '?' : wildcard_zs . add ( z ) else : total_fraction += fraction if not wildcard_zs : return fractions balance_fraction = ( 1.0 - total_fraction ) / len ( wildcard_zs ) for z in wildcard_zs : fractions [ z ] = balance_fraction return fractions | Processes element with a wildcard ? weight fraction and returns composition balanced to 1 . 0 . |
49,471 | def generate_name ( atomic_fractions ) : if not atomic_fractions : return '' if len ( atomic_fractions ) == 1 : z = list ( atomic_fractions . keys ( ) ) [ 0 ] return pyxray . element_symbol ( z ) symbols = [ ] fractions = [ ] for z in sorted ( atomic_fractions . keys ( ) , reverse = True ) : symbols . append ( pyxray . element_symbol ( z ) ) fractions . append ( Fraction ( atomic_fractions [ z ] ) . limit_denominator ( ) ) gcds = [ ] for a , b in itertools . combinations ( fractions , 2 ) : gcds . append ( math . gcd ( a . denominator , b . denominator ) ) smallest_gcd = min ( gcds ) name = '' for symbol , fraction in zip ( symbols , fractions ) : mole_fraction = int ( fraction * smallest_gcd ) if mole_fraction == 0 : continue elif mole_fraction == 1 : name += "%s" % symbol else : name += '%s%i' % ( symbol , mole_fraction ) return name | Generates a name from the composition . The name is generated on the basis of a classical chemical formula . |
49,472 | def from_pure ( cls , z ) : return cls ( cls . _key , { z : 1.0 } , { z : 1.0 } , pyxray . element_symbol ( z ) ) | Creates a pure composition . |
49,473 | async def _collect_sample ( self , url , url_pattern ) : samples = [ ] urls = [ self . path_generator . generate_url ( url , url_pattern ) for _ in range ( self . confirmation_factor ) ] iterator = asyncio . as_completed ( [ self . _fetch_sample ( url ) for url in urls ] ) for promise in iterator : try : sig = await promise if sig : samples . append ( sig ) except RejectRequest as e : pass if not samples : raise StopRequest ( "Impossible to obtain sample" ) else : return samples | Sample collection is meant to be very tolerant to generic failures as failing to obtain the sample has important consequences on the results . |
49,474 | def get_boundaries ( bounddict_file , slitlet_number ) : bounddict = json . loads ( open ( bounddict_file . name ) . read ( ) ) pol_lower_boundary = None pol_upper_boundary = None xmin_lower = None xmax_lower = None xmin_upper = None xmax_upper = None csu_bar_slit_center = None slitlet_label = "slitlet" + str ( slitlet_number ) . zfill ( 2 ) if slitlet_label in bounddict [ 'contents' ] . keys ( ) : list_date_obs = list ( bounddict [ 'contents' ] [ slitlet_label ] . keys ( ) ) list_date_obs . sort ( ) num_date_obs = len ( list_date_obs ) if num_date_obs == 1 : date_obs = list_date_obs [ 0 ] tmp_dict = bounddict [ 'contents' ] [ slitlet_label ] [ date_obs ] pol_lower_boundary = Polynomial ( tmp_dict [ 'boundary_coef_lower' ] ) pol_upper_boundary = Polynomial ( tmp_dict [ 'boundary_coef_upper' ] ) xmin_lower = tmp_dict [ 'boundary_xmin_lower' ] xmax_lower = tmp_dict [ 'boundary_xmax_lower' ] xmin_upper = tmp_dict [ 'boundary_xmin_upper' ] xmax_upper = tmp_dict [ 'boundary_xmax_upper' ] csu_bar_slit_center = tmp_dict [ 'csu_bar_slit_center' ] else : raise ValueError ( "num_date_obs =" , num_date_obs , " (must be 1)" ) else : print ( "WARNING: slitlet number " + str ( slitlet_number ) + " is not available in " + bounddict_file . name ) return pol_lower_boundary , pol_upper_boundary , xmin_lower , xmax_lower , xmin_upper , xmax_upper , csu_bar_slit_center | Read the bounddict json file and return the polynomial boundaries . |
49,475 | def _to_enos_roles ( roles ) : def to_host ( h ) : extra = { } for nic , roles in h [ "nics" ] : for role in roles : extra [ role ] = nic return Host ( h [ "host" ] , user = "root" , extra = extra ) enos_roles = { } for role , hosts in roles . items ( ) : enos_roles [ role ] = [ to_host ( h ) for h in hosts ] logger . debug ( enos_roles ) return enos_roles | Transform the roles to use enoslib . host . Host hosts . |
49,476 | def _to_enos_networks ( networks ) : nets = [ ] for roles , network in networks : nets . append ( network . to_enos ( roles ) ) logger . debug ( nets ) return nets | Transform the networks returned by deploy5k . |
49,477 | def init ( self , force_deploy = False , client = None ) : _force_deploy = self . provider_conf . force_deploy self . provider_conf . force_deploy = _force_deploy or force_deploy self . _provider_conf = self . provider_conf . to_dict ( ) r = api . Resources ( self . _provider_conf , client = client ) r . launch ( ) roles = r . get_roles ( ) networks = r . get_networks ( ) return ( _to_enos_roles ( roles ) , _to_enos_networks ( networks ) ) | Reserve and deploys the nodes according to the resources section |
49,478 | def destroy ( self ) : r = api . Resources ( self . provider_conf . to_dict ( ) ) r . destroy ( ) | Destroys the jobs . |
49,479 | def _find_wikipedia_names ( self , name_en ) : url = 'https://en.wikipedia.org/w/api.php' params = { 'action' : 'query' , 'titles' : name_en , 'prop' : 'langlinks' , 'lllimit' : 500 , 'format' : 'json' } r = requests . get ( url , params = params ) if not r : raise ValueError ( 'Could not find wikipedia page: {0}' . format ( name_en ) ) out = r . json ( ) names = { } pages = out [ 'query' ] [ 'pages' ] for page in pages : for langlink in pages [ page ] . get ( 'langlinks' , [ ] ) : names [ langlink [ 'lang' ] ] = langlink [ '*' ] return names | Finds all Wikipedia pages referring to the specified name in English and returns a dictionary where the keys are the language code and the values are the titles of the corresponding pages . |
49,480 | def append_response ( self , response ) : self . _responses . append ( response ) if 'Warning' in response . headers : LOGGER . warning ( 'HTTP %s %s Warning (%s): %s (attempt %s)' , response . request . method , response . request . url , response . code , response . headers [ 'Warning' ] , len ( self . _responses ) ) | Append the response to the stack of responses . |
49,481 | def body ( self ) : if not self . _responses : return None if self . _responses [ - 1 ] . code >= 400 : return self . _error_message ( ) return self . _deserialize ( ) | Returns the HTTP response body deserialized if possible . |
49,482 | def links ( self ) : if not self . _responses : return None if 'Link' in self . _responses [ - 1 ] . headers : links = [ ] for l in headers . parse_link ( self . _responses [ - 1 ] . headers [ 'Link' ] ) : link = { 'target' : l . target } link . update ( { k : v for ( k , v ) in l . parameters } ) links . append ( link ) return links | Return the parsed link header if it was set returning a list of the links as a dict . |
49,483 | def _decode ( self , value ) : if isinstance ( value , list ) : return [ self . _decode ( v ) for v in value ] elif isinstance ( value , dict ) : return { self . _decode ( k ) : self . _decode ( v ) for k , v in value . items ( ) } elif isinstance ( value , bytes ) : return value . decode ( 'utf-8' ) return value | Decode bytes to UTF - 8 strings as a singe value list or dict . |
49,484 | def _deserialize ( self ) : if not self . _responses or not self . _responses [ - 1 ] . body : return None if 'Content-Type' not in self . _responses [ - 1 ] . headers : return self . _responses [ - 1 ] . body try : content_type = algorithms . select_content_type ( [ headers . parse_content_type ( self . _responses [ - 1 ] . headers [ 'Content-Type' ] ) ] , AVAILABLE_CONTENT_TYPES ) except errors . NoMatch : return self . _responses [ - 1 ] . body if content_type [ 0 ] == CONTENT_TYPE_JSON : return self . _decode ( self . _json . loads ( self . _decode ( self . _responses [ - 1 ] . body ) ) ) elif content_type [ 0 ] == CONTENT_TYPE_MSGPACK : return self . _decode ( self . _msgpack . unpackb ( self . _responses [ - 1 ] . body ) ) | Try and deserialize a response body based upon the specified content type . |
49,485 | def _error_message ( self ) : body = self . _deserialize ( ) return body . get ( 'message' , body ) if isinstance ( body , dict ) else body | Try and extract the error message from a HTTP error response . |
49,486 | async def http_fetch ( self , url , method = 'GET' , request_headers = None , body = None , content_type = CONTENT_TYPE_MSGPACK , follow_redirects = False , max_redirects = MAX_REDIRECTS , connect_timeout = DEFAULT_CONNECT_TIMEOUT , request_timeout = DEFAULT_REQUEST_TIMEOUT , max_http_attempts = MAX_HTTP_RETRIES , auth_username = None , auth_password = None , user_agent = None , validate_cert = True , allow_nonstandard_methods = False , dont_retry = None ) : response = HTTPResponse ( ) request_headers = self . _http_req_apply_default_headers ( request_headers , content_type , body ) if body : body = self . _http_req_body_serialize ( body , request_headers [ 'Content-Type' ] ) if not dont_retry : dont_retry = set ( { } ) client = httpclient . AsyncHTTPClient ( ) if hasattr ( client , 'max_clients' ) and os . getenv ( 'HTTP_MAX_CLIENTS' ) : client . max_clients = int ( os . getenv ( 'HTTP_MAX_CLIENTS' ) ) for attempt in range ( 0 , max_http_attempts ) : LOGGER . debug ( '%s %s (Attempt %i of %i) %r' , method , url , attempt + 1 , max_http_attempts , request_headers ) if attempt > 0 : request_headers [ 'X-Retry-Attempt' ] = str ( attempt + 1 ) try : resp = await client . fetch ( url , method = method , headers = request_headers , body = body , auth_username = auth_username , auth_password = auth_password , connect_timeout = connect_timeout , request_timeout = request_timeout , user_agent = user_agent or self . _http_req_user_agent ( ) , follow_redirects = follow_redirects , max_redirects = max_redirects , raise_error = False , validate_cert = validate_cert , allow_nonstandard_methods = allow_nonstandard_methods ) except ( ConnectionError , CurlError , OSError , socket . gaierror ) as error : response . append_exception ( error ) LOGGER . warning ( 'HTTP Request Error for %s to %s attempt %i of %i: %s' , method , url , attempt + 1 , max_http_attempts , error ) continue response . append_response ( resp ) if response . ok : response . finish ( ) return response elif resp . code in dont_retry : break elif resp . code in { 423 , 429 } : await self . _http_resp_rate_limited ( resp ) elif resp . code < 500 : LOGGER . debug ( 'HTTP Response Error for %s to %s' 'attempt %i of %i (%s): %s' , method , url , resp . code , attempt + 1 , max_http_attempts , response . body ) response . finish ( ) return response LOGGER . warning ( 'HTTP Error for %s to %s, attempt %i of %i (%s): %s' , method , url , attempt + 1 , max_http_attempts , resp . code , response . body ) LOGGER . warning ( 'HTTP %s to %s failed after %i attempts' , method , url , max_http_attempts ) response . finish ( ) return response | Perform a HTTP request |
49,487 | def _http_req_apply_default_headers ( self , request_headers , content_type , body ) : if not request_headers : request_headers = { } request_headers . setdefault ( 'Accept' , ', ' . join ( [ str ( ct ) for ct in AVAILABLE_CONTENT_TYPES ] ) ) if body : request_headers . setdefault ( 'Content-Type' , str ( content_type ) or str ( CONTENT_TYPE_MSGPACK ) ) if hasattr ( self , 'correlation_id' ) : request_headers . setdefault ( 'Correlation-Id' , self . correlation_id ) elif hasattr ( self , 'request' ) and self . request . headers . get ( 'Correlation-Id' ) : request_headers . setdefault ( 'Correlation-Id' , self . request . headers [ 'Correlation-Id' ] ) return request_headers | Set default values for common HTTP request headers |
49,488 | def _http_req_body_serialize ( self , body , content_type ) : if not body or not isinstance ( body , ( dict , list ) ) : return body content_type = headers . parse_content_type ( content_type ) if content_type == CONTENT_TYPE_JSON : return self . __hcm_json . dumps ( body ) elif content_type == CONTENT_TYPE_MSGPACK : return self . __hcm_msgpack . packb ( body ) raise ValueError ( 'Unsupported Content Type' ) | Conditionally serialize the request body value if mime_type is set and it s serializable . |
49,489 | def _http_resp_rate_limited ( response ) : parsed = parse . urlparse ( response . request . url ) duration = int ( response . headers . get ( 'Retry-After' , 3 ) ) LOGGER . warning ( 'Rate Limited by %s, retrying in %i seconds' , parsed . netloc , duration ) return asyncio . sleep ( duration ) | Extract the Retry - After header value if the request was rate limited and return a future to sleep for the specified duration . |
49,490 | def acquires_lock ( expires , should_fail = True , should_wait = False , resource = None , prefix = DEFAULT_PREFIX ) : if isinstance ( expires , timedelta ) : expires = expires . total_seconds ( ) def decorator ( f ) : nonlocal resource if resource is None : resource = f . __name__ resource = '%s%s' % ( prefix , resource ) @ wraps ( f ) def wrapper ( * args , ** kwargs ) : lock = redis_lock . Lock ( _redis_conn , resource , expire = expires ) lock_acquired = False nonlocal should_wait is_blocking = should_wait should_execute_if_lock_fails = False if 'should_execute_if_lock_fails' in kwargs : should_execute_if_lock_fails = kwargs . pop ( "should_execute_if_lock_fails" ) if 'should_wait' in kwargs : is_blocking = kwargs . pop ( 'should_wait' ) if is_blocking : logger . debug ( 'Waiting for resource "%s"' , resource ) if not lock . acquire ( blocking = is_blocking ) : if should_fail : raise RuntimeError ( "Failed to acquire lock: %s" % resource ) logger . warning ( 'Failed to acquire lock: %s' , resource ) if not should_execute_if_lock_fails : return False else : lock_acquired = True try : return f ( * args , ** kwargs ) finally : try : if lock_acquired : lock . release ( ) except Exception as e : logger . exception ( 'Failed to release lock: %s' , str ( e ) , exc_info = False ) return wrapper return decorator | Decorator to ensure function only runs when it is unique holder of the resource . |
49,491 | def get_session ( ) : if os . environ . get ( "OS_IDENTITY_API_VERSION" ) == "3" : logging . info ( "Creating a v3 Keystone Session" ) auth = v3 . Password ( auth_url = os . environ [ "OS_AUTH_URL" ] , username = os . environ [ "OS_USERNAME" ] , password = os . environ [ "OS_PASSWORD" ] , project_id = os . environ [ "OS_PROJECT_ID" ] , user_domain_name = os . environ [ "OS_USER_DOMAIN_NAME" ] ) else : logging . info ( "Creating a v2 Keystone Session" ) auth = v2 . Password ( auth_url = os . environ [ "OS_AUTH_URL" ] , username = os . environ [ "OS_USERNAME" ] , password = os . environ [ "OS_PASSWORD" ] , tenant_id = os . environ [ "OS_TENANT_ID" ] ) return session . Session ( auth = auth ) | Build the session object . |
49,492 | def check_glance ( session , image_name ) : gclient = glance . Client ( GLANCE_VERSION , session = session , region_name = os . environ [ 'OS_REGION_NAME' ] ) images = gclient . images . list ( ) name_ids = [ { 'name' : i [ 'name' ] , 'id' : i [ 'id' ] } for i in images ] if image_name not in list ( map ( itemgetter ( 'name' ) , name_ids ) ) : logger . error ( "[glance]: Image %s is missing" % image_name ) raise Exception ( "Image %s is missing" % image_name ) else : image = [ i for i in name_ids if i [ 'name' ] == image_name ] image_id = image [ 0 ] [ 'id' ] logger . info ( "[glance]: Using image %s:%s" % ( image_name , image_id ) ) return image_id | Check that the base image is available . |
49,493 | def check_flavors ( session ) : nclient = nova . Client ( NOVA_VERSION , session = session , region_name = os . environ [ 'OS_REGION_NAME' ] ) flavors = nclient . flavors . list ( ) to_id = dict ( list ( map ( lambda n : [ n . name , n . id ] , flavors ) ) ) to_flavor = dict ( list ( map ( lambda n : [ n . id , n . name ] , flavors ) ) ) return to_id , to_flavor | Build the flavors mapping |
49,494 | def wait_for_servers ( session , servers ) : nclient = nova . Client ( NOVA_VERSION , session = session , region_name = os . environ [ 'OS_REGION_NAME' ] ) while True : deployed = [ ] undeployed = [ ] for server in servers : c = nclient . servers . get ( server . id ) if c . addresses != { } and c . status == 'ACTIVE' : deployed . append ( server ) if c . status == 'ERROR' : undeployed . append ( server ) logger . info ( "[nova]: Polling the Deployment" ) logger . info ( "[nova]: %s deployed servers" % len ( deployed ) ) logger . info ( "[nova]: %s undeployed servers" % len ( undeployed ) ) if len ( deployed ) + len ( undeployed ) >= len ( servers ) : break time . sleep ( 3 ) return deployed , undeployed | Wait for the servers to be ready . |
49,495 | def check_servers ( session , machines , extra_prefix = "" , force_deploy = False , key_name = None , image_id = None , flavors = 'm1.medium' , network = None , ext_net = None , scheduler_hints = None ) : scheduler_hints = scheduler_hints or [ ] nclient = nova . Client ( NOVA_VERSION , session = session , region_name = os . environ [ 'OS_REGION_NAME' ] ) servers = nclient . servers . list ( search_opts = { 'name' : '-' . join ( [ DEFAULT_PREFIX , extra_prefix ] ) } ) wanted = _get_total_wanted_machines ( machines ) if force_deploy : for server in servers : server . delete ( ) servers = [ ] if len ( servers ) == wanted : logger . info ( "[nova]: Reusing existing servers : %s" , servers ) return servers elif len ( servers ) > 0 and len ( servers ) < wanted : raise Exception ( "Only %s/%s servers found" % ( servers , wanted ) ) total = 0 for machine in machines : number = machine . number roles = machine . roles logger . info ( "[nova]: Starting %s servers" % number ) logger . info ( "[nova]: for roles %s" % roles ) logger . info ( "[nova]: with extra hints %s" % scheduler_hints ) for _ in range ( number ) : flavor = machine . flavour if isinstance ( flavors , str ) : flavor = flavors else : flavor_to_id , _ = flavors flavor = flavor_to_id [ flavor ] if scheduler_hints : _scheduler_hints = scheduler_hints [ total % len ( scheduler_hints ) ] else : _scheduler_hints = [ ] server = nclient . servers . create ( name = '-' . join ( [ DEFAULT_PREFIX , extra_prefix , str ( total ) ] ) , image = image_id , flavor = flavor , nics = [ { 'net-id' : network [ 'id' ] } ] , key_name = key_name , security_groups = [ SECGROUP_NAME ] , scheduler_hints = _scheduler_hints ) servers . append ( server ) total = total + 1 return servers | Checks the servers status for the deployment . |
49,496 | def is_in_current_deployment ( server , extra_prefix = "" ) : return re . match ( r"^%s" % '-' . join ( [ DEFAULT_PREFIX , extra_prefix ] ) , server . name ) is not None | Check if an existing server in the system take part to the current deployment |
49,497 | def allow_address_pairs ( session , network , subnet ) : nclient = neutron . Client ( '2' , session = session , region_name = os . environ [ 'OS_REGION_NAME' ] ) ports = nclient . list_ports ( ) ports_to_update = filter ( lambda p : p [ 'network_id' ] == network [ 'id' ] , ports [ 'ports' ] ) logger . info ( '[nova]: Allowing address pairs for ports %s' % list ( map ( lambda p : p [ 'fixed_ips' ] , ports_to_update ) ) ) for port in ports_to_update : try : nclient . update_port ( port [ 'id' ] , { 'port' : { 'allowed_address_pairs' : [ { 'ip_address' : subnet } ] } } ) except Exception : logger . warn ( "Can't update port %s" % port ) | Allow several interfaces to be added and accessed from the other machines . |
49,498 | def check_environment ( provider_conf ) : session = get_session ( ) image_id = check_glance ( session , provider_conf . image ) flavor_to_id , id_to_flavor = check_flavors ( session ) ext_net , network , subnet = check_network ( session , provider_conf . configure_network , provider_conf . network , subnet = provider_conf . subnet , dns_nameservers = provider_conf . dns_nameservers , allocation_pool = provider_conf . allocation_pool ) return { 'session' : session , 'image_id' : image_id , 'flavor_to_id' : flavor_to_id , 'id_to_flavor' : id_to_flavor , 'ext_net' : ext_net , 'network' : network , 'subnet' : subnet } | Check all ressources needed by Enos . |
49,499 | def collect_user_info ( endpoint_context , session , userinfo_claims = None ) : authn_req = session [ 'authn_req' ] if userinfo_claims is None : uic = scope2claims ( authn_req [ "scope" ] ) perm_set = session . get ( 'permission' ) if perm_set : uic = { key : uic [ key ] for key in uic if key in perm_set } uic = update_claims ( session , "userinfo" , uic ) if uic : userinfo_claims = Claims ( ** uic ) else : userinfo_claims = None logger . debug ( "userinfo_claim: %s" % sanitize ( userinfo_claims . to_dict ( ) ) ) logger . debug ( "Session info: %s" % sanitize ( session ) ) authn_event = session [ 'authn_event' ] if authn_event : uid = authn_event [ "uid" ] else : uid = session [ 'uid' ] info = endpoint_context . userinfo ( uid , authn_req [ 'client_id' ] , userinfo_claims ) if "sub" in userinfo_claims : if not claims_match ( session [ "sub" ] , userinfo_claims [ "sub" ] ) : raise FailedAuthentication ( "Unmatched sub claim" ) info [ "sub" ] = session [ "sub" ] try : logger . debug ( "user_info_response: {}" . format ( info ) ) except UnicodeEncodeError : try : logger . debug ( "user_info_response: {}" . format ( info . encode ( 'utf-8' ) ) ) except Exception : pass return info | Collect information about a user . This can happen in two cases either when constructing an IdToken or when returning user info through the UserInfo endpoint |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.