idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
57,400 | def get_reservation_resources ( session , reservation_id , * models ) : models_resources = [ ] reservation = session . GetReservationDetails ( reservation_id ) . ReservationDescription for resource in reservation . Resources : if resource . ResourceModelName in models : models_resources . append ( resource ) return models_resources | Get all resources of given models in reservation . |
57,401 | def get_current_container_id ( read_from = '/proc/self/cgroup' ) : if not os . path . exists ( read_from ) : return with open ( read_from , 'r' ) as cgroup : for line in cgroup : if re . match ( '.*/[0-9a-f]{64}$' , line . strip ( ) ) : return re . sub ( '.*/([0-9a-f]{64})$' , '\\1' , line . strip ( ) ) | Get the ID of the container the application is currently running in otherwise return None if not running in a container . |
57,402 | def read_configuration ( key , path = None , default = None , single_config = False , fallback_to_env = True ) : if path and os . path . exists ( path ) : with open ( path , 'r' ) as config_file : if single_config : return config_file . read ( ) for line in config_file : if line . startswith ( '%s=' % key ) : return line . split ( '=' , 1 ) [ 1 ] . strip ( ) if fallback_to_env and key in os . environ : return os . environ [ key ] return default | Read configuration from a file Docker config or secret or from the environment variables . |
57,403 | def CleanString ( s ) : punc = ( ' ' , '-' , '\'' , '.' , '&' , '&' , '+' , '@' ) pieces = [ ] for part in s . split ( ) : part = part . strip ( ) for p in punc : part = part . replace ( p , '_' ) part = part . strip ( '_' ) part = part . lower ( ) pieces . append ( part ) return '_' . join ( pieces ) | Cleans up string . |
57,404 | def DedupVcardFilenames ( vcard_dict ) : remove_keys = [ ] add_pairs = [ ] for k , v in vcard_dict . items ( ) : if not len ( v ) > 1 : continue for idx , vcard in enumerate ( v ) : fname , ext = os . path . splitext ( k ) fname = '{}-{}' . format ( fname , idx + 1 ) fname = fname + ext assert fname not in vcard_dict add_pairs . append ( ( fname , vcard ) ) remove_keys . append ( k ) for k , v in add_pairs : vcard_dict [ k ] . append ( v ) for k in remove_keys : vcard_dict . pop ( k ) return vcard_dict | Make sure every vCard in the dictionary has a unique filename . |
57,405 | def WriteVcard ( filename , vcard , fopen = codecs . open ) : if os . access ( filename , os . F_OK ) : logger . warning ( 'File exists at "{}", skipping.' . format ( filename ) ) return False try : with fopen ( filename , 'w' , encoding = 'utf-8' ) as f : logger . debug ( 'Writing {}:\n{}' . format ( filename , u ( vcard . serialize ( ) ) ) ) f . write ( u ( vcard . serialize ( ) ) ) except OSError : logger . error ( 'Error writing to file "{}", skipping.' . format ( filename ) ) return False return True | Writes a vCard into the given filename . |
57,406 | def dial ( self , target ) : if not target : return None , "target network must be specified with -t or --target" url = get_url ( self . config , target ) try : if url . startswith ( 'ws' ) : self . w3 = Web3 ( WebsocketProvider ( url ) ) elif url . startswith ( 'http' ) : self . w3 = Web3 ( HTTPProvider ( url ) ) elif url . endswith ( 'ipc' ) : if url == 'ipc' : url = None self . w3 = Web3 ( Web3 . IPCProvider ( url ) ) else : return None , "Invalid Provider URL: {}" . format ( url ) except Exception as e : return None , e return self . w3 , None | connects to a node |
57,407 | def load ( filepath ) : try : with open ( filepath , "r" ) as fh : source = fh . read ( ) except Exception as e : return None , e return source , None | loads a contract file |
57,408 | def compile ( source , ezo ) : try : compiled = compile_source ( source ) compiled_list = [ ] for name in compiled : c = Contract ( name , ezo ) interface = compiled [ name ] c . abi = interface [ 'abi' ] c . bin = interface [ 'bin' ] compiled_list . append ( c ) except Exception as e : return None , e return compiled_list , None | compiles the source code |
57,409 | def get_address ( name , hash , db , target = None ) : key = DB . pkey ( [ EZO . DEPLOYED , name , target , hash ] ) d , err = db . get ( key ) if err : return None , err if not d : return None , None return d [ 'address' ] . lower ( ) , None | fetches the contract address of deployment |
57,410 | def put ( contract_name , abi ) : if not Catalog . path : return None , "path to catalog must be set before saving to it" if not contract_name : return None , "contract name must be provided before saving" if not abi : return None , "contract ABI missing" abi_file = "{}/{}.abi" . format ( Catalog . path , contract_name ) try : with open ( abi_file , "w+" ) as file : file . write ( abi ) except Exception as e : return None , "Catalog.put error: {}" . format ( e ) return None , None | save the contract s ABI |
57,411 | def get_creators ( self , attribute = 'creatorName' ) : if 'creators' in self . xml : if isinstance ( self . xml [ 'creators' ] [ 'creator' ] , list ) : return [ c [ attribute ] for c in self . xml [ 'creators' ] [ 'creator' ] ] else : return self . xml [ 'creators' ] [ 'creator' ] [ attribute ] return None | Get DataCite creators . |
57,412 | def get_dates ( self ) : if 'dates' in self . xml : if isinstance ( self . xml [ 'dates' ] [ 'date' ] , dict ) : return self . xml [ 'dates' ] [ 'date' ] . values ( ) [ 0 ] return self . xml [ 'dates' ] [ 'date' ] return None | Get DataCite dates . |
57,413 | def get_description ( self , description_type = 'Abstract' ) : if 'descriptions' in self . xml : if isinstance ( self . xml [ 'descriptions' ] [ 'description' ] , list ) : for description in self . xml [ 'descriptions' ] [ 'description' ] : if description_type in description : return description [ description_type ] elif isinstance ( self . xml [ 'descriptions' ] [ 'description' ] , dict ) : description = self . xml [ 'descriptions' ] [ 'description' ] if description_type in description : return description [ description_type ] elif len ( description ) == 1 : return description . values ( ) [ 0 ] return None | Get DataCite description . |
57,414 | def itemgetter ( iterable , indexes ) : indexes = indexes if isinstance ( indexes , tuple ) else tuple ( indexes ) assert all ( isinstance ( i , int ) for i in indexes ) , 'indexes needs to be a tuple of ints' positive_indexes = [ i for i in indexes if i >= 0 ] negative_indexes = [ i for i in indexes if i < 0 ] out = { } if len ( negative_indexes ) : negative_index_buffer = deque ( maxlen = min ( indexes ) * - 1 ) for i , x in enumerate ( iterable ) : if i in positive_indexes : out [ i ] = x negative_index_buffer . append ( x ) out . update ( { ni : negative_index_buffer [ ni ] for ni in negative_indexes } ) else : out . update ( { i : x for i , x in enumerate ( iterable ) if i in positive_indexes } ) return itemgetter ( * indexes ) ( out ) | same functionality as operator . itemgetter except this one supports both positive and negative indexing of generators as well |
57,415 | def render ( self , context ) : user = self . _get_value ( self . user_key , context ) feature = self . _get_value ( self . feature , context ) if feature is None : return '' allowed = show_feature ( user , feature ) return self . nodelist . render ( context ) if allowed else '' | Handle the actual rendering . |
57,416 | def _get_value ( self , key , context ) : string_quotes = ( '"' , "'" ) if key [ 0 ] in string_quotes and key [ - 1 ] in string_quotes : return key [ 1 : - 1 ] if key in string . digits : return int ( key ) return context . get ( key , None ) | Works out whether key is a value or if it s a variable referencing a value in context and returns the correct value . |
57,417 | def client ( self , client_name , ** params ) : if client_name not in self . cfg . clients : raise OAuthException ( 'Unconfigured client: %s' % client_name ) if client_name not in ClientRegistry . clients : raise OAuthException ( 'Unsupported services: %s' % client_name ) params = dict ( self . cfg . clients [ client_name ] , ** params ) return ClientRegistry . clients [ client_name ] ( ** params ) | Initialize OAuth client from registry . |
57,418 | async def login ( self , client_name , request , redirect_uri = None , ** params ) : client = self . client ( client_name , logger = self . app . logger ) redirect_uri = redirect_uri or self . cfg . redirect_uri or '%s://%s%s' % ( request . scheme , request . host , request . path ) session = await self . app . ps . session ( request ) if isinstance ( client , OAuth1Client ) : oauth_verifier = request . query . get ( 'oauth_verifier' ) if not oauth_verifier : token , secret = await client . get_request_token ( oauth_callback = redirect_uri ) session [ 'oauth_token' ] = token session [ 'oauth_token_secret' ] = secret url = client . get_authorize_url ( ) raise muffin . HTTPFound ( url ) oauth_token = request . query . get ( 'oauth_token' ) if session [ 'oauth_token' ] != oauth_token : raise muffin . HTTPForbidden ( reason = 'Invalid token.' ) client . oauth_token = oauth_token client . oauth_token_secret = session . get ( 'oauth_token_secret' ) return client , await client . get_access_token ( oauth_verifier ) if isinstance ( client , OAuth2Client ) : code = request . query . get ( 'code' ) if not code : state = sha1 ( str ( random ( ) ) . encode ( 'ascii' ) ) . hexdigest ( ) session [ 'oauth_secret' ] = state url = client . get_authorize_url ( redirect_uri = redirect_uri , state = state , ** params ) raise muffin . HTTPFound ( url ) state = request . query . get ( 'state' ) oauth_secret = session . pop ( 'oauth_secret' , '' ) if oauth_secret != state : raise muffin . HTTPForbidden ( reason = 'Invalid token "%s".' % oauth_secret ) return client , await client . get_access_token ( code , redirect_uri = redirect_uri ) return client | Process login with OAuth . |
57,419 | def refresh ( self , client_name , refresh_token , ** params ) : client = self . client ( client_name , logger = self . app . logger ) return client . get_access_token ( refresh_token , grant_type = 'refresh_token' , ** params ) | Get refresh token . |
57,420 | def chain ( * args ) : has_iter = partial ( hasattr , name = '__iter__' ) if len ( args ) == 1 and hasattr ( args [ 0 ] , '__iter__' ) : args = args [ 0 ] for arg in args : if hasattr ( arg , '__iter__' ) : for i in arg : yield i else : yield arg | itertools . chain just better |
57,421 | def get_all_celcius_commands ( ) : p = subprocess . Popen ( [ "crontab" , "-l" ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) out , err = p . communicate ( ) return [ x for x in out . split ( '\n' ) if 'CJOBID' in x ] | Query cron for all celcius commands |
57,422 | def VcardFieldsEqual ( field1 , field2 ) : field1_vals = set ( [ str ( f . value ) for f in field1 ] ) field2_vals = set ( [ str ( f . value ) for f in field2 ] ) if field1_vals == field2_vals : return True else : return False | Handle comparing vCard fields where inputs are lists of components . |
57,423 | def VcardMergeListFields ( field1 , field2 ) : field_dict = { } for f in field1 + field2 : field_dict [ str ( f ) ] = f return list ( field_dict . values ( ) ) | Handle merging list fields that may include some overlap . |
57,424 | def SetVcardField ( new_vcard , field_name , values ) : for val in values : new_field = new_vcard . add ( field_name ) new_field . value = val . value if val . params : new_field . params = val . params return new_vcard | Set vCard field values and parameters on a new vCard . |
57,425 | def CopyVcardFields ( new_vcard , auth_vcard , field_names ) : for field in field_names : value_list = auth_vcard . contents . get ( field ) new_vcard = SetVcardField ( new_vcard , field , value_list ) return new_vcard | Copy vCard field values from an authoritative vCard into a new one . |
57,426 | def MergeVcards ( vcard1 , vcard2 ) : new_vcard = vobject . vCard ( ) vcard1_fields = set ( vcard1 . contents . keys ( ) ) vcard2_fields = set ( vcard2 . contents . keys ( ) ) mutual_fields = vcard1_fields . intersection ( vcard2_fields ) logger . debug ( 'Potentially conflicting fields: {}' . format ( mutual_fields ) ) for field in mutual_fields : val1 = vcard1 . contents . get ( field ) val2 = vcard2 . contents . get ( field ) new_values = [ ] if not VcardFieldsEqual ( val1 , val2 ) : if field not in MERGEABLE_FIELDS : context_str = GetVcardContextString ( vcard1 , vcard2 ) new_values . extend ( SelectFieldPrompt ( field , context_str , val1 , val2 ) ) else : new_values . extend ( VcardMergeListFields ( val1 , val2 ) ) else : new_values . extend ( val1 ) logger . debug ( 'Merged values for field {}: {}' . format ( field . upper ( ) , u ( str ( new_values ) ) ) ) new_vcard = SetVcardField ( new_vcard , field , new_values ) new_vcard = CopyVcardFields ( new_vcard , vcard1 , vcard1_fields - vcard2_fields ) new_vcard = CopyVcardFields ( new_vcard , vcard2 , vcard2_fields - vcard1_fields ) return new_vcard | Create a new vCard and populate it . |
57,427 | def SelectFieldPrompt ( field_name , context_str , * options ) : option_format_str = '[ {} ] "{}"' option_dict = { } print ( context_str ) print ( 'Please select one of the following options for field "{}"' . format ( field_name ) ) for cnt , option in enumerate ( options ) : option_dict [ '{}' . format ( cnt + 1 ) ] = option if not callable ( option ) : print ( option_format_str . format ( cnt + 1 , u ( str ( option ) ) ) ) else : print ( option_format_str . format ( cnt + 1 , option . __name__ ) ) choice = None while choice not in option_dict : choice = input ( 'option> ' ) . strip ( ) new_value = option_dict [ choice ] if callable ( new_value ) : return new_value ( ) else : return new_value | Prompts user to pick from provided options . |
57,428 | def make_fixture ( model_class , ** kwargs ) : all_fields = get_fields ( model_class ) fields_for_random_generation = map ( lambda x : getattr ( model_class , x ) , all_fields ) overrides = { } for kwarg , value in kwargs . items ( ) : if kwarg in all_fields : kwarg_field = getattr ( model_class , kwarg ) fields_for_random_generation . remove ( kwarg_field ) overrides . update ( { kwarg_field : value } ) random_values = get_random_values ( fields_for_random_generation ) values = dict ( overrides , ** random_values ) assert len ( all_fields ) == len ( values ) , ( "Mismatch in values, {} != {}" . format ( len ( all_fields ) , len ( values ) ) ) data = { k . name : v for k , v in values . items ( ) } return model_class ( ** data ) | Take the model_klass and generate a fixure for it |
57,429 | def get_fields ( model_class ) : return [ attr for attr , value in model_class . __dict__ . items ( ) if issubclass ( type ( value ) , ( mongo . base . BaseField , mongo . EmbeddedDocumentField ) ) ] | Pass in a mongo model class and extract all the attributes which are mongoengine fields |
57,430 | def _get_page_meta ( self , page ) : meta = self . _pages_meta . get ( page ) if not meta : src_file = os . path . join ( self . pages_dir , page ) with open ( src_file ) as f : _ , _ext = os . path . splitext ( src_file ) markup = _ext . replace ( "." , "" ) _meta , _ = frontmatter . parse ( f . read ( ) ) meta = self . default_page_meta . copy ( ) meta [ "meta" ] . update ( self . config . get ( "site.meta" , { } ) ) meta . update ( _meta ) dest_file , url = self . _get_dest_file_and_url ( page , meta ) meta [ "url" ] = url meta [ "filepath" ] = dest_file if meta . get ( "markup" ) is None : meta [ "markup" ] = markup self . _pages_meta [ page ] = meta return meta | Cache the page meta from the frontmatter and assign new keys The cache data will be used to build links or other properties |
57,431 | def _get_page_content ( self , page ) : src_file = os . path . join ( self . pages_dir , page ) with open ( src_file ) as f : _meta , content = frontmatter . parse ( f . read ( ) ) return content | Get the page content without the frontmatter |
57,432 | def _link_to ( self , page , text = None , title = None , _class = "" , id = "" , alt = "" , ** kwargs ) : anchor = "" if "#" in page : page , anchor = page . split ( "#" ) anchor = "#" + anchor meta = self . _get_page_meta ( page ) return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>" . format ( url = meta . get ( "url" , "/" ) + anchor , text = text or meta . get ( "title" ) or title , title = title or "" , _class = _class , id = id ) | Build the A HREF LINK To a page . |
57,433 | def _url_to ( self , page ) : anchor = "" if "#" in page : page , anchor = page . split ( "#" ) anchor = "#" + anchor meta = self . _get_page_meta ( page ) return meta . get ( "url" ) | Get the url of a page |
57,434 | def _get_dest_file_and_url ( self , filepath , page_meta = { } ) : filename = filepath . split ( "/" ) [ - 1 ] filepath_base = filepath . replace ( filename , "" ) . rstrip ( "/" ) slug = page_meta . get ( "slug" ) fname = slugify ( slug ) if slug else filename . replace ( ".html" , "" ) . replace ( ".md" , "" ) . replace ( ".jade" , "" ) if page_meta . get ( "pretty_url" ) is False : dest_file = os . path . join ( filepath_base , "%s.html" % fname ) else : dest_dir = filepath_base if filename not in [ "index.html" , "index.md" , "index.jade" ] : dest_dir = os . path . join ( filepath_base , fname ) dest_file = os . path . join ( dest_dir , "index.html" ) url = "/" + dest_file . replace ( "index.html" , "" ) return dest_file , url | Return tuple of the file destination and url |
57,435 | def build_static ( self ) : if not os . path . isdir ( self . build_static_dir ) : os . makedirs ( self . build_static_dir ) copy_tree ( self . static_dir , self . build_static_dir ) if self . webassets_cmd : self . webassets_cmd . build ( ) | Build static files |
57,436 | def build_pages ( self ) : for root , _ , files in os . walk ( self . pages_dir ) : base_dir = root . replace ( self . pages_dir , "" ) . lstrip ( "/" ) if not base_dir . startswith ( "_" ) : for f in files : src_file = os . path . join ( base_dir , f ) self . _build_page ( src_file ) | Iterate over the pages_dir and build the pages |
57,437 | def publish ( self , target = "S3" , sitename = None , purge_files = True ) : self . build ( ) endpoint = self . config . get ( "hosting.%s" % target ) if target . upper ( ) == "S3" : p = publisher . S3Website ( sitename = sitename or self . config . get ( "sitename" ) , aws_access_key_id = endpoint . get ( "aws_access_key_id" ) , aws_secret_access_key = endpoint . get ( "aws_secret_access_key" ) , region = endpoint . get ( "aws_region" ) ) if not p . website_exists : if p . create_website ( ) is True : time . sleep ( 10 ) p . create_www_website ( ) p . create_manifest_from_s3_files ( ) if purge_files : exclude_files = endpoint . get ( "purge_exclude_files" , [ ] ) p . purge_files ( exclude_files = exclude_files ) p . upload ( self . build_dir ) return p . website_endpoint_url | To publish programatically |
57,438 | def sort_matches ( matches ) : multipliers = { 'exact' : 10 ** 5 , 'fname' : 10 ** 4 , 'fuzzy' : 10 ** 2 , 'fuzzy_fragment' : 1 } matches = [ ( multipliers [ x . type ] * ( x . amount if x . amount else 1 ) , x ) for x in matches ] return [ x [ 1 ] for x in sorted ( matches , reverse = True ) ] | Sorts a list of matches best to worst |
57,439 | def all ( self ) : response = requests . get ( self . _url , ** self . _default_request_kwargs ) data = self . _get_response_data ( response ) return self . _concrete_instance_list ( data ) | Get all ObjectRocket instances the current client has access to . |
57,440 | def create ( self , name , plan , zone , service_type = 'mongodb' , instance_type = 'mongodb_sharded' , version = '2.4.6' ) : url = self . _url request_data = { 'name' : name , 'service' : service_type , 'plan' : plan , 'type' : instance_type , 'version' : version , 'zone' : zone } response = requests . post ( url , data = json . dumps ( request_data ) , ** self . _default_request_kwargs ) if response . status_code == 200 : logger . info ( 'Successfully created a new instance with: {}' . format ( request_data ) ) else : logger . info ( 'Failed to create instance with: {}' . format ( request_data ) ) logger . info ( 'Response: [{0}] {1}' . format ( response . status_code , response . content ) ) data = self . _get_response_data ( response ) return self . _concrete_instance ( data ) | Create an ObjectRocket instance . |
57,441 | def get ( self , instance_name ) : url = self . _url + instance_name + '/' response = requests . get ( url , ** self . _default_request_kwargs ) data = self . _get_response_data ( response ) return self . _concrete_instance ( data ) | Get an ObjectRocket instance by name . |
57,442 | def _concrete_instance ( self , instance_doc ) : if not isinstance ( instance_doc , dict ) : return None try : service = instance_doc [ 'service' ] cls = self . _service_class_map [ service ] return cls ( instance_document = instance_doc , instances = self ) except Exception as ex : logger . exception ( ex ) logger . error ( 'Instance construction failed. You probably need to upgrade to a more ' 'recent version of the client. Instance document which generated this ' 'warning: {}' . format ( instance_doc ) ) return None | Concretize an instance document . |
57,443 | def _concrete_instance_list ( self , instance_docs ) : if not instance_docs : return [ ] return list ( filter ( None , [ self . _concrete_instance ( instance_doc = doc ) for doc in instance_docs ] ) ) | Concretize a list of instance documents . |
57,444 | def get ( self , name ) : path = self . _get_cluster_storage_path ( name ) try : with open ( path , 'r' ) as storage : cluster = self . load ( storage ) for node in sum ( cluster . nodes . values ( ) , [ ] ) : if not hasattr ( node , 'ips' ) : log . debug ( "Monkey patching old version of `Node` class: %s" , node . name ) node . ips = [ node . ip_public , node . ip_private ] node . preferred_ip = None cluster . storage_file = path return cluster except IOError as ex : raise ClusterNotFound ( "Error accessing storage file %s: %s" % ( path , ex ) ) | Retrieves the cluster with the given name . |
57,445 | def save_or_update ( self , cluster ) : if not os . path . exists ( self . storage_path ) : os . makedirs ( self . storage_path ) path = self . _get_cluster_storage_path ( cluster . name ) cluster . storage_file = path with open ( path , 'wb' ) as storage : self . dump ( cluster , storage ) | Save or update the cluster to persistent state . |
57,446 | def _format ( color , style = '' ) : _color = QColor ( ) _color . setNamedColor ( color ) _format = QTextCharFormat ( ) _format . setForeground ( _color ) if 'bold' in style : _format . setFontWeight ( QFont . Bold ) if 'italic' in style : _format . setFontItalic ( True ) return _format | Return a QTextCharFormat with the given attributes . |
57,447 | def highlightBlock ( self , text ) : for expression , nth , format in self . rules : index = expression . indexIn ( text , 0 ) while index >= 0 : index = expression . pos ( nth ) length = len ( expression . cap ( nth ) ) self . setFormat ( index , length , format ) index = expression . indexIn ( text , index + length ) self . setCurrentBlockState ( 0 ) in_multiline = self . match_multiline ( text , * self . tri_single ) if not in_multiline : in_multiline = self . match_multiline ( text , * self . tri_double ) | Apply syntax highlighting to the given block of text . |
57,448 | def match_multiline ( self , text , delimiter , in_state , style ) : if self . previousBlockState ( ) == in_state : start = 0 add = 0 else : start = delimiter . indexIn ( text ) add = delimiter . matchedLength ( ) while start >= 0 : end = delimiter . indexIn ( text , start + add ) if end >= add : length = end - start + add + delimiter . matchedLength ( ) self . setCurrentBlockState ( 0 ) else : self . setCurrentBlockState ( in_state ) length = len ( text ) - start + add self . setFormat ( start , length , style ) start = delimiter . indexIn ( text , start + length ) if self . currentBlockState ( ) == in_state : return True else : return False | Do highlighting of multi - line strings . delimiter should be a QRegExp for triple - single - quotes or triple - double - quotes and in_state should be a unique integer to represent the corresponding state changes when inside those strings . Returns True if we re still inside a multi - line string when this function is finished . |
57,449 | def config ( name = 'SEARCH_URL' , default = 'simple://' ) : config = { } s = env ( name , default ) if s : config = parse_search_url ( s ) return config | Returns configured SEARCH dictionary from SEARCH_URL |
57,450 | def wash_urlargd ( form , content ) : result = { } for k , ( dst_type , default ) in content . items ( ) : try : value = form [ k ] except KeyError : result [ k ] = default continue src_type = type ( value ) if src_type in ( list , tuple ) : if dst_type is list : result [ k ] = [ x for x in value ] continue if dst_type is tuple : result [ k ] = tuple ( [ x for x in value ] ) continue value = value [ 0 ] if isinstance ( dst_type , types . FunctionType ) : result [ k ] = dst_type ( value ) continue if isinstance ( value , dst_type ) : result [ k ] = value continue if dst_type in ( int , float , long , bool ) : try : result [ k ] = dst_type ( value ) except : result [ k ] = default elif dst_type is tuple : result [ k ] = ( value , ) elif dst_type is list : result [ k ] = [ value ] else : raise ValueError ( 'cannot cast form value %s of type %r into type %r' % ( value , src_type , dst_type ) ) return result | Wash the complete form based on the specification in content . |
57,451 | def wash_html_id ( dirty ) : import re if not dirty [ 0 ] . isalpha ( ) : dirty = 'i' + dirty non_word = re . compile ( r'[^\w]+' ) return non_word . sub ( '' , dirty ) | Strip non - alphabetic or newline characters from a given string . |
57,452 | def quote ( self , text = None ) : text = text or re . sub ( r'\[quote=.+?\[/quote\]' , '' , self . text , flags = re . DOTALL ) . strip ( '\n' ) return f'[quote={self.author.id};{self.id}]{text}[/quote]' | Quote this post . |
57,453 | async def factbook ( self , root ) : return html . unescape ( html . unescape ( root . find ( 'FACTBOOK' ) . text ) ) | Region s World Factbook Entry . |
57,454 | async def delegate ( self , root ) : nation = root . find ( 'DELEGATE' ) . text if nation == '0' : return None return aionationstates . Nation ( nation ) | Regional World Assembly Delegate . |
57,455 | async def founder ( self , root ) : nation = root . find ( 'FOUNDER' ) . text if nation == '0' : return None return aionationstates . Nation ( nation ) | Regional Founder . Returned even if the nation has ceased to exist . |
57,456 | async def officers ( self , root ) : officers = sorted ( root . find ( 'OFFICERS' ) , key = lambda elem : int ( elem . find ( 'ORDER' ) . text ) ) return [ Officer ( elem ) for elem in officers ] | Regional Officers . Does not include the Founder or the Delegate unless they have additional titles as Officers . |
57,457 | async def messages ( self ) : oldest_id_seen = float ( 'inf' ) for offset in count ( step = 100 ) : posts_bunch = await self . _get_messages ( offset = offset ) for post in reversed ( posts_bunch ) : if post . id < oldest_id_seen : yield post oldest_id_seen = posts_bunch [ 0 ] . id if len ( posts_bunch ) < 100 : break | Iterate through RMB posts from newest to oldest . |
57,458 | def filter_empty ( values , default = None ) : if values is None : return default elif hasattr ( values , '__len__' ) and len ( values ) == 0 : return default elif hasattr ( values , '__iter__' ) and not isinstance ( values , _filtered_types ) : filtered = type ( values ) if isinstance ( values , _filter_types ) else list values = filtered ( v for v in values if not ( v is None or ( hasattr ( v , '__len__' ) and len ( v ) == 0 ) ) ) return default if len ( values ) == 0 else values return values | Eliminates None or empty items from lists tuples or sets passed in . If values is None or empty after filtering the default is returned . |
57,459 | def unit ( self , unit ) : allowed_values = [ "cm" , "inch" , "foot" ] if unit is not None and unit not in allowed_values : raise ValueError ( "Invalid value for `unit` ({0}), must be one of {1}" . format ( unit , allowed_values ) ) self . _unit = unit | Sets the unit of this Dimensions . |
57,460 | def map_names ( lang = "en" ) : cache_name = "map_names.%s.json" % lang data = get_cached ( "map_names.json" , cache_name , params = dict ( lang = lang ) ) return dict ( [ ( item [ "id" ] , item [ "name" ] ) for item in data ] ) | This resource returns an dictionary of the localized map names for the specified language . Only maps with events are listed - if you need a list of all maps use maps . json instead . |
57,461 | def maps ( map_id = None , lang = "en" ) : if map_id : cache_name = "maps.%s.%s.json" % ( map_id , lang ) params = { "map_id" : map_id , "lang" : lang } else : cache_name = "maps.%s.json" % lang params = { "lang" : lang } data = get_cached ( "maps.json" , cache_name , params = params ) . get ( "maps" ) return data . get ( str ( map_id ) ) if map_id else data | This resource returns details about maps in the game including details about floor and translation data on how to translate between world coordinates and map coordinates . |
57,462 | def map_floor ( continent_id , floor , lang = "en" ) : cache_name = "map_floor.%s-%s.%s.json" % ( continent_id , floor , lang ) params = { "continent_id" : continent_id , "floor" : floor , "lang" : lang } return get_cached ( "map_floor.json" , cache_name , params = params ) | This resource returns details about a map floor used to populate a world map . All coordinates are map coordinates . |
57,463 | def get_value_based_inclusive_interval ( cls , field , max_value = None ) : if field . max_value is None : field . max_value = max_value or MAX_LENGTH if field . min_value is None : field . min_value = 0 Interval = namedtuple ( 'interval' , [ 'start' , 'stop' ] ) return Interval ( start = field . min_value , stop = field . max_value ) | This is applicable to fields with max_value and min_value as validators . |
57,464 | def _async_recv ( self ) : logging . info ( "Receive loop started" ) recbuffer = b"" while not self . _stop_event . is_set ( ) : time . sleep ( 0.01 ) try : recbuffer = recbuffer + self . _socket . recv ( 1024 ) data = recbuffer . split ( b'\r\n' ) recbuffer = data . pop ( ) if data : for line in data : self . _process_data ( line . decode ( encoding = 'UTF-8' , errors = 'ignore' ) ) except BlockingIOError as e : pass logging . info ( "Receive loop stopped" ) | No raw bytes should escape from this all byte encoding and decoding should be handling inside this function |
57,465 | def main ( arguments = None ) : su = tools ( arguments = arguments , docString = __doc__ , logLevel = "WARNING" , options_first = False , projectName = "fundmentals" ) arguments , settings , log , dbConn = su . setup ( ) for arg , val in arguments . iteritems ( ) : if arg [ 0 ] == "-" : varname = arg . replace ( "-" , "" ) + "Flag" else : varname = arg . replace ( "<" , "" ) . replace ( ">" , "" ) if isinstance ( val , str ) or isinstance ( val , unicode ) : exec ( varname + " = '%s'" % ( val , ) ) else : exec ( varname + " = %s" % ( val , ) ) if arg == "--dbConn" : dbConn = val log . debug ( '%s = %s' % ( varname , val , ) ) if successFlag and successFlag . lower ( ) == "none" : successFlag = None if failureFlag and failureFlag . lower ( ) == "none" : failureFlag = None directory_script_runner ( log = log , pathToScriptDirectory = pathToDirectory , databaseName = databaseName , loginPath = loginPath , successRule = successFlag , failureRule = failureFlag ) return | The main function used when directory_script_runner . py is run as a single script from the cl or when installed as a cl command |
57,466 | def weight_unit ( self , weight_unit ) : allowed_values = [ "pound" , "kilogram" ] if weight_unit is not None and weight_unit not in allowed_values : raise ValueError ( "Invalid value for `weight_unit` ({0}), must be one of {1}" . format ( weight_unit , allowed_values ) ) self . _weight_unit = weight_unit | Sets the weight_unit of this MeasurementSettings . |
57,467 | def dimensions_unit ( self , dimensions_unit ) : allowed_values = [ "inch" , "cm" , "foot" , "meter" ] if dimensions_unit is not None and dimensions_unit not in allowed_values : raise ValueError ( "Invalid value for `dimensions_unit` ({0}), must be one of {1}" . format ( dimensions_unit , allowed_values ) ) self . _dimensions_unit = dimensions_unit | Sets the dimensions_unit of this MeasurementSettings . |
57,468 | def already_resolved ( self , pattern : QueryTriple ) -> bool : if self . sparql_locked or pattern == ( None , None , None ) : return True for resolved_node in self . resolved_nodes : if resolved_node != ( None , None , None ) and ( pattern [ 0 ] == resolved_node [ 0 ] or resolved_node [ 0 ] is None ) and ( pattern [ 1 ] == resolved_node [ 1 ] or resolved_node [ 1 ] is None ) and ( pattern [ 2 ] == resolved_node [ 2 ] or resolved_node [ 2 ] is None ) : return True return False | Determine whether pattern has already been loaded into the cache . |
57,469 | def fix_base ( fix_environ ) : def _is_android ( ) : import os vm_path = os . sep + "system" + os . sep + "bin" + os . sep + "dalvikvm" if os . path . exists ( vm_path ) or os . path . exists ( os . sep + "system" + vm_path ) : return True try : import android del android return True except ImportError : pass return False def _fix_android_environ ( ) : import os if "LD_LIBRARY_PATH" not in os . environ : os . environ [ "LD_LIBRARY_PATH" ] = "" lib_path = os . pathsep + "/system/lib" + os . pathsep + "/vendor/lib" if sys . python_bits == 64 : lib_path = os . pathsep + "/system/lib64" + os . pathsep + "/vendor/lib64" + lib_path os . environ [ "LD_LIBRARY_PATH" ] += lib_path if sys . platform . startswith ( "linux" ) and sys . platform != "linux-android" : if _is_android ( ) : sys . platform = "linux-android" elif "-" not in sys . platform : sys . platform = "linux" sys . platform_codename = sys . platform if sys . platform_codename == "win32" : sys . platform_codename = "win" elif sys . platform_codename == "linux-android" : sys . platform_codename = "android" if 'maxsize' in sys . __dict__ : if sys . maxsize > 2 ** 32 : sys . python_bits = 64 else : sys . python_bits = 32 else : import struct sys . python_bits = 8 * struct . calcsize ( "P" ) if sys . python_bits == 32 : sys . maxsize = 2147483647 else : sys . maxsize = int ( "9223372036854775807" ) if fix_environ and sys . platform == "linux-android" : _fix_android_environ ( ) | Activate the base compatibility . |
57,470 | def fix_subprocess ( override_debug = False , override_exception = False ) : import subprocess if subprocess . __dict__ . get ( "SubprocessError" ) is None : subprocess . SubprocessError = _Internal . SubprocessError if _InternalReferences . UsedCalledProcessError is None : if "CalledProcessError" in subprocess . __dict__ : _subprocess_called_process_error ( True , subprocess ) else : _subprocess_called_process_error ( False , subprocess ) subprocess . CalledProcessError = _InternalReferences . UsedCalledProcessError def _check_output ( * args , ** kwargs ) : if "stdout" in kwargs : raise ValueError ( "stdout argument not allowed, " "it will be overridden." ) process = subprocess . Popen ( stdout = subprocess . PIPE , * args , ** kwargs ) stdout_data , __ = process . communicate ( ) ret_code = process . poll ( ) if ret_code is None : raise RuntimeWarning ( "The process is not yet terminated." ) if ret_code : cmd = kwargs . get ( "args" ) if cmd is None : cmd = args [ 0 ] raise _InternalReferences . UsedCalledProcessError ( returncode = ret_code , cmd = cmd , output = stdout_data ) return stdout_data try : subprocess . check_output except AttributeError : subprocess . check_output = _check_output | Activate the subprocess compatibility . |
57,471 | def fix_all ( override_debug = False , override_all = False ) : fix_base ( True ) fix_builtins ( override_debug ) fix_subprocess ( override_debug , override_all ) return True | Activate the full compatibility . |
57,472 | def smart_scrub ( df , col_name , error_rate = 0 ) : scrubf = smart_scrubf ( df , col_name , error_rate ) scrubb = smart_scrubb ( df , col_name , error_rate ) return ( scrubf , scrubb ) | Scrubs from the front and back of an object column in a DataFrame until the scrub would semantically alter the contents of the column . If only a subset of the elements in the column are scrubbed then a boolean array indicating which elements have been scrubbed is appended to the dataframe . Returns a tuple of the strings removed from the front and back of the elements df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number default 0 The maximum amount of values this function can ignore while scrubbing expressed as a fraction of the total amount of rows in the dataframe . |
57,473 | def smart_scrubf ( df , col_name , error_rate = 0 ) : scrubbed = "" while True : valcounts = df [ col_name ] . str [ : len ( scrubbed ) + 1 ] . value_counts ( ) if not len ( valcounts ) : break if not valcounts [ 0 ] >= ( 1 - error_rate ) * _utils . rows ( df ) : break scrubbed = valcounts . index [ 0 ] if scrubbed == '' : return None which = df [ col_name ] . str . startswith ( scrubbed ) _basics . col_scrubf ( df , col_name , which , len ( scrubbed ) , True ) if not which . all ( ) : new_col_name = _basics . colname_gen ( df , "{}_sf-{}" . format ( col_name , scrubbed ) ) df [ new_col_name ] = which return scrubbed | Scrubs from the front of an object column in a DataFrame until the scrub would semantically alter the contents of the column . If only a subset of the elements in the column are scrubbed then a boolean array indicating which elements have been scrubbed is appended to the dataframe . Returns the string that was scrubbed df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number default 0 The maximum amount of values this function can ignore while scrubbing expressed as a fraction of the total amount of rows in the dataframe . |
57,474 | def smart_scrubb ( df , col_name , error_rate = 0 ) : scrubbed = "" while True : valcounts = df [ col_name ] . str [ - len ( scrubbed ) - 1 : ] . value_counts ( ) if not len ( valcounts ) : break if not valcounts [ 0 ] >= ( 1 - error_rate ) * _utils . rows ( df ) : break scrubbed = valcounts . index [ 0 ] if scrubbed == '' : return None which = df [ col_name ] . str . endswith ( scrubbed ) _basics . col_scrubb ( df , col_name , which , len ( scrubbed ) , True ) if not which . all ( ) : new_col_name = _basics . colname_gen ( df , "{}_sb-{}" . format ( col_name , scrubbed ) ) df [ new_col_name ] = which return scrubbed | Scrubs from the back of an object column in a DataFrame until the scrub would semantically alter the contents of the column . If only a subset of the elements in the column are scrubbed then a boolean array indicating which elements have been scrubbed is appended to the dataframe . Returns the string that was scrubbed . df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number default 0 The maximum amount of values this function can ignore while scrubbing expressed as a fraction of the total amount of rows in the dataframe . |
57,475 | def find_all_for_order ( cls , order_id , ** kwargs ) : kwargs [ '_return_http_data_only' ] = True if kwargs . get ( 'async' ) : return cls . _find_all_for_order_with_http_info ( order_id , ** kwargs ) else : ( data ) = cls . _find_all_for_order_with_http_info ( order_id , ** kwargs ) return data | Find shipping methods for order . |
57,476 | def extract ( cls , obj ) : span = cls . extract_span ( obj ) if span : return span . context | Extract span context from the given object |
57,477 | def fix_additional_fields ( data ) : result = dict ( ) for key , value in data . items ( ) : if isinstance ( value , dict ) : result . update ( KserSpan . to_flat_dict ( key , value ) ) else : result [ key ] = value return result | description of fix_additional_fields |
57,478 | def filter_keys ( cls , data ) : keys = list ( data . keys ( ) ) for pattern in cls . EXCLUDE_PATTERNS : for key in keys : if re . match ( pattern , key ) : keys . remove ( key ) return dict ( filter ( lambda x : x [ 0 ] in keys , data . items ( ) ) ) | Filter GELF record keys using exclude_patterns |
57,479 | def write_config ( self ) : json . dump ( self . config , open ( CONFIG_FILE , 'w' ) , indent = 4 , separators = ( ',' , ': ' ) ) return True | Write the configuration to a local file . |
57,480 | def load_config ( self , ** kwargs ) : virgin_config = False if not os . path . exists ( CONFIG_PATH ) : virgin_config = True os . makedirs ( CONFIG_PATH ) if not os . path . exists ( CONFIG_FILE ) : virgin_config = True if not virgin_config : self . config = json . load ( open ( CONFIG_FILE ) ) else : self . logger . info ( '[!] Processing whitelists, this may take a few minutes...' ) process_whitelists ( ) if kwargs : self . config . update ( kwargs ) if virgin_config or kwargs : self . write_config ( ) if 'api_key' not in self . config : sys . stderr . write ( 'configuration missing API key\n' ) if 'email' not in self . config : sys . stderr . write ( 'configuration missing email\n' ) if not ( 'api_key' in self . config and 'email' in self . config ) : sys . stderr . write ( 'Errors have been reported. Run blockade-cfg ' 'to fix these warnings.\n' ) try : last_update = datetime . strptime ( self . config [ 'whitelist_date' ] , "%Y-%m-%d" ) current = datetime . now ( ) delta = ( current - last_update ) . days if delta > 14 : self . logger . info ( '[!] Refreshing whitelists, this may take a few minutes...' ) process_whitelists ( ) self . config [ 'whitelist_date' ] = datetime . now ( ) . strftime ( "%Y-%m-%d" ) self . write_config ( ) except Exception as e : self . logger . error ( str ( e ) ) self . logger . info ( '[!] Processing whitelists, this may take a few minutes...' ) process_whitelists ( ) self . config [ 'whitelist_date' ] = datetime . now ( ) . strftime ( "%Y-%m-%d" ) self . write_config ( ) return True | Load the configuration for the user or seed it with defaults . |
57,481 | def chi2comb_cdf ( q , chi2s , gcoef , lim = 1000 , atol = 1e-4 ) : r int_type = "i" if array ( int_type , [ 0 ] ) . itemsize != ffi . sizeof ( "int" ) : int_type = "l" if array ( int_type , [ 0 ] ) . itemsize != ffi . sizeof ( "int" ) : raise RuntimeError ( "Could not infer a proper integer representation." ) if array ( "d" , [ 0.0 ] ) . itemsize != ffi . sizeof ( "double" ) : raise RuntimeError ( "Could not infer a proper double representation." ) q = float ( q ) c_chi2s = ffi . new ( "struct chi2comb_chisquareds *" ) c_info = ffi . new ( "struct chi2comb_info *" ) ncents = array ( "d" , [ float ( i . ncent ) for i in chi2s ] ) coefs = array ( "d" , [ float ( i . coef ) for i in chi2s ] ) dofs = array ( int_type , [ int ( i . dof ) for i in chi2s ] ) c_chi2s . ncents = ffi . cast ( "double *" , ncents . buffer_info ( ) [ 0 ] ) c_chi2s . coefs = ffi . cast ( "double *" , coefs . buffer_info ( ) [ 0 ] ) c_chi2s . dofs = ffi . cast ( "int *" , dofs . buffer_info ( ) [ 0 ] ) c_chi2s . n = len ( chi2s ) result = ffi . new ( "double *" ) errno = c_chi2comb_cdf ( q , c_chi2s , gcoef , lim , atol , c_info , result ) info = Info ( ) methods = [ "emag" , "niterms" , "nints" , "intv" , "truc" , "sd" , "ncycles" ] for i in methods : setattr ( info , i , getattr ( c_info , i ) ) return ( result [ 0 ] , errno , info ) | r Function distribution of combination of chi - squared distributions . |
57,482 | def get_feature_model ( ) : try : return apps . get_model ( flipper_settings . FEATURE_FLIPPER_MODEL ) except ValueError : raise ImproperlyConfigured ( "FEATURE_FLIPPER_MODEL must be of the form 'app_label.model_name'" ) except LookupError : raise ImproperlyConfigured ( "FEATURE_FLIPPER_MODEL refers to model '{}' that has not been" " installed" . format ( flipper_settings . FEATURE_FLIPPER_MODEL ) ) | Return the FeatureFlipper model defined in settings . py |
57,483 | def show_feature ( self , user , feature ) : user_filter = { self . model . USER_FEATURE_FIELD : user , } return self . get_feature ( feature ) . filter ( models . Q ( ** user_filter ) | models . Q ( everyone = True ) ) . exists ( ) | Return True or False for the given feature . |
57,484 | def calc_z ( bands , filling , interaction , hund_cu , name ) : while True : try : data = np . load ( name + '.npz' ) break except IOError : dopout = [ ] for dop in filling : slsp = Spinon ( slaves = 2 * bands , orbitals = bands , hopping = [ 0.5 ] * 2 * bands , populations = [ dop ] * 2 * bands ) dopout . append ( solve_loop ( slsp , interaction , hund_cu ) [ 0 ] [ 0 ] ) np . savez ( name , zeta = dopout , u_int = interaction , doping = filling , hund = hund_cu ) return data | Calculates the quasiparticle weight of degenerate system of N - bands at a given filling within an interaction range and saves the file |
57,485 | def label_saves ( name ) : plt . legend ( loc = 0 ) plt . ylim ( [ 0 , 1.025 ] ) plt . xlabel ( '$U/D$' , fontsize = 20 ) plt . ylabel ( '$Z$' , fontsize = 20 ) plt . savefig ( name , dpi = 300 , format = 'png' , transparent = False , bbox_inches = 'tight' , pad_inches = 0.05 ) | Labels plots and saves file |
57,486 | def plot_curves_z ( data , name , title = None ) : plt . figure ( ) for zet , c in zip ( data [ 'zeta' ] , data [ 'doping' ] ) : plt . plot ( data [ 'u_int' ] , zet [ : , 0 ] , label = '$n={}$' . format ( str ( c ) ) ) if title != None : plt . title ( title ) label_saves ( name + '.png' ) | Generates a simple plot of the quasiparticle weight decay curves given data object with doping setup |
57,487 | def pick_flat_z ( data ) : zmes = [ ] for i in data [ 'zeta' ] : zmes . append ( i [ : , 0 ] ) return np . asarray ( zmes ) | Generate a 2D array of the quasiparticle weight by only selecting the first particle data |
57,488 | def imshow_z ( data , name ) : zmes = pick_flat_z ( data ) plt . figure ( ) plt . imshow ( zmes . T , origin = 'lower' , extent = [ data [ 'doping' ] . min ( ) , data [ 'doping' ] . max ( ) , 0 , data [ 'u_int' ] . max ( ) ] , aspect = .16 ) plt . colorbar ( ) plt . xlabel ( '$n$' , fontsize = 20 ) plt . ylabel ( '$U/D$' , fontsize = 20 ) plt . savefig ( name + '_imshow.png' , dpi = 300 , format = 'png' , transparent = False , bbox_inches = 'tight' , pad_inches = 0.05 ) | 2D color plot of the quasiparticle weight as a function of interaction and doping |
57,489 | def plot_mean_field_conv ( N = 1 , n = 0.5 , Uspan = np . arange ( 0 , 3.6 , 0.5 ) ) : sl = Spinon ( slaves = 2 * N , orbitals = N , avg_particles = 2 * n , hopping = [ 0.5 ] * 2 * N , orbital_e = [ 0 ] * 2 * N ) hlog = solve_loop ( sl , Uspan , [ 0. ] ) [ 1 ] f , ( ax1 , ax2 ) = plt . subplots ( 2 , sharex = True ) for field in hlog : field = np . asarray ( field ) ax1 . semilogy ( abs ( field [ 1 : ] - field [ : - 1 ] ) ) ax2 . plot ( field ) plt . title ( 'Convergence of selfconsintent mean field' ) ax1 . set_ylabel ( '$\\Delta h$' ) ax2 . set_ylabel ( 'mean field $h$' ) plt . xlabel ( 'iterations' ) return hlog | Generates the plot on the convergenge of the mean field in single site spin hamiltonian under with N degenerate half - filled orbitals |
57,490 | def to_dict ( self ) : result = { } for attr , _ in six . iteritems ( self . swagger_types ) : value = getattr ( self , attr ) if isinstance ( value , list ) : result [ attr ] = list ( map ( lambda x : x . to_dict ( ) if hasattr ( x , "to_dict" ) else x , value ) ) elif hasattr ( value , "to_dict" ) : result [ attr ] = value . to_dict ( ) elif isinstance ( value , dict ) : result [ attr ] = dict ( map ( lambda item : ( item [ 0 ] , item [ 1 ] . to_dict ( ) ) if hasattr ( item [ 1 ] , "to_dict" ) else item , value . items ( ) ) ) else : result [ attr ] = value if issubclass ( Payment , dict ) : for key , value in self . items ( ) : result [ key ] = value return result | Returns the model properties as a dict |
57,491 | def from_config ( cls ) : config = Config ( ) client = cls ( email = config . get ( 'email' ) , api_key = config . get ( 'api_key' ) , server = config . get ( 'api_server' ) , http_proxy = config . get ( 'http_proxy' ) , https_proxy = config . get ( 'https_proxy' ) , ) return client | Method to return back a loaded instance . |
57,492 | def set_debug ( self , status ) : if status : self . logger . setLevel ( 'DEBUG' ) else : self . logger . setLevel ( 'INFO' ) | Control the logging state . |
57,493 | def _endpoint ( self , endpoint , action , * url_args ) : args = ( self . api_base , endpoint , action ) if action == '' : args = ( self . api_base , endpoint ) api_url = "/" . join ( args ) if url_args : if len ( url_args ) == 1 : api_url += "/" + url_args [ 0 ] else : api_url += "/" . join ( url_args ) return api_url | Return the URL for the action . |
57,494 | def _json ( self , response ) : if response . status_code == 204 : return None try : return response . json ( ) except ValueError as e : raise ValueError ( 'Exception: %s\n' 'request: %s, response code: %s, response: %s' % ( str ( e ) , response . request . url , response . status_code , response . content , ) ) | JSON response from server . |
57,495 | def _get ( self , endpoint , action , * url_args , ** url_params ) : api_url = self . _endpoint ( endpoint , action , * url_args ) kwargs = { 'headers' : self . headers , 'params' : url_params , 'timeout' : Client . TIMEOUT , 'verify' : self . verify } if self . proxies : kwargs [ 'proxies' ] = self . proxies self . logger . debug ( "Requesting: %s, %s" % ( api_url , str ( kwargs ) ) ) response = requests . get ( api_url , ** kwargs ) return self . _json ( response ) | Request API Endpoint - for GET methods . |
57,496 | def _send_data ( self , method , endpoint , action , data , * url_args , ** url_params ) : api_url = self . _endpoint ( endpoint , action , * url_args ) data . update ( { 'email' : self . email , 'api_key' : self . api_key } ) data = json . dumps ( data ) kwargs = { 'headers' : self . headers , 'params' : url_params , 'verify' : self . verify , 'data' : data } if self . proxies : kwargs [ 'proxies' ] = self . proxies self . logger . debug ( "Requesting: %s %s, %s" % ( method , api_url , str ( kwargs ) ) ) response = requests . request ( method , api_url , ** kwargs ) self . logger . debug ( "Response: %d, %s" % ( response . status_code , response . content ) ) return self . _json ( response ) | Submit to API Endpoint - for DELETE PUT POST methods . |
57,497 | def process_events ( events , source_ip ) : s3 = boto3 . resource ( 's3' ) table = boto3 . resource ( "dynamodb" ) . Table ( os . environ [ 'database' ] ) with table . batch_writer ( ) as batch : for idx , event in enumerate ( events ) : event = convert_keys_to_string ( event ) event [ 'sourceIp' ] = source_ip event [ 'event' ] = hashlib . sha256 ( str ( event ) ) . hexdigest ( ) metadata = event [ 'metadata' ] timestamp = str ( event [ 'metadata' ] [ 'timeStamp' ] ) event [ 'metadata' ] [ 'timeStamp' ] = timestamp kwargs = { 'match' : event [ 'indicatorMatch' ] , 'type' : metadata [ 'type' ] , 'method' : metadata [ 'method' ] . lower ( ) , 'time' : event [ 'analysisTime' ] , 'ip' : source_ip } file_struct = '{match}_{type}_{method}_{ip}_{time}.json' file_name = file_struct . format ( ** kwargs ) key_path = '/tmp/%s' % file_name output = json . dumps ( event , indent = 4 , sort_keys = True ) open ( key_path , "w" ) . write ( output ) data = open ( key_path , 'rb' ) s3 . Bucket ( os . environ [ 's3_bucket' ] ) . put_object ( Key = file_name , Body = data ) logger . info ( "EVENT: %s" % str ( event ) ) batch . put_item ( Item = event ) return True | Process all the events for logging and S3 . |
57,498 | def cost_type ( self , cost_type ) : allowed_values = [ "orderSubtotal" , "weight" ] if cost_type is not None and cost_type not in allowed_values : raise ValueError ( "Invalid value for `cost_type` ({0}), must be one of {1}" . format ( cost_type , allowed_values ) ) self . _cost_type = cost_type | Sets the cost_type of this TableRateShipping . |
57,499 | def get_source ( path ) : for line in read ( path ) : if 'import' in line or len ( line . strip ( ) ) == 0 or line . startswith ( '#' ) : continue if '__name__' in line and '__main__' in line : break else : yield line | yields all non - empty lines in a file |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.