idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
58,100 | def execute ( self , mold_id , data , wrapper_tag = 'div' ) : template = self . load_mold ( mold_id ) kwargs = { } kwargs . update ( data ) kwargs [ '_nunja_data_' ] = 'data-nunja="%s"' % mold_id kwargs [ '_template_' ] = template kwargs [ '_wrapper_tag_' ] = wrapper_tag return self . _core_template_ . render ( ** kwargs ) | Execute a mold mold_id by rendering through env . |
58,101 | def render ( self , mold_id , data ) : template = self . load_mold ( mold_id ) return template . render ( ** data ) | Render a mold mold_id . No wrappers are applied as only the default template defined for the mold is rendered . |
58,102 | def _get_model_table ( self , part ) : rows = self . parser . find ( part ) . find_children ( 'tr' ) . list_results ( ) table = [ ] for row in rows : table . append ( self . _get_model_row ( self . parser . find ( row ) . find_children ( 'td,th' ) . list_results ( ) ) ) return self . _get_valid_model_table ( table ) | Returns a list that represents the table . |
58,103 | def _get_valid_model_table ( self , ros ) : new_table = [ ] if bool ( ros ) : length_table = len ( ros ) for row_index in range ( 0 , length_table ) : cells_added = 0 original_row = [ ] + ros [ row_index ] if len ( new_table ) <= row_index : new_table . append ( [ ] ) length_row = len ( original_row ) for cell_index in range ( 0 , length_row ) : cell = original_row [ cell_index ] new_cell_index = cell_index + cells_added new_row = new_table [ row_index ] while True : if len ( new_row ) <= new_cell_index : new_row . append ( None ) break elif new_row [ new_cell_index ] is None : break else : cells_added += 1 new_cell_index = cell_index + cells_added new_row [ new_cell_index ] = cell if cell . has_attribute ( 'rowspan' ) : rowspan = int ( cell . get_attribute ( 'rowspan' ) ) if rowspan > 1 : for rowspan_index in range ( 1 , rowspan ) : new_row_index = row_index + rowspan_index if len ( new_table ) <= new_row_index : new_table . append ( [ ] ) while ( len ( new_table [ new_row_index ] ) < new_cell_index ) : new_table [ new_row_index ] . append ( None ) new_table [ new_row_index ] . append ( cell ) return new_table | Returns a list that represents the table with the rowspans . |
58,104 | def _get_model_row ( self , row ) : new_row = [ ] + row size = len ( row ) for i in range ( 0 , size ) : cell = row [ i ] if cell . has_attribute ( 'colspan' ) : colspan = int ( cell . get_attribute ( 'colspan' ) ) if colspan > 1 : for j in range ( 1 , colspan ) : new_row . insert ( i + j , cell ) return new_row | Returns a list that represents the line of table with the colspans . |
58,105 | def _validate_header ( self , hed ) : if not bool ( hed ) : return False length = - 1 for row in hed : if not bool ( row ) : return False elif length == - 1 : length = len ( row ) elif len ( row ) != length : return False return True | Validate the list that represents the table header . |
58,106 | def _get_cells_headers_ids ( self , hed , index ) : ids = [ ] for row in hed : if row [ index ] . get_tag_name ( ) == 'TH' : ids . append ( row [ index ] . get_attribute ( 'id' ) ) return ids | Returns a list with ids of rows of same column . |
58,107 | def _associate_data_cells_with_header_cells_of_row ( self , element ) : table = self . _get_model_table ( element ) for row in table : headers_ids = [ ] for cell in row : if cell . get_tag_name ( ) == 'TH' : self . id_generator . generate_id ( cell ) headers_ids . append ( cell . get_attribute ( 'id' ) ) cell . set_attribute ( 'scope' , 'row' ) if bool ( headers_ids ) : for cell in row : if cell . get_tag_name ( ) == 'TD' : headers = cell . get_attribute ( 'headers' ) for header_id in headers_ids : headers = CommonFunctions . increase_in_list ( headers , header_id ) cell . set_attribute ( 'headers' , headers ) | Associate the data cell with header cell of row . |
58,108 | def _prepare_header_cells ( self , table_header ) : cells = self . parser . find ( table_header ) . find_children ( 'tr' ) . find_children ( 'th' ) . list_results ( ) for cell in cells : self . id_generator . generate_id ( cell ) cell . set_attribute ( 'scope' , 'col' ) | Set the scope of header cells of table header . |
58,109 | def encrypt ( clear_text ) -> str : if not isinstance ( clear_text , bytes ) : clear_text = str . encode ( clear_text ) cipher = Fernet ( current_app . config [ 'KEY' ] ) return cipher . encrypt ( clear_text ) . decode ( "utf-8" ) | Use config . json key to encrypt |
58,110 | def decrypt ( crypt_text ) -> str : cipher = Fernet ( current_app . config [ 'KEY' ] ) if not isinstance ( crypt_text , bytes ) : crypt_text = str . encode ( crypt_text ) return cipher . decrypt ( crypt_text ) . decode ( "utf-8" ) | Use config . json key to decrypt |
58,111 | def get_volume ( self , id ) : if exists ( id ) : with open ( id ) as file : size = os . lseek ( file . fileno ( ) , 0 , os . SEEK_END ) return { 'path' : id , 'size' : size } return self . volume . get ( id ) | return volume information if the argument is an id or a path |
58,112 | def randomize ( self , device = None , percent = 100 , silent = False ) : volume = self . get_volume ( device ) blocks = int ( volume [ 'size' ] / BLOCK_SIZE ) num_writes = int ( blocks * percent * 0.01 ) offsets = sorted ( random . sample ( range ( blocks ) , num_writes ) ) total = 0 if not silent : print ( 'Writing urandom to %s bytes in %s' % ( volume [ 'size' ] , volume [ 'path' ] ) ) with open ( volume [ 'path' ] , 'w' ) as file : for offset in offsets : if not silent : self . dot ( ) file . seek ( offset * BLOCK_SIZE ) data = os . urandom ( 32768 ) * 128 total += len ( data ) file . write ( data ) print ( "\nWrote: %s" % total ) | Writes random data to the beginning of each 4MB block on a block device this is useful when performance testing the backup process |
58,113 | def backup ( self , id = None , src = None , timestamp = None ) : logging . basicConfig ( ) log = logger . get_logger ( ) log . logger . setLevel ( logging . DEBUG ) conf = LunrConfig . from_storage_conf ( ) timestamp = timestamp or time ( ) volume = VolumeHelper ( conf ) backup = BackupHelper ( conf ) try : snapshot = volume . create_snapshot ( src , id , timestamp ) print ( "Created snap-shot: " , pprint ( snapshot ) ) with self . timeit ( snapshot [ 'size' ] ) : print ( "Starting Backup" ) backup . save ( snapshot , id ) finally : if 'snapshot' in locals ( ) : self . _remove_volume ( snapshot [ 'path' ] ) | This runs a backup job outside of the storage api which is useful for performance testing backups |
58,114 | def get_ip ( request ) : if getsetting ( 'LOCAL_GEOLOCATION_IP' ) : return getsetting ( 'LOCAL_GEOLOCATION_IP' ) forwarded_for = request . META . get ( 'HTTP_X_FORWARDED_FOR' ) if not forwarded_for : return UNKNOWN_IP for ip in forwarded_for . split ( ',' ) : ip = ip . strip ( ) if not ip . startswith ( '10.' ) and not ip == '127.0.0.1' : return ip return UNKNOWN_IP | Return the IP address inside the HTTP_X_FORWARDED_FOR var inside the request object . |
58,115 | def get_connection ( self ) : if self . conn : return self . conn redis_configs = getsetting ( 'REDIS_CONNECTIONS' ) if redis_configs : config_name = getsetting ( 'EVENTLIB_REDIS_CONFIG_NAME' , 'default' ) config = redis_configs [ config_name ] host = config [ 'HOST' ] port = config [ 'PORT' ] self . conn = redis . StrictRedis ( host = host , port = port ) else : self . conn = None return self . conn | Return a valid redis connection based on the following settings |
58,116 | def _run_setup_py ( self , args , echo = True , echo2 = True , ff = '' ) : python = self . python if ff : setup_py = '-c"%s"' % ( RUN_SETUP % locals ( ) ) else : setup_py = 'setup.py %s' % ' ' . join ( args ) rc , lines = self . process . popen ( '"%(python)s" %(setup_py)s' % locals ( ) , echo = echo , echo2 = echo2 ) return rc , lines | Run setup . py with monkey - patched setuptools . |
58,117 | def app_factory ( global_settings , ** local_settings ) : config = Configurator ( ) config . setup_registry ( settings = local_settings , root_factory = RootFactory ( ) ) if 'configure_zcml' in local_settings : config . load_zcml ( local_settings [ 'configure_zcml' ] ) app = config . make_wsgi_app ( ) app_name = app_name_from_ini_file ( global_settings [ '__file__' ] ) ep_group = "%s.plugins" % app_name plugin_mgr = config . get_registered_utility ( IPluginManager ) plugin_mgr . load_all ( ep_group ) return app | Default factory for creating a WSGI application using the everest configurator and root factory . |
58,118 | async def fetch_page ( session , host ) : await asyncio . sleep ( random . randint ( 0 , 25 ) * 0.1 ) start = time . time ( ) logger . info ( 'Fetch from {}' . format ( host ) ) try : response = await session . get ( host , allow_redirects = False ) except aiohttp . ClientResponseError as err : results_tuple = ( host , 'no page' , err ) except aiohttp . ClientConnectorError as err : results_tuple = ( host , 'no http' , err ) except aiohttp . ServerConnectionError as err : results_tuple = ( host , 'no dev' , err ) except aiohttp . InvalidURL as err : results_tuple = ( host , 'no URL' , err ) else : try : text_response = await response . text ( ) except aiohttp . ClientPayloadError as err : results_tuple = ( host , 'no read' , err ) else : results_tuple = ( host , 'found' , text_response ) response . close ( ) logger . info ( 'Recvd from {} after {:.2f}s' . format ( host , time . time ( ) - start ) ) return results_tuple | Perform the page fetch from an individual host . |
58,119 | async def asynchronous ( urls = None , re_filter = None ) : class _URLBase ( str ) : @ property def hostname ( self ) : return urlsplit ( self ) . hostname http_devices = { } qualified_devices = [ ] connection = aiohttp . TCPConnector ( limit = 0 ) async with aiohttp . ClientSession ( connector = connection , conn_timeout = 5 , raise_for_status = True ) as session : futures = [ fetch_page ( session , url ) for url in urls ] for future in asyncio . as_completed ( futures ) : response = await future if 'found' in response [ 1 ] : http_devices [ response [ 0 ] ] = response [ 2 ] logger . debug ( 'Processed %s' , response [ 0 ] ) if re_filter . search ( response [ 2 ] ) : qualified_devices . append ( _URLBase ( response [ 0 ] ) ) return qualified_devices | Asynchronous request manager for session . Returns list of responses that match the filter . |
58,120 | def url_generator ( network = None , path = '' ) : network_object = ipaddress . ip_network ( network ) if network_object . num_addresses > 256 : logger . error ( 'Scan limited to 256 addresses, requested %d.' , network_object . num_addresses ) raise NotImplementedError elif network_object . num_addresses > 1 : network_hosts = network_object . hosts ( ) else : network_hosts = [ network_object . network_address ] return ( urlunsplit ( ( 'http' , str ( ip ) , path , '' , '' ) ) for ip in network_hosts ) | Return a tuple of URLs with path one for each host on network |
58,121 | def survey ( network = None , path = '' , pattern = '' , log = False ) : if log : logger . setLevel ( logging . DEBUG ) else : logger . setLevel ( logging . CRITICAL ) network_scan = asyncio . ensure_future ( asynchronous ( urls = url_generator ( network = network , path = path ) , re_filter = re . compile ( pattern ) ) ) ioloop = asyncio . get_event_loop ( ) ioloop . run_until_complete ( network_scan ) ioloop . run_until_complete ( asyncio . sleep ( 0 ) ) return sorted ( network_scan . result ( ) , key = lambda x : ipaddress . ip_address ( x . hostname ) ) | Search network for hosts with a response to path that matches pattern |
58,122 | def hidden_cursor ( ) : if sys . stdout . isatty ( ) : _LOGGER . debug ( 'Hiding cursor.' ) print ( '\x1B[?25l' , end = '' ) sys . stdout . flush ( ) try : yield finally : if sys . stdout . isatty ( ) : _LOGGER . debug ( 'Showing cursor.' ) print ( '\n\x1B[?25h' , end = '' ) sys . stdout . flush ( ) | Temporarily hide the terminal cursor . |
58,123 | def display_status ( ) : def print_status ( msg , color ) : print ( '\r' if sys . stdout . isatty ( ) else '\t' , end = '' ) print ( '{}{}[{color}{msg}{}]{}' . format ( Cursor . FORWARD ( _ncols ( ) - 8 ) , Style . BRIGHT , Fore . RESET , Style . RESET_ALL , color = color , msg = msg [ : 6 ] . upper ( ) . center ( 6 ) ) ) sys . stdout . flush ( ) try : yield except Status as e : _LOGGER . debug ( e ) print_status ( e . msg , e . color ) if e . exc : raise e . exc except ( KeyboardInterrupt , EOFError ) : raise except Exception : print_status ( 'FAILED' , Fore . RED ) raise else : print_status ( 'OK' , Fore . GREEN ) | Display an OK or FAILED message for the context block . |
58,124 | def _pusher_connect_handler ( self , data ) : self . channel = self . pusher . subscribe ( self . pos_callback_chan ) for listener in self . pusher_connected_listeners : listener ( data ) | Event handler for the connection_established event . Binds the shortlink_scanned event |
58,125 | def _runForever ( self , stop_event ) : while ( not stop_event . is_set ( ) ) : state = self . pusher . connection . state if ( state is not "connecting" and state is not "connected" ) : self . logger . warning ( "Pusher seems to be disconnected, trying to reconnect" ) self . pusher . connect ( ) stop_event . wait ( 0.5 ) | Runs the main loop |
58,126 | def stop ( self ) : self . pusherthread_stop . set ( ) self . pusher . disconnect ( ) while self . pusher . connection . state is "connected" : sleep ( 0.1 ) logging . info ( "shutting down pusher connector thread" ) | Stops the pusherclient cleanly |
58,127 | def load ( self , filename , bs = 512 ) : with open ( filename , 'rb' ) as f : f . seek ( GPT_HEADER_OFFSET + 0x0C ) header_size = struct . unpack ( "<I" , f . read ( 4 ) ) [ 0 ] f . seek ( GPT_HEADER_OFFSET ) header_data = f . read ( header_size ) self . header = GPT_HEADER ( header_data ) if ( self . header . signature != GPT_SIGNATURE ) : raise Exception ( "Invalid GPT signature" ) self . __load_partition_entries ( f , bs ) | Loads GPT partition table . |
58,128 | def global_to_local ( self , index ) : if ( type ( index ) is int ) or ( type ( index ) is slice ) : if len ( self . __mask ) > 1 : raise IndexError ( 'check length of parameter index' ) if type ( index ) is int : return self . int_global_to_local ( index ) elif type ( index ) is slice : return self . slice_global_to_local ( index ) else : raise IndexError ( 'check data type of index to be integer or slice' ) elif type ( index ) is tuple : local_index = [ ] for k , item in enumerate ( index ) : if k < len ( self . __mask ) : if type ( item ) is slice : temp_index = self . slice_global_to_local ( item , k ) elif type ( item ) in [ int , np . int64 , np . int32 ] : temp_index = self . int_global_to_local ( item , k ) if temp_index is None : return temp_index else : temp_index = item local_index . append ( temp_index ) return tuple ( local_index ) else : raise IndexError ( 'check index for correct length and type' ) | Calculate local index from global index |
58,129 | def int_global_to_local_start ( self , index , axis = 0 ) : if index >= self . __mask [ axis ] . stop - self . __halos [ 1 ] [ axis ] : return None if index < self . __mask [ axis ] . start : return 0 return index - self . __mask [ axis ] . start | Calculate local index from global index from start_index |
58,130 | def int_global_to_local_stop ( self , index , axis = 0 ) : if index < self . __mask [ axis ] . start + self . __halos [ 0 ] [ axis ] : return None if index > self . __mask [ axis ] . stop : return self . __mask [ axis ] . stop - self . __mask [ axis ] . start return index - self . __mask [ axis ] . start | Calculate local index from global index from stop_index |
58,131 | def int_global_to_local ( self , index , axis = 0 ) : if index >= self . __mask [ axis ] . stop - self . __halos [ 1 ] [ axis ] : return None if index < self . __mask [ axis ] . start + self . __halos [ 0 ] [ axis ] : return None return index - self . __mask [ axis ] . start | Calculate local index from global index for integer input |
58,132 | def int_out_of_bounds ( self , index , axis = 0 ) : if index > self . _global_shape [ axis ] : raise IndexError ( 'index is larger than the upper bound' ) if index < 0 : index += self . _global_shape [ axis ] if index < 0 : raise IndexError ( 'index is smaller than the lower bound' ) return index | Examines if index is out of local processing bounds |
58,133 | def out_of_bounds ( self , index ) : if type ( index ) is int : return self . int_out_of_bounds ( index ) elif type ( index ) is slice : return self . slice_out_of_bounds ( index ) elif type ( index ) is tuple : local_index = [ ] for k , item in enumerate ( index ) : if type ( item ) is slice : temp_index = self . slice_out_of_bounds ( item , k ) elif type ( item ) is int : temp_index = self . int_out_of_bounds ( item , k ) if temp_index is None : return temp_index local_index . append ( temp_index ) return tuple ( local_index ) | Check index for out of bounds |
58,134 | def get_server_setting ( self , protocol , host = '127.0.0.1' , port = 8000 , debug = False , ssl = None , sock = None , workers = 1 , loop = None , backlog = 100 , has_log = True ) : if isinstance ( ssl , dict ) : cert = ssl . get ( 'cert' ) or ssl . get ( 'certificate' ) key = ssl . get ( 'key' ) or ssl . get ( 'keyfile' ) if cert is None or key is None : raise ValueError ( 'SSLContext or certificate and key required.' ) context = create_default_context ( purpose = Purpose . CLIENT_AUTH ) context . load_cert_chain ( cert , keyfile = key ) ssl = context server_settings = { 'protocol' : protocol , 'request_handler' : self . request_handler , 'log' : self . log , 'netlog' : self . netlog , 'host' : host , 'port' : port , 'sock' : sock , 'ssl' : ssl , 'signal' : self . signal , 'debug' : debug , 'request_timeout' : self . request_timeout , 'request_max_size' : self . request_max_size , 'keep_alive' : self . keep_alive , 'loop' : loop , 'backlog' : backlog , 'has_log' : has_log } for event_name , settings_name , reverse in ( ( 'before_server_start' , 'before_start' , False ) , ( 'after_server_start' , 'after_start' , False ) , ( 'before_server_stop' , 'before_stop' , True ) , ( 'after_server_stop' , 'after_stop' , True ) , ) : listeners = self . listeners [ event_name ] . copy ( ) if reverse : listeners . reverse ( ) listeners = [ partial ( listener , self . app ) for listener in listeners ] server_settings [ settings_name ] = listeners if debug : self . log . setLevel ( logging . DEBUG ) if host and port : proto = 'http' if ssl is not None : proto = 'https' self . log . info ( 'Goin\' Fast @ {}://{}:{}' . format ( proto , host , port ) ) return server_settings | Helper function used by run . |
58,135 | def verify ( path ) : valid = False try : h5 = h5py . File ( path , mode = "r" ) qpi0 = h5 [ "qpi_0" ] except ( OSError , KeyError ) : pass else : if ( "qpimage version" in qpi0 . attrs and "phase" in qpi0 and "amplitude" in qpi0 and "bg_data" in qpi0 [ "phase" ] and "bg_data" in qpi0 [ "amplitude" ] ) : valid = True return valid | Verify that path has the qpimage series file format |
58,136 | def generate_requirements ( output_path = None ) : from django . conf import settings reqs = set ( ) for app in settings . INSTALLED_APPS : if app in mapping . keys ( ) : reqs |= set ( mapping [ app ] ) if output_path is None : print "--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/" for item in reqs : print item else : try : out_file = open ( output_path , 'w' ) out_file . write ( "--extra-index-url=http://opensource.washingtontimes.com/pypi/simple/\n" ) for item in reqs : out_file . write ( "%s\n" % item ) finally : out_file . close ( ) | Loop through the INSTALLED_APPS and create a set of requirements for pip . if output_path is None then write to standard out otherwise write to the path . |
58,137 | def register_mbr_plugin ( self , fs_id , plugin ) : self . logger . debug ( 'MBR: {}, FS ID: {}' . format ( self . __get_plugin_name ( plugin ) , fs_id ) ) self . __mbr_plugins [ fs_id ] . append ( plugin ) | Used in plugin s registration routine to associate it s detection method with given filesystem id |
58,138 | def register_gpt_plugin ( self , fs_guid , plugin ) : key = uuid . UUID ( fs_guid . lower ( ) ) self . logger . debug ( 'GPT: {}, GUID: {}' . format ( self . __get_plugin_name ( plugin ) , fs_guid ) ) self . __gpt_plugins [ key ] . append ( plugin ) | Used in plugin s registration routine to associate it s detection method with given filesystem guid |
58,139 | def detect_mbr ( self , filename , offset , fs_id ) : self . logger . debug ( 'Detecting MBR partition type' ) if fs_id not in self . __mbr_plugins : return None else : plugins = self . __mbr_plugins . get ( fs_id ) for plugin in plugins : if plugin . detect ( filename , offset ) : return plugin . get_volume_object ( ) return None | Used by rawdisk . session . Session to match mbr partitions against filesystem plugins . |
58,140 | def detect_gpt ( self , filename , offset , fs_guid ) : self . logger . debug ( 'Detecting GPT partition type' ) if fs_guid not in self . __gpt_plugins : return None else : plugins = self . __gpt_plugins . get ( fs_guid ) for plugin in plugins : if plugin . detect ( filename , offset ) : return plugin . get_volume_object ( ) return None | Used by rawdisk . session . Session to match gpt partitions against filesystem plugins . |
58,141 | def inject_documentation ( ** options ) : import cog loader = ConfigLoader ( ** options ) cog . out ( "\n" + loader . documentation + "\n\n" ) | Generate configuration documentation in reStructuredText_ syntax . |
58,142 | def read_file ( self , filename ) : logger . info ( "Reading file: %s" , format_path ( filename ) ) contents = self . context . read_file ( filename ) num_lines = len ( contents . splitlines ( ) ) logger . debug ( "Read %s from %s." , pluralize ( num_lines , 'line' ) , format_path ( filename ) ) return contents . rstrip ( ) | Read a text file and provide feedback to the user . |
58,143 | def execute_file ( self , filename ) : logger . info ( "Executing file: %s" , format_path ( filename ) ) contents = self . context . execute ( filename , capture = True ) . stdout num_lines = len ( contents . splitlines ( ) ) logger . debug ( "Execution of %s yielded % of output." , format_path ( filename ) , pluralize ( num_lines , 'line' ) ) return contents . rstrip ( ) | Execute a file and provide feedback to the user . |
58,144 | def write_file ( self , filename , contents ) : logger . info ( "Writing file: %s" , format_path ( filename ) ) contents = contents . rstrip ( ) + b"\n" self . context . write_file ( filename , contents ) logger . debug ( "Wrote %s to %s." , pluralize ( len ( contents . splitlines ( ) ) , "line" ) , format_path ( filename ) ) | Write a text file and provide feedback to the user . |
58,145 | def validate_input ( function ) : @ wraps ( function ) def wrapper ( * args , ** kwargs ) : try : name = function . __name__ + '_validator' globals ( ) [ name ] ( kwargs ) return function ( * args , ** kwargs ) except KeyError : raise Exception ( "Could not find validation schema for the" " function " + function . __name__ ) return wrapper | Decorator that validates the kwargs of the function passed to it . |
58,146 | def getModulePath ( project_path , module_name , verbose ) : if not module_name : return None sys . path . append ( project_path ) try : package = pkgutil . get_loader ( module_name ) except ImportError : if verbose : print ( "Parent module for " + module_name + " not found." ) return None except : if verbose : print ( module_name + " not loaded for bizarre reasons" ) try : if package : if package . get_code ( module_name ) : filename = package . get_code ( module_name ) . co_filename return filename elif package . find_spec ( module_name ) . has_location == False : return None else : pass pass except ImportError : if verbose : print ( "Code object unavailable for " + module_name ) return None except AttributeError : if verbose : print ( module_name + " is an ExtensionFileLoader object" ) return None except : if verbose : print ( module_name + " not loaded for bizarre reasons" ) return None else : if verbose : print ( "Module " + module_name + " not found." ) return None | Searches for module_name in searchpath and returns the filepath . If no filepath was found returns None . |
58,147 | def getImportFromObjects ( node ) : somenames = [ x . asname for x in node . names if x . asname ] othernames = [ x . name for x in node . names if not x . asname ] return somenames + othernames | Returns a list of objects referenced by import from node |
58,148 | def as_slug_expression ( attr ) : slug_expr = sa_func . replace ( attr , ' ' , '-' ) slug_expr = sa_func . replace ( slug_expr , '_' , '-' ) slug_expr = sa_func . lower ( slug_expr ) return slug_expr | Converts the given instrumented string attribute into an SQL expression that can be used as a slug . |
58,149 | def mapper ( class_ , local_table = None , id_attribute = 'id' , slug_expression = None , * args , ** kwargs ) : mpr = sa_mapper ( class_ , local_table = local_table , * args , ** kwargs ) if id_attribute != 'id' : if 'id' in mpr . columns : mpr . dispose ( ) raise ValueError ( 'Attempting to overwrite the mapped "id" ' 'attribute.' ) elif isdatadescriptor ( getattr ( class_ , 'id' , None ) ) : mpr . dispose ( ) raise ValueError ( 'Attempting to overwrite the custom data ' 'descriptor defined for the "id" attribute.' ) class_ . id = synonym ( id_attribute ) slug_descr = None for base_cls in class_ . __mro__ : try : slug_descr = object . __getattribute__ ( base_cls , 'slug' ) except AttributeError : pass else : break if isinstance ( slug_descr , hybrid_descriptor ) : if not slug_expression is None : raise ValueError ( 'Attempting to overwrite the expression for ' 'an inherited slug hybrid descriptor.' ) hyb_descr = slug_descr else : if slug_expression is None : cls_expr = lambda cls : cast ( getattr ( cls , 'id' ) , String ) else : cls_expr = slug_expression hyb_descr = hybrid_descriptor ( slug_descr , expr = cls_expr ) class_ . slug = hyb_descr return mpr | Convenience wrapper around the SA mapper which will set up the hybrid id and slug attributes required by everest after calling the SA mapper . |
58,150 | def synonym ( name ) : return hybrid_property ( lambda inst : getattr ( inst , name ) , lambda inst , value : setattr ( inst , name , value ) , expr = lambda cls : getattr ( cls , name ) ) | Utility function mimicking the behavior of the old SA synonym function with the new hybrid property semantics . |
58,151 | def map_system_entities ( engine , metadata , reset ) : msg_tbl = Table ( '_user_messages' , metadata , Column ( 'guid' , String , nullable = False , primary_key = True ) , Column ( 'text' , String , nullable = False ) , Column ( 'time_stamp' , DateTime ( timezone = True ) , nullable = False , default = sa_func . now ( ) ) , ) mapper ( UserMessage , msg_tbl , id_attribute = 'guid' ) if reset : metadata . drop_all ( bind = engine , tables = [ msg_tbl ] ) metadata . create_all ( bind = engine , tables = [ msg_tbl ] ) | Maps all system entities . |
58,152 | def schematron ( self , fn = None , outfn = None , ext = '.sch' ) : from . xslt import XSLT from . import PATH , XML , etree fn = fn or self . fn if os . path . splitext ( fn ) [ - 1 ] . lower ( ) == ext : return fn elif os . path . splitext ( fn ) [ - 1 ] . lower ( ) != '.rng' : fn = Schema ( fn = fn ) . trang ( ext = '.rng' ) rng2sch = XSLT ( fn = os . path . join ( PATH , 'xslts' , 'rng2sch.xslt' ) ) rng = XML ( fn = fn ) outfn = outfn or os . path . splitext ( fn ) [ 0 ] + ext sch = XML ( fn = outfn , root = rng2sch . saxon9 ( rng . root ) . getroot ( ) ) sch . write ( ) return sch . fn | convert the Schema to schematron and save at the given output filename or with the given extension . |
58,153 | def xhtml ( self , outfn = None , ext = '.xhtml' , css = None , ** params ) : from markdown import markdown from copy import deepcopy from bl . file import File from . xslt import XSLT from . rng import RNG from . import XML , PATH , etree rncfn = os . path . splitext ( self . fn ) [ 0 ] + '.rnc' rngfn = os . path . splitext ( self . fn ) [ 0 ] + '.rng' htmlfn = os . path . splitext ( self . fn ) [ 0 ] + '.html' if self . fn == rncfn or os . path . exists ( rncfn ) : rngfn = Schema ( rncfn ) . trang ( ext = '.rng' ) assert os . path . exists ( rngfn ) rng = RNG ( fn = rngfn ) for define in rng . xpath ( rng . root , "//r:define" ) : log . debug ( "%s %r" % ( rng . tag_name ( define ) , define . attrib ) ) tempdefine = deepcopy ( define ) tempgrammar = deepcopy ( rng . root ) tempgrammar . text = '\n' for ch in tempgrammar . getchildren ( ) : rng . remove ( ch ) tempgrammar . insert ( 0 , tempdefine ) for adoc in rng . xpath ( tempdefine , ".//a:documentation | .//a:definition" ) : rng . remove ( adoc ) with tempfile . TemporaryDirectory ( ) as tempdir : x = XML ( fn = os . path . join ( tempdir , 'define.rng' ) , root = tempgrammar ) x . write ( ) newfn = Schema ( x . fn ) . trang ( ext = '.rnc' ) txt = open ( newfn , 'rb' ) . read ( ) . decode ( 'utf-8' ) if '\n\n' in txt : txt = txt [ txt . index ( '\n\n' ) + 1 : ] . strip ( ) adef = etree . Element ( "{%(a)s}definition" % RNG . NS ) adef . text = txt adef . tail = '\n\t\t' log . debug ( adef . text ) annotations = rng . xpath ( define , "a:*" ) if len ( annotations ) > 0 : index = define . index ( annotations [ - 1 ] ) + 1 else : index = 0 define . insert ( index , adef ) rng . write ( ) xslt = XSLT ( fn = os . path . join ( PATH , 'xslts' , 'rng2md.xslt' ) ) md = xslt . saxon9 ( rng . root , ** params ) . 
strip ( ) html_body = markdown ( md , output_format = "xhtml5" , extensions = [ 'markdown.extensions.extra' , 'markdown.extensions.admonition' , 'markdown.extensions.headerid' , 'markdown.extensions.sane_lists' , 'markdown.extensions.toc' ] ) . strip ( ) html_text = + html_body + html = XML ( fn = htmlfn , root = html_text ) return html | convert the Schema to XHTML with the given output filename or with the given extension . |
58,154 | def from_tag ( cls , tag , schemas , ext = '.rnc' ) : return cls ( fn = cls . filename ( tag , schemas , ext = ext ) ) | load a schema using an element s tag . schemas can be a string or a list of strings |
58,155 | def filename ( cls , tag , schemas , ext = '.rnc' ) : if type ( schemas ) == str : schemas = re . split ( "\s*,\s*" , schemas ) for schema in schemas : fn = os . path . join ( schema , cls . dirname ( tag ) , cls . basename ( tag , ext = ext ) ) if os . path . exists ( fn ) : return fn | given a tag and a list of schemas return the filename of the schema . If schemas is a string treat it as a comma - separated list . |
58,156 | def errors_as_text ( self ) : errors = [ ] errors . append ( self . non_field_errors ( ) . as_text ( ) ) errors_data = self . errors . as_data ( ) for key , value in errors_data . items ( ) : field_label = self . fields [ key ] . label err_descn = '' . join ( [ force_text ( e . message ) for e in value ] ) error = "%s %s" % ( field_label , err_descn ) errors . append ( error ) return ',' . join ( errors ) | only available to Django 1 . 7 + |
58,157 | def add_attr2fields ( self , attr_name , attr_val , fields = [ ] , exclude = [ ] , include_all_if_empty = True ) : for f in self . filter_fields ( fields , exclude , include_all_if_empty ) : f = self . fields [ f . name ] org_val = f . widget . attrs . get ( attr_name , '' ) f . widget . attrs [ attr_name ] = '%s %s' % ( org_val , attr_val ) if org_val else attr_val | add attr to fields |
58,158 | def add_class2fields ( self , html_class , fields = [ ] , exclude = [ ] , include_all_if_empty = True ) : self . add_attr2fields ( 'class' , html_class , fields , exclude ) | add class to html widgets . |
58,159 | def as_required_fields ( self , fields = [ ] ) : fields = self . filter_fields ( fields ) for f in fields : f = self . fields [ f . name ] f . required = True | set required to True |
58,160 | def check_uniqe ( self , obj_class , error_msg = _ ( 'Must be unique' ) , ** kwargs ) : if obj_class . objects . filter ( ** kwargs ) . exclude ( pk = self . instance . pk ) : raise forms . ValidationError ( error_msg ) | check if this object is unique |
58,161 | def get_info ( pyfile ) : info = { } info_re = re . compile ( r"^__(\w+)__ = ['\"](.*)['\"]" ) with open ( pyfile , 'r' ) as f : for line in f . readlines ( ) : match = info_re . search ( line ) if match : info [ match . group ( 1 ) ] = match . group ( 2 ) return info | Retrieve dunder values from a pyfile |
58,162 | def main ( ) : args = parse_args ( ) config_logger ( args ) logger = structlog . get_logger ( __name__ ) if args . show_version : print_version ( ) sys . exit ( 0 ) version = pkg_resources . get_distribution ( 'lander' ) . version logger . info ( 'Lander version {0}' . format ( version ) ) config = Configuration ( args = args ) if config [ 'is_travis_pull_request' ] : logger . info ( 'Skipping build from PR.' ) sys . exit ( 0 ) lander = Lander ( config ) lander . build_site ( ) logger . info ( 'Build complete' ) if config [ 'upload' ] : lander . upload_site ( ) logger . info ( 'Upload complete' ) logger . info ( 'Lander complete' ) | Entrypoint for lander executable . |
58,163 | def insert_node ( self , node ) : if self . _is_node_reserved ( node ) : return False self . _node_map [ node . get_id ( ) ] = node return True | Adds node if name is available or pre - existing node returns True if added returns False if not added |
58,164 | def join ( self , distbase , location ) : sep = '' if distbase and distbase [ - 1 ] not in ( ':' , '/' ) : sep = '/' return distbase + sep + location | Join distbase and location in such way that the result is a valid scp destination . |
58,165 | def get_location ( self , location , depth = 0 ) : if not location : return [ ] if location in self . aliases : res = [ ] if depth > MAXALIASDEPTH : err_exit ( 'Maximum alias depth exceeded: %(location)s' % locals ( ) ) for loc in self . aliases [ location ] : res . extend ( self . get_location ( loc , depth + 1 ) ) return res if self . is_server ( location ) : return [ location ] if location == 'pypi' : err_exit ( 'No configuration found for server: pypi\n' 'Please create a ~/.pypirc file' ) if self . urlparser . is_url ( location ) : return [ location ] if not self . has_host ( location ) and self . distbase : return [ self . join ( self . distbase , location ) ] return [ location ] | Resolve aliases and apply distbase . |
58,166 | def get_default_location ( self ) : res = [ ] for location in self . distdefault : res . extend ( self . get_location ( location ) ) return res | Return the default location . |
58,167 | def check_empty_locations ( self , locations = None ) : if locations is None : locations = self . locations if not locations : err_exit ( 'mkrelease: option -d is required\n%s' % USAGE ) | Fail if locations is empty . |
58,168 | def check_valid_locations ( self , locations = None ) : if locations is None : locations = self . locations for location in locations : if ( not self . is_server ( location ) and not self . is_ssh_url ( location ) and not self . has_host ( location ) ) : err_exit ( 'Unknown location: %(location)s' % locals ( ) ) | Fail if locations contains bad destinations . |
58,169 | def list_locations ( self ) : known = self . defaults . get_known_locations ( ) for default in self . defaults . distdefault : if default not in known : known . add ( default ) if not known : err_exit ( 'No locations' , 0 ) for location in sorted ( known ) : if location in self . defaults . distdefault : print ( location , '(default)' ) else : print ( location ) sys . exit ( 0 ) | Print known dist - locations and exit . |
58,170 | def get_uploadflags ( self , location ) : uploadflags = [ ] server = self . defaults . servers [ location ] if self . sign : uploadflags . append ( '--sign' ) elif server . sign is not None : if server . sign : uploadflags . append ( '--sign' ) elif self . defaults . sign : uploadflags . append ( '--sign' ) if self . identity : if '--sign' not in uploadflags : uploadflags . append ( '--sign' ) uploadflags . append ( '--identity="%s"' % self . identity ) elif '--sign' in uploadflags : if server . identity is not None : if server . identity : uploadflags . append ( '--identity="%s"' % server . identity ) elif self . defaults . identity : uploadflags . append ( '--identity="%s"' % self . defaults . identity ) return uploadflags | Return uploadflags for the given server . |
58,171 | def get_options ( self ) : args = self . parse_options ( self . args ) if args : self . directory = args [ 0 ] if self . develop : self . skiptag = True if not self . develop : self . develop = self . defaults . develop if not self . develop : self . infoflags = self . setuptools . infoflags if not self . formats : self . formats = self . defaults . formats for format in self . formats : if format == 'zip' : self . distributions . append ( ( 'sdist' , [ '--formats="zip"' ] ) ) elif format == 'gztar' : self . distributions . append ( ( 'sdist' , [ '--formats="gztar"' ] ) ) elif format == 'egg' : self . distributions . append ( ( 'bdist' , [ '--formats="egg"' ] ) ) elif format == 'wheel' : self . distributions . append ( ( 'bdist_wheel' , [ ] ) ) if not self . distributions : self . distributions . append ( ( 'sdist' , [ '--formats="zip"' ] ) ) if self . list : self . list_locations ( ) if not self . locations : self . locations . extend ( self . locations . get_default_location ( ) ) if not ( self . skipregister and self . skipupload ) : if not ( self . get_skipregister ( ) and self . get_skipupload ( ) ) : self . locations . check_empty_locations ( ) self . locations . check_valid_locations ( ) if len ( args ) > 1 : if self . urlparser . is_url ( self . directory ) : self . branch = args [ 1 ] elif self . urlparser . is_ssh_url ( self . directory ) : self . branch = args [ 1 ] else : err_exit ( 'mkrelease: invalid arguments\n%s' % USAGE ) if len ( args ) > 2 : err_exit ( 'mkrelease: too many arguments\n%s' % USAGE ) | Process the command line . |
58,172 | def get_package ( self ) : directory = self . directory develop = self . develop scmtype = self . scmtype self . scm = self . scms . get_scm ( scmtype , directory ) if self . scm . is_valid_url ( directory ) : directory = self . urlparser . abspath ( directory ) self . remoteurl = directory self . isremote = self . push = True else : directory = abspath ( expanduser ( directory ) ) self . isremote = False self . scm . check_valid_sandbox ( directory ) self . setuptools . check_valid_package ( directory ) name , version = self . setuptools . get_package_info ( directory , develop ) print ( 'Releasing' , name , version ) if not self . skipcommit : if self . scm . is_dirty_sandbox ( directory ) : self . scm . commit_sandbox ( directory , name , version , self . push ) | Get the URL or sandbox to release . |
58,173 | def make_release ( self ) : directory = self . directory infoflags = self . infoflags branch = self . branch develop = self . develop scmtype = self . scm . name tempdir = abspath ( tempfile . mkdtemp ( prefix = 'mkrelease-' ) ) try : if self . isremote : directory = join ( tempdir , 'build' ) self . scm . clone_url ( self . remoteurl , directory ) else : directory = abspath ( expanduser ( directory ) ) self . scm . check_valid_sandbox ( directory ) if self . isremote : branch = self . scm . make_branchid ( directory , branch ) if branch : self . scm . switch_branch ( directory , branch ) if scmtype != 'svn' : branch = self . scm . get_branch_from_sandbox ( directory ) print ( 'Releasing branch' , branch ) self . setuptools . check_valid_package ( directory ) if not ( self . skipcommit and self . skiptag ) : self . scm . check_dirty_sandbox ( directory ) self . scm . check_unclean_sandbox ( directory ) name , version = self . setuptools . get_package_info ( directory , develop ) if self . isremote : print ( 'Releasing' , name , version ) if not self . skiptag : print ( 'Tagging' , name , version ) tagid = self . scm . make_tagid ( directory , version ) self . scm . check_tag_exists ( directory , tagid ) self . scm . create_tag ( directory , tagid , name , version , self . push ) if self . manifest : scmtype = 'none' for distcmd , distflags in self . distributions : manifest = self . setuptools . run_egg_info ( directory , infoflags , scmtype , self . quiet ) distfile = self . setuptools . run_dist ( directory , infoflags , distcmd , distflags , scmtype , self . quiet ) for location in self . locations : if self . locations . is_server ( location ) : if not self . get_skipregister ( location ) : self . setuptools . run_register ( directory , infoflags , location , scmtype , self . quiet ) if not self . get_skipupload ( ) : uploadflags = self . get_uploadflags ( location ) if '--sign' in uploadflags and isfile ( distfile + '.asc' ) : os . 
remove ( distfile + '.asc' ) self . setuptools . run_upload ( directory , infoflags , distcmd , distflags , location , uploadflags , scmtype , self . quiet ) else : if not self . skipupload : if self . locations . is_ssh_url ( location ) : scheme , location = self . urlparser . to_ssh_url ( location ) self . scp . run_upload ( scheme , distfile , location ) else : self . scp . run_upload ( 'scp' , distfile , location ) finally : shutil . rmtree ( tempdir ) | Build and distribute the package . |
58,174 | def configure_gateway ( cls , launch_jvm : bool = True , gateway : Union [ GatewayParameters , Dict [ str , Any ] ] = None , callback_server : Union [ CallbackServerParameters , Dict [ str , Any ] ] = False , javaopts : Iterable [ str ] = ( ) , classpath : Iterable [ str ] = '' ) : assert check_argument_types ( ) classpath = classpath if isinstance ( classpath , str ) else os . pathsep . join ( classpath ) javaopts = list ( javaopts ) for match in package_re . finditer ( classpath ) : pkgname = match . group ( 1 ) module = import_module ( pkgname ) module_dir = os . path . dirname ( module . __file__ ) classpath = classpath . replace ( match . group ( 0 ) , module_dir ) if gateway is None : gateway = { } if isinstance ( gateway , dict ) : gateway . setdefault ( 'eager_load' , True ) gateway . setdefault ( 'auto_convert' , True ) gateway = GatewayParameters ( ** gateway ) if isinstance ( callback_server , dict ) : callback_server = CallbackServerParameters ( ** callback_server ) elif callback_server is True : callback_server = CallbackServerParameters ( ) return launch_jvm , gateway , callback_server , classpath , javaopts | Configure a Py4J gateway . |
58,175 | def load ( self , filename , offset ) : self . offset = offset self . filename = filename self . bootsector = BootSector ( filename = filename , length = NTFS_BOOTSECTOR_SIZE , offset = self . offset ) self . mft_table = MftTable ( mft_entry_size = self . bootsector . mft_record_size , filename = self . filename , offset = self . mft_table_offset ) self . mft_table . preload_entries ( NUM_SYSTEM_ENTRIES ) self . _load_volume_information ( ) | Loads NTFS volume information |
58,176 | def _get_mft_zone_size ( self , num_clusters , mft_zone_multiplier = 1 ) : sizes = { 4 : num_clusters >> 1 , 3 : ( num_clusters * 3 ) >> 3 , 2 : num_clusters >> 2 , } return sizes . get ( mft_zone_multiplier , num_clusters >> 3 ) | Returns mft zone size in clusters . From ntfs_progs . 1 . 22 . |
58,177 | def close ( self ) : if hasattr ( self , 'iterators' ) : for it in self . iterators : if hasattr ( it , 'close' ) : it . close ( ) | Closes all the iterators . |
58,178 | def _update_sorting ( self ) : key = self . key sorted_tops = self . sorted_tops tops = self . tops iterators = self . iterators for idx in self . idxs : try : tops [ idx ] = next ( iterators [ idx ] ) top_key = key ( tops [ idx ] ) if top_key not in sorted_tops : sorted_tops [ top_key ] = [ ] sorted_tops [ top_key ] . append ( idx ) except StopIteration : pass if len ( sorted_tops ) == 0 : raise StopIteration key , self . idxs = sorted_tops . popitem ( last = False ) self . c_idx = 0 | Insert new entries into the merged iterator . |
58,179 | def domain_user_stats ( ) : fname = os . path . join ( os . path . dirname ( __file__ ) , "email_domain_users.csv" ) stats = pd . read_csv ( fname , header = 0 , squeeze = True , index_col = 0 ) return stats [ pd . notnull ( stats . index ) ] | Get number of distinct email addresses in observed domains |
58,180 | def is_university ( addr ) : addr_domain = domain ( addr ) if not addr_domain : return False chunks = addr_domain . split ( "." ) if len ( chunks ) < 2 : return False domains = university_domains ( ) return ( chunks [ - 1 ] == "edu" and chunks [ - 2 ] not in ( "england" , "australia" ) ) or chunks [ - 2 ] == "edu" or any ( "." . join ( chunks [ i : ] ) in domains for i in range ( len ( chunks ) - 1 ) ) | Check if provided email has a university domain |
58,181 | def is_public ( addr ) : addr_domain = domain ( addr ) if not addr_domain : return True chunks = addr_domain . rsplit ( "." , 1 ) return len ( chunks ) < 2 or addr_domain . endswith ( "local" ) or addr_domain in public_domains ( ) | Check if the passed email registered at a free pubic mail server |
58,182 | def write_color ( self , text , attr = None ) : log ( u'write_color("%s", %s)' % ( text , attr ) ) chunks = self . terminal_escape . split ( text ) log ( u'chunks=%s' % repr ( chunks ) ) bg = self . savebg n = 0 if attr is None : attr = self . attr try : fg = self . trtable [ ( 0x000f & attr ) ] bg = self . trtable [ ( 0x00f0 & attr ) >> 4 ] except TypeError : fg = attr for chunk in chunks : m = self . escape_parts . match ( chunk ) if m : log ( m . group ( 1 ) ) attr = ansicolor . get ( m . group ( 1 ) , self . attr ) n += len ( chunk ) System . Console . ForegroundColor = fg System . Console . BackgroundColor = bg System . Console . Write ( chunk ) return n | write text at current cursor position and interpret color escapes . return the number of characters written . |
58,183 | def files ( self ) : if self . _files is None : self . _files = SeriesZipTifHolo . _index_files ( self . path ) return self . _files | List of hologram data file names in the input zip file |
58,184 | def get_time ( self , idx ) : ds = self . _get_dataset ( idx ) thetime = ds . get_time ( ) if np . isnan ( thetime ) : zf = zipfile . ZipFile ( self . path ) info = zf . getinfo ( self . files [ idx ] ) timetuple = tuple ( list ( info . date_time ) + [ 0 , 0 , 0 ] ) thetime = time . mktime ( timetuple ) return thetime | Time for each TIFF file |
58,185 | def get_remote_data ( self , localvars , remotevars , inds , shape ) : if self . horiz_size == 'all' : y , y_1 = 0 , shape [ - 2 ] x , x_1 = 0 , shape [ - 1 ] else : r = self . horiz_size x , x_1 = self . point_get . value [ 2 ] - r , self . point_get . value [ 2 ] + r + 1 y , y_1 = self . point_get . value [ 1 ] - r , self . point_get . value [ 1 ] + r + 1 x , x_1 = x [ 0 ] , x_1 [ 0 ] y , y_1 = y [ 0 ] , y_1 [ 0 ] if y < 0 : y = 0 if x < 0 : x = 0 if y_1 > shape [ - 2 ] : y_1 = shape [ - 2 ] if x_1 > shape [ - 1 ] : x_1 = shape [ - 1 ] domain = self . local . variables [ 'domain' ] if len ( shape ) == 4 : domain [ inds [ 0 ] : inds [ - 1 ] + 1 , 0 : shape [ 1 ] , y : y_1 , x : x_1 ] = np . ones ( ( inds [ - 1 ] + 1 - inds [ 0 ] , shape [ 1 ] , y_1 - y , x_1 - x ) ) elif len ( shape ) == 3 : domain [ inds [ 0 ] : inds [ - 1 ] + 1 , y : y_1 , x : x_1 ] = np . ones ( ( inds [ - 1 ] + 1 - inds [ 0 ] , y_1 - y , x_1 - x ) ) logger . debug ( "Filling cache with: Time - %s:%s, Lat - %s:%s, Lon - %s:%s" % ( str ( inds [ 0 ] ) , str ( inds [ - 1 ] + 1 ) , str ( y ) , str ( y_1 ) , str ( x ) , str ( x_1 ) ) ) for local , remote in zip ( localvars , remotevars ) : if len ( shape ) == 4 : local [ inds [ 0 ] : inds [ - 1 ] + 1 , 0 : shape [ 1 ] , y : y_1 , x : x_1 ] = remote [ inds [ 0 ] : inds [ - 1 ] + 1 , 0 : shape [ 1 ] , y : y_1 , x : x_1 ] else : local [ inds [ 0 ] : inds [ - 1 ] + 1 , y : y_1 , x : x_1 ] = remote [ inds [ 0 ] : inds [ - 1 ] + 1 , y : y_1 , x : x_1 ] | Method that does the updating of local netcdf cache with remote data |
58,186 | def need_data ( self , i ) : if self . caching is False : return False logger . debug ( "Checking cache for data availability at %s." % self . part . location . logstring ( ) ) try : with self . read_lock : self . read_count . value += 1 self . has_read_lock . append ( os . getpid ( ) ) self . dataset . opennc ( ) cached_lookup = self . dataset . get_values ( 'domain' , timeinds = [ np . asarray ( [ i ] ) ] , point = self . part . location ) logger . debug ( "Type of result: %s" % type ( cached_lookup ) ) logger . debug ( "Double mean of result: %s" % np . mean ( np . mean ( cached_lookup ) ) ) logger . debug ( "Type of Double mean of result: %s" % type ( np . mean ( np . mean ( cached_lookup ) ) ) ) if type ( np . mean ( np . mean ( cached_lookup ) ) ) == np . ma . core . MaskedConstant : need = True logger . debug ( "I NEED data. Got back: %s" % cached_lookup ) else : need = False logger . debug ( "I DO NOT NEED data" ) except StandardError : need = True logger . debug ( "I NEED data (no time index exists in cache)" ) finally : self . dataset . closenc ( ) with self . read_lock : self . read_count . value -= 1 self . has_read_lock . remove ( os . getpid ( ) ) return need | Method to test if cache contains the data that the particle needs |
58,187 | def linterp ( self , setx , sety , x ) : if math . isnan ( sety [ 0 ] ) or math . isnan ( setx [ 0 ] ) : return np . nan return sety [ 0 ] + ( x - setx [ 0 ] ) * ( ( sety [ 1 ] - sety [ 0 ] ) / ( setx [ 1 ] - setx [ 0 ] ) ) | Linear interp of model data values between time steps |
58,188 | def boundary_interaction ( self , ** kwargs ) : particle = kwargs . pop ( 'particle' ) starting = kwargs . pop ( 'starting' ) ending = kwargs . pop ( 'ending' ) if self . useshore : intersection_point = self . _shoreline . intersect ( start_point = starting . point , end_point = ending . point ) if intersection_point : hitpoint = Location4D ( point = intersection_point [ 'point' ] , time = starting . time + ( ending . time - starting . time ) ) particle . location = hitpoint resulting_point = self . _shoreline . react ( start_point = starting , end_point = ending , hit_point = hitpoint , reverse_distance = self . reverse_distance , feature = intersection_point [ 'feature' ] , distance = kwargs . get ( 'distance' ) , angle = kwargs . get ( 'angle' ) , azimuth = kwargs . get ( 'azimuth' ) , reverse_azimuth = kwargs . get ( 'reverse_azimuth' ) ) ending . latitude = resulting_point . latitude ending . longitude = resulting_point . longitude ending . depth = resulting_point . depth logger . debug ( "%s - hit the shoreline at %s. Setting location to %s." % ( particle . logstring ( ) , hitpoint . logstring ( ) , ending . logstring ( ) ) ) if self . usebathy : if not particle . settled : bintersect = self . _bathymetry . intersect ( start_point = starting , end_point = ending ) if bintersect : pt = self . _bathymetry . react ( type = 'reverse' , start_point = starting , end_point = ending ) logger . debug ( "%s - hit the bottom at %s. Setting location to %s." % ( particle . logstring ( ) , ending . logstring ( ) , pt . logstring ( ) ) ) ending . latitude = pt . latitude ending . longitude = pt . longitude ending . depth = pt . depth if self . usesurface : if ending . depth > 0 : logger . debug ( "%s - rose out of the water. Setting depth to 0." % particle . logstring ( ) ) ending . depth = 0 particle . location = ending return | Returns a list of Location4D objects |
58,189 | def get_buildfile_path ( settings ) : base = os . path . basename ( settings . build_url ) return os . path . join ( BUILDS_ROOT , base ) | Path to which a build tarball should be downloaded . |
58,190 | def prior_dates ( * args , ** kwargs ) : try : chron = args [ 0 ] except IndexError : chron = kwargs [ 'coredates' ] d_r = np . array ( kwargs [ 'd_r' ] ) d_std = np . array ( kwargs [ 'd_std' ] ) t_a = np . array ( kwargs [ 't_a' ] ) t_b = np . array ( kwargs [ 't_b' ] ) try : normal_distr = kwargs [ 'normal_distr' ] except KeyError : normal_distr = None cc_int = kwargs [ 'cc' ] ccdict = { 0 : 'ConstCal' , 1 : 'IntCal3' , 2 : 'Marine13' , 3 : 'SHCal13' , 4 : 'ConstCal' } if 'cc1' in kwargs : ccdict [ 1 ] = str ( kwargs [ 'cc1' ] ) if 'cc2' in kwargs : ccdict [ 2 ] = str ( kwargs [ 'cc2' ] ) if 'cc3' in kwargs : ccdict [ 3 ] = str ( kwargs [ 'cc3' ] ) if 'cc4' in kwargs : ccdict [ 4 ] = str ( kwargs [ 'cc4' ] ) cc = [ ] for i in cc_int : i = int ( i ) cc . append ( fetch_calibcurve ( ccdict [ i ] ) ) d , p = calibrate_dates ( chron , calib_curve = cc , d_r = d_r , d_std = d_std , t_a = t_a , t_b = t_b , normal_distr = normal_distr ) return d , p | Get the prior distribution of calibrated radiocarbon dates |
58,191 | def prior_sediment_rate ( * args , ** kwargs ) : acc_mean = kwargs [ 'acc_mean' ] acc_shape = kwargs [ 'acc_shape' ] x = np . linspace ( 0 , 6 * np . max ( acc_mean ) , 100 ) y = stats . gamma . pdf ( x , a = acc_shape , scale = 1 / ( acc_shape / acc_mean ) ) return y , x | Get the prior density of sediment rates |
58,192 | def prior_sediment_memory ( * args , ** kwargs ) : mem_shape = kwargs [ 'mem_strength' ] mem_mean = kwargs [ 'mem_mean' ] x = np . linspace ( 0 , 1 , 100 ) y = stats . beta . pdf ( x , a = mem_shape * mem_mean , b = mem_shape * ( 1 - mem_mean ) ) return y , x | Get the prior density of sediment memory |
58,193 | def _init_browser ( self ) : self . browser = splinter . Browser ( 'phantomjs' ) self . browser . visit ( self . server_url ) self . browser . find_link_by_partial_text ( "Sign in" ) . click ( ) self . browser . fill ( 'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$' 'txtFormsLogin' , self . user ) self . browser . fill ( 'ctl00$ctl00$NICEMasterPageBodyContent$SiteContentPlaceholder$' 'txtFormsPassword' , self . password ) self . browser . find_by_css ( 'input[type=submit]' ) . click ( ) self . browser . find_by_css ( 'input[type=submit]' ) . click ( ) | Update this everytime the CERN SSO login form is refactored . |
58,194 | def download ( self , directory = '~/Music' , song_name = '%a - %s - %A' ) : formatted = self . format ( song_name ) path = os . path . expanduser ( directory ) + os . path . sep + formatted + '.mp3' try : raw = self . safe_download ( ) with open ( path , 'wb' ) as f : f . write ( raw ) except : raise return formatted | Download a song to a directory . |
58,195 | def safe_download ( self ) : def _markStreamKeyOver30Seconds ( stream ) : self . _connection . request ( 'markStreamKeyOver30Seconds' , { 'streamServerID' : stream . ip , 'artistID' : self . artist . id , 'songQueueID' : self . _connection . session . queue , 'songID' : self . id , 'songQueueSongID' : 1 , 'streamKey' : stream . key } , self . _connection . header ( 'markStreamKeyOver30Seconds' , 'jsqueue' ) ) stream = self . stream timer = threading . Timer ( 30 , _markStreamKeyOver30Seconds , [ stream ] ) timer . start ( ) raw = stream . data . read ( ) if len ( raw ) == stream . size : timer . cancel ( ) self . _connection . request ( 'markSongDownloadedEx' , { 'streamServerID' : stream . ip , 'songID' : self . id , 'streamKey' : stream . key } , self . _connection . header ( 'markSongDownloadedEx' , 'jsqueue' ) ) self . _connection . request ( 'removeSongsFromQueue' , { 'userRemoved' : True , 'songQueueID' : self . _connection . session . queue , 'songQueueSongIDs' : [ 1 ] } , self . _connection . header ( 'removeSongsFromQueue' , 'jsqueue' ) ) return raw else : raise ValueError ( "Content-Length {}, but read {}" . format ( stream . size , len ( raw ) ) ) | Download a song respecting Grooveshark s API . |
58,196 | def copy ( self ) : return self . __class__ ( options = self . __options , attribute_options = self . __attribute_options ) | Return a copy of this configuration . |
58,197 | def get_option ( self , name ) : self . __validate_option_name ( name ) return self . __options . get ( name , None ) | Returns the value for the specified generic configuration option . |
58,198 | def set_option ( self , name , value ) : self . __validate_option_name ( name ) self . __options [ name ] = value | Sets the specified generic configuration option to the given value . |
58,199 | def set_attribute_option ( self , attribute , option_name , option_value ) : self . __validate_attribute_option_name ( option_name ) attribute_key = self . __make_key ( attribute ) mp_options = self . __attribute_options . setdefault ( attribute_key , { } ) mp_options [ option_name ] = option_value | Sets the given attribute option to the given value for the specified attribute . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.