idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
4,500
def load_transport(self, url):
    """Set up the communication dispatcher for remote communication.

    Parses *url*, stores the (address, port), scheme and parsed URL on
    the host, then launches the dispatcher actor for that scheme.

    :param str url: address of this host, e.g. ``'http://host:port'``.
    """
    aurl = urlparse(url)
    addrl = aurl.netloc.split(':')
    # (address, port) tuple; the port is kept as a string here.
    self.addr = addrl[0], addrl[1]
    self.transport = aurl.scheme
    self.host_url = aurl
    if aurl.scheme == 'http':
        self.launch_actor('http', rpcactor.RPCDispatcher(url, self, 'rpc'))
    elif aurl.scheme == 'amqp':
        self.launch_actor('amqp', rpcactor.RPCDispatcher(url, self, 'rabbit'))
For remote communication . Sets the communication dispatcher of the host at the address and port specified .
4,501
def has_actor(self, aid):
    """Return True if an actor with id *aid* is registered in this host."""
    candidate = '%s://%s/%s' % (self.transport, self.host_url.netloc, aid)
    return candidate in self.actors
Checks if the given id is used in the host by some actor .
4,502
def stop_actor(self, aid):
    """Stop the actor with id *aid* and delete all host references to it.

    The host's own actor (``self.url``) is never stopped by this method.

    :param str aid: identifier of the actor to remove.
    """
    url = '%s://%s/%s' % (self.transport, self.host_url.netloc, aid)
    if url != self.url:
        actor = self.actors[url]
        # Ask the actor to stop through its proxy, then wait for its
        # thread to finish before dropping the bookkeeping entries.
        Proxy(actor).stop()
        actor.thread.join()
        del self.actors[url]
        del self.threads[actor.thread]
This method removes one actor from the Host, stopping it and deleting all its references.
4,503
def lookup_url(self, url, klass, module=None):
    """Get a proxy to the actor at *url*, local or on a remote host.

    :param str url: address of the actor to look up.
    :param klass: class of the actor (a class object, or its name as a
        string when *module* is given).
    :param str module: module defining *klass* (optional).
    :raises HostDownError: if this host is not alive.
    :raises NotFoundError: if the url is local but not registered.
    :raises HostError: on import failures or remote lookup errors.
    """
    if not self.alive:
        raise HostDownError()
    aurl = urlparse(url)
    if self.is_local(aurl):
        if url not in self.actors.keys():
            raise NotFoundError(url)
        else:
            return Proxy(self.actors[url])
    else:
        try:
            # Dispatcher actor registered for the remote scheme (http/amqp).
            dispatcher = self.actors[aurl.scheme]
            if module is not None:
                try:
                    module_ = __import__(module, globals(), locals(), [klass], -1)
                    klass_ = getattr(module_, klass)
                except Exception, e:
                    raise HostError("At lookup_url: " +
                                    "Import failed for module " + module +
                                    ", class " + klass +
                                    ". Check this values for the lookup." +
                                    " ERROR: " + str(e))
            elif isinstance(klass, (types.TypeType, types.ClassType)):
                klass_ = klass
            else:
                raise HostError("The class specified to look up is" +
                                " not a class.")
            remote_actor = actor.ActorRef(url, klass_, dispatcher.channel)
            return Proxy(remote_actor)
        except HostError:
            raise
        except Exception, e:
            raise HostError("ERROR looking for the actor on another " +
                            "server. Hosts must " +
                            "be in http to work properly. " + str(e))
Gets a proxy reference to the actor indicated by the URL in the parameters . It can be a local reference or a remote direction to another host .
4,504
def dumps(self, param):
    """Serialize call parameters, replacing proxies with ProxyRefs.

    Proxy objects become picklable ProxyRef placeholders; lists, tuples
    and dicts are processed recursively (dicts are updated in place).
    Everything else passes through unchanged.
    """
    if isinstance(param, Proxy):
        module_name = param.actor.klass.__module__
        # The original also resolved sys.modules[module_name].__file__
        # into an unused local; that dead lookup is removed.
        return ProxyRef(param.actor.url, param.actor.klass.__name__, module_name)
    elif isinstance(param, list):
        return [self.dumps(elem) for elem in param]
    elif isinstance(param, dict):
        new_dict = param
        for key in new_dict.keys():
            new_dict[key] = self.dumps(new_dict[key])
        return new_dict
    elif isinstance(param, tuple):
        return tuple([self.dumps(elem) for elem in param])
    else:
        return param
Checks the parameters generating new proxy instances to avoid query concurrences from shared proxies and creating proxies for actors from another host .
4,505
def loads(self, param):
    """Rebuild proxies from the return parameters of a remote call.

    ProxyRef placeholders are resolved back into live Proxy objects,
    recursively through lists, tuples and dicts (dicts are updated in
    place).  Anything else is returned unchanged.

    :raises HostError: if a referenced actor cannot be looked up.
    """
    if isinstance(param, ProxyRef):
        try:
            return self.lookup_url(param.url, param.klass, param.module)
        except HostError:
            print "Can't lookup for the actor received with the call. \
It does not exist or the url is unreachable.", param
            raise HostError(param)
    elif isinstance(param, list):
        return [self.loads(elem) for elem in param]
    elif isinstance(param, tuple):
        return tuple([self.loads(elem) for elem in param])
    elif isinstance(param, dict):
        new_dict = param
        for key in new_dict.keys():
            new_dict[key] = self.loads(new_dict[key])
        return new_dict
    else:
        return param
Checks the return parameters generating new proxy instances to avoid query concurrences from shared proxies and creating proxies for actors from another host .
4,506
def new_parallel(self, function, *params):
    """Run *function* asynchronously in the host's parallel pool.

    The pool (500 workers) is created lazily on first use: a
    ThreadPool when ``core_type`` is ``'thread'``, a gevent Pool
    otherwise.
    """
    if self.ppool is None:
        if core_type == 'thread':
            from multiprocessing.pool import ThreadPool
            self.ppool = ThreadPool(500)
        else:
            from gevent.pool import Pool
            self.ppool = Pool(500)
    # NOTE(review): *params unpacks into apply_async's own parameters
    # (args, kwds, ...), so callers appear to pass a single args tuple —
    # confirm against call sites.
    self.ppool.apply_async(function, *params)
Register a new thread executing a parallel method .
4,507
def write_to_local(self, filepath_from, filepath_to, mtime_dt=None):
    """Copy a remote file to the local filesystem, chunk by chunk.

    :param filepath_from: remote source path.
    :param filepath_to: local destination path.
    :param mtime_dt: datetime stamped on the local file (defaults to now).
    """
    self.__log.debug("Writing R[%s] -> L[%s]." % (filepath_from, filepath_to))
    with SftpFile(self, filepath_from, 'r') as sf_from:
        with open(filepath_to, 'wb') as file_to:
            while 1:
                part = sf_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
                file_to.write(part)
                # A short read signals end of file.
                if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
                    break
    if mtime_dt is None:
        mtime_dt = datetime.now()
    # Apply the modification time to both atime and mtime.
    mtime_epoch = mktime(mtime_dt.timetuple())
    utime(filepath_to, (mtime_epoch, mtime_epoch))
Open a remote file and write it locally .
4,508
def write_to_remote(self, filepath_from, filepath_to, mtime_dt=None):
    """Copy a local file to the remote filesystem, chunk by chunk.

    :param filepath_from: local source path.
    :param filepath_to: remote destination path.
    :param mtime_dt: datetime stamped on the remote file (defaults to now).
    """
    self.__log.debug("Writing L[%s] -> R[%s]." % (filepath_from, filepath_to))
    with open(filepath_from, 'rb') as file_from:
        with SftpFile(self, filepath_to, 'w') as sf_to:
            while 1:
                part = file_from.read(MAX_MIRROR_WRITE_CHUNK_SIZE)
                sf_to.write(part)
                # A short read signals end of file.
                if len(part) < MAX_MIRROR_WRITE_CHUNK_SIZE:
                    break
    if mtime_dt is None:
        mtime_dt = datetime.now()
    self.utimes_dt(filepath_to, mtime_dt, mtime_dt)
Open a local file and write it remotely .
4,509
def open(self):
    """Open the remote file and return a file-like wrapper.

    This is the only way to open a file resource.  In append mode the
    file pointer is positioned at the end of the file first.
    """
    self.__sf = _sftp_open(self.__sftp_session_int, self.__filepath,
                           self.access_type_int, self.__create_mode)
    if self.access_type_is_append is True:
        self.seek(self.filesize)
    return SftpFileObject(self)
This is the only way to open a file resource .
4,510
def read(self, size=None):
    """Read *size* bytes, or the whole file when *size* is omitted.

    Returns empty at EOF.  NOTE(review): the whole-file path returns a
    ``bytearray``, while the sized path returns whatever the underlying
    read yields — confirm callers tolerate both.
    """
    if size is not None:
        return self.__sf.read(size)
    block_size = self.__class__.__block_size
    b = bytearray()
    received_bytes = 0
    while 1:
        partial = self.__sf.read(block_size)
        b.extend(partial)
        received_bytes += len(partial)
        # A short read marks end of file.
        if len(partial) < block_size:
            self.__log.debug("End of file.")
            break
    self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
    return b
Read a length of bytes . Return empty on EOF . If size is omitted return whole file .
4,511
def seek(self, offset, whence=SEEK_SET):
    """Reposition the file pointer.

    :param offset: byte offset, interpreted relative to *whence*.
    :param whence: SEEK_SET (absolute), SEEK_CUR (from current
        position) or SEEK_END (relative to the end of file).
    """
    if whence == SEEK_SET:
        self.__sf.seek(offset)
    elif whence == SEEK_CUR:
        self.__sf.seek(self.tell() + offset)
    elif whence == SEEK_END:
        # NOTE(review): standard SEEK_END semantics are size + offset
        # (offset usually negative); this subtracts instead — confirm
        # callers pass a positive distance-from-end.
        self.__sf.seek(self.__sf.filesize - offset)
Reposition the file pointer .
4,512
def readline(self, size=None):
    """Read and return a single line of text.

    *size* is accepted for file-object compatibility but ignored.  In
    universal-newline mode the newline that terminated the line is
    recorded in ``self.__newlines``.
    """
    (line, nl) = self.__buffer.read_until_nl(self.__retrieve_data)
    if self.__sf.access_type_has_universal_nl and nl is not None:
        self.__newlines[nl] = True
    return line
Read a single line of text, stopping at a newline or at EOF.
4,513
def __retrieve_data(self):
    """Read and return the next block of data, or b'' at/after EOF.

    Used as the refill callback for the line buffer; latches
    ``self.__eof`` once an empty block is seen.
    """
    if self.__eof is True:
        return b''
    # Use the instance logger for consistency with the rest of the
    # method (the original called the root ``logging`` module here).
    self.__log.debug("Reading another block.")
    block = self.read(self.__block_size)
    if block == b'':
        self.__log.debug("We've encountered the EOF.")
        self.__eof = True
    return block
Read more data from the file .
4,514
def set_mask_from_shapefile(self, shapefile_path, cell_size):
    """Generate the project watershed mask from a shapefile.

    :param shapefile_path: path to the watershed boundary shapefile.
    :param cell_size: output raster cell size.
    """
    shapefile_path = os.path.abspath(shapefile_path)
    # Work inside the project directory so relative outputs land there.
    with tmp_chdir(self.project_directory):
        mask_name = '{0}.msk'.format(self.project_manager.name)
        msk_file = WatershedMaskFile(project_file=self.project_manager,
                                     session=self.db_session)
        msk_file.generateFromWatershedShapefile(shapefile_path,
                                                cell_size=cell_size,
                                                out_raster_path=mask_name,
                                                load_raster_to_db=self.load_rasters_to_db)
Adds a mask from a shapefile
4,515
def set_elevation(self, elevation_grid_path, mask_shapefile):
    """Add an elevation grid to the project from a raster file.

    :param elevation_grid_path: path to the elevation raster.
    :param mask_shapefile: shapefile used to clip/mask the raster.
    """
    ele_file = ElevationGridFile(project_file=self.project_manager,
                                 session=self.db_session)
    ele_file.generateFromRaster(elevation_grid_path,
                                mask_shapefile,
                                load_raster_to_db=self.load_rasters_to_db)
Adds elevation file to project
4,516
def set_outlet(self, latitude, longitude, outslope):
    """Set the project outlet point and its outslope."""
    self.project_manager.setOutlet(latitude=latitude, longitude=longitude,
                                   outslope=outslope)
Adds outlet point to project
4,517
def set_event(self,
              simulation_start=None,
              simulation_duration=None,
              simulation_end=None,
              rain_intensity=2,
              rain_duration=timedelta(seconds=30 * 60),
              event_type='EVENT',
              ):
    """Initialize the event object for the GSSHA model.

    :param simulation_start: datetime the simulation begins.
    :param simulation_duration: timedelta length of the simulation.
    :param simulation_end: datetime the simulation ends (long-term only).
    :param rain_intensity: uniform rain intensity.
    :param rain_duration: timedelta of the uniform rain event.
    :param event_type: 'EVENT' (default) or 'LONG_TERM'.
    """
    if event_type == 'LONG_TERM':
        self.event = LongTermMode(self.project_manager,
                                  self.db_session,
                                  self.project_directory,
                                  simulation_start=simulation_start,
                                  simulation_end=simulation_end,
                                  simulation_duration=simulation_duration,
                                  )
    else:
        self.event = EventMode(self.project_manager,
                               self.db_session,
                               self.project_directory,
                               simulation_start=simulation_start,
                               simulation_duration=simulation_duration,
                               )
    # Every event starts with a uniform precipitation event by default.
    self.event.add_uniform_precip_event(intensity=rain_intensity,
                                        duration=rain_duration)
Initializes event for GSSHA model
4,518
def write(self):
    """Write the project input files to the project directory."""
    self.project_manager.writeInput(session=self.db_session,
                                    directory=self.project_directory,
                                    name=self.project_manager.name)
Write project to directory
4,519
def mirror ( self , handler , path_from , path_to , log_files = False ) : q = deque ( [ '' ] ) while q : path = q . popleft ( ) full_from = ( '%s/%s' % ( path_from , path ) ) if path else path_from full_to = ( '%s/%s' % ( path_to , path ) ) if path else path_to subdirs = handler ( full_from , full_to , log_files ) for subdir in subdirs : q . append ( ( '%s/%s' % ( path , subdir ) ) if path else subdir )
Recursively mirror the contents of path_from into path_to . handler should be self . mirror_to_local_no_recursion or self . mirror_to_remote_no_recursion to represent which way the files are moving .
4,520
def linkToChannelInputFile(self, session, channelInputFile, force=False):
    """Create database relationships between this link node dataset and
    a channel input file.

    :param session: SQLAlchemy session used to persist the links.
    :param channelInputFile: channel input file to link against.
    :param force: re-link even if a channel input file is already set.
    """
    # Only link once unless explicitly forced.
    if self.channelInputFile is not None and not force:
        return
    self.channelInputFile = channelInputFile
    orderedLinks = channelInputFile.getOrderedLinks(session)
    timeSteps = self.timeSteps
    for timeStep in timeSteps:
        linkDatasets = timeStep.linkDatasets
        for l, linkDataset in enumerate(linkDatasets):
            # Link datasets are ordered the same way as the stream links.
            streamLink = orderedLinks[l]
            streamNodes = streamLink.nodes
            linkDataset.link = streamLink
            nodeDatasets = linkDataset.nodeDatasets
            if len(nodeDatasets) > 0 and len(streamNodes) > 0:
                # Pair node datasets with stream nodes positionally.
                for n, nodeDataset in enumerate(nodeDatasets):
                    nodeDataset.node = streamNodes[n]
    session.add(self)
    session.commit()
Create database relationships between the link node dataset and the channel input file .
4,521
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """Read a GSSHA link node dataset file into this object.

    Parses the header cards (NUM_LINKS, TIME_STEP, NUM_TS, START_TIME)
    and each TS chunk into LinkNodeTimeStep / LinkDataset / NodeDataset
    objects attached to self.
    """
    self.fileExtension = extension
    KEYWORDS = ('NUM_LINKS', 'TIME_STEP', 'NUM_TS', 'START_TIME', 'TS')
    with open(path, 'r') as f:
        # First line of the file is the dataset name.
        self.name = f.readline().strip()
        chunks = pt.chunk(KEYWORDS, f)
        for card, chunkList in iteritems(chunks):
            for chunk in chunkList:
                schunk = chunk[0].strip().split()
                if card == 'NUM_LINKS':
                    self.numLinks = schunk[1]
                elif card == 'TIME_STEP':
                    self.timeStepInterval = schunk[1]
                elif card == 'NUM_TS':
                    self.numTimeSteps = schunk[1]
                elif card == 'START_TIME':
                    # Six whitespace-separated date/time tokens.
                    self.startTime = '%s %s %s %s %s %s' % (schunk[1],
                                                            schunk[2],
                                                            schunk[3],
                                                            schunk[4],
                                                            schunk[5],
                                                            schunk[6])
                elif card == 'TS':
                    for line in chunk:
                        sline = line.strip().split()
                        token = sline[0]
                        if token == 'TS':
                            # Start of a new time step record.
                            timeStep = LinkNodeTimeStep(timeStep=sline[1])
                            timeStep.linkNodeDataset = self
                        else:
                            # Link dataset line: first value is the node count.
                            spLinkLine = line.strip().split()
                            linkDataset = LinkDataset()
                            linkDataset.numNodeDatasets = int(spLinkLine[0])
                            linkDataset.timeStep = timeStep
                            linkDataset.linkNodeDatasetFile = self
                            NODE_VALUE_INCREMENT = 2
                            statusIndex = 1
                            valueIndex = statusIndex + 1
                            if linkDataset.numNodeDatasets > 0:
                                # Alternating status/value pairs per node.
                                for i in range(0, linkDataset.numNodeDatasets):
                                    nodeDataset = NodeDataset()
                                    nodeDataset.status = int(spLinkLine[statusIndex])
                                    nodeDataset.value = float(spLinkLine[valueIndex])
                                    nodeDataset.linkDataset = linkDataset
                                    nodeDataset.linkNodeDatasetFile = self
                                    statusIndex += NODE_VALUE_INCREMENT
                                    valueIndex += NODE_VALUE_INCREMENT
                            else:
                                # Single value and no status column.
                                nodeDataset = NodeDataset()
                                nodeDataset.value = float(spLinkLine[1])
                                nodeDataset.linkDataset = linkDataset
                                nodeDataset.linkNodeDatasetFile = self
Link Node Dataset File Read from File Method
4,522
def _write(self, session, openFile, replaceParamFile):
    """Write this link node dataset to *openFile* in GSSHA format."""
    timeSteps = self.timeSteps
    # Header cards.
    openFile.write('%s\n' % self.name)
    openFile.write('NUM_LINKS %s\n' % self.numLinks)
    openFile.write('TIME_STEP %s\n' % self.timeStepInterval)
    openFile.write('NUM_TS %s\n' % self.numTimeSteps)
    openFile.write('START_TIME %s\n' % self.startTime)
    for timeStep in timeSteps:
        openFile.write('TS %s\n' % timeStep.timeStep)
        linkDatasets = timeStep.linkDatasets
        for linkDataset in linkDatasets:
            openFile.write('{0} '.format(linkDataset.numNodeDatasets))
            nodeDatasets = linkDataset.nodeDatasets
            if linkDataset.numNodeDatasets > 0:
                # Status/value pairs for each node.
                for nodeDataset in nodeDatasets:
                    openFile.write('{0} {1:.5f} '.format(nodeDataset.status,
                                                         nodeDataset.value))
            else:
                for nodeDataset in nodeDatasets:
                    # Precision differs for the negative-count sentinel.
                    if linkDataset.numNodeDatasets < 0:
                        openFile.write('{0:.5f}'.format(nodeDataset.value))
                    else:
                        openFile.write('{0:.3f}'.format(nodeDataset.value))
            openFile.write('\n')
        openFile.write('\n')
Link Node Dataset File Write to File Method
4,523
def login(container):
    """Open an interactive login bash shell inside *container*.

    :param container: docker container name or id.
    :raises RuntimeError: if ``docker exec`` exits with a failure code.
    """
    columns, lines = shutil.get_terminal_size()
    try:
        subprocess.check_call([
            "docker", "exec",
            # BUG FIX: each variable must be its own --env entry; the
            # original "COLUMNS={c},LINES={l}" form set COLUMNS to the
            # literal string "c,LINES=l".
            "--env", f"COLUMNS={columns}",
            "--env", f"LINES={lines}",
            "--interactive", "--tty",
            container, "bash", "--login",
        ])
    except subprocess.CalledProcessError:
        raise RuntimeError() from None
Log into container .
4,524
def _update_simulation_start(self, simulation_start):
    """Set the GSSHA simulation start time and refresh dependent state.

    Recomputes the end time when a duration is known, then rewrites the
    start-time project cards.
    """
    self.simulation_start = simulation_start
    have_both = (self.simulation_duration is not None
                 and self.simulation_start is not None)
    if have_both:
        self.simulation_end = self.simulation_start + self.simulation_duration
    self._update_simulation_start_cards()
Update GSSHA simulation start time
4,525
def _update_simulation_start_cards(self):
    """Write the START_DATE / START_TIME project cards from
    ``self.simulation_start`` (no-op when no start time is set)."""
    if self.simulation_start is not None:
        self._update_card("START_DATE", self.simulation_start.strftime("%Y %m %d"))
        self._update_card("START_TIME", self.simulation_start.strftime("%H %M"))
Update GSSHA cards for simulation start
4,526
def _update_simulation_end_from_lsm(self):
    """Cap or set the simulation end time from the last LSM timestep.

    The LSM's final timestamp is converted to the project timezone and
    made naive; it is used when no end is set, or when it is earlier
    than the configured end.  The END_TIME card is refreshed either way.
    """
    te = self.l2g.xd.lsm.datetime[-1]
    simulation_end = te.replace(tzinfo=utc).astimezone(tz=self.tz).replace(tzinfo=None)
    if self.simulation_end is None:
        self.simulation_end = simulation_end
    elif self.simulation_end > simulation_end:
        # The LSM data ends before the requested end; shorten the run.
        self.simulation_end = simulation_end
    self._update_card("END_TIME", self.simulation_end.strftime("%Y %m %d %H %M"))
Update simulation end time from LSM
4,527
def add_precip_file(self, precip_file_path, interpolation_type=None):
    """Attach a precipitation file to the project and set interpolation.

    With no *interpolation_type*, THIESSEN is enabled unless an
    interpolation card is already present.  Otherwise the requested
    card is set and the competing one removed.

    :raises IndexError: if *interpolation_type* is not a valid type.
    """
    # The third argument marks the card value as a file path.
    self._update_card('PRECIP_FILE', precip_file_path, True)
    if interpolation_type is None:
        if not self.project_manager.getCard('RAIN_INV_DISTANCE') \
                and not self.project_manager.getCard('RAIN_THIESSEN'):
            self._update_card('RAIN_THIESSEN', '')
    else:
        if interpolation_type.upper() not in self.PRECIP_INTERP_TYPES:
            raise IndexError("Invalid interpolation_type {0}".format(interpolation_type))
        interpolation_type = interpolation_type.upper()
        if interpolation_type == "INV_DISTANCE":
            self._update_card('RAIN_INV_DISTANCE', '')
            self.project_manager.deleteCard('RAIN_THIESSEN', self.db_session)
        else:
            self._update_card('RAIN_THIESSEN', '')
            self.project_manager.deleteCard('RAIN_INV_DISTANCE', self.db_session)
Adds a precip file to project with interpolation_type
4,528
def prepare_gag_lsm(self, lsm_precip_data_var, lsm_precip_type, interpolation_type=None):
    """Prepare a GSSHA precipitation gage (.gag) file from LSM data.

    :param lsm_precip_data_var: LSM variable(s) holding precipitation.
    :param lsm_precip_type: precipitation type understood by the converter.
    :param interpolation_type: optional interpolation card to set.
    :raises ValueError: if the LSM converter is not loaded.
    """
    if self.l2g is None:
        raise ValueError("LSM converter not loaded ...")
    # Uniform-precip cards conflict with gage input; drop them first.
    for unif_precip_card in self.UNIFORM_PRECIP_CARDS:
        self.project_manager.deleteCard(unif_precip_card, self.db_session)
    with tmp_chdir(self.project_manager.project_directory):
        out_gage_file = '{0}.gag'.format(self.project_manager.name)
        self.l2g.lsm_precip_to_gssha_precip_gage(out_gage_file,
                                                 lsm_data_var=lsm_precip_data_var,
                                                 precip_type=lsm_precip_type)
        # Clamp the run window to the LSM data and register the file.
        self._update_simulation_end_from_lsm()
        self.set_simulation_duration(self.simulation_end - self.simulation_start)
        self.add_precip_file(out_gage_file, interpolation_type)
        self.l2g.xd.close()
Prepares Gage output for GSSHA simulation
4,529
def prepare_rapid_streamflow(self, path_to_rapid_qout, connection_list_file):
    """Prepare a GSSHA channel-point input (.ihg) file from RAPID output.

    Extracts the streamflow time series overlapping the simulation
    window; when data exists the CHAN_POINT_INPUT and timing cards are
    updated, otherwise the .ihg file and card are removed.

    :param path_to_rapid_qout: path to the RAPID Qout netCDF file.
    :param connection_list_file: RAPID-to-GSSHA connection list file.
    """
    ihg_filename = '{0}.ihg'.format(self.project_manager.name)
    with tmp_chdir(self.project_manager.project_directory):
        time_index_range = []
        with RAPIDDataset(path_to_rapid_qout, out_tzinfo=self.tz) as qout_nc:
            time_index_range = qout_nc.get_time_index_range(date_search_start=self.simulation_start,
                                                            date_search_end=self.simulation_end)
            if len(time_index_range) > 0:
                time_array = qout_nc.get_time_array(return_datetime=True,
                                                    time_index_array=time_index_range)
                if self.simulation_start is not None:
                    # GSSHA needs the first value strictly after start.
                    if self.simulation_start == time_array[0]:
                        log.warning("First timestep of streamflow skipped "
                                    "in order for GSSHA to capture the streamflow.")
                        time_index_range = time_index_range[1:]
                        time_array = time_array[1:]
                if len(time_index_range) > 0:
                    start_datetime = time_array[0]
                    # Fill in missing window bounds from the data itself.
                    if self.simulation_start is None:
                        self._update_simulation_start(start_datetime)
                    if self.simulation_end is None:
                        self.simulation_end = time_array[-1]
                    qout_nc.write_flows_to_gssha_time_series_ihg(ihg_filename,
                                                                 connection_list_file,
                                                                 date_search_start=start_datetime,
                                                                 date_search_end=self.simulation_end,
                                                                 )
                else:
                    log.warning("No streamflow values found in time range ...")
        if len(time_index_range) > 0:
            # Refresh timing cards and register the channel input file.
            self._update_simulation_start_cards()
            self._update_card("END_TIME", self.simulation_end.strftime("%Y %m %d %H %M"))
            self._update_card("CHAN_POINT_INPUT", ihg_filename, True)
            self.set_simulation_duration(self.simulation_end - self.simulation_start)
            self._update_gmt()
        else:
            # Nothing usable was written; clean up the empty file/card.
            os.remove(ihg_filename)
            self.project_manager.deleteCard('CHAN_POINT_INPUT', self.db_session)
Prepares RAPID streamflow for GSSHA simulation
4,530
def add_uniform_precip_event(self, intensity, duration):
    """Add a uniform precipitation event to the project.

    :param intensity: rain intensity (units per GSSHA convention —
        confirm against the project documentation).
    :param duration: timedelta; stored in minutes on RAIN_DURATION.
    """
    self.project_manager.setCard('PRECIP_UNIF', '')
    self.project_manager.setCard('RAIN_INTENSITY', str(intensity))
    self.project_manager.setCard('RAIN_DURATION', str(duration.total_seconds() / 60.0))
Add a uniform precip event
4,531
def _update_gmt(self):
    """Set the GMT card to the UTC offset, in hours, of the simulation
    start time in the project timezone (no-op without a start time)."""
    if self.simulation_start is not None:
        offset_string = str(self.simulation_start.replace(tzinfo=self.tz).utcoffset().total_seconds() / 3600.)
        self._update_card('GMT', offset_string)
Based on timezone and start date the GMT card is updated
4,532
def prepare_hmet_lsm(self, lsm_data_var_map_array, hmet_ascii_output_folder=None, netcdf_file_path=None):
    """Prepare HMET data for the GSSHA simulation from LSM data.

    Writes either a subset NetCDF (when *netcdf_file_path* is given) or
    ASCII HMET files, and updates the matching project cards.

    :raises ValueError: if the LSM converter is not loaded.
    """
    if self.l2g is None:
        raise ValueError("LSM converter not loaded ...")
    with tmp_chdir(self.project_manager.project_directory):
        self._update_simulation_end_from_lsm()
        if netcdf_file_path is not None:
            self.l2g.lsm_data_to_subset_netcdf(netcdf_file_path, lsm_data_var_map_array)
            self._update_card("HMET_NETCDF", netcdf_file_path, True)
            self.project_manager.deleteCard('HMET_ASCII', self.db_session)
        else:
            # NOTE(review): this branch assumes hmet_ascii_output_folder
            # is not None; a None value raises TypeError here — confirm.
            if "{0}" in hmet_ascii_output_folder and "{1}" in hmet_ascii_output_folder:
                # Fill start/end timestamps into the folder template.
                hmet_ascii_output_folder = hmet_ascii_output_folder.format(self.simulation_start.strftime("%Y%m%d%H%M"),
                                                                           self.simulation_end.strftime("%Y%m%d%H%M"))
            self.l2g.lsm_data_to_arc_ascii(lsm_data_var_map_array,
                                           main_output_folder=os.path.join(self.gssha_directory,
                                                                           hmet_ascii_output_folder))
            self._update_card("HMET_ASCII", os.path.join(hmet_ascii_output_folder,
                                                         'hmet_file_list.txt'), True)
            self.project_manager.deleteCard('HMET_NETCDF', self.db_session)
        # The GMT card must track the (possibly updated) start time.
        self._update_gmt()
Prepares HMET data for GSSHA simulation from land surface model data .
4,533
def get_remaining_width(sample_string, max_terminal_width=None):
    """Number of characters left on the line after printing *sample_string*.

    :param sample_string: text that would be printed.
    :param max_terminal_width: optional cap on the usable terminal width.
    """
    available_width = terminal_width()
    if max_terminal_width is not None:
        available_width = min(available_width, max_terminal_width)
    return available_width - len(sample_string)
Returns the number of characters available if sample string were to be printed in the terminal .
4,534
def _define_csbi():
    """Define ctypes structs and populate _WindowsCSBI.CSBI (idempotent)."""
    if _WindowsCSBI.CSBI is not None:
        # Already defined by a previous call.
        return

    class COORD(ctypes.Structure):
        # Win32 COORD: a console character-cell coordinate.
        _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]

    class SmallRECT(ctypes.Structure):
        # Win32 SMALL_RECT: console window bounds.
        _fields_ = [('Left', ctypes.c_short), ('Top', ctypes.c_short),
                    ('Right', ctypes.c_short), ('Bottom', ctypes.c_short)]

    class ConsoleScreenBufferInfo(ctypes.Structure):
        # Win32 CONSOLE_SCREEN_BUFFER_INFO.
        _fields_ = [('dwSize', COORD),
                    ('dwCursorPosition', COORD),
                    ('wAttributes', ctypes.wintypes.WORD),
                    ('srWindow', SmallRECT),
                    ('dwMaximumWindowSize', COORD)]

    _WindowsCSBI.CSBI = ConsoleScreenBufferInfo
Defines structs and populates _WindowsCSBI . CSBI .
4,535
def initialize():
    """Initialize WINDLL handles and prototypes for console-buffer
    queries, and populate the CSBI class variable (idempotent)."""
    _WindowsCSBI._define_csbi()
    # Cache std handles (-12 = STD_ERROR_HANDLE, -11 = STD_OUTPUT_HANDLE).
    _WindowsCSBI.HANDLE_STDERR = _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-12)
    _WindowsCSBI.HANDLE_STDOUT = _WindowsCSBI.HANDLE_STDOUT or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-11)
    if _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes:
        # Function prototypes already configured.
        return
    _WindowsCSBI.WINDLL.kernel32.GetStdHandle.argtypes = [ctypes.wintypes.DWORD]
    _WindowsCSBI.WINDLL.kernel32.GetStdHandle.restype = ctypes.wintypes.HANDLE
    _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.restype = ctypes.wintypes.BOOL
    _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes = [ctypes.wintypes.HANDLE,
                                                                        ctypes.POINTER(_WindowsCSBI.CSBI)]
Initializes the WINDLL resource and populates the CSBI class variable.
4,536
def stencil(**kwargs):
    """Apply genotype calls to a multi-way alignment incidence matrix.

    Expected kwargs: ``alnfile`` (emase h5 alignment matrix),
    ``gtypefile`` (TSV of gene -> genotype call), optional ``grpfile``
    (gene-to-transcripts grouping) and ``outfile``.
    """
    alnfile = kwargs.get('alnfile')
    gtypefile = kwargs.get('gtypefile')
    grpfile = kwargs.get('grpfile')
    if grpfile is None:
        # Fall back to the bundled reference grouping file if present.
        grpfile2chk = os.path.join(DATA_DIR, 'ref.gene2transcripts.tsv')
        if os.path.exists(grpfile2chk):
            grpfile = grpfile2chk
        else:
            print >> sys.stderr, '[gbrs::stencil] A group file is *not* given. Genotype will be stenciled as is.'
    alnmat = emase.AlignmentPropertyMatrix(h5file=alnfile, grpfile=grpfile)
    # Haplotype-name -> index and gene-name -> index lookups.
    hid = dict(zip(alnmat.hname, np.arange(alnmat.num_haplotypes)))
    gid = dict(zip(alnmat.gname, np.arange(len(alnmat.gname))))
    gtmask = np.zeros((alnmat.num_haplotypes, alnmat.num_loci))
    gtcall_g = dict.fromkeys(alnmat.gname)
    with open(gtypefile) as fh:
        if grpfile is not None:
            # Propagate gene-level calls to each member transcript.
            gtcall_t = dict.fromkeys(alnmat.lname)
            for curline in dropwhile(is_comment, fh):
                item = curline.rstrip().split("\t")
                g, gt = item[:2]
                gtcall_g[g] = gt
                hid2set = np.array([hid[c] for c in gt])
                tid2set = np.array(alnmat.groups[gid[g]])
                # NOTE(review): indexing with np.meshgrid output selects
                # paired coordinates, not the full (hap x transcript)
                # grid that np.ix_ would — confirm intended.
                gtmask[np.meshgrid(hid2set, tid2set)] = 1.0
                for t in tid2set:
                    gtcall_t[alnmat.lname[t]] = gt
        else:
            for curline in dropwhile(is_comment, fh):
                item = curline.rstrip().split("\t")
                g, gt = item[:2]
                gtcall_g[g] = gt
                hid2set = np.array([hid[c] for c in gt])
                gtmask[np.meshgrid(hid2set, gid[g])] = 1.0
    # Zero out alignments inconsistent with the genotype mask.
    alnmat.multiply(gtmask, axis=2)
    for h in xrange(alnmat.num_haplotypes):
        alnmat.data[h].eliminate_zeros()
    outfile = kwargs.get('outfile')
    if outfile is None:
        outfile = 'gbrs.stenciled.' + os.path.basename(alnfile)
    alnmat.save(h5file=outfile)
Applying genotype calls to multi - way alignment incidence matrix
4,537
def register_items ( self , items ) : for item in items : item . set_parent ( self ) self . items . extend ( items )
Bulk register_item .
4,538
def endpoints ( self ) : children = [ item . endpoints ( ) for item in self . items ] return self . name , self . endpoint , children
Get all the endpoints under this node in a tree like structure .
4,539
def absolute_name(self):
    """Colon-joined slug path of self, omitting the root node."""
    own_slug = utils.slugify(self.name)
    if self.is_root() or self.parent.is_root():
        return own_slug
    return ':'.join([self.parent.absolute_name, own_slug])
Get the absolute name of self .
4,540
def absolute_url(self):
    """Full url of self, prefixed with all parent urls up to the root."""
    parts = (self.url,) if self.is_root() else (self.parent.absolute_url, self.url)
    return utils.concat_urls(*parts)
Get the absolute url of self .
4,541
def split_tracks(lat, lon, *args):
    """Split a lon/lat track into segments wherever it crosses the
    dateline (assumes eastward motion).

    :param lat: sequence of latitudes.
    :param lon: sequence of longitudes, same length as *lat*.
    :param args: extra per-point channels split alongside lat/lon.
    :returns: list of ``[lat, lon, *channels]`` numpy-array segments,
        with the crossing point duplicated (shifted by 360) so each
        segment plots continuously.
    """
    tracks = []
    lt, ln = [lat[0]], [lon[0]]
    zz = [[z[0]] for z in args]
    for i in range(1, len(lon)):
        lt.append(lat[i])
        for z, a in zip(zz, args):
            z.append(a[i])
        # Direct jump vs. wrap-around distances between samples.
        d1 = abs(lon[i] - lon[i - 1])
        d2 = abs((lon[i - 1] + 360) - lon[i])
        d3 = abs(lon[i - 1] - (lon[i] + 360))
        if d2 < d1:
            ln.append(lon[i] - 360)
            tracks.append([np.array(lt), np.array(ln)] + [np.array(z) for z in zz])
            lt = [lat[i - 1], lat[i]]
            ln = [lon[i - 1] + 360, lon[i]]
            # BUG FIX: restart each extra channel with BOTH boundary
            # samples to match lt/ln (the original kept only z[i-1],
            # desynchronizing the channels by one sample).
            zz = [[z[i - 1], z[i]] for z in args]
        elif d3 < d1:
            ln.append(lon[i] + 360)
            tracks.append([np.array(lt), np.array(ln)] + [np.array(z) for z in zz])
            lt = [lat[i - 1], lat[i]]
            ln = [lon[i - 1] - 360, lon[i]]
            zz = [[z[i - 1], z[i]] for z in args]
        else:
            ln.append(lon[i])
    if len(lt):
        tracks.append([np.array(lt), np.array(ln)] + [np.array(z) for z in zz])
    return tracks
assumes eastward motion
4,542
def str_rate(self):
    """Return the formatted transfer rate; the overall rate once done.

    Shows '--.-KiB/s' when the rate is unknown or the transfer stalled.
    """
    if not self._eta.started or self._eta.stalled or not self.rate:
        return '--.-KiB/s'
    unit_rate, unit = UnitByte(self._eta.rate_overall if self.done else self.rate).auto
    # Keep roughly three significant digits.
    if unit_rate >= 100:
        formatter = '%d'
    elif unit_rate >= 10:
        formatter = '%.1f'
    else:
        formatter = '%.2f'
    return '{0}{1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit)
Returns the rate with formatting . If done returns the overall rate instead .
4,543
def str_rate(self):
    """Return the current transfer rate with formatting.

    Shows '--- KiB/s' when the rate is unknown or the transfer stalled.
    """
    if not self._eta.started or self._eta.stalled or not self.rate:
        return '--- KiB/s'
    unit_rate, unit = UnitByte(self.rate).auto_no_thousands
    # One decimal below 10, integer otherwise.
    if unit_rate >= 10:
        formatter = '%d'
    else:
        formatter = '%0.1f'
    return '{0} {1}/s'.format(locale.format(formatter, unit_rate, grouping=False), unit)
Returns the rate with formatting .
4,544
def init_db(sqlalchemy_url):
    """Create all gsshapy tables in the target database.

    :param sqlalchemy_url: database connection URL.
    :returns: elapsed seconds spent creating the tables (float).
    """
    engine = create_engine(sqlalchemy_url)
    start = time.time()
    metadata.create_all(engine)
    return time.time() - start
Initialize database with gsshapy tables
4,545
def get_sessionmaker(sqlalchemy_url, engine=None):
    """Return a sessionmaker bound to *engine*, creating the engine
    from *sqlalchemy_url* when none is supplied."""
    if engine is None:
        engine = create_engine(sqlalchemy_url)
    return sessionmaker(bind=engine)
Create session with database to work in
4,546
def get_project_session(project_name, project_directory, map_type=None):
    """Load a ProjectFile manager and an in-memory sqlite sessionmaker
    for a GSSHA project.

    :returns: (project_manager, sessionmaker) tuple.
    """
    sqlalchemy_url, sql_engine = init_sqlite_memory()
    gdb_sessionmaker = get_sessionmaker(sqlalchemy_url, sql_engine)
    project_manager = ProjectFile(name=project_name,
                                  project_directory=project_directory,
                                  map_type=map_type)
    return project_manager, gdb_sessionmaker
Load project manager and in memory sqlite db sessionmaker for GSSHA project
4,547
def get_settings(config_uri, section=None, defaults=None):
    """Load the settings from the named section of *config_uri*.

    :param config_uri: configuration location understood by the loader.
    :param section: section name (loader-defined default when None).
    :param defaults: default values passed through to the loader.
    """
    loader = get_loader(config_uri)
    return loader.get_settings(section, defaults)
Load the settings from a named section .
4,548
def find_loaders(scheme, protocols=None):
    """Find all loader entry points matching *scheme* and *protocols*.

    A scheme of the form ``'distribution+scheme'`` is resolved against
    that specific distribution first; otherwise every
    ``plaster.loader_factory`` entry point is scanned.
    """
    matching_groups = ['plaster.loader_factory']
    if protocols:
        matching_groups += ['plaster.{0}_loader_factory'.format(proto)
                            for proto in protocols]
    scheme = scheme.lower()
    parts = scheme.split('+', 1)
    if len(parts) == 2:
        try:
            distro = pkg_resources.get_distribution(parts[0])
        except pkg_resources.DistributionNotFound:
            pass
        else:
            ep = _find_ep_in_dist(distro, parts[1], matching_groups)
            if ep:
                return [EntryPointLoaderInfo(ep, protocols)]
    possible_entry_points = [
        ep
        for ep in pkg_resources.iter_entry_points('plaster.loader_factory')
        # NOTE(review): scheme cannot be None here (lower() above would
        # have raised), so the None check looks vestigial.
        if scheme is None or scheme == ep.name.lower()]
    distros = {ep.dist for ep in possible_entry_points}
    matched_entry_points = list(filter(None, [
        _find_ep_in_dist(distro, scheme, matching_groups)
        for distro in distros]))
    return [EntryPointLoaderInfo(ep, protocols=protocols)
            for ep in matched_entry_points]
Find all loaders that match the requested scheme and protocols .
4,549
def combine_dicts(*dicts, copy=False, base=None):
    """Combine multiple dicts into one (later dicts win on key clashes).

    :param dicts: dicts to merge, in order.
    :param copy: when True, deep-copy every value into a fresh dict.
    :param base: optional dict to merge into (and return) in place.
    """
    if len(dicts) == 1 and base is None:
        merged = dicts[0].copy()
    else:
        merged = base if base is not None else {}
        for mapping in dicts:
            if mapping:
                merged.update(mapping)
    if not copy:
        return merged
    return {key: _copy.deepcopy(value) for key, value in merged.items()}
Combines multiple dicts in one .
4,550
def kk_dict(*kk, **adict):
    """Merge keys and dicts into one dict, mapping bare keys to themselves.

    :raises ValueError: on any repeated keyword argument.
    """
    for item in kk:
        if not isinstance(item, dict):
            if item in adict:
                raise ValueError('keyword argument repeated')
            adict[item] = item
            continue
        if set(item) & set(adict):
            raise ValueError('keyword argument repeated')
        adict.update(item)
    return adict
Merges and defines dictionaries with values identical to keys .
4,551
def bypass(*inputs, copy=False):
    """Return the arguments unchanged (a single argument is unwrapped).

    :param copy: when True, return a deep copy instead.
    """
    result = inputs[0] if len(inputs) == 1 else inputs
    return _copy.deepcopy(result) if copy else result
Returns the same arguments .
4,552
def map_dict(key_map, *dicts, copy=False, base=None):
    """Merge *dicts* and rename their keys according to *key_map*
    (keys missing from the map are kept as-is)."""
    lookup = key_map.get
    remapped = {lookup(k, k): v for k, v in combine_dicts(*dicts).items()}
    return combine_dicts(remapped, copy=copy, base=base)
Returns a dict with new key values .
4,553
def map_list(key_map, *inputs, copy=False, base=None):
    """Zip *inputs* onto *key_map* entries, building one flat dict.

    Each key_map entry may be a plain key (the matching input is stored
    directly), a dict (input remapped via map_dict), or a list (input
    sequence mapped recursively via map_list).
    """
    d = {} if base is None else base
    for m, v in zip(key_map, inputs):
        if isinstance(m, dict):
            map_dict(m, v, base=d)
        elif isinstance(m, list):
            map_list(m, *v, base=d)
        else:
            d[m] = v
    # combine_dicts handles the optional deep copy of the result.
    return combine_dicts(copy=copy, base=d)
Returns a new dict .
4,554
def selector(keys, dictionary, copy=False, output_type='dict', allow_miss=False):
    """Select the chosen *keys* from *dictionary*.

    :param output_type: 'dict' (default), 'list', or 'values'.
    :param allow_miss: when True, silently skip keys not in the dict;
        otherwise a missing key raises KeyError.
    :param copy: when True, deep-copy the selected values.
    """
    if allow_miss:
        def check(key):
            return key in dictionary
    else:
        def check(key):
            return True
    if output_type == 'list':
        picked = [dictionary[k] for k in keys if check(k)]
        return _copy.deepcopy(picked) if copy else picked
    if output_type == 'values':
        return bypass(*[dictionary[k] for k in keys if check(k)], copy=copy)
    return bypass({k: dictionary[k] for k in keys if check(k)}, copy=copy)
Selects the chosen dictionary keys from the given dictionary .
4,555
def replicate_value(value, n=2, copy=True):
    """Return *value* replicated *n* times (deep-copied by default)."""
    return bypass(*[value] * n, copy=copy)
Replicates n times the input value .
4,556
def stack_nested_keys(nested_dict, key=(), depth=-1):
    """Yield (key-tuple, value) pairs for the leaves of nested dicts.

    :param key: prefix accumulated so far (used by the recursion).
    :param depth: maximum levels to descend (-1 for unlimited).
    """
    can_descend = depth != 0 and hasattr(nested_dict, 'items')
    if not can_descend:
        yield key, nested_dict
        return
    for k, v in nested_dict.items():
        yield from stack_nested_keys(v, key=key + (k,), depth=depth - 1)
Stacks the keys of nested - dictionaries into tuples and yields a list of k - v pairs .
4,557
def are_in_nested_dicts(nested_dict, *keys):
    """Return True when *keys* form a valid lookup path into
    *nested_dict* (each key indexing the previous level)."""
    current = nested_dict
    for key in keys:
        try:
            current = current[key]
        except Exception:
            # non-subscriptable level or missing key: path is invalid
            return False
    return True
Checks whether the given sequence of keys exists as a lookup path inside the nested dictionaries.
4,558
def combine_nested_dicts(*nested_dicts, depth=-1, base=None):
    """Merge nested dictionaries into *base*, deepest keys first.

    Each stacked key is inserted at the deepest possible level; when an
    intermediate node cannot hold the value, the key is shortened and
    the corresponding sub-dict is written instead.
    """
    if base is None:
        base = {}
    for nested_dict in nested_dicts:
        for k, v in stack_nested_keys(nested_dict, depth=depth):
            while k:
                try:
                    get_nested_dicts(base, *k[:-1])[k[-1]] = v
                    break
                except Exception:
                    # retry one level up with the parent sub-dict
                    k = k[:-1]
                    v = get_nested_dicts(nested_dict, *k)
    return base
Merge nested - dictionaries .
4,559
def add_function(dsp, inputs_kwargs=False, inputs_defaults=False, **kw):
    """Decorator that registers the decorated function on *dsp*.

    All options are forwarded to ``dsp.add_func``; the function object
    itself is returned unchanged so it remains directly callable.
    """
    def decorator(func):
        dsp.add_func(func, inputs_kwargs=inputs_kwargs,
                     inputs_defaults=inputs_defaults, **kw)
        return func
    return decorator
Decorator to add a function to a dispatcher .
4,560
def blue(self, memo=None):
    """Construct a Blueprint out of the current object.

    *memo* caches already-converted objects so shared references are
    translated to a single Blueprint instance.
    """
    memo = {} if memo is None else memo
    if self not in memo:
        import inspect
        from .blue import Blueprint, _parent_blue
        # only attributes matching __init__ parameters are serialized
        keys = tuple(inspect.signature(self.__init__).parameters)
        memo[self] = Blueprint(**{
            k: _parent_blue(v, memo)
            for k, v in self.__dict__.items() if k in keys
        })._set_cls(self.__class__)
    return memo[self]
Constructs a Blueprint out of the current object .
4,561
def value_from_datadict(self, data, files, name):
    """Return this widget's value from the submitted form data.

    Non-empty values are parsed from a human-readable size string into
    bytes via ``parse_size``; unparsable input is returned unchanged so
    field validation can report the error.
    """
    value = super(FileSizeWidget, self).value_from_datadict(data, files, name)
    if value not in EMPTY_VALUES:
        try:
            return parse_size(value)
        except ValueError:
            pass  # let the form field handle the invalid input
    return value
Given a dictionary of data and this widget s name returns the value of this widget . Returns None if it s not provided .
4,562
def connect_ssh_with_cb(ssh_cb, user, host, auth_cb, allow_new=True, verbosity=0):
    """Run *ssh_cb* inside a managed SSH session.

    :param ssh_cb: callback invoked with the ready SSH session.
    :param user: login user name.
    :param host: remote host.
    :param auth_cb: authentication callback (e.g. from ``get_key_auth_cb``).
    :param allow_new: whether to accept previously unknown hosts.
    :param verbosity: SSH logging verbosity.
    """
    # BUGFIX: forward the caller's options; previously they were
    # shadowed by hard-coded ``allow_new=True, verbosity=0``.
    with connect_ssh(user, host, auth_cb,
                     allow_new=allow_new, verbosity=verbosity) as ssh:
        ssh_cb(ssh)
A managed SSH session . When the session is ready we ll invoke the ssh_cb callback .
4,563
def connect_sftp_with_cb(sftp_cb, *args, **kwargs):
    """Run *sftp_cb* inside managed SSH + SFTP sessions.

    All remaining arguments are forwarded to ``_connect_sftp``; the
    callback receives the ready ``(ssh, sftp)`` pair.
    """
    with _connect_sftp(*args, **kwargs) as (ssh, sftp):
        sftp_cb(ssh, sftp)
A managed SFTP session . When the SSH session and an additional SFTP session are ready invoke the sftp_cb callback .
4,564
def get_key_auth_cb(key_filepath):
    """Return an authentication callback that performs public-key login
    using the private key stored at *key_filepath*."""
    def auth_cb(ssh):
        key = ssh_pki_import_privkey_file(key_filepath)
        ssh.userauth_publickey(key)
    return auth_cb
This is just a convenience function for key - based login .
4,565
def add_edge_fun(graph):
    """Return a fast ``add_edge(u, v, **attr)`` bound to *graph*.

    Only the out-node ``v`` is created on demand (``u`` is assumed to
    exist).  Operates directly on the graph's internal dicts for speed.
    """
    succ, pred, node = graph._succ, graph._pred, graph._node

    def add_edge(u, v, **attr):
        if v not in succ:
            # initialize the containers for the new out-node
            succ[v], pred[v], node[v] = {}, {}, {}
        succ[u][v] = pred[v][u] = attr

    return add_edge
Returns a function that adds an edge to the graph checking only the out node .
4,566
def remove_edge_fun(graph):
    """Return a ``remove_edge(u, v)`` bound to *graph* that also drops
    node ``v`` when the edge removal leaves it isolated."""
    rm_edge, rm_node = graph.remove_edge, graph.remove_node
    from networkx import is_isolate

    def remove_edge(u, v):
        rm_edge(u, v)
        if is_isolate(graph, v):
            rm_node(v)

    return remove_edge
Returns a function that removes an edge from the graph .
4,567
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
    """Return a node id not yet present in *graph*.

    Starts from *initial_guess* and, while taken, appends an increasing
    counter (``unknown<0>``, ``unknown<1>``, ...).
    """
    has_node = graph.has_node
    n = counter()
    node_id_format = _format.format(initial_guess)
    node_id = initial_guess
    while has_node(node_id):
        node_id = node_id_format % n()
    return node_id
Finds an unused node id in graph .
4,568
def add_func_edges(dsp, fun_id, nodes_bunch, edge_weights=None, input=True,
                   data_nodes=None):
    """Add input or output edges between *fun_id* and *nodes_bunch*.

    Unknown ids are created as data nodes on the fly.  If an id resolves
    to a non-data node, every node added so far plus the function node
    is rolled back and ValueError is raised.  Returns the list of data
    node ids that were added.
    """
    add_edge = _add_edge_dmap_fun(dsp.dmap, edge_weights)
    node, add_data = dsp.dmap.nodes, dsp.add_data
    remove_nodes = dsp.dmap.remove_nodes_from
    msg = 'Invalid %sput id: {} is not a data node' % ['out', 'in'][input]
    i, j = ('i', 'o') if input else ('o', 'i')
    data_nodes = data_nodes or []
    for u in nodes_bunch:
        try:
            if node[u]['type'] != 'data':
                # roll back everything created by this call before failing
                data_nodes.append(fun_id)
                remove_nodes(data_nodes)
                raise ValueError(msg.format(u))
        except KeyError:
            # unknown id: create a new data node
            data_nodes.append(add_data(data_id=u))
        add_edge(**{i: u, j: fun_id, 'w': u})
    return data_nodes
Adds function node edges .
4,569
def _add_edge_dmap_fun ( graph , edges_weights = None ) : add = graph . add_edge if edges_weights is not None : def add_edge ( i , o , w ) : if w in edges_weights : add ( i , o , weight = edges_weights [ w ] ) else : add ( i , o ) else : def add_edge ( i , o , w ) : add ( i , o ) return add_edge
Adds edge to the dispatcher map .
4,570
def _get_node(nodes, node_id, fuzzy=True):
    """Return the ``(id, node)`` pair from *nodes* matching *node_id*.

    On a miss with *fuzzy* enabled, the first key (in sorted order) that
    contains *node_id* as a substring is returned instead; otherwise the
    original KeyError is re-raised.
    """
    try:
        return node_id, nodes[node_id]
    except KeyError as ex:
        if fuzzy:
            it = sorted(nodes.items())
            n = next(((k, v) for k, v in it if node_id in k), EMPTY)
            if n is not EMPTY:
                return n
        raise ex
Returns a dispatcher node that match the given node id .
4,571
def get_full_pipe(sol, base=()):
    """Return the full pipe of a dispatch run.

    Walks ``sol._pipe``; for each function node that produced a
    sub-solution, recurses and attaches the nested pipe under
    ``'sub_pipe'``.  *base* is the full-name prefix of the current
    (sub-)solution.
    """
    pipe, i = DspPipe(), len(base)
    for p in sol._pipe:
        n, s = p[-1]
        d = s.dsp
        p = {'task': p}
        if n in s._errors:
            p['error'] = s._errors[n]
        node_id = s.full_name + (n,)
        # sanity check: node must live under the given prefix
        assert base == node_id[:i], '%s != %s' % (node_id[:i], base)
        n_id = node_id[i:]
        n, path = d.get_node(n, node_attr=None)
        if n['type'] == 'function' and 'function' in n:
            try:
                sub_sol = s.workflow.node[path[-1]]['solution']
                sp = get_full_pipe(sub_sol, base=node_id)
                if sp:
                    p['sub_pipe'] = sp
            except KeyError:
                pass  # the function produced no sub-solution
        pipe[bypass(*n_id)] = p
    return pipe
Returns the full pipe of a dispatch run .
4,572
def connectChunk(key, chunk):
    """Parse a storm-pipe CONNECT card chunk.

    *chunk* is a list whose first line reads
    ``CONNECT <slink> <upSjunc> <downSjunc>``; the three fields are
    returned as strings (*key* is unused, kept for the chunk-parser
    signature).
    """
    tokens = chunk[0].strip().split()
    return {
        'slinkNumber': tokens[1],
        'upSjunc': tokens[2],
        'downSjunc': tokens[3],
    }
Parse Storm Pipe CONNECT Chunk Method
4,573
def get_items(self, page=1, order_by=None, filters=None):
    """Fetch one page of items matching the optional filters.

    Returns ``(query, total_count)`` where the query is already
    offset/limited to *page* (1-based, ``self.per_page`` items per
    page).
    """
    start = (page - 1) * self.per_page
    query = self.get_query()
    if order_by is not None:
        query = query.order_by(self._get_field(order_by))
    if filters is not None:
        query = self._filter(query, filters)
    return query.offset(start).limit(self.per_page), self.count(query)
Fetch database for items matching .
4,574
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """Generic file read: load the whole file at *path* into
    ``self.text``.

    Most parameters belong to the common reader signature and are
    unused by this generic implementation.
    """
    self.name = name
    self.fileExtension = extension
    with open(path, 'r') as f:
        self.text = f.read()
Generic File Read from File Method
4,575
def isdisjoint(self, other):
    r"""Return True if the set has no elements in common with *other*.

    Sequences and multisets support fast membership tests directly; any
    other non-container iterable is first converted to a multiset.
    """
    if isinstance(other, _sequence_types + (BaseMultiset,)):
        pass  # already supports efficient ``in`` tests
    elif not isinstance(other, Container):
        other = self._as_multiset(other)
    return all(element not in other for element in self._elements.keys())
r Return True if the set has no elements in common with other .
4,576
def difference(self, *others):
    r"""Return a new multiset with all elements from the *others*
    removed.

    Multiplicities subtract; elements whose count drops to zero or
    below are removed entirely.
    """
    result = self.__copy__()
    _elements = result._elements
    _total = result._total
    for other in map(self._as_multiset, others):
        for element, multiplicity in other.items():
            if element in _elements:
                old_multiplicity = _elements[element]
                new_multiplicity = old_multiplicity - multiplicity
                if new_multiplicity > 0:
                    _elements[element] = new_multiplicity
                    _total -= multiplicity
                else:
                    del _elements[element]
                    _total -= old_multiplicity
    result._total = _total
    return result
r Return a new multiset with all elements from the others removed .
4,577
def union(self, *others):
    r"""Return a new multiset containing all elements from the multiset
    and the *others*, each with its maximal multiplicity."""
    result = self.__copy__()
    _elements = result._elements
    _total = result._total
    for other in map(self._as_mapping, others):
        for element, multiplicity in other.items():
            old_multiplicity = _elements.get(element, 0)
            if multiplicity > old_multiplicity:
                _elements[element] = multiplicity
                _total += multiplicity - old_multiplicity
    result._total = _total
    return result
r Return a new multiset with all elements from the multiset and the others with maximal multiplicities .
4,578
def intersection(self, *others):
    r"""Return a new multiset with elements common to the multiset and
    all *others*, each with its minimal multiplicity."""
    result = self.__copy__()
    _elements = result._elements
    _total = result._total
    for other in map(self._as_mapping, others):
        # snapshot the items: entries may be deleted while iterating
        for element, multiplicity in list(_elements.items()):
            new_multiplicity = other.get(element, 0)
            if new_multiplicity < multiplicity:
                if new_multiplicity > 0:
                    _elements[element] = new_multiplicity
                    _total -= multiplicity - new_multiplicity
                else:
                    del _elements[element]
                    _total -= multiplicity
    result._total = _total
    return result
r Return a new multiset with elements common to the multiset and all others .
4,579
def symmetric_difference(self, other):
    r"""Return a new multiset with elements in either the set or *other*
    but not both (multiplicities are absolute differences)."""
    other = self._as_multiset(other)
    result = self.__class__()
    _total = 0
    _elements = result._elements
    self_elements = self._elements
    other_elements = other._elements
    dist_elements = set(self_elements.keys()) | set(other_elements.keys())
    for element in dist_elements:
        multiplicity = self_elements.get(element, 0)
        other_multiplicity = other_elements.get(element, 0)
        new_multiplicity = (multiplicity - other_multiplicity
                            if multiplicity > other_multiplicity
                            else other_multiplicity - multiplicity)
        _total += new_multiplicity
        if new_multiplicity > 0:
            _elements[element] = new_multiplicity
    result._total = _total
    return result
r Return a new set with elements in either the set or other but not both .
4,580
def times(self, factor):
    """Return a new multiset with each element's multiplicity multiplied
    by the non-negative scalar *factor* (factor 0 yields an empty set).

    :raises ValueError: if *factor* is negative.
    """
    if factor == 0:
        return self.__class__()
    if factor < 0:
        # message fixed ('must no be' -> 'must not be') for consistency
        # with times_update()
        raise ValueError('The factor must not be negative.')
    result = self.__copy__()
    _elements = result._elements
    for element in _elements:
        _elements[element] *= factor
    result._total *= factor
    return result
Return a new set with each element s multiplicity multiplied with the given scalar factor .
4,581
def union_update(self, *others):
    r"""Update the multiset in place, raising each element's
    multiplicity to the maximum found among the *others*."""
    _elements = self._elements
    _total = self._total
    for other in map(self._as_mapping, others):
        for element, multiplicity in other.items():
            old_multiplicity = _elements.get(element, 0)
            if multiplicity > old_multiplicity:
                _elements[element] = multiplicity
                _total += multiplicity - old_multiplicity
    self._total = _total
r Update the multiset adding elements from all others using the maximum multiplicity .
4,582
def intersection_update(self, *others):
    r"""Update the multiset in place, keeping only elements found in it
    and all *others* (with minimal multiplicities)."""
    for other in map(self._as_mapping, others):
        # snapshot the items: assignment may delete entries while iterating
        for element, current_count in list(self.items()):
            multiplicity = other.get(element, 0)
            if multiplicity < current_count:
                self[element] = multiplicity
r Update the multiset keeping only elements found in it and all others .
4,583
def difference_update(self, *others):
    r"""Remove all elements contained in the *others* from this
    multiset (multiplicities subtract via ``discard``)."""
    for other in map(self._as_multiset, others):
        for element, multiplicity in other.items():
            self.discard(element, multiplicity)
r Remove all elements contained the others from this multiset .
4,584
def symmetric_difference_update(self, other):
    r"""Update the multiset to contain only elements in either this
    multiset or *other* but not both (absolute count differences)."""
    other = self._as_multiset(other)
    elements = set(self.distinct_elements()) | set(other.distinct_elements())
    for element in elements:
        multiplicity = self[element]
        other_count = other[element]
        self[element] = (multiplicity - other_count
                         if multiplicity > other_count
                         else other_count - multiplicity)
r Update the multiset to contain only elements in either this multiset or the other but not both .
4,585
def times_update(self, factor):
    """Multiply each element's multiplicity in place by the non-negative
    scalar *factor* (factor 0 clears the multiset).

    :raises ValueError: if *factor* is negative.
    """
    if factor < 0:
        raise ValueError("The factor must not be negative.")
    elif factor == 0:
        self.clear()
    else:
        _elements = self._elements
        for element in _elements:
            _elements[element] *= factor
        self._total *= factor
Update this multiset by multiplying each element's multiplicity with the given scalar factor.
4,586
def add(self, element, multiplicity=1):
    """Add *element* to the multiset *multiplicity* times.

    :raises ValueError: if *multiplicity* is not positive.
    """
    if multiplicity < 1:
        raise ValueError("Multiplicity must be positive")
    self._elements[element] += multiplicity
    self._total += multiplicity
Adds an element to the multiset .
4,587
def remove(self, element, multiplicity=None):
    """Remove *element* from the multiset and return its old
    multiplicity.

    Without *multiplicity* (or when it is at least the current count)
    the element is removed entirely.

    :raises KeyError: if *element* is not in the multiset.
    :raises ValueError: if *multiplicity* is negative.
    """
    _elements = self._elements
    if element not in _elements:
        # include the missing key in the error for easier debugging
        raise KeyError(element)
    old_multiplicity = _elements.get(element, 0)
    if multiplicity is None or multiplicity >= old_multiplicity:
        del _elements[element]
        self._total -= old_multiplicity
    elif multiplicity < 0:
        # message fixed ('must be not be' -> 'must not be'),
        # matching discard()
        raise ValueError("Multiplicity must not be negative")
    elif multiplicity > 0:
        _elements[element] -= multiplicity
        self._total -= multiplicity
    return old_multiplicity
Removes an element from the multiset .
4,588
def discard(self, element, multiplicity=None):
    """Remove *element* (entirely, or up to *multiplicity* copies) and
    return the old multiplicity; returns 0 when it was absent.

    :raises ValueError: if *multiplicity* is negative.
    """
    _elements = self._elements
    if element in _elements:
        old_multiplicity = _elements[element]
        if multiplicity is None or multiplicity >= old_multiplicity:
            del _elements[element]
            self._total -= old_multiplicity
        elif multiplicity < 0:
            raise ValueError("Multiplicity must not be negative")
        elif multiplicity > 0:
            _elements[element] -= multiplicity
            self._total -= multiplicity
        return old_multiplicity
    else:
        return 0
Removes the element from the multiset .
4,589
def shutdown_executors(wait=True):
    """Shut down every registered executor.

    Returns a dict mapping each executor name to the result of its
    ``shutdown_executor`` call; *wait* is forwarded to each shutdown.
    """
    return {k: shutdown_executor(k, wait) for k in list(_EXECUTORS.keys())}
Clean - up the resources of all initialized executors .
4,590
def async_thread(sol, args, node_attr, node_id, *a, **kw):
    """Execute ``sol._evaluate_node`` in an asynchronous thread.

    Falls back to synchronous evaluation when no executor is
    configured.  If any input is a Future, submission is deferred until
    all of them complete; otherwise the task is submitted immediately.
    Honors the node's ``await_result`` timeout and wraps multi-output
    results in an AsyncList.
    """
    executor = _get_executor(_executor_name(kw.get('executor', False), sol.dsp))
    if not executor:
        return sol._evaluate_node(args, node_attr, node_id, *a, **kw)
    futures = args
    if node_attr['type'] == 'data' and (
            node_attr['wait_inputs'] or 'function' in node_attr):
        futures = args[0].values()
    from concurrent.futures import Future
    # keep only the inputs that are still pending Futures
    futures = {v for v in futures if isinstance(v, Future)}

    def _submit():
        return executor.thread(
            _async_eval, sol, args, node_attr, node_id, *a, **kw)

    if futures:
        # defer submission until every input future has resolved,
        # forwarding the eventual result (or exception) to ``result``
        result = Future()

        def _set_res(fut):
            try:
                result.set_result(fut.result())
            except BaseException as ex:
                result.set_exception(ex)

        def _submit_task(fut=None):
            futures.discard(fut)
            not futures and _submit().add_done_callback(_set_res)

        for f in list(futures):
            f.add_done_callback(_submit_task)
    else:
        result = _submit()
    timeout = node_attr.get('await_result', False)
    if timeout is not False:
        return _await_result(result, timeout, sol, node_id)
    n = len(node_attr.get('outputs', []))
    return AsyncList(future=result, n=n) if n > 1 else result
Execute sol . _evaluate_node in an asynchronous thread .
4,591
def await_result(obj, timeout=None):
    """Return *obj*, first resolving it when it is a Future.

    *timeout* is forwarded to ``Future.result``; any non-Future object
    is returned untouched.
    """
    from concurrent.futures import Future
    if isinstance(obj, Future):
        return obj.result(timeout)
    return obj
Return the result of a Future object .
4,592
def pivot(table, left, top, value):
    """Create a cross-tab or pivot table from a normalised input table.

    :param table: iterable of mapping rows (normalised records).
    :param left: column names forming the row (y) axis.
    :param top: column names whose value tuples become new columns.
    :param value: name of the numeric column summed into each cell.
    :return: list of dict rows keyed by the *left* column names plus one
        key per distinct *top* tuple (in first-seen order); cells with
        no data are ''.
    """
    cells = {}
    ysort, xsort = [], []  # axis tuples in first-seen order
    for record in table:
        yaxis = tuple(record[c] for c in left)
        xaxis = tuple(record[c] for c in top)
        if yaxis not in ysort:
            ysort.append(yaxis)
        if xaxis not in xsort:
            xsort.append(xaxis)
        row_cells = cells.setdefault(yaxis, {})
        row_cells[xaxis] = row_cells.get(xaxis, 0) + record[value]
    headings = list(left)
    headings.extend(xsort)
    result = []
    for yaxis in ysort:
        row = list(yaxis)
        # BUGFIX: emit cell values in the same order as the headings.
        # Previously values were appended in sorted-key order while the
        # headings kept first-seen order, mislabelling columns whenever
        # the top values were not encountered already sorted.
        row.extend(cells[yaxis].get(x, '') for x in xsort)
        result.append(dict(zip(headings, row)))
    return result
Creates a cross - tab or pivot table from a normalised input table . Use this function to denormalize a table of normalized records .
4,593
def download_hrrr_for_gssha(main_directory, forecast_start_date_string,
                            forecast_start_hour_string,
                            leftlon=-180, rightlon=180,
                            toplat=90, bottomlat=-90):
    """Download HRRR forecast data for GSSHA.

    Fetches the 00-18h forecast GRIB2 files (surface/2m/10m variables)
    for the given start date/hour from the NOMADS filter service into
    ``main_directory/forecast_start_date_string``.  Returns the list of
    downloaded file paths; on any failed download, the files fetched so
    far are deleted and an empty list is returned.
    """
    out_directory = path.join(main_directory, forecast_start_date_string)
    try:
        mkdir(out_directory)
    except OSError:
        pass  # directory already exists
    forecast_timestep_hour_string_array = ['00', '01', '02', '03', '04', '05',
                                           '06', '07', '08', '09', '10', '11',
                                           '12', '13', '14', '15', '16', '17',
                                           '18']
    downloaded_file_list = []
    for forecast_timestep_hour_string in forecast_timestep_hour_string_array:
        file_name = 'hrrr.t{0}z.wrfsfcf{1}.grib2'.format(
            forecast_start_hour_string, forecast_timestep_hour_string)
        payload = {
            'file': file_name,
            'lev_10_m_above_ground': 'on',
            'lev_2_m_above_ground': 'on',
            'lev_entire_atmosphere': 'on',
            'lev_surface': 'on',
            'var_DSWRF': 'on',
            'var_PRATE': 'on',
            'var_PRES': 'on',
            'var_RH': 'on',
            'var_TMP': 'on',
            'var_UGRD': 'on',
            'var_VGRD': 'on',
            'var_TCDC': 'on',
            'subregion': '',
            'leftlon': str(leftlon),
            'rightlon': str(rightlon),
            'toplat': str(toplat),
            'bottomlat': str(bottomlat),
            'dir': '/hrrr.{0}'.format(forecast_start_date_string),
        }
        r = requests.get('http://nomads.ncep.noaa.gov/cgi-bin/filter_hrrr_2d.pl',
                         params=payload, stream=True)
        if r.status_code == requests.codes.ok:
            out_file = path.join(out_directory, file_name)
            downloaded_file_list.append(out_file)
            with open(out_file, 'wb') as fd:
                for chunk in r.iter_content(chunk_size=1024):
                    fd.write(chunk)
        else:
            # a failed timestep invalidates the whole forecast set:
            # clean up everything downloaded so far and bail out
            log.error("Problem downloading {0}".format(file_name))
            for filename in downloaded_file_list:
                try:
                    remove(filename)
                except OSError:
                    pass
            downloaded_file_list = []
            break
    return downloaded_file_list
Function to download HRRR data for GSSHA
4,594
def _patch_resource(self, method):
    """Replace ``self.resource`` with the node for the same path
    resolved with *method*.

    :raises UnsupportedResourceMethodError: if the RAML defines no such
        method for this resource path.
    """
    resource = self.client.get_resource("", self.resource.path, method)
    if not resource:
        raise UnsupportedResourceMethodError(self.resource.path, method)
    self.resource = resource
Patch the current RAML ResourceNode by the resource with the correct method if it exists
4,595
def parse_raml(self):
    """Load and parse the RAML file into ``self.raml``.

    ``self.ramlfile`` may be a URL (downloaded) or a local file path
    (read as UTF-8); parsing uses the config at ``self.ramlconfig``.
    """
    if utils.is_url(self.ramlfile):
        raml = utils.download_file(self.ramlfile)
    else:
        with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f:
            raml = raml_f.read()
    loader = ramlfications.loads(raml)
    config = ramlfications.setup_config(self.ramlconfig)
    self.raml = ramlfications.parse_raml(loader, config)
Parse RAML file
4,596
def get_resource(self, base_resource_path, resource_path, method=None):
    """Get a RAML resource by its path and, optionally, its method.

    A literal path match (with or without a trailing slash) wins; a
    ``{resource_path}`` URI-parameter match is returned wrapped in a
    NodeParameter.  Returns None when nothing matches.
    """
    basic_path = base_resource_path + resource_path
    dynamic_path = base_resource_path + "{" + resource_path + "}"
    for resource in self.raml.resources:
        method_matched = method is None or resource.method == method
        if method_matched and (resource.path == basic_path or
                               resource.path == basic_path + '/'):
            return resource
        if resource.path == dynamic_path and method_matched:
            return NodeParameter(resource=resource, parameter=resource_path)
    return None
Gets a resource by it s path and optional by it s method
4,597
def auto_no_thousands(self):
    """Like ``self.auto``, but steps to the next unit above 999.99 so
    the displayed number stays below 1000.

    NOTE(review): the thresholds are decimal powers (10**3n) while the
    returned unit names are binary (KiB/MiB/...) — presumably
    intentional to keep the number short; confirm against ``auto``.
    """
    if self._value >= 1000000000000:
        return self.TiB, 'TiB'
    if self._value >= 1000000000:
        return self.GiB, 'GiB'
    if self._value >= 1000000:
        return self.MiB, 'MiB'
    if self._value >= 1000:
        return self.KiB, 'KiB'
    else:
        return self.B, 'B'
Like self . auto but calculates the next unit if > 999 . 99 .
4,598
def error(message, code=1):
    """Print an error to stderr and exit with *code* (default 1).

    An empty *message* prints just a blank line.
    """
    text = 'ERROR: {0}'.format(message) if message else ''
    print(text, file=sys.stderr)
    sys.exit(code)
Prints an error message to stderr and exits with a status of 1 by default .
4,599
def update_hmet_card_file(hmet_card_file_path, new_hmet_data_path):
    """Rewrite the HMET card file so every entry points into
    *new_hmet_data_path*.

    The card file stores absolute paths, so it must be regenerated
    whenever the HMET data directory moves.  The rewrite goes through a
    temporary file and uses CRLF line endings (GSSHA convention).

    NOTE(review): each line is not stripped before ``path.basename``,
    so the original trailing newline appears to be carried into the
    joined path — verify against a sample card file.
    """
    hmet_card_file_path_temp = "{0}_tmp".format(hmet_card_file_path)
    try:
        remove(hmet_card_file_path_temp)
    except OSError:
        pass  # no stale temp file to remove
    copy(hmet_card_file_path, hmet_card_file_path_temp)
    with io_open(hmet_card_file_path_temp, 'w',
                 newline='\r\n') as out_hmet_list_file:
        with open(hmet_card_file_path) as old_hmet_list_file:
            for date_path in old_hmet_list_file:
                out_hmet_list_file.write(u"{0}\n".format(
                    path.join(new_hmet_data_path, path.basename(date_path))))
    try:
        remove(hmet_card_file_path)
    except OSError:
        pass
    rename(hmet_card_file_path_temp, hmet_card_file_path)
This function updates the paths in the HMET card file to the new location of the HMET data . This is necessary because the file paths are absolute and will need to be updated if moved .