idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
4,700
def datasetHeaderChunk(key, lines):
    """Process the dataset header cards into a result dictionary.

    Simple cards are handled through a keyword -> (field, converter) table;
    BEGSCL/BEGVEC cards set the dataset type instead.
    """
    KEYWORDS = ('DATASET', 'OBJTYPE', 'VECTYPE', 'BEGSCL', 'BEGVEC',
                'OBJID', 'ND', 'NC', 'NAME')
    TYPE_KEYS = ('BEGSCL', 'BEGVEC')

    # Map each simple card keyword to its result field and value converter.
    CARD_MAP = {'ND': ('numberData', int),
                'NC': ('numberCells', int),
                'NAME': ('name', str),
                'OBJID': ('objectID', int),
                'OBJTYPE': ('objectType', str),
                'VECTYPE': ('vectorType', str)}

    result = {'type': None,
              'numberData': None,
              'numberCells': None,
              'name': None,
              'objectID': None,
              'objectType': None,
              'vectorType': None}

    for key, cardChunks in iteritems(pt.chunk(KEYWORDS, lines)):
        for card in cardChunks:
            tokens = pt.splitLine(card[0])
            if key in CARD_MAP:
                field, convert = CARD_MAP[key]
                result[field] = convert(tokens[1])
            elif key in TYPE_KEYS:
                # The card keyword itself identifies the dataset type.
                result['type'] = tokens[0]
    return result
Process the dataset header
4,701
def datasetScalarTimeStepChunk(lines, numberColumns, numberCells):
    """Process one time-step chunk of a scalar dataset.

    Builds a nested-list string representation of the cell values
    (``numberColumns`` values per row) plus the raw raster text.
    """
    END_DATASET_TAG = 'ENDDS'

    # First line is the time-step card: e.g. "TS <iStatus> <time>".
    header = pt.splitLine(lines.pop(0))
    statusFlag = int(header[1])

    # When iStatus is 0 cell values start immediately; otherwise the first
    # numberCells lines are skipped — presumably a per-cell status block
    # (TODO confirm against the dataset format spec).
    firstCell = 0 if statusFlag == 0 else numberCells

    # Drop the dataset terminator line if present.
    if END_DATASET_TAG in lines[-1]:
        lines.pop(-1)

    pieces = ['[[']
    rasterParts = []
    lastIndex = len(lines) - 1
    column = 1
    for pos in range(firstCell, len(lines)):
        cellValue = lines[pos].strip()
        if pos == lastIndex:
            pieces.append(cellValue + ']]')
        elif column % numberColumns:
            pieces.append(cellValue + ', ')
        else:
            # Row boundary: close the current row and open the next one.
            pieces.append(cellValue + '], [')
        column += 1
        rasterParts.append(lines[pos])

    return {'iStatus': statusFlag,
            'timestamp': float(header[2]),
            'cellArray': ''.join(pieces),
            'rasterText': ''.join(rasterParts)}
Process the time step chunks for scalar datasets
4,702
def save_dispatcher(dsp, path):
    """Write the Dispatcher object to *path* in Python pickle format."""
    import dill
    with open(path, 'wb') as dump_file:
        dill.dump(dsp, dump_file)
Write a Dispatcher object in Python pickle format.
4,703
def save_default_values(dsp, path):
    """Write the Dispatcher's default values to *path* in Python pickle format."""
    import dill
    with open(path, 'wb') as dump_file:
        dill.dump(dsp.default_values, dump_file)
Write Dispatcher default values in Python pickle format .
4,704
def load_default_values(dsp, path):
    """Load pickled default values from *path* and re-initialise *dsp* with them."""
    import dill
    with open(path, 'rb') as load_file:
        # Re-run __init__ keeping the existing dispatch map, swapping in
        # the unpickled defaults.
        dsp.__init__(dmap=dsp.dmap, default_values=dill.load(load_file))
Load Dispatcher default values in Python pickle format .
4,705
def save_map(dsp, path):
    """Write the Dispatcher's graph object to *path* in Python pickle format."""
    import dill
    with open(path, 'wb') as dump_file:
        dill.dump(dsp.dmap, dump_file)
Write Dispatcher graph object in Python pickle format .
4,706
def chunk(keywords, lines):
    """Divide *lines* into chunks delimited by the given keyword tokens.

    Returns a dict mapping each keyword to a list of chunks.  A chunk is
    the list of lines starting at a line whose first token is a keyword and
    running up to (but excluding) the next keyword line.  Blank lines are
    skipped; lines before the first keyword line are discarded.
    """
    chunks = {keyword: [] for keyword in keywords}
    current = []
    for line in lines:
        if not line.strip():
            continue  # skip blank lines entirely
        token = line.split()[0]
        if token in keywords:
            # Start a new chunk anchored at this keyword line.
            current = [line]
            chunks[token].append(current)
        else:
            current.append(line)
    return chunks
Divide a file into chunks between key words in the list
4,707
def valueReadPreprocessor(valueString, replaceParamsFile=None):
    """Apply global pre-processing to values while reading project files.

    Bracketed tokens (e.g. ``[PARAM]``) are replacement variables: they are
    swapped for the matching parameter's negative id, or for the
    REPLACE_NO_VALUE sentinel when no parameter matches.
    """
    # Booleans cannot be parameterised; pass them through untouched.
    if type(valueString) is bool:
        log.warning("Only numerical variable types can be handled by the valueReadPreprocessor function.")
        return valueString

    # No replacement file, no value, or no bracketed token: nothing to do.
    if replaceParamsFile is None or valueString is None:
        return valueString
    if '[' not in valueString and ']' not in valueString:
        return valueString

    processedValue = '{0}'.format(REPLACE_NO_VALUE)
    for targetParam in replaceParamsFile.targetParameters:
        if targetParam.targetVariable == valueString:
            # Encode the match as the negative of the parameter id.
            processedValue = '{0}'.format(-targetParam.id)
            break
    return processedValue
Apply global pre-processing to values during reading throughout the project.
4,708
def valueWritePreprocessor(valueString, replaceParamsFile=None):
    """Look up the variable name in the replace-params file for a negative id.

    Parameters
    ----------
    valueString :
        Value read back from the database.  A negative integer (as int or
        numeric string) encodes a replacement-parameter id.
    replaceParamsFile : optional
        Replace-params file object exposing a ``targetParameters`` iterable.

    Returns
    -------
    The matching parameter's target variable name, ``'[NO_VARIABLE]'`` for
    the REPLACE_NO_VALUE sentinel, or the value unchanged.
    """
    # Booleans cannot be parameterised; pass them through untouched.
    if type(valueString) is bool:
        # BUGFIX: message previously named valueReadPreprocessor.
        log.warning("Only numerical variable types can be handled by the valueWritePreprocessor function.")
        return valueString

    variableString = valueString
    if replaceParamsFile is not None:
        if variableString == REPLACE_NO_VALUE:
            variableString = '[NO_VARIABLE]'
        else:
            try:
                number = int(valueString)
            except (TypeError, ValueError):
                # Not an integer value: nothing to replace.
                pass
            else:
                if number < 0:
                    parameterID = -number
                    for targetParam in replaceParamsFile.targetParameters:
                        if targetParam.id == parameterID:
                            variableString = targetParam.targetVariable
                            break
    return variableString
Look up the variable name in the replace-params file for the given negative id and return it.
4,709
def run(self, dataset_path):
    """Run all FeatureExtractors and write the combined results to a CSV file."""
    dataset = self._generate_features(self._feature_extractors)
    dataset.to_csv(dataset_path)
Run all FeatureExtractors and output results to CSV .
4,710
def _generate_features(self, feature_extractors):
    """Run every FeatureExtractor (or reuse its cached run) and concat results."""
    frames = [pd.DataFrame()]
    total = len(feature_extractors)
    for position, extractor in enumerate(feature_extractors, start=1):
        log.info("generating: '%s' (%d/%d)", extractor.name, position, total)
        cached = self._cache[extractor.name]
        if extractor.same(cached):
            # Unchanged since the cached run: reuse its result.
            log.info('pulling from cache')
            extractor = cached
        else:
            log.info('running...')
            extractor.extract()
        frames.append(extractor.result)
        if self.cache_path:
            self._cache[extractor.name] = extractor
    if self.cache_path:
        # Persist the cache so unchanged extractors can be skipped next run.
        with open(self.cache_path, 'wb') as cache_file:
            pickle.dump(self._cache, cache_file)
    return pd.concat(frames, axis=1)
Run all FeatureExtractors and record results in a key - value format .
4,711
def read(self, directory, filename, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None, **kwargs):
    """Generic read-file-into-database method.

    Delegates the actual parsing to the subclass ``_read`` hook and commits
    the session afterwards; rolls back and warns if the file is missing.
    """
    path = os.path.join(directory, filename)

    # Split the filename into object name and (optional) extension.
    parts = filename.split('.')
    name = parts[0]
    extension = parts[-1] if len(parts) >= 2 else ''

    if not os.path.isfile(path):
        # Nothing to read: undo any pending changes and warn the caller.
        session.rollback()
        log.warning('Could not find file named {0}. File not read.'.format(filename))
        return

    session.add(self)
    self._read(directory, filename, session, path, name, extension,
               spatial, spatialReferenceID, replaceParamFile, **kwargs)
    self._commit(session, self.COMMIT_ERROR_MESSAGE)
Generic read file into database method .
4,712
def write(self, session, directory, name, replaceParamFile=None, **kwargs):
    """Write this object from the database back to a file on disk."""
    parts = name.split('.')
    base = parts[0]
    extension = parts[-1] if len(parts) >= 2 else ''

    # Give subclasses a chance to adjust the base name; failures are
    # deliberately ignored (not every subclass defines a preprocessor).
    try:
        base = self._namePreprocessor(base)
    except:
        pass

    # Fall back to the class default extension when none was supplied.
    filename = '{0}.{1}'.format(base, extension or self.fileExtension)
    filePath = os.path.join(directory, filename)

    with io_open(filePath, 'w') as openFile:
        self._write(session=session,
                    openFile=openFile,
                    replaceParamFile=replaceParamFile,
                    **kwargs)
Write from database back to file .
4,713
def _commit(self, session, errorMessage):
    """Commit the session, logging a helpful message on integrity failures.

    Any other exception propagates to the caller unchanged.
    """
    try:
        session.commit()
    except IntegrityError:
        # Most likely a constraint violation; report which operation failed.
        log.error('Commit to database failed. %s' % errorMessage)
Custom commit function for file objects
4,714
def run(self):
    """Create the versioneer version file and wire it into the project.

    Writes ``versionfile_source``, appends the version snippet to the
    package ``__init__.py`` if missing, ensures MANIFEST.in includes both
    ``versioneer.py`` and the version file, and finally performs the
    VCS-specific install step.
    """
    print(" creating %s" % versionfile_source)
    with open(versionfile_source, "w") as f:
        f.write(get_vcs_code())

    # Append the version-import snippet to the package __init__.py once.
    ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
    try:
        with open(ipy, "r") as f:
            old = f.read()
    except EnvironmentError:
        old = ""
    if INIT_PY_SNIPPET not in old:
        print(" appending to %s" % ipy)
        with open(ipy, "a") as f:
            f.write(INIT_PY_SNIPPET)
    else:
        print(" %s unmodified" % ipy)

    # Collect the simple "include <file>" entries already in MANIFEST.in.
    manifest_in = os.path.join(get_root(), "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass

    # Make sure versioneer.py and the version file ship in sdists.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" % versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")

    # Dispatch to the VCS-specific install hook, e.g. git_do_vcs_install.
    do_vcs_install_f = getattr(sys.modules[__name__], VCS + '_do_vcs_install')
    do_vcs_install_f(manifest_in, versionfile_source, ipy)
Create the versioneer . py file .
4,715
def linkChunk(key, chunk):
    """Parse a LINK chunk by dispatching on the link type keyword.

    Parameters
    ----------
    key : str
        Keyword that produced this chunk (kept for the uniform chunk-parser
        signature; not used here).
    chunk : list of str
        Lines of the chunk; the second line's first token identifies the
        link type.

    Returns
    -------
    dict
        Parsed link structure from the type-specific parser.

    Raises
    ------
    ValueError
        If the link type is not DX, STRUCTURE, RESERVOIR, or LAKE.
        (BUGFIX: an unrecognized type previously crashed with
        UnboundLocalError on ``result``.)
    """
    linkType = chunk[1].strip().split()[0]
    if linkType == 'DX':
        result = xSectionLink(chunk)
    elif linkType == 'STRUCTURE':
        result = structureLink(chunk)
    elif linkType in ('RESERVOIR', 'LAKE'):
        result = reservoirLink(chunk)
    else:
        raise ValueError('Unsupported link type: {0}'.format(linkType))
    return result
Parse LINK Chunk Method
4,716
def structureLink(lines):
    """Parse a STRUCTURE link chunk into a header plus a list of structures."""
    KEYWORDS = ('LINK', 'STRUCTURE', 'NUMSTRUCTS', 'STRUCTTYPE')
    WEIR_KEYWORDS = ('STRUCTTYPE', 'CREST_LENGTH', 'CREST_LOW_ELEV',
                     'DISCHARGE_COEFF_FORWARD', 'DISCHARGE_COEFF_REVERSE',
                     'CREST_LOW_LOC', 'STEEP_SLOPE', 'SHALLOW_SLOPE')
    CULVERT_KEYWORDS = ('STRUCTTYPE', 'UPINVERT', 'DOWNINVERT',
                        'INLET_DISCH_COEFF', 'REV_FLOW_DISCH_COEFF', 'SLOPE',
                        'LENGTH', 'ROUGH_COEFF', 'DIAMETER', 'WIDTH', 'HEIGHT')
    WEIRS = ('WEIR', 'SAG_WEIR')
    CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')
    CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')
    result = {'type': 'STRUCTURE',
              'header': {'link': None, 'numstructs': None},
              'structures': []}
    chunks = pt.chunk(KEYWORDS, lines)
    for key, chunkList in iteritems(chunks):
        for chunk in chunkList:
            if key == 'STRUCTTYPE':
                # Second token on the STRUCTTYPE card names the structure kind.
                structType = chunk[0].strip().split()[1]
                if structType in WEIRS:
                    weirResult = {'structtype': None, 'crest_length': None,
                                  'crest_low_elev': None,
                                  'discharge_coeff_forward': None,
                                  'discharge_coeff_reverse': None,
                                  'crest_low_loc': None, 'steep_slope': None,
                                  'shallow_slope': None}
                    result['structures'].append(structureChunk(WEIR_KEYWORDS, weirResult, chunk))
                elif structType in CULVERTS:
                    culvertResult = {'structtype': None, 'upinvert': None,
                                     'downinvert': None,
                                     'inlet_disch_coeff': None,
                                     'rev_flow_disch_coeff': None,
                                     'slope': None, 'length': None,
                                     'rough_coeff': None, 'diameter': None,
                                     'width': None, 'height': None}
                    result['structures'].append(structureChunk(CULVERT_KEYWORDS, culvertResult, chunk))
                elif structType in CURVES:
                    # Curve-type structures are not handled here.
                    pass
            elif key != 'STRUCTURE':
                # LINK / NUMSTRUCTS cards populate the header.
                result['header'][key.lower()] = chunk[0].strip().split()[1]
    return result
Parse STRUCTURE LINK Method
4,717
def xSectionLink(lines):
    """Parse a cross-section (DX) link chunk into header, nodes, and x-section."""
    KEYWORDS = ('LINK', 'DX', 'TRAPEZOID', 'TRAPEZOID_ERODE',
                'TRAPEZOID_SUBSURFACE', 'ERODE_TRAPEZOID', 'ERODE_SUBSURFACE',
                'SUBSURFACE_TRAPEZOID', 'SUBSURFACE_ERODE',
                'TRAPEZOID_ERODE_SUBSURFACE', 'TRAPEZOID_SUBSURFACE_ERODE',
                'ERODE_TRAPEZOID_SUBSURFACE', 'ERODE_SUBSURFACE_TRAPEZOID',
                'SUBSURFACE_TRAPEZOID_ERODE', 'SUBSURFACE_ERODE_TRAPEZOID',
                'BREAKPOINT', 'BREAKPOINT_ERODE', 'BREAKPOINT_SUBSURFACE',
                'ERODE_BREAKPOINT', 'ERODE_SUBSURFACE',
                'SUBSURFACE_BREAKPOINT', 'SUBSURFACE_ERODE',
                'BREAKPOINT_ERODE_SUBSURFACE', 'BREAKPOINT_SUBSURFACE_ERODE',
                'ERODE_BREAKPOINT_SUBSURFACE', 'ERODE_SUBSURFACE_BREAKPOINT',
                'SUBSURFACE_BREAKPOINT_ERODE', 'SUBSURFACE_ERODE_BREAKPOINT',
                'TRAP', 'TRAP_ERODE', 'TRAP_SUBSURFACE', 'ERODE_TRAP',
                'ERODE_SUBSURFACE', 'SUBSURFACE_TRAP', 'SUBSURFACE_ERODE',
                'TRAP_ERODE_SUBSURFACE', 'TRAP_SUBSURFACE_ERODE',
                'ERODE_TRAP_SUBSURFACE', 'ERODE_SUBSURFACE_TRAP',
                'SUBSURFACE_TRAP_ERODE', 'SUBSURFACE_ERODE_TRAP',
                'NODES', 'NODE', 'XSEC')
    ERODE = ('TRAPEZOID_ERODE', 'TRAP_ERODE', 'TRAP_SUBSURFACE_ERODE',
             'TRAP_ERODE_SUBSURFACE', 'BREAKPOINT_ERODE',
             'TRAPEZOID_SUBSURFACE_ERODE', 'TRAPEZOID_ERODE_SUBSURFACE',
             'BREAKPOINT_SUBSURFACE_ERODE', 'BREAKPOINT_ERODE_SUBSURFACE')
    SUBSURFACE = ('TRAPEZOID_SUBSURFACE', 'TRAP_SUBSURFACE',
                  'TRAP_SUBSURFACE_ERODE', 'TRAP_ERODE_SUBSURFACE',
                  'BREAKPOINT_SUBSURFACE', 'TRAPEZOID_SUBSURFACE_ERODE',
                  'TRAPEZOID_ERODE_SUBSURFACE', 'BREAKPOINT_SUBSURFACE_ERODE',
                  'BREAKPOINT_ERODE_SUBSURFACE')
    result = {'type': 'XSEC',
              'header': {'link': None, 'dx': None, 'xSecType': None,
                         'nodes': None, 'erode': False, 'subsurface': False},
              'xSection': None,
              'nodes': []}
    chunks = pt.chunk(KEYWORDS, lines)
    for key, chunkList in iteritems(chunks):
        for chunk in chunkList:
            if key == 'NODE':
                result['nodes'].append(nodeChunk(chunk))
            elif key == 'XSEC':
                result['xSection'] = xSectionChunk(chunk)
            elif ('TRAPEZOID' in key) or ('BREAKPOINT' in key) or ('TRAP' in key):
                # Any shape keyword records the cross-section type.
                result['header']['xSecType'] = key
            # NOTE(review): the two branches below look unreachable — every
            # member of ERODE/SUBSURFACE contains 'TRAP' or 'BREAKPOINT' and
            # is therefore caught by the substring test above.  Confirm
            # whether erode/subsurface flags were ever meant to be set here.
            elif key in ERODE:
                result['header']['erode'] = True
            elif key in SUBSURFACE:
                result['header']['subsurface'] = True
            else:
                # LINK / DX / NODES cards populate the header by keyword.
                result['header'][key.lower()] = chunk[0].strip().split()[1]
    return result
Parse Cross Section Links Method
4,718
def reservoirLink(lines):
    """Parse a RESERVOIR/LAKE link chunk into header values and i/j points."""
    KEYWORDS = ('LINK', 'RESERVOIR', 'RES_MINWSE', 'RES_INITWSE',
                'RES_MAXWSE', 'RES_NUMPTS', 'LAKE', 'MINWSE', 'INITWSE',
                'MAXWSE', 'NUMPTS')
    result = {'header': {'link': None, 'res_minwse': None, 'res_initwse': None,
                         'res_maxwse': None, 'res_numpts': None,
                         'minwse': None, 'initwse': None, 'maxwse': None,
                         'numpts': None},
              'type': None,
              'points': []}

    pair = {'i': None, 'j': None}
    for key, cards in iteritems(pt.chunk(KEYWORDS, lines)):
        for card in cards:
            tokens = card[0].strip().split()
            if key in ('NUMPTS', 'RES_NUMPTS'):
                result['header'][key.lower()] = tokens[1]
                # Subsequent lines hold i/j cell ordinates, several per line;
                # a pair is emitted every time a j ordinate completes it.
                for pointLine in card[1:]:
                    for count, ordinate in enumerate(pointLine.strip().split()):
                        if count % 2 == 0:
                            pair['i'] = ordinate
                        else:
                            pair['j'] = ordinate
                            result['points'].append(pair)
                            pair = {'i': None, 'j': None}
            elif key in ('LAKE', 'RESERVOIR'):
                # The card keyword itself is the link type.
                result['type'] = tokens[0]
            else:
                result['header'][key.lower()] = tokens[1]
    return result
Parse RESERVOIR Link Method
4,719
def nodeChunk(lines):
    """Parse a NODE chunk into node number, coordinates, and elevation."""
    KEYWORDS = ('NODE', 'X_Y', 'ELEV')
    result = {'node': None, 'x': None, 'y': None, 'elev': None}
    for key, cards in iteritems(pt.chunk(KEYWORDS, lines)):
        for card in cards:
            tokens = card[0].strip().split()
            if key == 'X_Y':
                # The X_Y card carries both coordinates on one line.
                result['x'], result['y'] = tokens[1], tokens[2]
            else:
                result[key.lower()] = tokens[1]
    return result
Parse NODE Method
4,720
def xSectionChunk(lines):
    """Parse an XSEC chunk into cross-section properties and breakpoints.

    ERODE and SUBSURFACE cards are stored as booleans, X1 cards accumulate
    into the ``breakpoints`` list, and every other card stores its second
    token under the lower-cased keyword.
    """
    KEYWORDS = ('MANNINGS_N', 'BOTTOM_WIDTH', 'BANKFULL_DEPTH', 'SIDE_SLOPE',
                'NPAIRS', 'NUM_INTERP', 'X1', 'ERODE', 'MAX_EROSION',
                'SUBSURFACE', 'M_RIVER', 'K_RIVER')
    result = {'mannings_n': None, 'bottom_width': None, 'bankfull_depth': None,
              'side_slope': None, 'npairs': None, 'num_interp': None,
              'erode': False, 'subsurface': False, 'max_erosion': None,
              'm_river': None, 'k_river': None, 'breakpoints': []}
    chunks = pt.chunk(KEYWORDS, lines)
    for key, chunkList in iteritems(chunks):
        for chunk in chunkList:
            schunk = chunk[0].strip().split()
            if key == 'X1':
                # Each X1 card contributes one breakpoint coordinate pair.
                x = schunk[1]
                y = schunk[2]
                result['breakpoints'].append({'x': x, 'y': y})
            # BUGFIX: this was a separate 'if', which let X1 cards also fall
            # into the final 'else' and write a spurious result['x1'] entry.
            elif key in ('SUBSURFACE', 'ERODE'):
                result[key.lower()] = True
            else:
                result[key.lower()] = schunk[1]
    return result
Parse XSEC Method
4,721
def structureChunk(keywords, resultDict, lines):
    """Fill *resultDict* with card values for a weir or culvert structure."""
    for key, cards in iteritems(pt.chunk(keywords, lines)):
        for card in cards:
            tokens = card[0].strip().split()
            # Each card stores its second token under the lower-cased keyword.
            resultDict[key.lower()] = tokens[1]
    return resultDict
Parse Weir and Culvert Structures Method
4,722
def bar(self, width, **_):
    """Return the bouncing progress bar; each call advances the animation."""
    inner_width = width - self._width_offset
    self._position += self._direction

    # Bounce off either end by clamping and reversing direction.
    if self._position <= 0 and self._direction < 0:
        self._position, self._direction = 0, 1
    elif self._position > inner_width:
        self._position, self._direction = inner_width - 1, -1

    left_fill = self.CHAR_EMPTY * self._position
    right_fill = self.CHAR_EMPTY * (inner_width - self._position)
    return ''.join([self.CHAR_LEFT_BORDER, left_fill, self.CHAR_ANIMATED,
                    right_fill, self.CHAR_RIGHT_BORDER])
Returns the completed progress bar. Every time this is called, the animation advances.
4,723
def _read(self, directory, filename, session, path, name, extension,
          spatial=False, spatialReferenceID=4236, replaceParamFile=None,
          readIndexMaps=True):
    """Mapping Table read-from-file method.

    Chunks the file by mapping-table card, parses each chunk with its
    registered processor, reads referenced index maps, and finally creates
    the GSSHAPY ORM objects.
    """
    self.fileExtension = extension

    # Map each mapping-table card to the chunk processor that parses it.
    KEYWORDS = {'INDEX_MAP': mtc.indexMapChunk,
                'ROUGHNESS': mtc.mapTableChunk,
                'INTERCEPTION': mtc.mapTableChunk,
                'RETENTION': mtc.mapTableChunk,
                'GREEN_AMPT_INFILTRATION': mtc.mapTableChunk,
                'GREEN_AMPT_INITIAL_SOIL_MOISTURE': mtc.mapTableChunk,
                'RICHARDS_EQN_INFILTRATION_BROOKS': mtc.mapTableChunk,
                'RICHARDS_EQN_INFILTRATION_HAVERCAMP': mtc.mapTableChunk,
                'EVAPOTRANSPIRATION': mtc.mapTableChunk,
                'WELL_TABLE': mtc.mapTableChunk,
                'OVERLAND_BOUNDARY': mtc.mapTableChunk,
                'TIME_SERIES_INDEX': mtc.mapTableChunk,
                'GROUNDWATER': mtc.mapTableChunk,
                'GROUNDWATER_BOUNDARY': mtc.mapTableChunk,
                'AREA_REDUCTION': mtc.mapTableChunk,
                'WETLAND_PROPERTIES': mtc.mapTableChunk,
                'MULTI_LAYER_SOIL': mtc.mapTableChunk,
                'SOIL_EROSION_PROPS': mtc.mapTableChunk,
                'CONTAMINANT_TRANSPORT': mtc.contamChunk,
                'SEDIMENTS': mtc.sedimentChunk}

    indexMaps = dict()  # maps index-map name -> IndexMap object
    mapTables = []      # parsed map-table chunk results

    with io_open(path, 'r') as f:
        chunks = pt.chunk(KEYWORDS, f)

    for key, chunkList in iteritems(chunks):
        for chunk in chunkList:
            # Dispatch to the processor registered for this card.
            result = KEYWORDS[key](key, chunk)
            if key == 'INDEX_MAP':
                # Create the IndexMap ORM object and remember it by name so
                # map tables can be linked to it later.
                indexMap = IndexMap(name=result['idxName'])
                indexMaps[result['idxName']] = indexMap
                indexMap.mapTableFile = self
                if readIndexMaps:
                    indexMap.read(directory=directory,
                                  filename=result['filename'],
                                  session=session,
                                  spatial=spatial,
                                  spatialReferenceID=spatialReferenceID)
                else:
                    # Only record the path; the raster itself is not loaded.
                    indexMap.filename = result['filename']
            else:
                # Empty results (e.g. unsupported tables) are skipped.
                if result:
                    mapTables.append(result)

    self._createGsshaPyObjects(mapTables, indexMaps, replaceParamFile,
                               directory, session, spatial, spatialReferenceID)
Mapping Table Read from File Method
4,724
def _write(self, session, openFile, replaceParamFile=None, writeIndexMaps=True):
    """Mapping Table write-to-file method."""
    directory = os.path.split(openFile.name)[0]

    # First pass: derive the unique, sorted contaminant list now, because it
    # is needed when the CONTAMINANT_TRANSPORT table is written below.
    for mapTable in self.getOrderedMapTables(session):
        if mapTable.name == 'CONTAMINANT_TRANSPORT':
            contaminantList = []
            for mtValue in mapTable.values:
                if mtValue.contaminant not in contaminantList:
                    contaminantList.append(mtValue.contaminant)
            contaminants = sorted(contaminantList, key=lambda x: (x.indexMap.name, x.name))

    # Write the index-map declaration block.
    openFile.write('GSSHA_INDEX_MAP_TABLES\n')
    for indexMap in self.indexMaps:
        openFile.write('INDEX_MAP%s"%s" "%s"\n' % (' ' * 16, indexMap.filename, indexMap.name))
        if writeIndexMaps:
            # Also write the index-map raster file itself.
            indexMap.write(directory, session=session)

    # Second pass: write each map table with its specialised writer.
    for mapTable in self.getOrderedMapTables(session):
        if mapTable.name == 'SEDIMENTS':
            self._writeSedimentTable(session=session,
                                     fileObject=openFile,
                                     mapTable=mapTable,
                                     replaceParamFile=replaceParamFile)
        elif mapTable.name == 'CONTAMINANT_TRANSPORT':
            # `contaminants` was bound in the first pass above; this branch
            # is only reachable when that table exists.
            self._writeContaminantTable(session=session,
                                        fileObject=openFile,
                                        mapTable=mapTable,
                                        contaminants=contaminants,
                                        replaceParamFile=replaceParamFile)
        else:
            self._writeMapTable(session=session,
                                fileObject=openFile,
                                mapTable=mapTable,
                                replaceParamFile=replaceParamFile)
Map Table Write to File Method
4,725
def getOrderedMapTables(self, session):
    """Retrieve this file's map tables from the database, ordered by name."""
    query = session.query(MapTable).filter(MapTable.mapTableFile == self)
    return query.order_by(MapTable.name).all()
Retrieve the map tables ordered by name
4,726
def deleteMapTable(self, name, session):
    """Remove any existing map tables named *name* (and their index maps)."""
    duplicates = (session.query(MapTable)
                  .filter(MapTable.mapTableFile == self)
                  .filter(MapTable.name == name)
                  .all())
    for table in duplicates:
        # Delete the associated index map first, if one is attached.
        if table.indexMap:
            session.delete(table.indexMap)
        session.delete(table)
    session.commit()
Remove duplicate map table if it exists
4,727
def _createGsshaPyObjects(self, mapTables, indexMaps, replaceParamFile, directory, session, spatial, spatialReferenceID):
    """Create GSSHAPY mapping-table ORM objects from parsed chunk results."""
    for mt in mapTables:
        try:
            # Probe the index-map lookup first: a missing map raises
            # KeyError, which skips this whole table (see handler below).
            if mt['indexMapName'] is not None:
                indexMaps[mt['indexMapName']]

            # Create the GSSHAPY MapTable object from the parsed counters.
            mapTable = MapTable(name=mt['name'],
                                numIDs=mt['numVars']['NUM_IDS'],
                                maxNumCells=mt['numVars']['MAX_NUMBER_CELLS'],
                                numSed=mt['numVars'].get('NUM_SED'),
                                numContam=mt['numVars'].get('NUM_CONTAM'),
                                maxSoilID=mt['numVars'].get('MAX_SOIL_ID'))
            mapTable.mapTableFile = self
            if mt['indexMapName']:
                mapTable.indexMap = indexMaps[mt['indexMapName']]

            if mt['name'] == 'CONTAMINANT_TRANSPORT':
                for contam in mt['contaminants']:
                    # Strip the output path down to its base filename.
                    outputBaseFilename = self._preprocessContaminantOutFilePath(contam['outPath'])
                    contaminant = MTContaminant(name=contam['name'],
                                                outputFilename=outputBaseFilename,
                                                precipConc=vrp(contam['contamVars']['PRECIP_CONC'], replaceParamFile),
                                                partition=vrp(contam['contamVars']['PARTITION'], replaceParamFile),
                                                numIDs=contam['contamVars']['NUM_IDS'])
                    indexMap = indexMaps[contam['indexMapName']]
                    contaminant.indexMap = indexMap
                    self._createValueObjects(contam['valueList'], contam['varList'], mapTable, indexMap, contaminant, replaceParamFile)
                    self._readContaminantOutputFiles(directory, outputBaseFilename, session, spatial, spatialReferenceID)
            elif mt['name'] == 'SEDIMENTS':
                # Sediment rows are positional: description, spec. gravity,
                # particle diameter, output filename.
                for line in mt['valueList']:
                    sediment = MTSediment(description=line[0],
                                          specificGravity=vrp(line[1], replaceParamFile),
                                          particleDiameter=vrp(line[2], replaceParamFile),
                                          outputFilename=line[3])
                    sediment.mapTable = mapTable
            else:
                indexMap = indexMaps[mt['indexMapName']]
                self._createValueObjects(mt['valueList'], mt['varList'], mapTable, indexMap, None, replaceParamFile)
        except KeyError:
            # Referenced index map was never read: skip the table and log why.
            log.info(('Index Map "%s" for Mapping Table "%s" not found in list of index maps in the mapping '
                      'table file. The Mapping Table was not read into the database.') % (mt['indexMapName'], mt['name']))
Create GSSHAPY Mapping Table ORM Objects Method
4,728
def _createValueObjects(self, valueList, varList, mapTable, indexMap, contaminant, replaceParamFile):
    """Populate GSSHAPY MTValue and MTIndex objects for one map table."""
    def assign_values_to_table(value_list, layer_id):
        # Create one MTValue per variable/value pair for the current row.
        # NOTE: closes over `mtIndex`, bound in the loop below.
        for i, value in enumerate(value_list):
            value = vrp(value, replaceParamFile)
            mtValue = MTValue(variable=varList[i], value=float(value))
            mtValue.index = mtIndex
            mtValue.mapTable = mapTable
            mtValue.layer_id = layer_id
            if contaminant:
                mtValue.contaminant = contaminant

    for row in valueList:
        mtIndex = MTIndex(index=row['index'], description1=row['description1'], description2=row['description2'])
        mtIndex.indexMap = indexMap
        if len(np.shape(row['values'])) == 2:
            # Multi-layer table: one value list per layer.
            for layer_id, values in enumerate(row['values']):
                assign_values_to_table(values, layer_id)
        else:
            # Single-layer table: everything goes into layer 0.
            assign_values_to_table(row['values'], 0)
Populate GSSHAPY MTValue and MTIndex Objects Method
4,729
def _readContaminantOutputFiles(self, directory, baseFileName, session, spatial, spatialReferenceID):
    """Read any contaminant channel output files found in *directory*."""
    if not os.path.isdir(directory):
        return
    if baseFileName == '':
        return

    # Collect every file whose name contains "<base>.chan".
    chanBaseFileName = '.'.join([baseFileName, 'chan'])
    chanFiles = [entry for entry in os.listdir(directory)
                 if chanBaseFileName in entry]

    for chanFile in chanFiles:
        linkNodeDatasetFile = LinkNodeDatasetFile()
        linkNodeDatasetFile.projectFile = self.projectFile
        try:
            linkNodeDatasetFile.read(directory=directory,
                                     filename=chanFile,
                                     session=session,
                                     spatial=spatial,
                                     spatialReferenceID=spatialReferenceID)
        except:
            # Best effort: a malformed output file must not abort the read.
            log.warning('Attempted to read Contaminant Transport Output file {0}, but failed.'.format(chanFile))
Read any contaminant output files if available
4,730
def _writeMapTable ( self , session , fileObject , mapTable , replaceParamFile ) : fileObject . write ( '%s "%s"\n' % ( mapTable . name , mapTable . indexMap . name ) ) if mapTable . numIDs : fileObject . write ( 'NUM_IDS %s\n' % ( mapTable . numIDs ) ) if mapTable . maxNumCells : fileObject . write ( 'MAX_NUMBER_CELLS %s\n' % ( mapTable . maxNumCells ) ) if mapTable . numSed : fileObject . write ( 'NUM_SED %s\n' % ( mapTable . numSed ) ) if mapTable . maxSoilID : fileObject . write ( 'MAX_SOIL_ID %s\n' % ( mapTable . maxSoilID ) ) self . _writeValues ( session , fileObject , mapTable , None , replaceParamFile )
Write Generic Map Table Method
4,731
def _writeContaminantTable(self, session, fileObject, mapTable, contaminants, replaceParamFile):
    """Write the contaminant transport mapping table."""
    def _fmt(value):
        # Numeric values get two decimal places; replacement tokens (strings
        # from the replace-params lookup) fall through to plain formatting.
        try:
            return '%.2f' % value
        except:
            return '%s' % value

    fileObject.write('%s\n' % (mapTable.name))
    fileObject.write('NUM_CONTAM %s\n' % (mapTable.numContam))

    for contaminant in contaminants:
        fileObject.write('"%s" "%s" %s\n' % (contaminant.name,
                                             contaminant.indexMap.name,
                                             contaminant.outputFilename))
        precipConc = _fmt(vwp(contaminant.precipConc, replaceParamFile))
        partition = _fmt(vwp(contaminant.partition, replaceParamFile))
        fileObject.write('PRECIP_CONC%s%s\n' % (' ' * 10, precipConc))
        fileObject.write('PARTITION%s%s\n' % (' ' * 12, partition))
        fileObject.write('NUM_IDS %s\n' % contaminant.numIDs)
        self._writeValues(session, fileObject, mapTable, contaminant, replaceParamFile)
This method writes the contaminant transport mapping table case .
4,732
def _writeSedimentTable(self, session, fileObject, mapTable, replaceParamFile):
    """Write the sediments mapping table."""
    def _fmt(value):
        # Numeric values get six decimal places; replacement tokens (strings
        # from the replace-params lookup) fall through to plain formatting.
        try:
            return '%.6f' % value
        except:
            return '%s' % value

    fileObject.write('%s\n' % (mapTable.name))
    fileObject.write('NUM_SED %s\n' % (mapTable.numSed))
    fileObject.write('Sediment Description%sSpec. Grav%sPart. Dia%sOutput Filename\n' % (' ' * 22, ' ' * 3, ' ' * 5))

    sediments = (session.query(MTSediment)
                 .filter(MTSediment.mapTable == mapTable)
                 .order_by(MTSediment.id)
                 .all())
    for sediment in sediments:
        # Pad the description column out to 42 characters.
        padding = ' ' * (42 - len(sediment.description))
        specGrav = _fmt(vwp(sediment.specificGravity, replaceParamFile))
        partDiam = _fmt(vwp(sediment.particleDiameter, replaceParamFile))
        fileObject.write('%s%s%s%s%s%s%s\n' % (sediment.description, padding,
                                               specGrav, ' ' * 5,
                                               partDiam, ' ' * 6,
                                               sediment.outputFilename))
Write Sediment Mapping Table Method
4,733
def _valuePivot(self, session, mapTable, contaminant, replaceParaFile):
    """Pivot a map table's values from the database into file-format lines.

    Returns a list of strings (header first) that can be written to the
    mapping-table file directly.
    """
    # All index rows for this table/contaminant combination.
    indexes = (session.query(MTIndex)
               .join(MTValue.index)
               .filter(MTValue.mapTable == mapTable)
               .filter(MTValue.contaminant == contaminant)
               .order_by(MTIndex.index)
               .all())

    # Multi-layer tables repeat the value row once per layer (3 layers).
    layer_indices = [0]
    if mapTable.name in ('MULTI_LAYER_SOIL', 'RICHARDS_EQN_INFILTRATION_BROOKS'):
        layer_indices = range(3)

    lines = []
    values = {}
    for idx in indexes:
        for layer_index in layer_indices:
            values = (session.query(MTValue)
                      .filter(MTValue.mapTable == mapTable)
                      .filter(MTValue.contaminant == contaminant)
                      .filter(MTValue.index == idx)
                      .filter(MTValue.layer_id == layer_index)
                      .order_by(MTValue.id)
                      .all())
            valString = ''
            for val in values:
                # Values <= -9999 act as "no value" sentinels; skip them.
                if val.value <= -9999:
                    continue
                processedValue = vwp(val.value, replaceParaFile)
                try:
                    numString = '%.6f' % processedValue
                except:
                    # Replacement tokens are strings; emit them verbatim.
                    numString = '%s' % processedValue
                valString = '%s%s%s' % (valString, numString, ' ' * 3)

            # Column padding (minimum one space between fields).
            spacing1 = max(1, 6 - len(str(idx.index)))
            spacing2 = max(1, 40 - len(idx.description1))
            spacing3 = max(1, 40 - len(idx.description2))
            if layer_index == 0:
                line = '%s%s%s%s%s%s%s\n' % (idx.index, ' ' * spacing1,
                                             idx.description1, ' ' * spacing2,
                                             idx.description2, ' ' * spacing3,
                                             valString)
            else:
                # Continuation layers are indented under the first line.
                num_prepend_spaces = len(str(idx.index)) + spacing1 + len(idx.description1) + spacing2 + len(idx.description2) + spacing3
                line = '{0}{1}\n'.format(' ' * num_prepend_spaces, valString)
            lines.append(line)

    # Build the variable header row.
    # NOTE(review): this relies on `values` leaking out of the loop above,
    # i.e. the variables of the LAST row processed — confirm that every row
    # shares the same variable list.
    varString = ''
    for idx, val in enumerate(values):
        if val.variable == 'XSEDIMENT':
            # Collapse the trailing sediment variables into a single label.
            if idx >= len(values) - 1:
                varString = '%s%s%s%s' % (varString, mapTable.numSed, ' SEDIMENTS....', ' ' * 2)
        else:
            varString = '%s%s%s' % (varString, val.variable, ' ' * 2)
    header = 'ID%sDESCRIPTION1%sDESCRIPTION2%s%s\n' % (' ' * 4, ' ' * 28, ' ' * 28, varString)
    lines.insert(0, header)
    return lines
This function retrieves the values of a mapping table from the database and pivots them into the format required by the mapping table file. It returns a list of strings that can be written to the file directly.
4,734
def _preprocessContaminantOutFilePath ( outPath ) : if '/' in outPath : splitPath = outPath . split ( '/' ) elif '\\' in outPath : splitPath = outPath . split ( '\\' ) else : splitPath = [ outPath , ] if splitPath [ - 1 ] == '' : outputFilename = splitPath [ - 2 ] else : outputFilename = splitPath [ - 1 ] if '.' in outputFilename : outputFilename = outputFilename . split ( '.' ) [ 0 ] return outputFilename
Preprocess the contaminant output file path to a relative path .
4,735
def addRoughnessMapFromLandUse(self, name,
                               session,
                               land_use_grid,
                               land_use_to_roughness_table=None,
                               land_use_grid_id=None,
                               ):
    """
    Add a ROUGHNESS index map and map table generated from a land use grid.

    :param name: name of the index map to create.
    :param session: SQLAlchemy session used for database operations.
    :param land_use_grid: path to the land use raster.
    :param land_use_to_roughness_table: pandas DataFrame or path to a
        whitespace-delimited lookup table (id, description, roughness).
        If None, land_use_grid_id selects a bundled table.
    :param land_use_grid_id: key into the bundled lookup tables
        ('nga', 'glcf' or 'nlcd').
    """
    # Bundled lookup tables keyed by land use grid id
    LAND_USE_GRID_TABLES = {'nga': 'land_cover_nga.txt',
                            'glcf': 'land_cover_glcf_modis.txt',
                            'nlcd': 'land_cover_nlcd.txt',
                            }

    if isinstance(land_use_to_roughness_table, pd.DataFrame):
        df = land_use_to_roughness_table
    else:
        if land_use_to_roughness_table is None:
            if land_use_grid_id is None:
                raise ValueError("Must have land_use_to_roughness_table or land_use_grid_id set ...")
            # Fall back to the bundled table shipped with the package
            land_use_to_roughness_table = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                       '..', 'grid', 'land_cover',
                                                       LAND_USE_GRID_TABLES[land_use_grid_id])

        land_use_to_roughness_table = os.path.abspath(land_use_to_roughness_table)

        df = pd.read_table(land_use_to_roughness_table,
                           delim_whitespace=True,
                           header=None,
                           skiprows=1,
                           names=('id', 'description', 'roughness'),
                           dtype={'id': 'int', 'description': 'str', 'roughness': 'float'},
                           )

    land_use_grid = os.path.abspath(land_use_grid)

    # Resample the land use grid to the project grid; nearest neighbour
    # preserves the categorical land use ids.
    land_use_resampled = resample_grid(land_use_grid,
                                       self.projectFile.getGrid(),
                                       resample_method=gdalconst.GRA_NearestNeighbour,
                                       as_gdal_grid=True)

    unique_land_use_ids = np.unique(land_use_resampled.np_array())

    # Keep only rows present in the grid; every grid id must be known
    df = df[df.id.isin(unique_land_use_ids)]
    for land_use_id in unique_land_use_ids:
        if land_use_id not in df.id.values:
            raise IndexError("Land use ID {0} not found in table.".format(land_use_id))

    # Replace any existing ROUGHNESS map table
    self.deleteMapTable("ROUGHNESS", session)
    mapTable = MapTable(name="ROUGHNESS",
                        numIDs=len(df.index),
                        maxNumCells=0,
                        numSed=0,
                        numContam=0)
    indexMap = IndexMap(name=name)
    indexMap.mapTableFile = self
    mapTable.indexMap = indexMap
    mapTable.mapTableFile = self

    # One MTIndex/MTValue pair per land use id
    for row in df.itertuples():
        idx = MTIndex(str(row.id), row.description, '')
        idx.indexMap = indexMap
        val = MTValue('ROUGH', row.roughness)
        val.index = idx
        val.mapTable = mapTable

    # An index-map roughness supersedes a uniform MANNING_N card
    manningn_card = self.projectFile.getCard('MANNING_N')
    if manningn_card:
        session.delete(manningn_card)

    session.commit()

    # Write the resampled grid out as the index map raster file
    mapTable.indexMap.filename = '{0}.idx'.format(name)
    with tmp_chdir(self.projectFile.project_directory):
        land_use_resampled.to_grass_ascii(mapTable.indexMap.filename, print_nodata=False)

    # Make sure the project references a mapping table file
    if not self.projectFile.getCard('MAPPING_TABLE'):
        self.projectFile.setCard('MAPPING_TABLE',
                                 '{0}.cmt'.format(self.projectFile.name),
                                 add_quotes=True)
Adds a roughness map from land use file
4,736
def _set_wildcards ( self , inputs = None , outputs = None ) : w = self . _wildcards = set ( ) if outputs and inputs : node , wi = self . nodes , self . _wait_in . get w_crd = { u : node [ u ] for u in inputs if u in outputs or wi ( u , False ) } w . update ( [ k for k , v in w_crd . items ( ) if v . get ( 'wildcard' , True ) ] )
Update wildcards set with the input data nodes that are also outputs .
4,737
def result(self, timeout=None):
    """
    Resolve every asynchronous result in this solution and all of its
    sub-solutions, replacing pending futures with their values.

    :param timeout: maximum seconds to wait for the futures
        (None = wait forever).
    :return: self, with futures replaced in place.
    :raise: the first exception collected from any failed future.
    """
    it, exceptions, future_lists = [], [], []
    from concurrent.futures import Future, wait as wait_fut

    def update(fut, data, key):
        # Register a pending Future (or the futures inside an AsyncList)
        # together with the container/key where its value must be stored.
        if isinstance(fut, Future):
            it.append((fut, data, key))
        elif isinstance(fut, AsyncList) and fut not in future_lists:
            future_lists.append(fut)
            it.extend([(j, fut, i) for i, j in enumerate(fut)
                       if isinstance(j, Future)][::-1])

    # Collect futures from data values, node results, and edge values of
    # every sub-solution workflow.
    for s in self.sub_sol.values():
        for k, v in list(s.items()):
            update(v, s, k)
        for d in s.workflow.nodes.values():
            if 'results' in d:
                update(d['results'], d, 'results')
        for d in s.workflow.edges.values():
            if 'value' in d:
                update(d['value'], d, 'value')

    # Block until all registered futures complete (or timeout expires).
    wait_fut({v[0] for v in it}, timeout)

    # Store awaited values; drop entries whose future failed.
    for f, d, k in it:
        try:
            d[k] = await_result(f, 0)
        except SkipNode as e:
            exceptions.append((f, d, k, e.ex))
            del d[k]
        except (Exception, ExecutorShutdown, DispatcherAbort) as ex:
            exceptions.append((f, d, k, ex))
            del d[k]

    if exceptions:
        # Re-raise the first collected exception.
        raise exceptions[0][-1]
    return self
Set all asynchronous results .
4,738
def _check_targets ( self ) : if self . outputs : targets = self . outputs . copy ( ) def check_targets ( node_id ) : try : targets . remove ( node_id ) return not targets except KeyError : return False else : def check_targets ( node_id ) : return False return check_targets
Returns a function to terminate the ArciDispatch algorithm when all targets have been visited .
4,739
def _get_node_estimations(self, node_attr, node_id):
    """
    Return the data node estimations and its wait_inputs flag. When the
    node does not wait for all inputs and has multiple estimations, only
    the one with minimum distance is kept and the other workflow edges are
    removed.
    """
    # Predecessor edge data in the workflow are the node estimations.
    estimations = self._wf_pred[node_id]

    wait_in = node_attr['wait_inputs']

    # Multiple estimations but the node does not wait for all of them:
    # keep only the nearest estimation.
    if len(estimations) > 1 and not self._wait_in.get(node_id, wait_in):
        # Namespace shortcuts.
        dist, edg_length, adj = self.dist, self._edge_length, self.dmap.adj

        est = []  # Estimations heap ordered by distance.

        for k, v in estimations.items():
            if k is not START:
                d = dist[k] + edg_length(adj[k][node_id], node_attr)
                heapq.heappush(est, (d, k, v))

        # The estimation with minimum distance from the starting node wins.
        estimations = {est[0][1]: est[0][2]}

        # Remove the workflow edges of the discarded estimations.
        self.workflow.remove_edges_from([(v[1], node_id) for v in est[1:]])

    return estimations, wait_in
Returns the data nodes estimations and wait_inputs flag .
4,740
def _set_node_output ( self , node_id , no_call , next_nds = None , ** kw ) : node_attr = self . nodes [ node_id ] node_type = node_attr [ 'type' ] if node_type == 'data' : return self . _set_data_node_output ( node_id , node_attr , no_call , next_nds , ** kw ) elif node_type == 'function' : return self . _set_function_node_output ( node_id , node_attr , no_call , next_nds , ** kw )
Set the node outputs from node inputs .
4,741
def _set_data_node_output(self, node_id, node_attr, no_call, next_nds=None, **kw):
    """
    Set the data node output from its predecessor estimations and propagate
    the value to its successors via workflow edges.

    :return: False when the node evaluation was skipped, True otherwise.
    """
    # Get data node estimations.
    est, wait_in = self._get_node_estimations(node_attr, node_id)

    if not no_call:
        if node_id is PLOT:
            # Inject the solution object itself for the plot node.
            est = est.copy()
            est[PLOT] = {'value': {'obj': self}}

        # args is a single dict of estimations, or just their values when
        # the node has no custom function and does not wait for all inputs.
        sf, args = False, ({k: v['value'] for k, v in est.items()},)
        if not (wait_in or 'function' in node_attr):
            sf, args = True, tuple(args[0].values())

        try:
            # Final estimation of the node.
            value = async_thread(self, args, node_attr, node_id, sf, **kw)
        except SkipNode:
            return False

        if value is not NONE:  # Set data output.
            self[node_id] = value
            value = {'value': value}  # Edge payload.
        else:
            self[node_id] = NONE
            value = {}  # No edge payload.
    # NOTE(review): with no_call=True, `value` is not bound before the
    # propagation below -- confirm against upstream that data nodes are not
    # dispatched with no_call, or that callers guard this path.

    if next_nds:
        wf_add_edge = self._wf_add_edge  # Namespace shortcut.

        for u in next_nds:  # Propagate to the requested successors only.
            wf_add_edge(node_id, u, **value)
    else:
        # Namespace shortcuts.
        n, has, sub_sol = self.nodes, self.workflow.has_edge, self.sub_sol

        def no_visited_in_sub_dsp(i):
            # Skip dispatcher successors whose mapped input was already
            # visited inside the sub-solution.
            node = n[i]
            if node['type'] == 'dispatcher' and has(i, node_id):
                visited = sub_sol[self.index + node['index']]._visited
                return node['inputs'][node_id] not in visited
            return True

        # Successor function nodes still to be seen.
        succ_fun = [u for u in self._succ[node_id] if no_visited_in_sub_dsp(u)]

        # Propagate only when the successors have not been visited yet.
        if succ_fun and succ_fun[0] not in self._visited:
            wf_add_edge = self._wf_add_edge  # Namespace shortcut.

            for u in succ_fun:
                wf_add_edge(node_id, u, **value)

    return True
Set the data node output from node estimations .
4,742
def _set_function_node_output(self, node_id, node_attr, no_call, next_nds=None, **kw):
    """
    Set the function node output from its inputs and add the results to the
    workflow edges towards its output data nodes.

    :return: False when the node was removed or skipped, True otherwise.
    """
    # Output data nodes that are not already estimated.
    o_nds, dist = node_attr['outputs'], self.dist
    output_nodes = next_nds or set(self._succ[node_id]).difference(dist)

    if not output_nodes:  # Nothing requests this node: drop it.
        self.workflow.remove_node(node_id)
        return False

    wf_add_edge = self._wf_add_edge  # Namespace shortcut.

    if no_call:
        # Dry run: add edges only, without evaluating the function.
        for u in output_nodes:
            wf_add_edge(node_id, u)
        return True

    # Collect the input values in the declared input order.
    args = self._wf_pred[node_id]
    args = [args[k]['value'] for k in node_attr['inputs']]

    try:
        # Check the domain and evaluate the function.
        self._check_function_domain(args, node_attr, node_id)
        res = async_thread(self, args, node_attr, node_id, **kw)
        self.workflow.node[node_id]['results'] = res
    except SkipNode:
        return False

    # A single output is wrapped so results zip with the output names.
    for k, v in zip(o_nds, res if len(o_nds) > 1 else [res]):
        if k in output_nodes and v is not NONE:
            wf_add_edge(node_id, k, value=v)

    return True
Set the function node output from node inputs .
4,743
def _add_initial_value(self, data_id, value, initial_dist=0.0, fringe=None,
                       check_cutoff=None, no_call=None):
    """
    Add an initial value to the dispatch, updating workflow, seen, and
    fringe.

    :return: True if the value has been added, False otherwise.
    """
    # Namespace shortcuts for speed.
    nodes, seen, edge_weight = self.nodes, self.seen, self._edge_length
    wf_remove_edge, check_wait_in = self._wf_remove_edge, self.check_wait_in
    wf_add_edge, dsp_in = self._wf_add_edge, self._set_sub_dsp_node_input
    update_view = self._update_meeting

    if fringe is None:
        fringe = self.fringe

    if no_call is None:
        no_call = self.no_call

    check_cutoff = check_cutoff or self.check_cutoff

    if data_id not in nodes:  # Unknown data node.
        return False

    wait_in = nodes[data_id]['wait_inputs']  # Wait-inputs flag.

    index = nodes[data_id]['index']  # Node index.

    wf_add_edge(START, data_id, **value)  # Add the start edge.

    if data_id in self._wildcards:  # Wildcard: feed successors directly.
        self._visited.add(data_id)  # Mark visited.

        self.workflow.add_node(data_id)  # Add node to the workflow.

        for w, edge_data in self.dmap[data_id].items():  # See successors.
            wf_add_edge(data_id, w, **value)  # Set workflow edge.

            node = nodes[w]  # Successor attributes.

            # Evaluate distance.
            vw_dist = initial_dist + edge_weight(edge_data, node)

            update_view(w, vw_dist)  # Update view distance.

            # Check the cutoff limit and whether all inputs are satisfied.
            if check_cutoff(vw_dist):
                wf_remove_edge(data_id, w)  # Beyond cutoff: drop edge.
                continue
            elif node['type'] == 'dispatcher':
                dsp_in(data_id, w, fringe, check_cutoff, no_call, vw_dist)
            elif check_wait_in(True, w):
                continue  # Inputs not yet satisfied: pass the node.

            seen[w] = vw_dist  # Update distance.

            vd = (True, w, self.index + node['index'])  # Virtual distance.

            heapq.heappush(fringe, (vw_dist, vd, (w, self)))  # To heapq.

        return True

    update_view(data_id, initial_dist)  # Update view distance.

    if check_cutoff(initial_dist):  # Check the cutoff limit.
        wf_remove_edge(START, data_id)  # Beyond cutoff: drop edge.
    elif not check_wait_in(wait_in, data_id):  # Inputs satisfied.
        seen[data_id] = initial_dist  # Update distance.

        vd = (wait_in, data_id, self.index + index)  # Virtual distance.

        # Add node to heapq.
        heapq.heappush(fringe, (initial_dist, vd, (data_id, self)))

        return True
    return False
Add initial values updating workflow seen and fringe .
4,744
def _visit_nodes(self, node_id, dist, fringe, check_cutoff, no_call=False, **kw):
    """
    Visit a node, updating workflow, seen, and fringe.

    :return: False when all targets have been visited, True otherwise.
    """
    # Namespace shortcuts.
    wf_rm_edge, wf_has_edge = self._wf_remove_edge, self.workflow.has_edge
    edge_weight, nodes = self._edge_length, self.nodes

    self.dist[node_id] = dist  # Set the minimum distance.
    self._visited.add(node_id)  # Mark visited.

    if not self._set_node_output(node_id, no_call, **kw):
        # Evaluation skipped (error or inputs out of the domain).
        return True

    if self.check_targets(node_id):  # All targets satisfied?
        return False  # Stop the dispatch loop.

    for w, e_data in self.dmap[node_id].items():
        if not wf_has_edge(node_id, w):  # Edge not in the workflow.
            continue

        node = nodes[w]  # Successor attributes.

        vw_d = dist + edge_weight(e_data, node)  # Evaluate distance.

        if check_cutoff(vw_d):  # Check the cutoff limit.
            wf_rm_edge(node_id, w)  # Beyond cutoff: drop edge.
            continue

        if node['type'] == 'dispatcher':
            self._set_sub_dsp_node_input(
                node_id, w, fringe, check_cutoff, no_call, vw_d)
        else:  # See the node.
            self._see_node(w, fringe, vw_d)

    return True
Visits a node , updating the workflow , seen , and fringe .
4,745
def _see_node(self, node_id, fringe, dist, w_wait_in=0):
    """
    See a node, updating seen and fringe.

    :return: True if the node was pushed onto the fringe, False otherwise.
    """
    seen, dists = self.seen, self.dist  # Namespace shortcuts.

    wait_in = self.nodes[node_id]['wait_inputs']  # Wait-inputs flag.

    self._update_meeting(node_id, dist)  # Update view distance.

    if self.check_wait_in(wait_in, node_id):
        pass  # Inputs not yet satisfied: pass the node.
    elif node_id in dists:  # Node already estimated.
        if dist < dists[node_id]:
            # A shorter path after settling implies negative weights.
            raise DispatcherError('Contradictory paths found: '
                                  'negative weights?', sol=self)
    elif node_id not in seen or dist < seen[node_id]:  # Shorter path.
        seen[node_id] = dist  # Update distance.

        index = self.nodes[node_id]['index']  # Node index.

        # Virtual distance used as the heap tie-breaker.
        vd = (w_wait_in + int(wait_in), node_id, self.index + index)

        heapq.heappush(fringe, (dist, vd, (node_id, self)))

        return True
    return False
See a node updating seen and fringe .
4,746
def _remove_unused_nodes ( self ) : nodes , wf_remove_node = self . nodes , self . workflow . remove_node add_visited , succ = self . _visited . add , self . workflow . succ for n in ( set ( self . _wf_pred ) - set ( self . _visited ) ) : node_type = nodes [ n ] [ 'type' ] if node_type == 'data' : continue if node_type == 'dispatcher' and succ [ n ] : add_visited ( n ) i = self . index + nodes [ n ] [ 'index' ] self . sub_sol [ i ] . _remove_unused_nodes ( ) continue wf_remove_node ( n )
Removes unused function and sub - dispatcher nodes .
4,747
def _init_sub_dsp(self, dsp, fringe, outputs, no_call, initial_dist, index,
                  full_name):
    """
    Initialize the dispatcher as a sub-dispatcher solution and merge its
    fringe into the parent fringe (shifting distances by initial_dist).
    """
    # Build the sub-solution with the same class as the parent.
    sol = self.__class__(
        dsp, {}, outputs, False, None, None, no_call, False,
        wait_in=self._wait_in.get(dsp, None), index=self.index + index,
        full_name=full_name
    )
    sol.sub_sol = self.sub_sol  # Share the sub-solutions registry.

    for f in sol.fringe:  # Update the parent fringe.
        # Shift distance and mark the virtual-distance kind as 2.
        item = (initial_dist + f[0], (2,) + f[1][1:], f[-1])
        heapq.heappush(fringe, item)

    return sol
Initialize the dispatcher as sub - dispatcher and update the fringe .
4,748
def _set_sub_dsp_node_input(self, node_id, dsp_id, fringe, check_cutoff,
                            no_call, initial_dist):
    """
    Initialize the sub-dispatcher (if needed) and feed it the input values
    coming from node_id.

    :return: True when the inputs were set, False when the node is passed.
    """
    # Namespace shortcuts.
    node = self.nodes[dsp_id]
    dsp, pred = node['function'], self._wf_pred[dsp_id]
    distances, sub_sol = self.dist, self.sub_sol

    iv_nodes = [node_id]  # Nodes to be added as initial values.

    self._meet[dsp_id] = initial_dist  # Set view distance.

    if self.check_wait_in(node['wait_inputs'], dsp_id):
        return False  # Inputs not yet satisfied: pass the node.

    if dsp_id not in distances:  # First time this dispatcher is reached.
        kw = {}
        dom = self._check_sub_dsp_domain(dsp_id, node, pred, kw)
        if dom is True:
            iv_nodes = pred  # All predecessors respect the domain.
        elif dom is False:
            return False

        # Build and register the sub-solution.
        sub_sol[self.index + node['index']] = sol = self._init_sub_dsp(
            dsp, fringe, node['outputs'], no_call, initial_dist,
            node['index'], self.full_name + (dsp_id,)
        )
        self.workflow.add_node(dsp_id, solution=sol, **kw)

        distances[dsp_id] = initial_dist  # Update the minimum distance.
    else:
        sol = sub_sol[self.index + node['index']]

    for n_id in iv_nodes:
        val = pred[n_id]  # Value coming from the predecessor edge.

        # Map the parent data node onto the sub-dispatcher input node(s).
        for n in stlp(node['inputs'][n_id]):
            sol._add_initial_value(n, val, initial_dist, fringe,
                                   check_cutoff, no_call)

    return True
Initializes the sub - dispatcher and set its inputs .
4,749
def _warning(self, msg, node_id, ex, *args, **kwargs):
    """
    Record a node error and either raise it (as a DispatcherError) or log
    it, depending on the `raises` policy.
    """
    should_raise = self.raises(ex) if callable(self.raises) else self.raises

    if should_raise and isinstance(ex, DispatcherError):
        # Re-raise an existing dispatcher error, bound to this solution.
        ex.update(self)
        raise ex

    # Record the formatted error under the plain node id.
    self._errors[node_id] = msg % ((node_id, ex) + args)

    qualified_id = '/'.join(self.full_name + (node_id,))
    if should_raise:
        raise DispatcherError(msg, qualified_id, ex, *args, sol=self, **kwargs)

    kwargs.setdefault('exc_info', 1)
    log.error(msg, qualified_id, ex, *args, **kwargs)
Handles the error messages .
4,750
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Grid Stream File Read from File Method
    """
    self.fileExtension = extension

    KEYWORDS = ('STREAMCELLS', 'CELLIJ')

    with open(path, 'r') as f:
        chunks = pt.chunk(KEYWORDS, f)

        for key, chunkList in iteritems(chunks):
            for chunk in chunkList:
                if key == 'STREAMCELLS':
                    # Header line: "STREAMCELLS <count>"
                    tokens = chunk[0].strip().split()
                    self.streamCells = tokens[1]
                elif key == 'CELLIJ':
                    # Parse the cell chunk and build the ORM objects
                    self._createGsshaPyObjects(self._cellChunk(chunk))
Grid Stream File Read from File Method
4,751
def _write ( self , session , openFile , replaceParamFile ) : openFile . write ( 'GRIDSTREAMFILE\n' ) openFile . write ( 'STREAMCELLS %s\n' % self . streamCells ) for cell in self . gridStreamCells : openFile . write ( 'CELLIJ %s %s\n' % ( cell . cellI , cell . cellJ ) ) openFile . write ( 'NUMNODES %s\n' % cell . numNodes ) for node in cell . gridStreamNodes : openFile . write ( 'LINKNODE %s %s %.6f\n' % ( node . linkNumber , node . nodeNumber , node . nodePercentGrid ) )
Grid Stream File Write to File Method
4,752
def _createGsshaPyObjects(self, cell):
    """
    Create GSSHAPY GridStreamCell and GridStreamNode objects from a parsed
    CELLIJ chunk dictionary.
    """
    # One cell record per CELLIJ chunk
    streamCell = GridStreamCell(cellI=cell['i'],
                                cellJ=cell['j'],
                                numNodes=cell['numNodes'])
    streamCell.gridStreamFile = self

    # One node record per LINKNODE entry
    for linkNode in cell['linkNodes']:
        streamNode = GridStreamNode(linkNumber=linkNode['linkNumber'],
                                    nodeNumber=linkNode['nodeNumber'],
                                    nodePercentGrid=linkNode['percent'])
        streamNode.gridStreamCell = streamCell
Create GSSHAPY GridStreamCell and GridStreamNode Objects Method
4,753
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Index Map Read from File Method

    Reads a GRASS ASCII raster: the first six lines are the header
    (north/south/east/west extents and row/column counts); the entire file
    text is cached in self.rasterText. With ``spatial`` True, the raster is
    also loaded into the database as a WKB raster.
    """
    # Set file extension property
    self.fileExtension = extension

    # Cache the complete raster text
    with open(path, 'r') as f:
        self.rasterText = f.read()

    # Parse the six header lines
    lines = self.rasterText.split('\n')
    for line in lines[0:6]:
        spline = line.split()
        # Substring matching tolerates trailing ':' on the header keys
        if 'north' in spline[0].lower():
            self.north = float(spline[1])
        elif 'south' in spline[0].lower():
            self.south = float(spline[1])
        elif 'east' in spline[0].lower():
            self.east = float(spline[1])
        elif 'west' in spline[0].lower():
            self.west = float(spline[1])
        elif 'rows' in spline[0].lower():
            self.rows = int(spline[1])
        elif 'cols' in spline[0].lower():
            self.columns = int(spline[1])

    if spatial:
        # Load the raster into the database (-1 marks no-data cells)
        wkbRaster = RasterLoader.grassAsciiRasterToWKB(session=session,
                                                       grassRasterPath=path,
                                                       srid=str(spatialReferenceID),
                                                       noData='-1')
        self.raster = wkbRaster
        self.srid = spatialReferenceID

    # Assign other properties
    self.filename = filename
Index Map Read from File Method
4,754
def write(self, directory, name=None, session=None, replaceParamFile=None):
    """
    Index Map Write to File Method

    Writes the index map raster to ``directory``. If the raster is stored
    in the database it is exported via RasterConverter; otherwise the
    cached raster text is written as-is.

    :param directory: destination directory.
    :param name: optional base name; defaults to self.filename.
    :param session: SQLAlchemy session (required for database export).
    :param replaceParamFile: unused; kept for interface compatibility.
    """
    # Fixed anti-idioms: `name != None` -> `is not None`,
    # `type(self.raster) != type(None)` -> `self.raster is not None`.
    if name is not None:
        filename = '%s.%s' % (name, self.fileExtension)
        filePath = os.path.join(directory, filename)
    else:
        filePath = os.path.join(directory, self.filename)

    if self.raster is not None:
        # Export the database raster as GRASS ASCII text
        converter = RasterConverter(session)
        grassAsciiGrid = converter.getAsGrassAsciiRaster(
            rasterFieldName='raster',
            tableName=self.__tablename__,
            rasterIdFieldName='id',
            rasterId=self.id)

        with open(filePath, 'w') as mapFile:
            mapFile.write(grassAsciiGrid)
    elif self.rasterText is not None:
        # Fall back to the cached raster text
        with open(filePath, 'w') as mapFile:
            mapFile.write(self.rasterText)
Index Map Write to File Method
4,755
def _write ( self , session , openFile , replaceParamFile ) : openFile . write ( 'GRIDPIPEFILE\n' ) openFile . write ( 'PIPECELLS %s\n' % self . pipeCells ) for cell in self . gridPipeCells : openFile . write ( 'CELLIJ %s %s\n' % ( cell . cellI , cell . cellJ ) ) openFile . write ( 'NUMPIPES %s\n' % cell . numPipes ) for node in cell . gridPipeNodes : openFile . write ( 'SPIPE %s %s %.6f\n' % ( node . linkNumber , node . nodeNumber , node . fractPipeLength ) )
Grid Pipe File Write to File Method
4,756
def _createGsshaPyObjects(self, cell):
    """
    Create GSSHAPY GridPipeCell and GridPipeNode objects from a parsed
    CELLIJ chunk dictionary.
    """
    # One cell record per CELLIJ chunk
    pipeCell = GridPipeCell(cellI=cell['i'],
                            cellJ=cell['j'],
                            numPipes=cell['numPipes'])
    pipeCell.gridPipeFile = self

    # One node record per SPIPE entry
    for spipe in cell['spipes']:
        pipeNode = GridPipeNode(linkNumber=spipe['linkNumber'],
                                nodeNumber=spipe['nodeNumber'],
                                fractPipeLength=spipe['fraction'])
        pipeNode.gridPipeCell = pipeCell
Create GSSHAPY GridPipeCell and GridPipeNode Objects Method
4,757
def _cellChunk(self, lines):
    """
    Parse a CELLIJ chunk into a dict with keys 'i', 'j', 'numPipes' and
    'spipes' (a list of per-pipe dicts).
    """
    KEYWORDS = ('CELLIJ', 'NUMPIPES', 'SPIPE')

    result = {'i': None,
              'j': None,
              'numPipes': None,
              'spipes': []}

    for card, chunkList in iteritems(pt.chunk(KEYWORDS, lines)):
        for chunk in chunkList:
            tokens = chunk[0].strip().split()
            if card == 'CELLIJ':
                result['i'], result['j'] = tokens[1], tokens[2]
            elif card == 'NUMPIPES':
                result['numPipes'] = tokens[1]
            elif card == 'SPIPE':
                result['spipes'].append({'linkNumber': tokens[1],
                                         'nodeNumber': tokens[2],
                                         'fraction': tokens[3]})
    return result
Parse CELLIJ Chunk Method
4,758
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Replace Param File Read from File Method
    """
    self.fileExtension = extension

    with open(path, 'r') as f:
        for line in f:
            tokens = line.strip().split()
            if len(tokens) == 1:
                # Header line: the number of target parameters
                self.numParameters = tokens[0]
            else:
                # "<variable> <format>" pair
                target = TargetParameter(targetVariable=tokens[0],
                                         varFormat=tokens[1])
                target.replaceParamFile = self
Replace Param File Read from File Method
4,759
def _write ( self , session , openFile , replaceParamFile ) : targets = self . targetParameters openFile . write ( '%s\n' % self . numParameters ) for target in targets : openFile . write ( '%s %s\n' % ( target . targetVariable , target . varFormat ) )
Replace Param File Write to File Method
4,760
def _read(self, directory, filename, session, path, name, extension,
          spatial, spatialReferenceID, replaceParamFile):
    """
    Replace Val File Read from File Method
    """
    self.fileExtension = extension

    with open(path, 'r') as f:
        for line in f:
            # Every raw line becomes one ReplaceValLine record
            record = ReplaceValLine()
            record.contents = line
            record.replaceValFile = self
Replace Val File Read from File Method
4,761
def _write ( self , session , openFile , replaceParamFile ) : for line in self . lines : openFile . write ( line . contents )
Replace Val File Write to File Method
4,762
def emit(self, data_frame):
    """
    Emit a data frame into the store. May be called at most once; a second
    call raises MultipleEmitsError. Column names are prefixed with
    '<prefix>__' (note: mutates data_frame.columns in place).
    """
    if self.result is not None:
        raise MultipleEmitsError()
    prefix = self.prefix + '__'
    data_frame.columns = [prefix + column for column in data_frame.columns]
    self.result = data_frame
Use this function to emit data into the store .
4,763
def trigger_hats(self, command, arg=None, callback=None):
    """
    Trigger matching hat blocks on the stage and every sprite.

    :return: list with each script thread that was triggered.
    """
    threads = []
    for scriptable in [self.project.stage] + self.project.sprites:
        threads.extend(
            self.trigger_scriptable_hats(scriptable, command, arg, callback))
    return threads
Returns a list with each script that is triggered .
4,764
def push_script(self, scriptable, script, callback=None):
    """
    Queue a script for execution, finishing any already-running instance of
    the same script first.

    :return: the new Thread.
    """
    if script in self.threads:
        # Only one running instance per script is allowed
        self.threads[script].finish()

    thread = Thread(self.run_script(scriptable, script), scriptable, callback)
    self.new_threads[script] = thread
    return thread
Run the script and add it to the list of threads .
4,765
def tick(self, events):
    """
    Execute one frame of the interpreter: handle drag state and input
    events, then tick every running script thread, yielding the events
    each thread produces.

    NOTE(review): this is Python 2 style code (``while 1``, deleting from
    ``dict.items()`` while iterating) -- confirm the runtime before
    modernising.
    """
    self.add_new_threads()

    # Keep a dragged sprite glued to the mouse pointer
    if self.drag_sprite:
        (mx, my) = self.screen.get_mouse_pos()
        (ox, oy) = self.drag_offset
        new_position = (mx + ox, my + oy)
        if self.drag_sprite.position != new_position:
            self.has_dragged = True
            self.drag_sprite.position = new_position

    for event in events:
        if event.kind == "key_pressed":
            assert event.value in kurt.Insert(None, "key").options()
            self.trigger_hats("whenKeyPressed", event.value)
        elif event.kind == "mouse_down":
            mouse_pos = self.screen.get_mouse_pos()
            # Topmost sprite under the mouse wins; otherwise the stage
            for sprite in reversed(self.project.sprites):
                rect = bounds(sprite)
                if rect.collide_point(mouse_pos):
                    if self.screen.touching_mouse(sprite):
                        scriptable = sprite
                        break
            else:
                scriptable = self.project.stage

            if scriptable.is_draggable:
                # Begin a drag: remember the click offset from the sprite
                (mx, my) = self.screen.get_mouse_pos()
                (x, y) = scriptable.position
                self.drag_offset = (x - mx, y - my)
                self.drag_sprite = scriptable
                self.has_dragged = False
                go_to_front(scriptable)
            else:
                self.trigger_scriptable_hats(scriptable, "whenClicked")
        elif event.kind == "mouse_up":
            if self.drag_sprite:
                if not self.has_dragged:
                    # A click without movement still counts as a click
                    self.trigger_scriptable_hats(self.drag_sprite,
                                                 "whenClicked")
                self.drag_sprite = None

    remove_threads = []
    # Re-run the thread loop from scratch whenever the thread table is
    # modified while iterating; exit once a full pass makes no change.
    while 1:
        for (script, thread) in self.threads.items():
            modified = False
            for event in thread.tick():
                if event.kind == "stop":
                    if event.value == "all":
                        self.stop()
                        return
                    elif event.value == "other scripts in sprite":
                        # Finish sibling scripts on the same scriptable
                        for (script, other) in self.threads.items():
                            if other.scriptable == thread.scriptable:
                                other.finish()
                                del self.threads[script]
                                modified = True
                        break
                    else:
                        # Stop this script only
                        thread.finish()
                        del self.threads[script]
                        modified = True
                        break
                else:
                    yield event
            if modified:
                break
        else:
            break

    self.add_new_threads()
Execute one frame of the interpreter .
4,766
def stop(self):
    """
    Stop all running threads and reset the ask/answer state.
    """
    self.threads, self.new_threads = {}, {}
    self.answer, self.ask_lock = "", False
Stop running threads .
4,767
def evaluate(self, s, value, insert=None):
    """
    Expression evaluator: resolve a block (or literal) to a value,
    recursively evaluating its arguments and coercing the result to the
    type expected by ``insert``.

    NOTE(review): Python 2 code (``basestring``/``unicode``) -- keep as-is
    unless the whole project is ported.
    """
    assert not isinstance(value, kurt.Script)

    # Unevaluated inserts (e.g. substacks) are passed through untouched.
    if insert and insert.unevaluated:
        return value

    if isinstance(value, kurt.Block):
        if value.type.shape == "hat":
            return []  # Hat blocks have no runtime value.

        if value.type not in self.COMMANDS:
            # Try a registered workaround before giving up.
            if getattr(value.type, '_workaround', None):
                value = value.type._workaround(value)
                if not value:
                    raise kurt.BlockNotSupported(value.type)
            else:
                raise kurt.BlockNotSupported(value.type)

        f = self.COMMANDS[value.type]

        # Evaluate arguments recursively, each against its insert spec.
        args = [self.evaluate(s, arg, arg_insert)
                for (arg, arg_insert)
                in zip(list(value.args), value.type.inserts)]

        value = f(s, *args)

        def flatten_generators(gen):
            # Yield items from nested generators as one flat stream.
            for item in gen:
                if inspect.isgenerator(item):
                    for x in flatten_generators(item):
                        yield x
                else:
                    yield item
        if inspect.isgenerator(value):
            value = flatten_generators(value)

        if value is None:
            value = []

    if insert:
        if isinstance(value, basestring):
            value = unicode(value)

        if insert.shape in ("number", "number-menu", "string"):
            # Numeric coercion; non-numeric strings stay strings unless a
            # plain number is required (then default to 0).
            try:
                value = float(value)
            except (TypeError, ValueError):
                if insert.shape == "number":
                    value = 0
            if isinstance(value, float) and value == int(value):
                value = int(value)  # Whole floats display as ints.

        if insert.kind in ("spriteOrStage", "spriteOrMouse", "stageOrThis",
                           "spriteOnly", "touching"):
            # Resolve scriptable names ('mouse-pointer'/'edge' stay words).
            if value not in ("mouse-pointer", "edge"):
                value = (self.project.stage if value == "Stage"
                         else self.project.get_sprite(value))
        elif insert.kind == "var":
            # Sprite-local variables shadow project globals.
            if value in s.variables:
                value = s.variables[value]
            else:
                value = s.project.variables[value]
        elif insert.kind == "list":
            # Sprite-local lists shadow project globals.
            if value in s.lists:
                value = s.lists[value]
            else:
                value = s.project.lists[value]
        elif insert.kind == "sound":
            for sound in s.sounds:
                if sound.name == value:
                    value = sound
                    break

    return value
Expression evaluator .
4,768
def get_cluster_name(self):
    """Name identifying this RabbitMQ cluster."""
    endpoint = self.url + '/api/cluster-name'
    return self._get(url=endpoint, headers=self.headers, auth=self.auth)
Name identifying this RabbitMQ cluster .
4,769
def get_connection(self, name):
    """An individual connection."""
    return self._api_get('/api/connections/' + urllib.parse.quote_plus(name))
An individual connection .
4,770
def delete_connection(self, name, reason=None):
    """Close an individual connection, optionally giving a reason via the
    'X-Reason' header."""
    headers = {}
    if reason:
        headers['X-Reason'] = reason
    self._api_delete('/api/connections/' + urllib.parse.quote_plus(name),
                     headers=headers)
Closes an individual connection . Give an optional reason
4,771
def list_connection_channels(self, name):
    """List of all channels for a given connection."""
    safe_name = urllib.parse.quote_plus(name)
    return self._api_get('/api/connections/%s/channels' % safe_name)
List of all channels for a given connection .
4,772
def get_channel(self, name):
    """Details about an individual channel."""
    return self._api_get('/api/channels/' + urllib.parse.quote_plus(name))
Details about an individual channel .
4,773
def list_consumers_for_vhost(self, vhost):
    """A list of all consumers in a given virtual host."""
    return self._api_get('/api/consumers/%s' % urllib.parse.quote_plus(vhost))
A list of all consumers in a given virtual host .
4,774
def list_exchanges_for_vhost(self, vhost):
    """A list of all exchanges in a given virtual host."""
    return self._api_get('/api/exchanges/%s' % urllib.parse.quote_plus(vhost))
A list of all exchanges in a given virtual host .
4,775
def get_exchange_for_vhost(self, exchange, vhost):
    """An individual exchange."""
    quote = urllib.parse.quote_plus
    return self._api_get('/api/exchanges/%s/%s' % (quote(vhost), quote(exchange)))
An individual exchange
4,776
def delete_exchange_for_vhost(self, exchange, vhost, if_unused=False):
    """
    Delete an individual exchange. With if_unused=True the delete fails if
    the exchange is bound to a queue or is a source for another exchange.
    """
    quote = urllib.parse.quote_plus
    path = '/api/exchanges/%s/%s' % (quote(vhost), quote(exchange))
    self._api_delete(path, params={'if-unused': if_unused})
Delete an individual exchange . You can add the parameter if_unused = True . This prevents the delete from succeeding if the exchange is bound to a queue or as a source to another exchange .
4,777
def list_bindings_for_vhost(self, vhost):
    """A list of all bindings in a given virtual host."""
    return self._api_get('/api/bindings/%s' % urllib.parse.quote_plus(vhost))
A list of all bindings in a given virtual host .
4,778
def get_vhost(self, name):
    """Details about an individual vhost."""
    return self._api_get('/api/vhosts/' + urllib.parse.quote_plus(name))
Details about an individual vhost .
4,779
def create_vhost(self, name, tracing=False):
    """Create an individual vhost, optionally with message tracing on."""
    payload = {'tracing': True} if tracing else {}
    self._api_put('/api/vhosts/' + urllib.parse.quote_plus(name),
                  data=payload)
Create an individual vhost .
4,780
def get_user(self, name):
    """Details about an individual user."""
    return self._api_get('/api/users/' + urllib.parse.quote_plus(name))
Details about an individual user .
4,781
def list_user_permissions(self, name):
    """A list of all permissions for a given user."""
    safe_name = urllib.parse.quote_plus(name)
    return self._api_get('/api/users/%s/permissions' % safe_name)
A list of all permissions for a given user .
4,782
def list_policies_for_vhost(self, vhost):
    """A list of all policies for a vhost."""
    return self._api_get('/api/policies/%s' % urllib.parse.quote_plus(vhost))
A list of all policies for a vhost .
4,783
def get_policy_for_vhost(self, vhost, name):
    """Get a specific policy for a vhost."""
    quote = urllib.parse.quote_plus
    return self._api_get('/api/policies/%s/%s' % (quote(vhost), quote(name)))
Get a specific policy for a vhost .
4,784
def create_policy_for_vhost(self, vhost, name, definition, pattern=None,
                            priority=0, apply_to='all'):
    """Create a policy for a vhost."""
    payload = {
        "pattern": pattern,
        "definition": definition,
        "priority": priority,
        "apply-to": apply_to,
    }
    quote = urllib.parse.quote_plus
    self._api_put('/api/policies/%s/%s' % (quote(vhost), quote(name)),
                  data=payload)
Create a policy for a vhost .
4,785
def delete_policy_for_vhost(self, vhost, name):
    """
    Delete a specific policy for a vhost.

    The endpoint is '/api/policies/{vhost}/{name}' -- the previous trailing
    slash did not match the RabbitMQ management API nor the sibling
    get/create policy methods.
    """
    self._api_delete('/api/policies/{0}/{1}'.format(
        urllib.parse.quote_plus(vhost),
        urllib.parse.quote_plus(name),
    ))
Delete a specific policy for a vhost .
4,786
def is_vhost_alive(self, vhost):
    """Declares a test queue, then publishes and consumes a message.
    Intended for use by monitoring tools."""
    safe_vhost = urllib.parse.quote_plus(vhost)
    return self._api_get('/api/aliveness-test/%s' % safe_vhost)
Declares a test queue then publishes and consumes a message . Intended for use by monitoring tools .
4,787
def write(self, session, directory, name, maskMap):
    """
    Write from database to file.

    :param session: SQLAlchemy session.
    :param directory: destination directory.
    :param name: target file name; text before the first dot is the base
        name, text after the last dot (if any) is the extension. Without
        an extension, self.fileExtension is used.
    :param maskMap: mask map passed through to the file writer.
    """
    # Split "name.ext" into base name (before the first dot) and extension
    name_split = name.split('.')
    name = name_split[0]
    extension = name_split[-1] if len(name_split) >= 2 else ''

    # Run the name preprocessor if this file type defines one; a missing or
    # failing preprocessor is non-fatal (best-effort behaviour preserved).
    # Fixed: previously a bare `except:` with a no-op string statement,
    # which also swallowed KeyboardInterrupt/SystemExit.
    try:
        name = self._namePreprocessor(name)
    except Exception:
        pass

    filename = '{0}.{1}'.format(name, extension or self.fileExtension)
    filePath = os.path.join(directory, filename)

    with open(filePath, 'w') as openFile:
        # Delegate the content to the type-specific writer
        self._write(session=session, openFile=openFile, maskMap=maskMap)
Write from database to file .
4,788
def getAsKmlGridAnimation(self, session, projectFile=None, path=None,
                          documentName=None, colorRamp=None, alpha=1.0,
                          noDataValue=0.0):
    """
    Retrieve the WMS dataset as a gridded time stamped KML string.

    :param session: SQLAlchemy session bound to the raster converter.
    :param projectFile: project file used to assemble raster parameters.
    :param path: optional path; when given, the KML string is also written
        to this file.
    :param documentName: KML document name (defaults to the file extension).
    :param colorRamp: dict (custom ramp) or a default-ramp selector.
    :param alpha: opacity of the rendered grid.
    :param noDataValue: raster value treated as no-data.
    :return: the KML string.
    """
    timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)

    converter = RasterConverter(sqlAlchemyEngineOrSession=session)

    # A dict describes a custom ramp; anything else selects a default ramp
    if isinstance(colorRamp, dict):
        converter.setCustomColorRamp(colorRamp['colors'],
                                     colorRamp['interpolatedPoints'])
    else:
        converter.setDefaultColorRamp(colorRamp)

    if documentName is None:
        documentName = self.fileExtension

    kmlString = converter.getAsKmlGridAnimation(
        tableName=WMSDatasetRaster.tableName,
        timeStampedRasters=timeStampedRasters,
        rasterIdFieldName='id',
        rasterFieldName='raster',
        documentName=documentName,
        alpha=alpha,
        noDataValue=noDataValue)

    if path:
        with open(path, 'w') as f:
            f.write(kmlString)

    return kmlString
Retrieve the WMS dataset as a gridded time stamped KML string .
4,789
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'):
    """Retrieve the WMS dataset as a PNG time stamped KMZ.

    :param session: SQLAlchemy session bound to the raster tables
    :param projectFile: optional project file used to assemble raster params
    :param path: when given, a .kmz archive (KML plus PNG frames) is written
        next to this path using its base name
    :param documentName: KML document name; defaults to the file extension
    :param colorRamp: dict with 'colors'/'interpolatedPoints' for a custom
        ramp, or a ramp identifier for the default ramps
    :param alpha: opacity applied to the rendered cells
    :param noDataValue: raster value treated as "no data"
    :param drawOrder: KML draw order for the overlays
    :param cellSize: optional resampling cell size
    :param resampleMethod: GDAL resample method name
    :returns: tuple of (kml string, list of binary PNG strings)
    """
    rasterParams = self._assembleRasterParams(projectFile, self.rasters)

    converter = RasterConverter(sqlAlchemyEngineOrSession=session)

    # Custom ramps arrive as a dict; anything else selects a default ramp.
    if isinstance(colorRamp, dict):
        converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
    else:
        converter.setDefaultColorRamp(colorRamp)

    docName = documentName if documentName is not None else self.fileExtension

    kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(
        tableName=WMSDatasetRaster.tableName,
        timeStampedRasters=rasterParams,
        rasterIdFieldName='id',
        rasterFieldName='raster',
        documentName=docName,
        alpha=alpha,
        drawOrder=drawOrder,
        cellSize=cellSize,
        noDataValue=noDataValue,
        resampleMethod=resampleMethod,
    )

    if path:
        directory = os.path.dirname(path)
        archiveName = (os.path.split(path)[1]).split('.')[0]
        kmzPath = os.path.join(directory, (archiveName + '.kmz'))

        # Bundle the KML and every PNG frame into one KMZ archive.
        with ZipFile(kmzPath, 'w') as kmz:
            kmz.writestr(archiveName + '.kml', kmlString)
            for index, pngString in enumerate(binaryPngStrings):
                kmz.writestr('raster{0}.png'.format(index), pngString)

    return kmlString, binaryPngStrings
Retrieve the WMS dataset as a PNG time stamped KMZ
4,790
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, maskMap):
    """WMS Dataset File Read from File Method.

    Parses a WMS dataset file into header attributes plus one
    WMSDatasetRaster per time step. The mask map supplies the grid
    geometry (rows/columns/extents) that the WMS dataset file itself
    does not carry; without a 'msk' RasterMapFile only a warning is
    logged and nothing is read.

    :param session: SQLAlchemy session the objects are added to
    :param path: full path of the dataset file to read
    :param extension: file extension recorded on this object
    :param spatial: when truthy, build a PostGIS WKB raster per time step;
        otherwise store the raw raster text
    :param spatialReferenceID: SRID used for the WKB rasters
    :param maskMap: RasterMapFile with extension 'msk' providing the grid
    """
    self.fileExtension = extension

    if isinstance(maskMap, RasterMapFile) and maskMap.fileExtension == 'msk':
        # Grid geometry comes from the mask map.
        columns = maskMap.columns
        rows = maskMap.rows
        upperLeftX = maskMap.west
        upperLeftY = maskMap.north
        # Square cells: Y size mirrors X size, negated for a north-up raster.
        cellSizeX = int(abs(maskMap.west - maskMap.east) / columns)
        cellSizeY = -1 * cellSizeX

        # Split the file into DATASET (header) and TS (time step) chunks.
        KEYWORDS = {'DATASET': wdc.datasetHeaderChunk,
                    'TS': wdc.datasetScalarTimeStepChunk}

        with open(path, 'r') as f:
            chunks = pt.chunk(KEYWORDS, f)

        # Parse the single header chunk, then every time step chunk.
        header = wdc.datasetHeaderChunk('DATASET', chunks['DATASET'][0])

        timeStepRasters = []
        for chunk in chunks['TS']:
            timeStepRasters.append(wdc.datasetScalarTimeStepChunk(chunk, columns, header['numberCells']))

        self.name = header['name']
        self.numberCells = header['numberCells']
        self.numberData = header['numberData']
        self.objectID = header['objectID']

        # BEGSCL marks a scalar dataset, BEGVEC a vector dataset.
        if header['type'] == 'BEGSCL':
            self.objectType = header['objectType']
            self.type = self.SCALAR_TYPE
        elif header['type'] == 'BEGVEC':
            self.vectorType = header['objectType']
            self.type = self.VECTOR_TYPE

        # One WMSDatasetRaster per time step (timeStep is 1-based).
        for timeStep, timeStepRaster in enumerate(timeStepRasters):
            wmsRasterDatasetFile = WMSDatasetRaster()
            wmsRasterDatasetFile.wmsDataset = self
            wmsRasterDatasetFile.iStatus = timeStepRaster['iStatus']
            wmsRasterDatasetFile.timestamp = timeStepRaster['timestamp']
            wmsRasterDatasetFile.timeStep = timeStep + 1

            if spatial:
                wmsRasterDatasetFile.raster = RasterLoader.makeSingleBandWKBRaster(
                    session, columns, rows, upperLeftX, upperLeftY,
                    cellSizeX, cellSizeY, 0, 0, spatialReferenceID,
                    timeStepRaster['cellArray'])
            else:
                wmsRasterDatasetFile.rasterText = timeStepRaster['rasterText']

        # Register this dataset with the session once all rasters are attached.
        session.add(self)
    else:
        log.warning("Could not read {0}. Mask Map must be supplied "
                    "to read WMS Datasets.".format(filename))
WMS Dataset File Read from File Method
4,791
def _write(self, session, openFile, maskMap):
    """WMS Dataset File Write to File Method.

    Serializes the dataset header, then one TS block per raster, and the
    closing ENDDS tag. Lines use CR/LF endings as required by the format.

    :param session: SQLAlchemy session used to render stored rasters
    :param openFile: writable text file object
    :param maskMap: RasterMapFile whose values become the per-cell status
        block emitted for time steps with iStatus == 1
    """
    # Index of the first cell value after the GRASS ASCII grid header
    # tokens (presumably 6 'key: value' header lines = 12 tokens — verify
    # against getAsGrassAsciiGrid output).
    FIRST_VALUE_INDEX = 12

    # Header section.
    openFile.write('DATASET\r\n')

    if self.type == self.SCALAR_TYPE:
        openFile.write('OBJTYPE {0}\r\n'.format(self.objectType))
        openFile.write('BEGSCL\r\n')
    elif self.type == self.VECTOR_TYPE:
        openFile.write('VECTYPE {0}\r\n'.format(self.vectorType))
        openFile.write('BEGVEC\r\n')

    openFile.write('OBJID {0}\r\n'.format(self.objectID))
    openFile.write('ND {0}\r\n'.format(self.numberData))
    openFile.write('NC {0}\r\n'.format(self.numberCells))
    openFile.write('NAME {0}\r\n'.format(self.name))

    # Build the cell-status block once from the mask map (spatial raster if
    # available, otherwise the raw raster text), one value per line.
    statusString = ''
    if isinstance(maskMap, RasterMapFile):
        statusGrassRasterString = maskMap.getAsGrassAsciiGrid(session)

        if statusGrassRasterString is not None:
            statusValues = statusGrassRasterString.split()
        else:
            statusValues = maskMap.rasterText.split()

        for i in range(FIRST_VALUE_INDEX, len(statusValues)):
            statusString += statusValues[i] + '\r\n'

    # One TS block per time step raster.
    for timeStepRaster in self.rasters:
        openFile.write('TS {0} {1}\r\n'.format(timeStepRaster.iStatus, timeStepRaster.timestamp))

        # iStatus == 1 means the status block precedes the cell values.
        if timeStepRaster.iStatus == 1:
            openFile.write(statusString)

        # Prefer the database-rendered values; fall back to raw raster text.
        valueString = timeStepRaster.getAsWmsDatasetString(session)

        if valueString is not None:
            openFile.write(valueString)
        else:
            openFile.write(timeStepRaster.rasterText)

    openFile.write('ENDDS\r\n')
WMS Dataset File Write to File Method
4,792
def getAsWmsDatasetString(self, session):
    """Retrieve the WMS Raster as a string in the WMS Dataset format.

    :param session: SQLAlchemy session used to render the stored raster
    :returns: the cell values, one ``'{:.6f}'``-formatted value per CR/LF
        line, skipping the GRASS ASCII grid header tokens; when no spatial
        raster is stored, falls back to ``self.rasterText``.
    """
    # Index of the first cell value after the GRASS ASCII grid header
    # tokens (presumably 6 'key: value' header lines = 12 tokens — verify
    # against getAsGrassAsciiGrid output).
    FIRST_VALUE_INDEX = 12

    if self.raster is not None:  # idiomatic replacement for type(...) != type(None)
        valueGrassRasterString = self.getAsGrassAsciiGrid(session)
        values = valueGrassRasterString.split()
        lines = ['{0:.6f}\r\n'.format(float(value))
                 for value in values[FIRST_VALUE_INDEX:]]
        return ''.join(lines)

    # BUGFIX: the original assigned rasterText to a local but never
    # returned it, so callers always received None in the non-spatial case.
    return self.rasterText
Retrieve the WMS Raster as a string in the WMS Dataset format
4,793
def check_watershed_boundary_geometry(shapefile_path):
    """Make sure that there are no random artifacts in the file.

    :param shapefile_path: path to the watershed boundary shapefile
    :raises ValueError: if the first geometry is multi-part, which means
        disconnected shapes slipped into the boundary
    """
    boundary = gpd.read_file(shapefile_path)
    geometry = boundary.iloc[0].geometry

    # A multi-part geometry exposes a .geoms attribute.
    if hasattr(geometry, 'geoms'):
        raise ValueError(
            "Invalid watershed boundary geometry. "
            "To fix this, remove disconnected shapes or run "
            "gsshapy.modeling.GSSHAModel.clean_boundary_shapefile")
Make sure that there are no random artifacts in the file .
4,794
def get_batch(sentences, token_dict, ignore_case=False, unk_index=1, eos_index=2):
    """Get a batch of inputs and outputs from given sentences.

    :param sentences: list of token lists (one list per sentence)
    :param token_dict: mapping from token to integer index
    :param ignore_case: lower-case each token before lookup
    :param unk_index: index used for out-of-vocabulary tokens
    :param eos_index: index marking the sentence boundary in the targets
    :returns: ``(inputs, [outputs_forward, outputs_backward])`` where the
        two output arrays carry a trailing singleton axis; the forward
        target at position j is the token at j+1 (EOS at the end), the
        backward target at position j is the token at j-1 (EOS at the start).
    """
    batch_size = len(sentences)
    max_len = max(map(len, sentences))

    inputs = [[0] * max_len for _ in range(batch_size)]
    fwd = [[0] * max_len for _ in range(batch_size)]
    bwd = [[0] * max_len for _ in range(batch_size)]

    for row, sentence in enumerate(sentences):
        # Sentence boundaries: EOS closes the forward targets and opens
        # the backward targets.
        fwd[row][len(sentence) - 1] = eos_index
        bwd[row][0] = eos_index

        for col, token in enumerate(sentence):
            key = token.lower() if ignore_case else token
            index = token_dict.get(key, unk_index)
            inputs[row][col] = index
            if col >= 1:
                fwd[row][col - 1] = index
            if col + 1 < len(sentence):
                bwd[row][col + 1] = index

    fwd = np.expand_dims(np.asarray(fwd), axis=-1)
    bwd = np.expand_dims(np.asarray(bwd), axis=-1)
    return np.asarray(inputs), [fwd, bwd]
Get a batch of inputs and outputs from given sentences .
4,795
def fit(self, inputs, outputs, epochs=1):
    """Simple wrapper of ``model.fit``.

    :param inputs: training inputs, forwarded verbatim
    :param outputs: training targets, forwarded verbatim
    :param epochs: number of passes over the data
    """
    underlying = self.model
    underlying.fit(inputs, outputs, epochs=epochs)
Simple wrapper of model . fit .
4,796
def get_feature_layers(self, input_layer=None, trainable=False, use_weighted_sum=False):
    """Get layers that output the Bi-LM feature.

    :param input_layer: optional replacement input for the cloned model;
        when None, the cloned model's own input is returned as well.
    :param trainable: when False, freeze every layer of the cloned model.
    :param use_weighted_sum: when True, combine all stored RNN layer outputs
        with learned WeightedSum layers instead of taking only the final
        'Bi-LM-Forward'/'Bi-LM-Backward' layer outputs.
    :returns: ``(input_layer, output_layer)`` when *input_layer* is None,
        otherwise just ``output_layer``.
    """
    model = keras.models.clone_model(self.model, input_layer)
    if not trainable:
        # Freeze the language-model weights so downstream training only
        # updates task-specific layers.
        for layer in model.layers:
            layer.trainable = False
    if use_weighted_sum:
        # Recover each RNN layer in the cloned model from its stored output
        # tensor: strip the '/op' suffix, the ':index' suffix, and a
        # trailing '_N' part to get back the bare layer name.
        # NOTE(review): assumes the base layer names contain no '_' —
        # verify against the naming scheme used when the layers were built.
        rnn_layers_forward = list(map(
            lambda x: model.get_layer(x.name.split('/')[0].split(':')[0].split('_')[0]).output,
            self.rnn_layers_forward,
        ))
        rnn_layers_backward = list(map(
            lambda x: model.get_layer(x.name.split('/')[0].split(':')[0].split('_')[0]).output,
            self.rnn_layers_backward,
        ))
        forward_layer = WeightedSum(name='Bi-LM-Forward-Sum')(rnn_layers_forward)
        # Backward outputs are summed first, then flipped back to the
        # input's time order (the mask is reversed the same way).
        backward_layer_rev = WeightedSum(name='Bi-LM-Backward-Sum-Rev')(rnn_layers_backward)
        backward_layer = keras.layers.Lambda(function=self._reverse_x,
                                             mask=lambda _, mask: self._reverse_x(mask),
                                             name='Bi-LM-Backward-Sum')(backward_layer_rev)
    else:
        forward_layer = model.get_layer(name='Bi-LM-Forward').output
        backward_layer = model.get_layer(name='Bi-LM-Backward').output
    # Concatenate forward and backward features into the Bi-LM feature.
    output_layer = keras.layers.Concatenate(name='Bi-LM-Feature')([forward_layer, backward_layer])
    if input_layer is None:
        input_layer = model.layers[0].input
        return input_layer, output_layer
    return output_layer
Get layers that output the Bi - LM feature .
4,797
def join(self, n1):
    """Join the ring via node *n1*.

    If join returns False the node did not enter the ring; the caller
    should retry.

    :param n1: an existing node (proxy) already in the ring, or this node
        itself when bootstrapping a new ring
    :returns: True on success, False when initializing the finger table failed
    """
    if self.id == n1.get_id():
        # Bootstrapping: this node is the only member, so every finger and
        # the predecessor point back at this node's own proxy.
        # NOTE(review): `k` is presumably a module-level finger-table size
        # constant — confirm it is defined in this module.
        for i in range(k):
            self.finger[i] = self.proxy
        self.predecessor = self.proxy
        self.run = True
        return True

    try:
        self.init_finger_table(n1)
    except Exception:
        # BUGFIX: was the Python 2 statement `print 'Join failed'`,
        # a SyntaxError on Python 3.
        print('Join failed')
        return False

    self.run = True
    return True
If join returns False, the node did not enter the ring. Retry it.
4,798
def _read ( self , directory , filename , session , path , name , extension , spatial , spatialReferenceID , replaceParamFile ) : self . fileExtension = extension with open ( path , 'r' ) as nwsrfsFile : for line in nwsrfsFile : sline = line . strip ( ) . split ( ) if sline [ 0 ] . lower ( ) == 'number_bands:' : self . numBands = sline [ 1 ] elif sline [ 0 ] . lower ( ) == 'lower_elevation' : else : record = NwsrfsRecord ( lowerElev = sline [ 0 ] , upperElev = sline [ 1 ] , mfMin = sline [ 2 ] , mfMax = sline [ 3 ] , scf = sline [ 4 ] , frUse = sline [ 5 ] , tipm = sline [ 6 ] , nmf = sline [ 7 ] , fua = sline [ 8 ] , plwhc = sline [ 9 ] ) record . nwsrfsFile = self
NWSRFS Read from File Method
4,799
def _write ( self , session , openFile , replaceParamFile ) : openFile . write ( 'Number_Bands: %s\n' % self . numBands ) openFile . write ( 'Lower_Elevation Upper_Elevation MF_Min MF_Max SCF FR_USE TIPM NMF FUA PCWHC\n' ) records = self . nwsrfsRecords for record in records : openFile . write ( '%s%s%s%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f%s%.1f\n' % ( record . lowerElev , ' ' * ( 17 - len ( str ( record . lowerElev ) ) ) , record . upperElev , ' ' * ( 17 - len ( str ( record . upperElev ) ) ) , record . mfMin , ' ' * ( 8 - len ( str ( record . mfMin ) ) ) , record . mfMax , ' ' * ( 8 - len ( str ( record . mfMax ) ) ) , record . scf , ' ' * ( 5 - len ( str ( record . scf ) ) ) , record . frUse , ' ' * ( 8 - len ( str ( record . frUse ) ) ) , record . tipm , ' ' * ( 6 - len ( str ( record . tipm ) ) ) , record . nmf , ' ' * ( 5 - len ( str ( record . nmf ) ) ) , record . fua , ' ' * ( 5 - len ( str ( record . fua ) ) ) , record . plwhc ) )
NWSRFS Write to File Method