idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
39,200
/**
 * Parses the JSON schema header of a Hadoop2 history file so the rest of the
 * file can be interpreted. For the top-level field named EVENT, walks each
 * declared type, collects its field-name -> field-type pairs, and stores the
 * map in fieldTypes keyed by the Hadoop2RecordType matching the record name.
 *
 * @param schema JSON schema string with a top-level FIELDS array
 * @throws JSONException if the schema is not well-formed JSON or a key is absent
 */
private void understandSchema ( String schema ) throws JSONException {
  JSONObject j1 = new JSONObject ( schema ) ;
  JSONArray fields = j1 . getJSONArray ( FIELDS ) ;
  String fieldName ;
  String fieldTypeValue ;
  Object recName ;
  for ( int k = 0 ; k < fields . length ( ) ; k ++ ) {
    if ( fields . get ( k ) == null ) {
      continue ;
    }
    // re-parse the element as its own JSONObject
    JSONObject allEvents = new JSONObject ( fields . get ( k ) . toString ( ) ) ;
    Object name = allEvents . get ( NAME ) ;
    if ( name != null ) {
      // only the "event" field carries the record type definitions
      if ( name . toString ( ) . equalsIgnoreCase ( EVENT ) ) {
        JSONArray allTypeDetails = allEvents . getJSONArray ( TYPE ) ;
        for ( int i = 0 ; i < allTypeDetails . length ( ) ; i ++ ) {
          JSONObject actual = ( JSONObject ) allTypeDetails . get ( i ) ;
          JSONArray types = actual . getJSONArray ( FIELDS ) ;
          Map < String , String > typeDetails = new HashMap < String , String > ( ) ;
          // collect name -> type for every field of this record type
          for ( int j = 0 ; j < types . length ( ) ; j ++ ) {
            if ( types . getJSONObject ( j ) == null ) {
              continue ;
            }
            fieldName = types . getJSONObject ( j ) . getString ( NAME ) ;
            fieldTypeValue = types . getJSONObject ( j ) . getString ( TYPE ) ;
            if ( ( fieldName != null ) && ( fieldTypeValue != null ) ) {
              typeDetails . put ( fieldName , fieldTypeValue ) ;
            }
          }
          // index the collected field map by the record's enum name;
          // valueOf throws IllegalArgumentException for unknown record names
          recName = actual . get ( NAME ) ;
          if ( recName != null ) {
            fieldTypes . put ( Hadoop2RecordType . valueOf ( recName . toString ( ) ) , typeDetails ) ;
          }
        }
      }
    }
  }
}
understand the schema so that we can parse the rest of the file
39,201
/**
 * Walks every key present in the event details JSON and dispatches each one
 * to processAllTypes so the matching cells are added to the Put.
 *
 * @param eventDetails JSON object holding the event's fields
 * @param p the Put being assembled
 * @param recType the record type these event details belong to
 * @throws JSONException on malformed event details
 */
private void iterateAndPreparePuts ( Put p , Hadoop2RecordType recType , JSONObject eventDetails ) throws JSONException {
  // placeholder - see real signature below
}
iterate over the event details and prepare puts
39,202
/**
 * Maintains compatibility between hadoop 1.0 and 2.0 key names: a 2.0 key is
 * translated to its 1.0 equivalent when a mapping exists, and the result is
 * validated against the JobHistoryKeys enum.
 *
 * @param key raw key name from the history file
 * @return the canonical JobHistoryKeys name
 * @throws IllegalArgumentException if the key is not a known JobHistoryKeys value
 */
private String getKey ( String key ) throws IllegalArgumentException {
  String mapped = key ;
  if ( JobHistoryKeys . HADOOP2_TO_HADOOP1_MAPPING . containsKey ( key ) ) {
    mapped = JobHistoryKeys . HADOOP2_TO_HADOOP1_MAPPING . get ( key ) ;
  }
  return JobHistoryKeys . valueOf ( mapped ) . toString ( ) ;
}
maintains compatibility between hadoop 1 . 0 keys and hadoop 2 . 0 keys . It also confirms that this key exists in JobHistoryKeys enum
39,203
/**
 * Adds a long-valued column to the Put. Zero values reuse the shared
 * ZERO_LONG_BYTES constant instead of serializing a fresh array; the
 * qualifier is the lower-cased canonical key name.
 *
 * @param p the Put to populate
 * @param family column family to write into
 * @param key raw key name (translated via getKey)
 * @param value long value to store
 */
private void populatePut ( Put p , byte [ ] family , String key , long value ) {
  byte [ ] serialized ;
  if ( value != 0L ) {
    serialized = Bytes . toBytes ( value ) ;
  } else {
    serialized = Constants . ZERO_LONG_BYTES ;
  }
  byte [ ] qualifier = Bytes . toBytes ( getKey ( key ) . toLowerCase ( ) ) ;
  p . addColumn ( family , qualifier , serialized ) ;
}
populates a put for long values
39,204
/**
 * Serializes an int counter value using the byte width declared for its key:
 * keys typed as Long in JobHistoryKeys.KEY_TYPES are widened to long (some
 * keys changed from long in hadoop 1.0 to int in 2.0, so widening keeps the
 * stored representation compatible); all others are stored as ints. Zero
 * values reuse the shared ZERO_*_BYTES constants.
 *
 * @param key canonical JobHistoryKeys name
 * @param value int value parsed from the history file
 * @return serialized bytes, 8 bytes for Long-typed keys, 4 otherwise
 * @throws IllegalArgumentException if the key has no declared type
 */
byte [ ] getValue ( String key , int value ) {
  Class < ? > clazz = JobHistoryKeys . KEY_TYPES . get ( JobHistoryKeys . valueOf ( key ) ) ;
  if ( clazz == null ) {
    throw new IllegalArgumentException ( " unknown key " + key + " encountered while parsing " + this . jobKey ) ;
  }
  byte [ ] valueBytes ;
  if ( Long . class . equals ( clazz ) ) {
    // widen via primitive cast instead of the deprecated new Long(value)
    // boxing constructor; Bytes.toBytes(long) produces the same 8 bytes
    valueBytes = ( value != 0 ) ? Bytes . toBytes ( ( long ) value ) : Constants . ZERO_LONG_BYTES ;
  } else {
    valueBytes = ( value != 0 ) ? Bytes . toBytes ( value ) : Constants . ZERO_INT_BYTES ;
  }
  return valueBytes ;
}
gets the int values as ints or longs . some keys in 2 . 0 are now ints that were longs in 1 . 0 ; this will maintain compatibility between 1 . 0 and 2 . 0 by casting those ints to long
39,205
/**
 * Returns the Task ID or Task Attempt ID, stripped of the leading job ID,
 * appended to the job row key. A null fullId yields an empty task component;
 * an id that does not carry the expected "prefix + jobNumber + _" lead-in is
 * used unchanged.
 *
 * @param prefix id prefix (e.g. task/attempt prefix)
 * @param jobNumber numeric job id portion
 * @param fullId the complete task or attempt id, may be null
 * @return serialized TaskKey bytes
 */
public byte [ ] getTaskKey ( String prefix , String jobNumber , String fullId ) {
  String component ;
  if ( fullId == null ) {
    component = "" ;
  } else {
    component = fullId ;
    String lead = prefix + jobNumber + "_" ;
    if ( fullId . startsWith ( lead ) && fullId . length ( ) > lead . length ( ) ) {
      component = fullId . substring ( lead . length ( ) ) ;
    }
  }
  return taskKeyConv . toBytes ( new TaskKey ( this . jobKey , component ) ) ;
}
Returns the Task ID or Task Attempt ID stripped of the leading job ID appended to the job row key .
39,206
/**
 * Returns the AM attempt id (prefix concatenated with the raw id) appended
 * to the job row key as a serialized TaskKey.
 *
 * @param prefix prefix to prepend to the attempt id
 * @param fullId the AM attempt id
 * @return serialized TaskKey bytes
 */
public byte [ ] getAMKey ( String prefix , String fullId ) {
  TaskKey amKey = new TaskKey ( this . jobKey , prefix + fullId ) ;
  return taskKeyConv . toBytes ( amKey ) ;
}
Returns the AM Attempt id stripped of the leading job ID appended to the job row key .
39,207
/**
 * Utility that dumps every Put in the list to stdout for debugging: first
 * the column-family keys, then each KeyValue's row (decoded as a TaskKey),
 * qualifier and value.
 *
 * NOTE(review): relies on the KeyValue-based getFamilyMap view; newer HBase
 * APIs favor Cell — confirm against the HBase version in use.
 *
 * @param p list of Puts to print
 */
public void printAllPuts ( List < Put > p ) {
  for ( Put p1 : p ) {
    Map < byte [ ] , List < KeyValue > > d = p1 . getFamilyMap ( ) ;
    // print the family names
    for ( byte [ ] k : d . keySet ( ) ) {
      System . out . println ( " k " + Bytes . toString ( k ) ) ;
    }
    // print each cell: row key decoded as a TaskKey, then qualifier: value
    for ( List < KeyValue > lkv : d . values ( ) ) {
      for ( KeyValue kv : lkv ) {
        System . out . println ( "\n row: " + taskKeyConv . fromBytes ( kv . getRow ( ) ) + "\n " + Bytes . toString ( kv . getQualifier ( ) ) + ": " + Bytes . toString ( kv . getValue ( ) ) ) ;
      }
    }
  }
}
utility function for printing all puts
39,208
/**
 * Moves a flow_queue record from one row key to another: reads the row at
 * oldKey, copies every cell into a new row at newKey, then deletes the old
 * row. Primarily used to transition a flow's data from one status to
 * another.
 *
 * NOTE(review): the put and delete are separate operations — a failure
 * between them leaves both rows present.
 *
 * @param oldKey current queue key of the row
 * @param newKey destination queue key
 * @throws DataException if no row exists at oldKey
 * @throws IOException on HBase access failure
 */
public void moveFlow ( FlowQueueKey oldKey , FlowQueueKey newKey ) throws DataException , IOException {
  byte [ ] oldRowKey = queueKeyConverter . toBytes ( oldKey ) ;
  Get get = new Get ( oldRowKey ) ;
  Table flowQueueTable = null ;
  try {
    flowQueueTable = hbaseConnection . getTable ( TableName . valueOf ( Constants . FLOW_QUEUE_TABLE ) ) ;
    Result result = flowQueueTable . get ( get ) ;
    if ( result == null || result . isEmpty ( ) ) {
      throw new DataException ( "No row for key " + Bytes . toStringBinary ( oldRowKey ) ) ;
    }
    // copy every cell of the old row under the new row key
    Put p = new Put ( queueKeyConverter . toBytes ( newKey ) ) ;
    for ( Cell c : result . rawCells ( ) ) {
      p . addColumn ( CellUtil . cloneFamily ( c ) , CellUtil . cloneQualifier ( c ) , CellUtil . cloneValue ( c ) ) ;
    }
    flowQueueTable . put ( p ) ;
    // remove the old row only after the new row has been written
    Delete d = new Delete ( oldRowKey ) ;
    flowQueueTable . delete ( d ) ;
  } finally {
    if ( flowQueueTable != null ) {
      flowQueueTable . close ( ) ;
    }
  }
}
Moves a flow_queue record from one row key to another . All Cells in the existing row will be written to the new row . This would primarily be used for transitioning a flow s data from one status to another .
39,209
/**
 * Returns one page of flows for the given cluster and status. Fetches
 * limit + 1 rows so the presence of a following page can be detected; when
 * one exists, the extra row's queue key becomes the next start row.
 *
 * @param cluster cluster identifier
 * @param status flow status to filter by
 * @param limit page size
 * @param user optional user filter
 * @param startRow row key to start scanning from
 * @return a PaginatedResult holding at most limit flows
 * @throws IOException on HBase access failure
 */
public PaginatedResult < Flow > getPaginatedFlowsForStatus ( String cluster , Flow . Status status , int limit , String user , byte [ ] startRow ) throws IOException {
  // over-fetch by one row to learn whether another page exists
  List < Flow > fetched = getFlowsForStatus ( cluster , status , limit + 1 , user , startRow ) ;
  PaginatedResult < Flow > page = new PaginatedResult < Flow > ( limit ) ;
  if ( fetched . size ( ) <= limit ) {
    // final page: no next-start-row marker
    page . setValues ( fetched ) ;
  } else {
    page . setValues ( fetched . subList ( 0 , limit ) ) ;
    Flow overflow = fetched . get ( limit ) ;
    page . setNextStartRow ( queueKeyConverter . toBytes ( overflow . getQueueKey ( ) ) ) ;
  }
  return page ;
}
Returns a page of flows for the given cluster and status
39,210
/**
 * Converts a FileStatus into a JobFile and, for job conf / job history
 * files, tracks the job id and folds the file's modification time into the
 * running min/max.
 *
 * @param jobFileStatus file status of a candidate job file
 * @return the JobFile built from the file name
 */
public JobFile track ( FileStatus jobFileStatus ) {
  JobFile jobFile = new JobFile ( jobFileStatus . getPath ( ) . getName ( ) ) ;
  boolean relevant = jobFile . isJobConfFile ( ) || jobFile . isJobHistoryFile ( ) ;
  if ( relevant ) {
    track ( jobFile . getJobid ( ) ) ;
    long modTime = jobFileStatus . getModificationTime ( ) ;
    // fold this file's timestamp into the running bounds
    minModificationTimeMillis = Math . min ( minModificationTimeMillis , modTime ) ;
    maxModificationTimeMillis = Math . max ( maxModificationTimeMillis , modTime ) ;
  }
  return jobFile ;
}
Converts a jobFileStatus to a JobFile and tracks the min and max modification times and JobIds .
39,211
/**
 * Picks up the ranges of jobs to process from ProcessRecords (skipping raw
 * rows that have already been processed), runs the resulting jobs, and on
 * full success marks the process records as updated.
 *
 * @param conf job configuration
 * @param hbaseConnection open HBase connection
 * @param cluster cluster identifier
 * @param batchSize number of jobs per runner batch
 * @param threadCount size of the thread pool used to run jobs
 * @param processFileSubstring filter for which process files to consider
 * @return true when there was nothing to process or every job succeeded
 */
boolean processRecords ( Configuration conf , Connection hbaseConnection , String cluster , int batchSize , int threadCount , String processFileSubstring ) throws IOException , InterruptedException , ClassNotFoundException , ExecutionException , RowKeyParseException {
  List < ProcessRecord > processRecords = getProcessRecords ( conf , hbaseConnection , cluster , processFileSubstring ) ;
  // nothing outstanding: trivially successful
  if ( ( processRecords == null ) || ( processRecords . size ( ) == 0 ) ) {
    return true ;
  }
  // compute the overall [minJobId, maxJobId] span across all records
  MinMaxJobFileTracker minMaxJobFileTracker = new MinMaxJobFileTracker ( ) ;
  for ( ProcessRecord processRecord : processRecords ) {
    minMaxJobFileTracker . track ( processRecord . getMinJobId ( ) ) ;
    minMaxJobFileTracker . track ( processRecord . getMaxJobId ( ) ) ;
  }
  List < JobRunner > jobRunners = getJobRunners ( conf , hbaseConnection , cluster , false , batchSize , minMaxJobFileTracker . getMinJobId ( ) , minMaxJobFileTracker . getMaxJobId ( ) ) ;
  boolean success = runJobs ( threadCount , jobRunners ) ;
  // only mark records processed when every job succeeded
  if ( success ) {
    updateProcessRecords ( conf , hbaseConnection , processRecords ) ;
  }
  return success ;
}
Pick up the ranges of jobs to process from ProcessRecords . Skip raw rows that have already been processed .
39,212
/**
 * Runs the given jobs on a fixed-size thread pool and waits for all of them
 * to complete, stopping early at the first failed job. The pool is always
 * shut down; any jobs never scheduled are reported to stderr.
 *
 * Fix: the original created the thread pool before the empty-list early
 * return, leaking an ExecutorService (its threads) whenever jobRunners was
 * null or empty. The pool is now created only when there is work to do.
 *
 * @param threadCount number of worker threads
 * @param jobRunners jobs to execute; null or empty is trivially successful
 * @return true if every job returned true
 * @throws InterruptedException if interrupted while waiting on a job
 * @throws ExecutionException if a job threw
 */
private boolean runJobs ( int threadCount , List < JobRunner > jobRunners ) throws InterruptedException , ExecutionException {
  if ( ( jobRunners == null ) || ( jobRunners . size ( ) == 0 ) ) {
    return true ;
  }
  ExecutorService execSvc = Executors . newFixedThreadPool ( threadCount ) ;
  boolean success = true ;
  try {
    // submit everything up front, then await results in order
    List < Future < Boolean > > jobFutures = new LinkedList < Future < Boolean > > ( ) ;
    for ( JobRunner jobRunner : jobRunners ) {
      jobFutures . add ( execSvc . submit ( jobRunner ) ) ;
    }
    for ( Future < Boolean > jobFuture : jobFutures ) {
      success = jobFuture . get ( ) ;
      if ( ! success ) {
        // first failure aborts the wait; remaining jobs are cancelled below
        break ;
      }
    }
  } finally {
    List < Runnable > neverRan = execSvc . shutdownNow ( ) ;
    if ( neverRan != null && neverRan . size ( ) > 0 ) {
      System . err . println ( "Interrupted run. Currently running Hadoop jobs will continue unless cancelled. " + neverRan + " jobs never scheduled." ) ;
    }
  }
  return success ;
}
Run the jobs and wait for all of them to complete .
39,213
/**
 * Creates a Scan for flow data rows starting at rowPrefix and bounded by a
 * WhileMatchFilter(PrefixFilter) so scanning stops at the first
 * non-matching row. When a non-empty version is given, rows are further
 * restricted to those whose version column equals it.
 *
 * @param rowPrefix row-key prefix identifying the flow
 * @param limit caller's result limit, used to cap scanner caching
 * @param version optional version value to filter on; null/empty disables it
 * @return the configured Scan
 */
private Scan createFlowScan ( byte [ ] rowPrefix , int limit , String version ) {
  Scan flowScan = new Scan ( ) ;
  flowScan . setStartRow ( rowPrefix ) ;
  flowScan . setCaching ( Math . min ( limit , defaultScannerCaching ) ) ;
  Filter matchPrefix = new WhileMatchFilter ( new PrefixFilter ( rowPrefix ) ) ;
  boolean filterOnVersion = version != null && version . length ( ) > 0 ;
  if ( ! filterOnVersion ) {
    flowScan . setFilter ( matchPrefix ) ;
  } else {
    FilterList allFilters = new FilterList ( FilterList . Operator . MUST_PASS_ALL ) ;
    allFilters . addFilter ( matchPrefix ) ;
    allFilters . addFilter ( new SingleColumnValueFilter ( Constants . INFO_FAM_BYTES , Constants . VERSION_COLUMN_BYTES , CompareFilter . CompareOp . EQUAL , Bytes . toBytes ( version ) ) ) ;
    flowScan . setFilter ( allFilters ) ;
  }
  return flowScan ;
}
creates a scan for flow data
39,214
/**
 * Returns a specific job's data by job ID. This version does not populate
 * the job's task data (delegates to the three-arg overload with
 * populateTasks = false).
 *
 * @param cluster cluster identifier
 * @param jobId the job id to look up
 * @return the job's details, task data excluded
 * @throws IOException on HBase access failure
 */
public JobDetails getJobByJobID ( String cluster , String jobId ) throws IOException {
  return getJobByJobID ( cluster , jobId , false ) ;
}
Returns a specific job s data by job ID . This version does not populate the job s task data .
39,215
/**
 * Builds a Scan covering all task rows of a job in the job_history_task
 * table: task row keys are the job key followed by a separator, so the scan
 * starts there and stops at the first row that no longer matches the
 * prefix.
 *
 * @param jobKey the job whose tasks are wanted
 * @return the configured Scan (caching set to 500 rows)
 */
private Scan getTaskScan ( JobKey jobKey ) {
  byte [ ] prefix = Bytes . add ( jobKeyConv . toBytes ( jobKey ) , Constants . SEP_BYTES ) ;
  Scan taskScan = new Scan ( ) ;
  taskScan . setStartRow ( prefix ) ;
  taskScan . setFilter ( new WhileMatchFilter ( new PrefixFilter ( prefix ) ) ) ;
  taskScan . setCaching ( 500 ) ;
  return taskScan ;
}
Returns a Scan instance to retrieve all the task rows for a given job from the job_history_task table .
39,216
/**
 * Converts serialized configuration properties back into a Configuration
 * object. Only columns whose qualifier starts with the job-conf prefix plus
 * separator are considered; the remainder of the qualifier is the property
 * name.
 *
 * @param keyValues column qualifier -> value map from the stored job row
 * @return a Configuration (defaults not loaded) holding the parsed properties
 */
public static Configuration parseConfiguration ( Map < byte [ ] , byte [ ] > keyValues ) {
  Configuration config = new Configuration ( false ) ;
  byte [ ] prefix = Bytes . add ( Constants . JOB_CONF_COLUMN_PREFIX_BYTES , Constants . SEP_BYTES ) ;
  for ( Map . Entry < byte [ ] , byte [ ] > entry : keyValues . entrySet ( ) ) {
    byte [ ] qualifier = entry . getKey ( ) ;
    boolean isConfigColumn = Bytes . startsWith ( qualifier , prefix ) && qualifier . length > prefix . length ;
    if ( isConfigColumn ) {
      // property name is whatever follows the prefix
      byte [ ] propName = Bytes . tail ( qualifier , qualifier . length - prefix . length ) ;
      config . set ( Bytes . toString ( propName ) , Bytes . toString ( entry . getValue ( ) ) ) ;
    }
  }
  return config ;
}
Converts serialized configuration properties back in to a Configuration object .
39,217
/**
 * Converts encoded key-values back into Counter objects. Qualifiers must
 * have the form prefix + SEP + group + SEP + counterName; anything not
 * matching the prefix is skipped, and a matching qualifier that does not
 * split into exactly two fields is rejected.
 *
 * @param prefix column qualifier prefix identifying counter columns
 * @param keyValues column qualifier -> value map from the stored row
 * @return the reconstructed CounterMap
 * @throws IllegalArgumentException on a malformed counter qualifier
 */
public static CounterMap parseCounters ( byte [ ] prefix , Map < byte [ ] , byte [ ] > keyValues ) {
  CounterMap counterValues = new CounterMap ( ) ;
  byte [ ] counterPrefix = Bytes . add ( prefix , Constants . SEP_BYTES ) ;
  for ( Map . Entry < byte [ ] , byte [ ] > entry : keyValues . entrySet ( ) ) {
    byte [ ] qualifier = entry . getKey ( ) ;
    if ( ! Bytes . startsWith ( qualifier , counterPrefix ) || qualifier . length <= counterPrefix . length ) {
      continue ;
    }
    byte [ ] remainder = Bytes . tail ( qualifier , qualifier . length - counterPrefix . length ) ;
    // expect exactly: group SEP counterName
    byte [ ] [ ] parts = ByteUtil . split ( remainder , Constants . SEP_BYTES ) ;
    if ( parts . length != 2 ) {
      throw new IllegalArgumentException ( "Malformed column qualifier for counter value: " + Bytes . toStringBinary ( qualifier ) ) ;
    }
    counterValues . add ( new Counter ( Bytes . toString ( parts [ 0 ] ) , Bytes . toString ( parts [ 1 ] ) , Bytes . toLong ( entry . getValue ( ) ) ) ) ;
  }
  return counterValues ;
}
Converts encoded key values back into counter objects .
39,218
/**
 * Removes the job's row from the job_history table and all related task
 * rows from the job_history_task table.
 *
 * Fix: the original closed the history table only on the success path; a
 * failed delete leaked the Table. The close now happens in a finally block,
 * matching how the task table is handled below.
 *
 * @param key the job to remove
 * @return number of rows deleted (1 for the job row plus one per task row)
 * @throws IOException on HBase access failure
 */
public int removeJob ( JobKey key ) throws IOException {
  byte [ ] jobRow = jobKeyConv . toBytes ( key ) ;
  Table historyTable = hbaseConnection . getTable ( TableName . valueOf ( Constants . HISTORY_TABLE ) ) ;
  try {
    historyTable . delete ( new Delete ( jobRow ) ) ;
  } finally {
    historyTable . close ( ) ;
  }
  int deleteCount = 1 ;
  // scan only the task-id column of this job's task rows, no block caching
  Scan taskScan = getTaskScan ( key ) ;
  taskScan . addColumn ( Constants . INFO_FAM_BYTES , JobHistoryKeys . KEYS_TO_BYTES . get ( JobHistoryKeys . TASKID ) ) ;
  taskScan . setCacheBlocks ( false ) ;
  List < Delete > taskDeletes = new ArrayList < Delete > ( ) ;
  Table taskTable = hbaseConnection . getTable ( TableName . valueOf ( Constants . HISTORY_TASK_TABLE ) ) ;
  ResultScanner scanner = taskTable . getScanner ( taskScan ) ;
  try {
    for ( Result r : scanner ) {
      if ( r != null && ! r . isEmpty ( ) ) {
        byte [ ] rowKey = r . getRow ( ) ;
        TaskKey taskKey = taskKeyConv . fromBytes ( rowKey ) ;
        if ( ! key . equals ( taskKey ) ) {
          // defensive stop: the prefix scan walked past this job's rows
          LOG . warn ( "Found task not in the current job " + Bytes . toStringBinary ( rowKey ) ) ;
          break ;
        }
        taskDeletes . add ( new Delete ( r . getRow ( ) ) ) ;
      }
    }
    deleteCount += taskDeletes . size ( ) ;
    if ( taskDeletes . size ( ) > 0 ) {
      LOG . info ( "Deleting " + taskDeletes . size ( ) + " tasks for job " + key ) ;
      taskTable . delete ( taskDeletes ) ;
    }
  } finally {
    scanner . close ( ) ;
    taskTable . close ( ) ;
  }
  return deleteCount ;
}
Removes the job s row from the job_history table and all related task rows from the job_history_task table .
39,219
/**
 * Gets the user name from the job conf: checks the hadoop2 config key
 * first, then falls back to the hadoop1 key.
 *
 * @param jobConf the job configuration
 * @return the non-blank user name
 * @throws IllegalArgumentException when neither key has a non-blank value
 */
public static String getUserNameInConf ( Configuration jobConf ) throws IllegalArgumentException {
  String userName = jobConf . get ( Constants . USER_CONF_KEY_HADOOP2 ) ;
  if ( StringUtils . isNotBlank ( userName ) ) {
    return userName ;
  }
  userName = jobConf . get ( Constants . USER_CONF_KEY ) ;
  if ( StringUtils . isNotBlank ( userName ) ) {
    return userName ;
  }
  throw new IllegalArgumentException ( " Found neither " + Constants . USER_CONF_KEY + " nor " + Constants . USER_CONF_KEY_HADOOP2 ) ;
}
Get the user name from the job conf check for hadoop2 config param then hadoop1
39,220
/**
 * Checks whether the jobConf has a non-blank value for the given parameter.
 *
 * Fix: collapsed the redundant if/else returning boolean literals into a
 * direct return of the condition.
 *
 * @param jobConf the job configuration
 * @param name parameter name to look up
 * @return true if the parameter is present with a non-blank value
 */
public static boolean contains ( Configuration jobConf , String name ) {
  return StringUtils . isNotBlank ( jobConf . get ( name ) ) ;
}
checks if the jobConf contains a certain parameter
39,221
/**
 * Retrieves the queue name from a hadoop conf, checking in order: the
 * hadoop2 queue name, the hadoop1 fair-scheduler pool name, the hadoop1
 * capacity-scheduler queue name. When none is set, logs the situation and
 * falls back to the default queue name (FIFO scheduler presumed).
 *
 * @param jobConf the job configuration
 * @return the first non-blank queue name found, or the default
 */
public static String getQueueName ( Configuration jobConf ) {
  String queueName = jobConf . get ( Constants . QUEUENAME_HADOOP2 ) ;
  if ( StringUtils . isNotBlank ( queueName ) ) {
    return queueName ;
  }
  queueName = jobConf . get ( Constants . FAIR_SCHEDULER_POOLNAME_HADOOP1 ) ;
  if ( StringUtils . isNotBlank ( queueName ) ) {
    return queueName ;
  }
  queueName = jobConf . get ( Constants . CAPACITY_SCHEDULER_QUEUENAME_HADOOP1 ) ;
  if ( StringUtils . isNotBlank ( queueName ) ) {
    return queueName ;
  }
  LOG . info ( " Found neither " + Constants . FAIR_SCHEDULER_POOLNAME_HADOOP1 + " nor " + Constants . QUEUENAME_HADOOP2 + " nor " + Constants . CAPACITY_SCHEDULER_QUEUENAME_HADOOP1 + " hence presuming FIFO scheduler " + " and setting the queuename to " + Constants . DEFAULT_QUEUENAME ) ;
  return Constants . DEFAULT_QUEUENAME ;
}
retrieves the queue name from a hadoop conf looks for hadoop2 and hadoop1 settings
39,222
/**
 * Returns the list of distinct versions for the given application, sorted
 * via VersionInfo's natural ordering (per the surrounding description this
 * is reverse chronological — confirm in VersionInfo.compareTo).
 *
 * @param cluster cluster identifier
 * @param user user name
 * @param appId application id
 * @return sorted list of VersionInfo, empty when the row does not exist
 * @throws IOException on HBase access failure
 * @throws IllegalArgumentException if a stored timestamp cannot be decoded
 */
public List < VersionInfo > getDistinctVersions ( String cluster , String user , String appId ) throws IOException {
  Get get = new Get ( getRowKey ( cluster , user , appId ) ) ;
  List < VersionInfo > versions = Lists . newArrayList ( ) ;
  Long ts = 0L ;
  Table versionsTable = null ;
  try {
    versionsTable = hbaseConnection . getTable ( TableName . valueOf ( Constants . HISTORY_APP_VERSION_TABLE ) ) ;
    Result r = versionsTable . get ( get ) ;
    if ( r != null && ! r . isEmpty ( ) ) {
      // each cell is one version: qualifier = version string, value = timestamp
      for ( Cell c : r . listCells ( ) ) {
        ts = 0L ;
        try {
          ts = Bytes . toLong ( CellUtil . cloneValue ( c ) ) ;
          versions . add ( new VersionInfo ( Bytes . toString ( CellUtil . cloneQualifier ( c ) ) , ts ) ) ;
        } catch ( IllegalArgumentException e1 ) {
          // a malformed timestamp aborts the whole lookup
          LOG . error ( "Caught conversion error while converting timestamp to long value " + e1 . getMessage ( ) ) ;
          throw e1 ;
        }
      }
    }
    if ( versions . size ( ) > 0 ) {
      Collections . sort ( versions ) ;
    }
  } finally {
    if ( versionsTable != null ) {
      versionsTable . close ( ) ;
    }
  }
  return versions ;
}
Returns the list of distinct versions for the given application sorted in reverse chronological order
39,223
/**
 * Adds an entry for the given version if it does not already exist; if the
 * given timestamp is earlier than the currently stored one, the stored
 * timestamp is lowered. Uses an optimistic check-and-put loop (up to 3
 * attempts) so a concurrent writer changing the value between the read and
 * the put causes a retry rather than a lost update.
 *
 * @param cluster cluster identifier
 * @param user user name
 * @param appId application id
 * @param version version string (becomes the column qualifier)
 * @param timestamp candidate timestamp for this version
 * @return true if a write was applied; false if no update was needed or
 *         all attempts lost the check-and-put race
 * @throws IOException on HBase access failure
 */
public boolean addVersion ( String cluster , String user , String appId , String version , long timestamp ) throws IOException {
  boolean updated = false ;
  byte [ ] rowKey = getRowKey ( cluster , user , appId ) ;
  byte [ ] versionCol = Bytes . toBytes ( version ) ;
  int attempts = 0 ;
  int maxAttempts = 3 ;
  boolean checkForUpdate = true ;
  while ( checkForUpdate && attempts < maxAttempts ) {
    attempts ++ ;
    Put p = null ;
    byte [ ] expectedValue = null ;
    Get get = new Get ( rowKey ) ;
    get . addColumn ( Constants . INFO_FAM_BYTES , versionCol ) ;
    Table versionsTable = null ;
    try {
      versionsTable = hbaseConnection . getTable ( TableName . valueOf ( Constants . HISTORY_APP_VERSION_TABLE ) ) ;
      Result r = versionsTable . get ( get ) ;
      if ( r != null && ! r . isEmpty ( ) ) {
        byte [ ] storedValue = r . getValue ( Constants . INFO_FAM_BYTES , versionCol ) ;
        long storedTS = Bytes . toLong ( storedValue ) ;
        if ( timestamp < storedTS ) {
          // earlier timestamp wins; guard the put with the value we read
          p = new Put ( rowKey ) ;
          p . addColumn ( Constants . INFO_FAM_BYTES , versionCol , Bytes . toBytes ( timestamp ) ) ;
          expectedValue = storedValue ;
        } else {
          // stored timestamp is already as early or earlier: nothing to do
          checkForUpdate = false ;
        }
      } else {
        // version not present yet; expectedValue stays null so the
        // check-and-put only succeeds if the column is still absent
        p = new Put ( rowKey ) ;
        p . addColumn ( Constants . INFO_FAM_BYTES , versionCol , Bytes . toBytes ( timestamp ) ) ;
      }
      if ( p != null ) {
        updated = versionsTable . checkAndPut ( rowKey , Constants . INFO_FAM_BYTES , versionCol , expectedValue , p ) ;
        checkForUpdate = ! updated ;
        if ( ! updated ) {
          LOG . warn ( "Update of cluster=" + cluster + ", user=" + user + ", app=" + appId + ", version=" + version + " to timestamp " + timestamp + " failed because currently set value changed!" + " (attempt " + attempts + " of " + maxAttempts + ")" ) ;
        }
      }
    } finally {
      if ( versionsTable != null ) {
        versionsTable . close ( ) ;
      }
    }
  }
  return updated ;
}
Adds an entry for the given version if it does not already exist . If the given timestamp is earlier than the currently stored timestamp for the version it will be updated .
39,224
/**
 * Fetches a list of flows whose jobs include the specified configuration
 * properties. The signature path segment is only present in the URL when a
 * signature was supplied.
 *
 * @param cluster cluster identifier
 * @param username user name
 * @param batchDesc batch description (cleansed for use in the URL)
 * @param signature optional pig logical-plan signature
 * @param limit maximum number of flows
 * @param configProps configuration property names to include in the response
 * @return the flows retrieved from the REST endpoint
 * @throws IOException on request failure
 */
public List < Flow > fetchFlowsWithConfig ( String cluster , String username , String batchDesc , String signature , int limit , String ... configProps ) throws IOException {
  LOG . info ( String . format ( "Fetching last %d matching jobs for cluster=%s, user.name=%s, " + "batch.desc=%s, pig.logical.plan.signature=%s" , limit , cluster , username , batchDesc , signature ) ) ;
  String configParam = "" ;
  if ( configProps != null && configProps . length > 0 ) {
    configParam = StringUtil . buildParam ( "includeConf" , configProps ) ;
  }
  String urlString ;
  if ( signature == null ) {
    urlString = String . format ( "http://%s/api/v1/flow/%s/%s/%s?limit=%d&%s" , apiHostname , cluster , username , StringUtil . cleanseToken ( batchDesc ) , limit , configParam ) ;
  } else {
    urlString = String . format ( "http://%s/api/v1/flow/%s/%s/%s/%s?limit=%d&%s" , apiHostname , cluster , username , StringUtil . cleanseToken ( batchDesc ) , signature , limit , configParam ) ;
  }
  return retrieveFlowsFromURL ( urlString ) ;
}
Fetches a list of flows that include jobs in that flow that include the specified configuration properties
39,225
/**
 * Fetches a list of flows restricted to the given flow fields, job fields
 * and configuration properties. Builds the base flow URL and appends the
 * includeConf parameter only when property fields were requested.
 *
 * @param cluster cluster identifier
 * @param username user name
 * @param batchDesc batch description
 * @param signature optional pig logical-plan signature
 * @param limit maximum number of flows
 * @param flowResponseFilters flow fields to include
 * @param jobResponseFilters job fields to include
 * @param configPropertyFields configuration property names to include
 * @return the flows retrieved from the REST endpoint
 * @throws IOException on request failure
 */
public List < Flow > fetchFlowsWithConfig ( String cluster , String username , String batchDesc , String signature , int limit , List < String > flowResponseFilters , List < String > jobResponseFilters , List < String > configPropertyFields ) throws IOException {
  LOG . info ( String . format ( "Fetching last %d matching jobs for cluster=%s, user.name=%s, " + "batch.desc=%s, pig.logical.plan.signature=%s" , limit , cluster , username , batchDesc , signature ) ) ;
  StringBuilder url = buildFlowURL ( cluster , username , batchDesc , signature , limit , flowResponseFilters , jobResponseFilters ) ;
  boolean hasConfigFields = ( configPropertyFields != null ) && ( configPropertyFields . size ( ) > 0 ) ;
  if ( hasConfigFields ) {
    url . append ( AND ) . append ( StringUtil . buildParam ( "includeConf" , configPropertyFields ) ) ;
  }
  return retrieveFlowsFromURL ( url . toString ( ) ) ;
}
Fetches a list of flows that include jobs in that flow that include the specified flow fields and job fields specified configuration properties
39,226
/**
 * Builds a StringBuilder holding the FLOW API URL with its path segments
 * (cluster/user/batchDesc[/signature]) and the limit, include and
 * includeJobField query parameters.
 *
 * @return the URL under construction, ready for further appends
 * @throws IOException propagated from parameter encoding
 */
private StringBuilder buildFlowURL ( String cluster , String username , String batchDesc , String signature , int limit , List < String > flowResponseFilters , List < String > jobResponseFilters ) throws IOException {
  StringBuilder url = new StringBuilder ( ) ;
  // path: http://host/api/v1/flow/cluster/user/batchDesc[/signature]
  url . append ( "http://" ) . append ( apiHostname ) . append ( RestJSONResource . SLASH ) . append ( URL_PORTION_API_V1 ) . append ( FLOW_API ) . append ( RestJSONResource . SLASH ) . append ( cluster ) . append ( RestJSONResource . SLASH ) . append ( username ) . append ( RestJSONResource . SLASH ) . append ( StringUtil . cleanseToken ( batchDesc ) ) ;
  if ( StringUtils . isNotEmpty ( signature ) ) {
    url . append ( RestJSONResource . SLASH ) . append ( signature ) ;
  }
  // query: ?limit=N[&include=...][&includeJobField=...]
  url . append ( QUESTION_MARK ) . append ( LIMIT ) . append ( EQUAL_TO ) . append ( limit ) ;
  if ( ( flowResponseFilters != null ) && ( flowResponseFilters . size ( ) > 0 ) ) {
    url . append ( AND ) . append ( StringUtil . buildParam ( "include" , flowResponseFilters ) ) ;
  }
  if ( ( jobResponseFilters != null ) && ( jobResponseFilters . size ( ) > 0 ) ) {
    url . append ( AND ) . append ( StringUtil . buildParam ( "includeJobField" , jobResponseFilters ) ) ;
  }
  return url ;
}
builds up a StringBuilder with the parameters for the FLOW API
39,227
/**
 * Fetches the task details of a given job from the tasks REST endpoint.
 *
 * @param cluster cluster identifier
 * @param jobId the job whose tasks are wanted
 * @return the task details retrieved from the endpoint
 * @throws IOException on request failure
 */
public List < TaskDetails > fetchTaskDetails ( String cluster , String jobId ) throws IOException {
  String url = String . format ( "http://%s/api/v1/tasks/%s/%s" , apiHostname , cluster , jobId ) ;
  return retrieveTaskDetailsFromUrl ( url ) ;
}
Fetch task details of a given job .
39,228
/**
 * Fetches the task details of a given job, restricted to the specified
 * response fields via the include query parameter.
 *
 * @param cluster cluster identifier
 * @param jobId the job whose tasks are wanted
 * @param taskResponseFilters task fields to include in the response
 * @return the task details retrieved from the endpoint
 * @throws IOException on request failure
 */
public List < TaskDetails > fetchTaskDetails ( String cluster , String jobId , List < String > taskResponseFilters ) throws IOException {
  String includeParam = StringUtil . buildParam ( "include" , taskResponseFilters ) ;
  String url = String . format ( "http://%s/api/v1/tasks/%s/%s?%s" , apiHostname , cluster , jobId , includeParam ) ;
  return retrieveTaskDetailsFromUrl ( url ) ;
}
Fetch task details of a given job for the specified fields
39,229
/**
 * Aggregates this job's stats into the daily or weekly summary and, when
 * the aggregation succeeded, writes the corresponding status column back to
 * the raw table. Unknown aggregation types are logged and skipped.
 *
 * NOTE(review): the method name misspells "aggregate"; renaming would break
 * callers, so it is left as-is.
 *
 * @param jobDetails the job whose stats are aggregated
 * @param rowKey raw-table row key for the status put
 * @param context MR context used for progress and output
 * @param aggType DAILY or WEEKLY
 */
private void aggreagteJobStats ( JobDetails jobDetails , byte [ ] rowKey , Context context , AggregationConstants . AGGREGATION_TYPE aggType ) throws IOException , InterruptedException {
  byte [ ] aggStatusCol = null ;
  switch ( aggType ) {
  case DAILY :
    aggStatusCol = AggregationConstants . JOB_DAILY_AGGREGATION_STATUS_COL_BYTES ;
    break ;
  case WEEKLY :
    aggStatusCol = AggregationConstants . JOB_WEEKLY_AGGREGATION_STATUS_COL_BYTES ;
    break ;
  default :
    LOG . error ( "Unknown aggregation type " + aggType ) ;
    return ;
  }
  boolean aggStatus = appSummaryService . aggregateJobDetails ( jobDetails , aggType ) ;
  // let the framework know we're alive during potentially long aggregation
  context . progress ( ) ;
  LOG . debug ( "Status of aggreagting stats for " + aggType + "=" + aggStatus ) ;
  if ( aggStatus ) {
    Put aggStatusPut = rawService . getAggregatedStatusPut ( rowKey , aggStatusCol , aggStatus ) ;
    context . write ( RAW_TABLE , aggStatusPut ) ;
  }
}
aggregate this job s stats only if re - aggregation is turned on OR aggregation is on AND job not already aggregated
39,230
/**
 * Generates a Put that stores the megabyte-millis value in the info family
 * of the job's row.
 *
 * @param mbMillis megabyte-millis value to store
 * @param jobKey job whose row receives the column
 * @return the prepared Put
 */
private Put getMegaByteMillisPut ( Long mbMillis , JobKey jobKey ) {
  Put put = new Put ( jobKeyConv . toBytes ( jobKey ) ) ;
  put . addColumn ( Constants . INFO_FAM_BYTES , Constants . MEGABYTEMILLIS_BYTES , Bytes . toBytes ( mbMillis ) ) ;
  return put ;
}
generates a put for the megabytemillis
39,231
/**
 * Loads the cost properties file from the distributed-cache path. Returns
 * null (callers fall back to default values) when the file is missing or
 * unreadable.
 *
 * Improvement: try-with-resources replaces the manual close with a
 * swallowed IOException.
 *
 * NOTE(review): machineType is unused here — kept for interface
 * compatibility; confirm whether callers expect per-machine-type loading.
 *
 * @param cachePath local path of the cached properties file
 * @param machineType machine type (currently unused)
 * @return the loaded Properties, or null when unavailable
 */
Properties loadCostProperties ( Path cachePath , String machineType ) {
  Properties prop = new Properties ( ) ;
  try ( InputStream inp = new FileInputStream ( cachePath . toString ( ) ) ) {
    prop . load ( inp ) ;
    return prop ;
  } catch ( FileNotFoundException fnf ) {
    LOG . error ( "cost properties does not exist, using default values" ) ;
    return null ;
  } catch ( IOException e ) {
    LOG . error ( "error loading properties, using default values" ) ;
    return null ;
  }
}
looks for cost file in distributed cache
39,232
/**
 * Calculates the cost of this job from its megabyte-millis, the machine
 * type configured in the job conf, and the compute-tco / machine-memory
 * values read from the cost properties file in the distributed cache.
 * Every failure (missing cache file, unreadable properties, unparseable
 * numbers) is logged and degrades to a default of 0, never aborting the
 * job.
 *
 * @param mbMillis the job's megabyte-millis
 * @param currentConf configuration carrying the machine type and cache files
 * @return the computed job cost
 */
private Double getJobCost ( Long mbMillis , Configuration currentConf ) {
  Double computeTco = 0.0 ;
  Long machineMemory = 0L ;
  Properties prop = null ;
  String machineType = currentConf . get ( Constants . HRAVEN_MACHINE_TYPE , "default" ) ;
  LOG . debug ( " machine type " + machineType ) ;
  try {
    // locate the cost properties file among the distributed-cache entries
    Path [ ] cacheFiles = DistributedCache . getLocalCacheFiles ( currentConf ) ;
    if ( null != cacheFiles && cacheFiles . length > 0 ) {
      for ( Path cachePath : cacheFiles ) {
        LOG . debug ( " distributed cache path " + cachePath ) ;
        if ( cachePath . getName ( ) . equals ( Constants . COST_PROPERTIES_FILENAME ) ) {
          prop = loadCostProperties ( cachePath , machineType ) ;
          break ;
        }
      }
    } else {
      LOG . error ( "Unable to find anything (" + Constants . COST_PROPERTIES_FILENAME + ") in distributed cache, continuing with defaults" ) ;
    }
  } catch ( IOException ioe ) {
    LOG . error ( "IOException reading from distributed cache for " + Constants . COST_PROPERTIES_HDFS_DIR + ", continuing with defaults" + ioe . toString ( ) ) ;
  }
  if ( prop != null ) {
    // per-machine-type keys: <type>.computecost and <type>.machinememory
    String computeTcoStr = prop . getProperty ( machineType + ".computecost" ) ;
    try {
      computeTco = Double . parseDouble ( computeTcoStr ) ;
    } catch ( NumberFormatException nfe ) {
      LOG . error ( "error in conversion to long for compute tco " + computeTcoStr + " using default value of 0" ) ;
    }
    String machineMemStr = prop . getProperty ( machineType + ".machinememory" ) ;
    try {
      machineMemory = Long . parseLong ( machineMemStr ) ;
    } catch ( NumberFormatException nfe ) {
      LOG . error ( "error in conversion to long for machine memory " + machineMemStr + " using default value of 0" ) ;
    }
  } else {
    LOG . error ( "Could not load properties file, using defaults" ) ;
  }
  Double jobCost = JobHistoryFileParserBase . calculateJobCost ( mbMillis , computeTco , machineMemory ) ;
  LOG . info ( "from cost properties file, jobCost is " + jobCost + " based on compute tco: " + computeTco + " machine memory: " + machineMemory + " for machine type " + machineType ) ;
  return jobCost ;
}
calculates the cost of this job based on mbMillis machineType and cost details from the properties file
39,233
/**
 * Generates a Put that stores the job cost in the info family of the job's
 * row.
 *
 * @param jobCost computed job cost to store
 * @param jobKey job whose row receives the column
 * @return the prepared Put
 */
private Put getJobCostPut ( Double jobCost , JobKey jobKey ) {
  Put put = new Put ( jobKeyConv . toBytes ( jobKey ) ) ;
  put . addColumn ( Constants . INFO_FAM_BYTES , Constants . JOBCOST_BYTES , Bytes . toBytes ( jobCost ) ) ;
  return put ;
}
generates a put for the job cost
39,234
/**
 * Compares two JobDetails objects on the basis of their JobKey.
 *
 * NOTE(review): returning -1 for a null argument deviates from the
 * Comparable contract (which prescribes NullPointerException); kept as-is
 * for compatibility with existing callers.
 *
 * @param otherJob the JobDetails to compare against
 * @return negative/zero/positive per the JobKey ordering; -1 when otherJob is null
 */
public int compareTo ( JobDetails otherJob ) {
  if ( otherJob == null ) {
    return - 1 ;
  }
  return new CompareToBuilder ( ) . append ( this . jobKey , otherJob . getJobKey ( ) ) . toComparison ( ) ;
}
Compares two JobDetails objects on the basis of their JobKey
39,235
/**
 * Returns the value of the named counter from the CounterMap as a Long,
 * defaulting to 0 when the counter is absent.
 *
 * @param counters counter map to look in
 * @param counterGroupName counter group name
 * @param counterName counter name within the group
 * @return the counter's value, or 0L when not present
 */
Long getCounterValueAsLong ( final CounterMap counters , final String counterGroupName , final String counterName ) {
  Counter counter = counters . getCounter ( counterGroupName , counterName ) ;
  return ( counter == null ) ? 0L : counter . getValue ( ) ;
}
return the value of that counter from the CounterMap as a Long
39,236
/**
 * Reads the hadoop version column from the info values and converts it to
 * the HadoopVersion enum. An absent value defaults to HadoopVersion.ONE.
 *
 * @param key history key whose column holds the version
 * @param infoValues qualifier -> value map for the info family
 * @return the stored version, or ONE when the column is missing
 */
private HadoopVersion getHadoopVersionFromResult ( final JobHistoryKeys key , final NavigableMap < byte [ ] , byte [ ] > infoValues ) {
  byte [ ] versionBytes = infoValues . get ( JobHistoryKeys . KEYS_TO_BYTES . get ( key ) ) ;
  if ( versionBytes == null ) {
    // no version column stored: default to hadoop one
    return HadoopVersion . ONE ;
  }
  return HadoopVersion . valueOf ( Bytes . toString ( versionBytes ) ) ;
}
return an enum value from the NavigableMap for hadoop version
39,237
/**
 * Reads the entire job file into a byte array.
 *
 * Fix: the original silently cast the long file length to int, so a file of
 * 2 GB or more would produce a truncated (or negative) buffer size. Such
 * files now fail fast with a descriptive IOException.
 *
 * @param fileStatus status of the file to read
 * @return the file's raw bytes
 * @throws IOException on read failure or when the file exceeds 2 GB
 */
private byte [ ] readJobFile ( FileStatus fileStatus ) throws IOException {
  long fileLength = fileStatus . getLen ( ) ;
  if ( fileLength > Integer . MAX_VALUE ) {
    throw new IOException ( "Job file too large to buffer in memory (" + fileLength + " bytes): " + fileStatus . getPath ( ) ) ;
  }
  int fileLengthInt = ( int ) fileLength ;
  byte [ ] rawBytes = new byte [ fileLengthInt ] ;
  FSDataInputStream fsdis = null ;
  try {
    fsdis = hdfs . open ( fileStatus . getPath ( ) ) ;
    IOUtils . readFully ( fsdis , rawBytes , 0 , fileLengthInt ) ;
  } finally {
    // closeStream tolerates a null stream if open() threw
    IOUtils . closeStream ( fsdis ) ;
  }
  return rawBytes ;
}
Get the raw bytes and the last modification millis for this JobFile
39,238
/**
 * Takes a string token to be used as a key or qualifier and cleanses out
 * reserved characters: all spaces and separator sequences are replaced with
 * underscores. This operation is not symmetrical (it cannot be reversed).
 *
 * @param token candidate key/qualifier text, may be null or empty
 * @return the cleansed token; null/empty input is returned unchanged
 */
public static String cleanseToken ( String token ) {
  if ( token == null || token . length ( ) == 0 ) {
    return token ;
  }
  String cleansed = token . replaceAll ( SPACE , UNDERSCORE ) ;
  return cleansed . replaceAll ( Constants . SEP , UNDERSCORE ) ;
}
Takes a string token to be used as a key or qualifier and cleanses out reserved tokens . This operation is not symmetrical . Logic is to replace all spaces and exclamation points with underscores .
39,239
/**
 * Builds an ampersand-joined query-parameter string, repeating the given
 * parameter name once per argument with the value URL-encoded as UTF-8,
 * e.g. name=v1&name=v2.
 *
 * @param paramName query parameter name
 * @param paramArgs values to encode, one name=value pair each
 * @return the assembled parameter string; empty for an empty list
 * @throws IOException propagated from URL encoding
 */
public static String buildParam ( String paramName , List < String > paramArgs ) throws IOException {
  StringBuilder param = new StringBuilder ( ) ;
  for ( String value : paramArgs ) {
    if ( param . length ( ) != 0 ) {
      param . append ( "&" ) ;
    }
    param . append ( paramName ) . append ( "=" ) . append ( URLEncoder . encode ( value , "UTF-8" ) ) ;
  }
  return param . toString ( ) ;
}
builds up a String with the parameters for the filtering of fields
39,240
/**
 * Seeks to the given position within the buffered data, validating it
 * against the valid range [0, count).
 *
 * @param position target position
 * @throws IOException when the position is negative or beyond the data
 */
public synchronized void seek ( long position ) throws IOException {
  boolean inBounds = position >= 0 && position < count ;
  if ( ! inBounds ) {
    throw new IOException ( "cannot seek position " + position + " as it is out of bounds" ) ;
  }
  pos = ( int ) position ;
}
Seeks and sets position to the specified value .
39,241
/**
 * Factory method to be used by subclasses: resolves the user name from the
 * job conf and assembles a JobDesc.
 *
 * @param qualifiedJobId fully qualified job id; must not be null
 * @param jobConf job configuration to read the user name from
 * @param appId application id
 * @param version application version
 * @param framework execution framework
 * @param submitTimeMillis job submit time in milliseconds
 * @return the constructed JobDesc
 * @throws IllegalArgumentException when qualifiedJobId is null
 */
protected JobDesc create ( QualifiedJobId qualifiedJobId , Configuration jobConf , String appId , String version , Framework framework , long submitTimeMillis ) {
  if ( qualifiedJobId == null ) {
    throw new IllegalArgumentException ( "Cannot create a JobKey from a null qualifiedJobId." ) ;
  }
  return new JobDesc ( qualifiedJobId , HadoopConfUtil . getUserNameInConf ( jobConf ) , appId , version , submitTimeMillis , framework ) ;
}
Factory method to be used by subclasses .
39,242
/**
 * Given a potential appId value, returns a string safe to use in the
 * jobKey: null maps to the UNKNOWN constant, anything else is cleansed of
 * reserved characters.
 *
 * @param appId candidate application id, may be null
 * @return a jobKey-safe app id
 */
protected String cleanAppId ( String appId ) {
  if ( appId == null ) {
    return Constants . UNKNOWN ;
  }
  return StringUtil . cleanseToken ( appId ) ;
}
Given a potential value for appId return a string that is safe to use in the jobKey
39,243
public void populate ( Result result ) { NavigableMap < byte [ ] , byte [ ] > infoValues = result . getFamilyMap ( HdfsConstants . DISK_INFO_FAM_BYTES ) ; this . fileCount += ByteUtil . getValueAsLong ( HdfsConstants . FILE_COUNT_COLUMN_BYTES , infoValues ) ; this . dirCount += ByteUtil . getValueAsLong ( HdfsConstants . DIR_COUNT_COLUMN_BYTES , infoValues ) ; this . spaceConsumed += ByteUtil . getValueAsLong ( HdfsConstants . SPACE_CONSUMED_COLUMN_BYTES , infoValues ) ; this . accessCountTotal += ByteUtil . getValueAsLong ( HdfsConstants . ACCESS_COUNT_TOTAL_COLUMN_BYTES , infoValues ) ; this . owner = ByteUtil . getValueAsString ( HdfsConstants . OWNER_COLUMN_BYTES , infoValues ) ; this . quota += ByteUtil . getValueAsLong ( HdfsConstants . QUOTA_COLUMN_BYTES , infoValues ) ; this . spaceQuota += ByteUtil . getValueAsLong ( HdfsConstants . SPACE_QUOTA_COLUMN_BYTES , infoValues ) ; this . tmpFileCount += ByteUtil . getValueAsLong ( HdfsConstants . TMP_FILE_COUNT_COLUMN_BYTES , infoValues ) ; this . tmpSpaceConsumed += ByteUtil . getValueAsLong ( HdfsConstants . TMP_SPACE_CONSUMED_COLUMN_BYTES , infoValues ) ; this . trashFileCount += ByteUtil . getValueAsLong ( HdfsConstants . TRASH_FILE_COUNT_COLUMN_BYTES , infoValues ) ; this . trashSpaceConsumed += ByteUtil . getValueAsLong ( HdfsConstants . TRASH_SPACE_CONSUMED_COLUMN_BYTES , infoValues ) ; this . accessCost += ByteUtil . getValueAsDouble ( HdfsConstants . ACCESS_COST_COLUMN_BYTES , infoValues ) ; this . storageCost += ByteUtil . getValueAsDouble ( HdfsConstants . STORAGE_COST_COLUMN_BYTES , infoValues ) ; this . hdfsCost = calculateHDFSCost ( ) ; }
populates the hdfs stats by looking through the hbase result
39,244
private static void traverseDirs ( List < FileStatus > fileStatusesList , FileSystem hdfs , Path inputPath , JobFileModifiedRangePathFilter jobFileModifiedRangePathFilter ) throws IOException { FileStatus allFiles [ ] = hdfs . listStatus ( inputPath ) ; for ( FileStatus aFile : allFiles ) { if ( aFile . isDir ( ) ) { traverseDirs ( fileStatusesList , hdfs , aFile . getPath ( ) , jobFileModifiedRangePathFilter ) ; } else { if ( jobFileModifiedRangePathFilter . accept ( aFile . getPath ( ) ) ) { fileStatusesList . add ( aFile ) ; } } } }
Recursively traverses the dirs to get the list of files for a given path filtered as per the input path range filter
39,245
public static FileStatus [ ] listFiles ( boolean recurse , FileSystem hdfs , Path inputPath , JobFileModifiedRangePathFilter jobFileModifiedRangePathFilter ) throws IOException { if ( recurse ) { List < FileStatus > fileStatusesList = new ArrayList < FileStatus > ( ) ; traverseDirs ( fileStatusesList , hdfs , inputPath , jobFileModifiedRangePathFilter ) ; FileStatus [ ] fileStatuses = ( FileStatus [ ] ) fileStatusesList . toArray ( new FileStatus [ fileStatusesList . size ( ) ] ) ; return fileStatuses ; } else { return hdfs . listStatus ( inputPath , jobFileModifiedRangePathFilter ) ; } }
Gets the list of files for a given path filtered as per the input path range filter Can go into directories recursively
39,246
static String getJobIdFromPath ( Path aPath ) { String fileName = aPath . getName ( ) ; JobFile jf = new JobFile ( fileName ) ; String jobId = jf . getJobid ( ) ; if ( jobId == null ) { throw new ProcessingException ( "job id is null for " + aPath . toUri ( ) ) ; } return jobId ; }
extracts the job id from a Path
39,247
public static JobDescFactoryBase getFrameworkSpecificJobDescFactory ( Configuration jobConf ) { Framework framework = getFramework ( jobConf ) ; switch ( framework ) { case PIG : return PIG_JOB_DESC_FACTORY ; case SCALDING : return SCALDING_JOB_DESC_FACTORY ; default : return MR_JOB_DESC_FACTORY ; } }
get framework specific JobDescFactory based on configuration
39,248
public static String getCluster ( Configuration jobConf ) { String jobtracker = jobConf . get ( RESOURCE_MANAGER_KEY ) ; if ( jobtracker == null ) { jobtracker = jobConf . get ( JOBTRACKER_KEY ) ; } String cluster = null ; if ( jobtracker != null ) { int portIdx = jobtracker . indexOf ( ':' ) ; if ( portIdx > - 1 ) { jobtracker = jobtracker . substring ( 0 , portIdx ) ; } cluster = Cluster . getIdentifier ( jobtracker ) ; } return cluster ; }
Returns the cluster that a given job was run on by mapping the jobtracker hostname to an identifier .
39,249
String stripAppId ( String origId ) { if ( origId == null || origId . isEmpty ( ) ) { return "" ; } Matcher m = stripBracketsPattern . matcher ( origId ) ; String cleanedAppId = m . replaceAll ( "" ) ; Matcher tailMatcher = stripSequencePattern . matcher ( cleanedAppId ) ; if ( tailMatcher . matches ( ) ) { cleanedAppId = tailMatcher . group ( 1 ) ; } return cleanedAppId ; }
Strips out metadata in brackets to get a clean app name . There are multiple job name formats used by various frameworks . This method attempts to normalize these job names into a somewhat human readable appId format .
39,250
static long getFlowSubmitTimeMillis ( Configuration jobConf , long submitTimeMillis ) { long cascadingSubmitTimeMillis = jobConf . getLong ( Constants . CASCADING_RUN_CONF_KEY , 0 ) ; if ( cascadingSubmitTimeMillis == 0 ) { String flowId = jobConf . get ( Constants . CASCADING_FLOW_ID_CONF_KEY ) ; if ( flowId != null && ! flowId . isEmpty ( ) ) { if ( flowId . length ( ) > 16 ) { flowId = flowId . substring ( 0 , 16 ) ; } try { long tmpFlow = Long . parseLong ( flowId , 16 ) ; long monthStart = DateUtil . getMonthStart ( submitTimeMillis ) ; cascadingSubmitTimeMillis = monthStart + ( tmpFlow % DateUtil . MONTH_IN_MILLIS ) ; } catch ( NumberFormatException nfe ) { cascadingSubmitTimeMillis = submitTimeMillis ; } } else { cascadingSubmitTimeMillis = submitTimeMillis ; } } return cascadingSubmitTimeMillis ; }
Returns the flow submit time for this job or a computed substitute that will at least be consistent for all jobs in a flow .
39,251
private List < ProcessRecord > createFromResults ( ResultScanner scanner , int maxCount ) { if ( ( maxCount <= 0 ) || ( scanner == null ) ) { return new ArrayList < ProcessRecord > ( 0 ) ; } List < ProcessRecord > records = new ArrayList < ProcessRecord > ( ) ; for ( Result result : scanner ) { byte [ ] row = result . getRow ( ) ; ProcessRecordKey key = keyConv . fromBytes ( row ) ; KeyValue keyValue = result . getColumnLatest ( Constants . INFO_FAM_BYTES , Constants . MIN_MOD_TIME_MILLIS_COLUMN_BYTES ) ; long minModificationTimeMillis = Bytes . toLong ( keyValue . getValue ( ) ) ; keyValue = result . getColumnLatest ( Constants . INFO_FAM_BYTES , Constants . PROCESSED_JOB_FILES_COLUMN_BYTES ) ; int processedJobFiles = Bytes . toInt ( keyValue . getValue ( ) ) ; keyValue = result . getColumnLatest ( Constants . INFO_FAM_BYTES , Constants . PROCESS_FILE_COLUMN_BYTES ) ; String processingDirectory = Bytes . toString ( keyValue . getValue ( ) ) ; keyValue = result . getColumnLatest ( Constants . INFO_FAM_BYTES , Constants . PROCESSING_STATE_COLUMN_BYTES ) ; ProcessState processState = ProcessState . getProcessState ( Bytes . toInt ( keyValue . getValue ( ) ) ) ; keyValue = result . getColumnLatest ( Constants . INFO_FAM_BYTES , Constants . MIN_JOB_ID_COLUMN_BYTES ) ; String minJobId = null ; if ( keyValue != null ) { minJobId = Bytes . toString ( keyValue . getValue ( ) ) ; } keyValue = result . getColumnLatest ( Constants . INFO_FAM_BYTES , Constants . MAX_JOB_ID_COLUMN_BYTES ) ; String maxJobId = null ; if ( keyValue != null ) { maxJobId = Bytes . toString ( keyValue . getValue ( ) ) ; } ProcessRecord processRecord = new ProcessRecord ( key . getCluster ( ) , processState , minModificationTimeMillis , key . getTimestamp ( ) , processedJobFiles , processingDirectory , minJobId , maxJobId ) ; records . add ( processRecord ) ; if ( records . size ( ) >= maxCount ) { break ; } } LOG . info ( "Returning " + records . size ( ) + " process records" ) ; return records ; }
Transform results pulled from a scanner and turn into a list of ProcessRecords .
39,252
public ProcessRecord setProcessState ( ProcessRecord processRecord , ProcessState newState ) throws IOException { Put put = new Put ( keyConv . toBytes ( processRecord . getKey ( ) ) ) ; put . addColumn ( Constants . INFO_FAM_BYTES , Constants . PROCESSING_STATE_COLUMN_BYTES , Bytes . toBytes ( newState . getCode ( ) ) ) ; Table processRecordTable = null ; try { processRecordTable = hbaseConnection . getTable ( TableName . valueOf ( Constants . JOB_FILE_PROCESS_TABLE ) ) ; processRecordTable . put ( put ) ; } finally { if ( processRecordTable != null ) { processRecordTable . close ( ) ; } } ProcessRecord updatedProcessRecord = new ProcessRecord ( processRecord . getCluster ( ) , newState , processRecord . getMinModificationTimeMillis ( ) , processRecord . getMaxModificationTimeMillis ( ) , processRecord . getProcessedJobFiles ( ) , processRecord . getProcessFile ( ) , processRecord . getMinJobId ( ) , processRecord . getMaxJobId ( ) ) ; return updatedProcessRecord ; }
Set the process state for a given processRecord .
39,253
public int compareTo ( Flow otherFlow ) { if ( otherFlow == null ) { return - 1 ; } return new CompareToBuilder ( ) . append ( this . key , otherFlow . getFlowKey ( ) ) . toComparison ( ) ; }
Compares two Flow objects on the basis of their FlowKeys
39,254
public List < AppSummary > getNewApps ( JobHistoryService jhs , String cluster , String user , long startTime , long endTime , int limit ) throws IOException { byte [ ] startRow = null ; if ( StringUtils . isNotBlank ( user ) ) { startRow = ByteUtil . join ( Constants . SEP_BYTES , Bytes . toBytes ( cluster ) , Bytes . toBytes ( user ) ) ; } else { startRow = ByteUtil . join ( Constants . SEP_BYTES , Bytes . toBytes ( cluster ) ) ; } LOG . info ( "Reading app version rows start at " + Bytes . toStringBinary ( startRow ) ) ; Scan scan = new Scan ( ) ; scan . setStartRow ( startRow ) ; FilterList filters = new FilterList ( FilterList . Operator . MUST_PASS_ALL ) ; filters . addFilter ( new WhileMatchFilter ( new PrefixFilter ( startRow ) ) ) ; scan . setFilter ( filters ) ; List < AppKey > newAppsKeys = new ArrayList < AppKey > ( ) ; try { newAppsKeys = createNewAppKeysFromResults ( scan , startTime , endTime , limit ) ; } catch ( IOException e ) { LOG . error ( "Caught exception while trying to scan, returning empty list of flows: " + e . toString ( ) ) ; } List < AppSummary > newApps = new ArrayList < AppSummary > ( ) ; for ( AppKey ak : newAppsKeys ) { AppSummary anApp = new AppSummary ( ak ) ; List < Flow > flows = jhs . getFlowSeries ( ak . getCluster ( ) , ak . getUserName ( ) , ak . getAppId ( ) , null , Boolean . FALSE , startTime , endTime , Integer . MAX_VALUE ) ; for ( Flow f : flows ) { anApp . addFlow ( f ) ; } newApps . add ( anApp ) ; } return newApps ; }
scans the app version table to look for jobs that showed up in the given time range and creates the flow key that maps to these apps
39,255
public List < AppKey > createNewAppKeysFromResults ( Scan scan , long startTime , long endTime , int maxCount ) throws IOException { ResultScanner scanner = null ; List < AppKey > newAppsKeys = new ArrayList < AppKey > ( ) ; Table versionsTable = null ; try { Stopwatch timer = new Stopwatch ( ) . start ( ) ; int rowCount = 0 ; long colCount = 0 ; long resultSize = 0 ; versionsTable = hbaseConnection . getTable ( TableName . valueOf ( Constants . HISTORY_APP_VERSION_TABLE ) ) ; scanner = versionsTable . getScanner ( scan ) ; for ( Result result : scanner ) { if ( result != null && ! result . isEmpty ( ) ) { rowCount ++ ; colCount += result . size ( ) ; AppKey appKey = getNewAppKeyFromResult ( result , startTime , endTime ) ; if ( appKey != null ) { newAppsKeys . add ( appKey ) ; } if ( newAppsKeys . size ( ) >= maxCount ) { break ; } } } timer . stop ( ) ; LOG . info ( " Fetched from hbase " + rowCount + " rows, " + colCount + " columns, " + resultSize + " bytes ( " + resultSize / ( 1024 * 1024 ) + ") MB, in total time of " + timer ) ; } finally { if ( scanner != null ) { scanner . close ( ) ; } if ( versionsTable != null ) { versionsTable . close ( ) ; } } return newAppsKeys ; }
creates a list of appkeys from the hbase scan
39,256
private AppKey getNewAppKeyFromResult ( Result result , long startTime , long endTime ) throws IOException { byte [ ] rowKey = result . getRow ( ) ; byte [ ] [ ] keyComponents = ByteUtil . split ( rowKey , Constants . SEP_BYTES ) ; String cluster = Bytes . toString ( keyComponents [ 0 ] ) ; String user = Bytes . toString ( keyComponents [ 1 ] ) ; String appId = Bytes . toString ( keyComponents [ 2 ] ) ; NavigableMap < byte [ ] , byte [ ] > valueMap = result . getFamilyMap ( Constants . INFO_FAM_BYTES ) ; long runId = Long . MAX_VALUE ; for ( Map . Entry < byte [ ] , byte [ ] > entry : valueMap . entrySet ( ) ) { long tsl = Bytes . toLong ( entry . getValue ( ) ) ; if ( tsl < runId ) { runId = tsl ; } } if ( ( runId >= startTime ) && ( runId <= endTime ) ) { AppKey ak = new AppKey ( cluster , user , appId ) ; return ak ; } return null ; }
constructs an App key from the result set based on cluster , user and appId ; picks those results that satisfy the time range criteria
39,257
public boolean aggregateJobDetails ( JobDetails jobDetails , AggregationConstants . AGGREGATION_TYPE aggType ) { Table aggTable = null ; try { switch ( aggType ) { case DAILY : aggTable = hbaseConnection . getTable ( TableName . valueOf ( AggregationConstants . AGG_DAILY_TABLE ) ) ; break ; case WEEKLY : aggTable = hbaseConnection . getTable ( TableName . valueOf ( AggregationConstants . AGG_WEEKLY_TABLE ) ) ; ; break ; default : LOG . error ( "Unknown aggregation type : " + aggType ) ; return false ; } JobKey jobKey = jobDetails . getJobKey ( ) ; AppAggregationKey appAggKey = new AppAggregationKey ( jobKey . getCluster ( ) , jobKey . getUserName ( ) , jobKey . getAppId ( ) , getTimestamp ( jobKey . getRunId ( ) , aggType ) ) ; LOG . info ( "Aggregating " + aggType + " stats for " + jobKey . toString ( ) ) ; Increment aggIncrement = incrementAppSummary ( appAggKey , jobDetails ) ; aggTable . increment ( aggIncrement ) ; boolean status = updateMoreAggInfo ( aggTable , appAggKey , jobDetails ) ; return status ; } catch ( Exception e ) { LOG . error ( "Caught exception while attempting to aggregate for " + aggType + " table " , e ) ; return false ; } finally { if ( aggTable != null ) { try { aggTable . close ( ) ; } catch ( IOException e ) { LOG . error ( "Caught exception while attempting to close table " , e ) ; } } } }
creates a list of puts that aggregate the job details and stores in daily or weekly aggregation table
39,258
long getNumberRunsScratch ( Map < byte [ ] , byte [ ] > rawFamily ) { long numberRuns = 0L ; if ( rawFamily != null ) { numberRuns = rawFamily . size ( ) ; } if ( numberRuns == 0L ) { LOG . error ( "Number of runs in scratch column family can't be 0," + " if processing within TTL" ) ; throw new ProcessingException ( "Number of runs is 0" ) ; } return numberRuns ; }
interprets the number of runs based on number of columns in raw col family
39,259
private Increment incrementAppSummary ( AppAggregationKey appAggKey , JobDetails jobDetails ) { Increment aggIncrement = new Increment ( aggConv . toBytes ( appAggKey ) ) ; aggIncrement . addColumn ( Constants . INFO_FAM_BYTES , AggregationConstants . TOTAL_MAPS_BYTES , jobDetails . getTotalMaps ( ) ) ; aggIncrement . addColumn ( Constants . INFO_FAM_BYTES , AggregationConstants . TOTAL_REDUCES_BYTES , jobDetails . getTotalReduces ( ) ) ; aggIncrement . addColumn ( Constants . INFO_FAM_BYTES , AggregationConstants . MEGABYTEMILLIS_BYTES , jobDetails . getMegabyteMillis ( ) ) ; aggIncrement . addColumn ( Constants . INFO_FAM_BYTES , AggregationConstants . SLOTS_MILLIS_MAPS_BYTES , jobDetails . getMapSlotMillis ( ) ) ; aggIncrement . addColumn ( Constants . INFO_FAM_BYTES , AggregationConstants . SLOTS_MILLIS_REDUCES_BYTES , jobDetails . getReduceSlotMillis ( ) ) ; aggIncrement . addColumn ( Constants . INFO_FAM_BYTES , AggregationConstants . SLOTS_MILLIS_REDUCES_BYTES , jobDetails . getReduceSlotMillis ( ) ) ; aggIncrement . addColumn ( Constants . INFO_FAM_BYTES , AggregationConstants . TOTAL_JOBS_BYTES , 1L ) ; byte [ ] numberRowsCol = Bytes . toBytes ( jobDetails . getJobKey ( ) . getRunId ( ) ) ; aggIncrement . addColumn ( AggregationConstants . SCRATCH_FAM_BYTES , numberRowsCol , 1L ) ; return aggIncrement ; }
creates an Increment to aggregate job details
39,260
boolean updateQueue ( AppAggregationKey appAggKey , Table aggTable , JobDetails jobDetails ) throws IOException { byte [ ] rowKey = aggConv . toBytes ( appAggKey ) ; Get g = new Get ( rowKey ) ; g . addColumn ( AggregationConstants . INFO_FAM_BYTES , AggregationConstants . HRAVEN_QUEUE_BYTES ) ; Result r = aggTable . get ( g ) ; Cell existingQueuesCell = r . getColumnLatestCell ( AggregationConstants . INFO_FAM_BYTES , AggregationConstants . HRAVEN_QUEUE_BYTES ) ; String existingQueues = null ; byte [ ] existingQueuesBytes = null ; if ( existingQueuesCell != null ) { existingQueues = Bytes . toString ( CellUtil . cloneValue ( existingQueuesCell ) ) ; existingQueuesBytes = Bytes . toBytes ( existingQueues ) ; } String insertQueues = createQueueListValue ( jobDetails , existingQueues ) ; if ( insertQueues . equalsIgnoreCase ( existingQueues ) ) { if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( "Queue already present in aggregation for this app " + existingQueues + " " + insertQueues ) ; } return true ; } else { return executeCheckAndPut ( aggTable , rowKey , existingQueuesBytes , Bytes . toBytes ( insertQueues ) , AggregationConstants . INFO_FAM_BYTES , AggregationConstants . HRAVEN_QUEUE_BYTES ) ; } }
updates the queue list for this app aggregation
39,261
boolean executeCheckAndPut ( Table aggTable , byte [ ] rowKey , byte [ ] existingValueBytes , byte [ ] newValueBytes , byte [ ] famBytes , byte [ ] colBytes ) throws IOException { Put put = new Put ( rowKey ) ; put . addColumn ( famBytes , colBytes , newValueBytes ) ; boolean statusCheckAndPut = aggTable . checkAndPut ( rowKey , famBytes , colBytes , existingValueBytes , put ) ; return statusCheckAndPut ; }
method to execute an hbase checkAndPut operation
39,262
boolean incrNumberRuns ( List < Cell > column , Table aggTable , AppAggregationKey appAggKey ) throws IOException { long expectedValueBeforePut = 0L ; if ( column . size ( ) > 0 ) { try { expectedValueBeforePut = Bytes . toLong ( column . get ( 0 ) . getValue ( ) ) ; } catch ( NumberFormatException e ) { LOG . error ( "Could not read existing value for number of runs during aggregation" + appAggKey . toString ( ) ) ; return false ; } } byte [ ] rowKey = aggConv . toBytes ( appAggKey ) ; long insertValue = 1L ; byte [ ] expectedValueBeforePutBytes = null ; if ( expectedValueBeforePut != 0L ) { insertValue = 1 + expectedValueBeforePut ; expectedValueBeforePutBytes = Bytes . toBytes ( expectedValueBeforePut ) ; } byte [ ] insertValueBytes = Bytes . toBytes ( insertValue ) ; if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( " before statusCheckAndPut " + insertValue + " " + expectedValueBeforePut ) ; } return executeCheckAndPut ( aggTable , rowKey , expectedValueBeforePutBytes , insertValueBytes , AggregationConstants . INFO_FAM_BYTES , AggregationConstants . NUMBER_RUNS_BYTES ) ; }
checks and increments the number of runs for this app aggregation . no need to retry since another map task may have updated it in the meantime
39,263
static void loadHadoopClustersProps ( String filename ) { Properties prop = new Properties ( ) ; if ( StringUtils . isBlank ( filename ) ) { filename = Constants . HRAVEN_CLUSTER_PROPERTIES_FILENAME ; } try { InputStream inp = Cluster . class . getResourceAsStream ( "/" + filename ) ; if ( inp == null ) { LOG . error ( filename + " for mapping clusters to cluster identifiers in hRaven does not exist" ) ; return ; } prop . load ( inp ) ; Set < String > hostnames = prop . stringPropertyNames ( ) ; for ( String h : hostnames ) { CLUSTERS_BY_HOST . put ( h , prop . getProperty ( h ) ) ; } } catch ( IOException e ) { throw new ExceptionInInitializerError ( " Could not load properties file " + filename + " for mapping clusters to cluster identifiers in hRaven" ) ; } }
testing with different properties file names
39,264
public void load ( ResourceLocation resourceLocation ) { IResource res = Silenced . get ( ( ) -> Minecraft . getMinecraft ( ) . getResourceManager ( ) . getResource ( resourceLocation ) ) ; if ( res == null ) return ; GsonBuilder gsonBuilder = new GsonBuilder ( ) ; gsonBuilder . registerTypeAdapter ( AnimationImporter . class , ( InstanceCreator < AnimationImporter > ) type -> this ) ; gsonBuilder . registerTypeAdapter ( Multimap . class , ( JsonDeserializer < Multimap < String , Anim > > ) this :: deserializeAnim ) ; Gson gson = gsonBuilder . create ( ) ; try ( Reader reader = new InputStreamReader ( res . getInputStream ( ) , "UTF-8" ) ) { JsonReader jsonReader = new JsonReader ( reader ) ; jsonReader . setLenient ( true ) ; gson . fromJson ( jsonReader , AnimationImporter . class ) ; } catch ( Exception e ) { MalisisCore . log . error ( "Failed to read {}" , resourceLocation , e ) ; } }
Loads and reads the JSON .
39,265
public Multimap < String , Anim > deserializeAnim ( JsonElement json , Type typeOfT , JsonDeserializationContext context ) throws JsonParseException { Multimap < String , Anim > anims = ArrayListMultimap . create ( ) ; JsonObject obj = json . getAsJsonObject ( ) ; TypeToken < ArrayList < Anim > > token = new TypeToken < ArrayList < Anim > > ( ) { } ; for ( Entry < String , JsonElement > entry : obj . entrySet ( ) ) anims . putAll ( entry . getKey ( ) , context . deserialize ( entry . getValue ( ) , token . getType ( ) ) ) ; return anims ; }
Deserialize anims multimap .
39,266
public boolean onButtonPress ( MouseButton button ) { if ( ! isEnabled ( ) ) return false ; return parent != null ? parent . onButtonPress ( button ) : false ; }
On button press .
39,267
public boolean onButtonRelease ( MouseButton button ) { if ( ! isEnabled ( ) ) return false ; return parent != null ? parent . onButtonRelease ( button ) : false ; }
On button release .
39,268
public boolean onDoubleClick ( MouseButton button ) { if ( ! isEnabled ( ) ) return false ; return parent != null ? parent . onDoubleClick ( button ) : false ; }
On double click .
39,269
public boolean onDrag ( MouseButton button ) { if ( ! isEnabled ( ) ) return false ; return parent != null ? parent . onDrag ( button ) : false ; }
On drag .
39,270
public boolean onScrollWheel ( int delta ) { if ( ! isEnabled ( ) ) return false ; for ( IControlComponent c : controlComponents ) if ( c . onScrollWheel ( delta ) ) return true ; return parent != null && ! ( this instanceof IControlComponent ) ? parent . onScrollWheel ( delta ) : false ; }
On scroll wheel .
39,271
public void execute ( MinecraftServer server , ICommandSender sender , String [ ] params ) throws CommandException { if ( params . length == 0 ) throw new WrongUsageException ( "malisiscore.commands.usage" ) ; if ( ! parameters . contains ( params [ 0 ] ) ) throw new WrongUsageException ( "malisiscore.commands.usage" ) ; switch ( params [ 0 ] ) { case "config" : configCommand ( sender , params ) ; break ; case "version" : IMalisisMod mod = null ; if ( params . length == 1 ) mod = MalisisCore . instance ; else { mod = MalisisCore . getMod ( params [ 1 ] ) ; if ( mod == null ) MalisisCore . message ( "malisiscore.commands.modnotfound" , params [ 1 ] ) ; } if ( mod != null ) MalisisCore . message ( "malisiscore.commands.modversion" , mod . getName ( ) , mod . getVersion ( ) ) ; break ; case "debug" : debugCommand ( sender , params ) ; break ; default : MalisisCore . message ( "Not yet implemented" ) ; break ; } }
Processes the command .
39,272
public static MethodNode findMethod ( ClassNode clazz , String name ) { for ( MethodNode method : clazz . methods ) { if ( method . name . equals ( name ) ) { return method ; } } return null ; }
Finds the method with the given name . If multiple methods with the same name exist the first one will be returned
39,273
public static AbstractInsnNode findInstruction ( MethodNode method , InsnList matches , int index ) { AbstractInsnNode node = method . instructions . get ( index ) ; AbstractInsnNode match = matches . getFirst ( ) ; while ( node != null ) { if ( insnEqual ( node , match ) ) { AbstractInsnNode m = match . getNext ( ) ; AbstractInsnNode n = node . getNext ( ) ; while ( m != null && n != null && insnEqual ( m , n ) ) { m = m . getNext ( ) ; n = n . getNext ( ) ; } if ( m == null ) return node ; } node = node . getNext ( ) ; } return null ; }
Finds instruction a specific instruction list inside a method starting from the specified index .
39,274
public MBlockPos add ( int x , int y , int z ) { return new MBlockPos ( this . getX ( ) + x , this . getY ( ) + y , this . getZ ( ) + z ) ; }
Add the given coordinates to the coordinates of this BlockPos
39,275
public MBlockPos offset ( EnumFacing facing , int n ) { return new MBlockPos ( this . getX ( ) + facing . getFrontOffsetX ( ) * n , this . getY ( ) + facing . getFrontOffsetY ( ) * n , this . getZ ( ) + facing . getFrontOffsetZ ( ) * n ) ; }
Offsets this BlockPos n blocks in the given direction
39,276
public void setPickedItemStack ( ItemStack itemStack ) { pickedItemStack = checkNotNull ( itemStack ) ; owner . inventory . setItemStack ( itemStack ) ; }
Sets the currently picked itemStack . Update player inventory .
39,277
public boolean shouldEndDrag ( int button ) { if ( ! isDraggingItemStack ( ) ) return false ; if ( dragType == DRAG_TYPE_ONE || dragType == DRAG_TYPE_SPREAD ) return dragType == button && draggedSlots . size ( ) > 1 ; return dragType == DRAG_TYPE_PICKUP ; }
Checks if the dragging should end based on the mouse button clicked .
39,278
public boolean shouldResetDrag ( int button ) { if ( ! isDraggingItemStack ( ) ) return false ; if ( dragType == DRAG_TYPE_SPREAD ) return button == 1 && draggedSlots . size ( ) > 1 ; if ( dragType == DRAG_TYPE_ONE ) return button == 0 && draggedSlots . size ( ) > 1 ; return dragType != DRAG_TYPE_PICKUP ; }
Checks if the dragging should be reset based on the mouse button clicked .
39,279
public void detectAndSendChanges ( ) { playerInventoryCache . sendChanges ( ) ; inventoryCaches . values ( ) . forEach ( InventoryCache :: sendChanges ) ; pickedItemStackCache . update ( ) ; if ( pickedItemStackCache . hasChanged ( ) ) UpdateInventorySlotsMessage . updatePickedItemStack ( pickedItemStackCache . get ( ) , ( EntityPlayerMP ) owner , windowId ) ; }
Sends all changes for the base inventory , the player's inventory , the picked up itemStack and the dragged itemStacks .
39,280
private ItemStack handleDropPickedStack ( boolean fullStack ) { ItemUtils . ItemStackSplitter iss = new ItemUtils . ItemStackSplitter ( pickedItemStack ) ; iss . split ( fullStack ? ItemUtils . FULL_STACK : 1 ) ; owner . dropItem ( iss . split , true ) ; setPickedItemStack ( iss . source ) ; return iss . source ; }
Drops one or the full itemStack currently picked up .
39,281
private ItemStack handleNormalClick ( MalisisSlot slot , boolean fullStack ) { if ( ! getPickedItemStack ( ) . isEmpty ( ) && ! slot . isItemValid ( pickedItemStack ) ) return getPickedItemStack ( ) ; if ( ! getPickedItemStack ( ) . isEmpty ( ) ) { if ( slot . isState ( PLAYER_INSERT | PLAYER_EXTRACT ) ) setPickedItemStack ( slot . insert ( pickedItemStack , fullStack ? ItemUtils . FULL_STACK : 1 , true ) ) ; } else if ( slot . isState ( PLAYER_EXTRACT ) ) setPickedItemStack ( slot . extract ( fullStack ? ItemUtils . FULL_STACK : ItemUtils . HALF_STACK ) ) ; return getPickedItemStack ( ) ; }
Handles the normal left or right click .
39,282
private ItemStack handleShiftClick ( MalisisInventory inventory , MalisisSlot slot ) { ItemStack itemStack = transferSlotOutOfInventory ( inventory , slot ) ; slot . setItemStack ( itemStack ) ; slot . onSlotChanged ( ) ; return itemStack ; }
Handles shift clicking a slot .
39,283
private ItemStack handleHotbar ( MalisisInventory inventory , MalisisSlot hoveredSlot , int num ) { MalisisSlot hotbarSlot = getPlayerInventory ( ) . getSlot ( num ) ; if ( inventory == getPlayerInventory ( ) || hoveredSlot . getItemStack ( ) . isEmpty ( ) ) { if ( hoveredSlot . isState ( PLAYER_INSERT ) ) { ItemStack dest = hotbarSlot . extract ( ItemUtils . FULL_STACK ) ; ItemStack src = hoveredSlot . extract ( ItemUtils . FULL_STACK ) ; dest = hoveredSlot . insert ( dest ) ; if ( ! dest . isEmpty ( ) ) { hotbarSlot . insert ( dest ) ; inventory . transferInto ( src ) ; } else src = hotbarSlot . insert ( src ) ; } } else { if ( hoveredSlot . isState ( PLAYER_EXTRACT ) ) { ItemStack dest = hoveredSlot . extract ( ItemUtils . FULL_STACK ) ; ItemStack left = hotbarSlot . insert ( dest , ItemUtils . FULL_STACK , true ) ; getPlayerInventory ( ) . transferInto ( left , false ) ; } } return hotbarSlot . getItemStack ( ) ; }
Handles player pressing 1 - 9 key while hovering a slot .
39,284
private ItemStack handleDropSlot ( MalisisSlot hoveredSlot , boolean fullStack ) { ItemStack itemStack = hoveredSlot . getItemStack ( ) ; if ( itemStack . isEmpty ( ) || ! hoveredSlot . isState ( PLAYER_EXTRACT ) ) return itemStack ; ItemUtils . ItemStackSplitter iss = new ItemUtils . ItemStackSplitter ( hoveredSlot . getItemStack ( ) ) ; iss . split ( fullStack ? ItemUtils . FULL_STACK : 1 ) ; hoveredSlot . setItemStack ( iss . source ) ; hoveredSlot . onSlotChanged ( ) ; owner . dropItem ( iss . split , true ) ; if ( iss . amount != 0 ) hoveredSlot . onPickupFromSlot ( owner , iss . split ) ; return iss . split ; }
Drops itemStack from hovering slot .
39,285
private ItemStack handleDoubleClick ( MalisisInventory inventory , MalisisSlot slot , boolean shiftClick ) { if ( ! inventory . state . is ( PLAYER_EXTRACT ) ) return ItemStack . EMPTY ; if ( ! shiftClick && ! pickedItemStack . isEmpty ( ) ) { for ( int i = 0 ; i < 2 ; i ++ ) { for ( MalisisInventory inv : getInventories ( ) ) if ( inv . pullItemStacks ( pickedItemStack , i == 0 ) ) break ; if ( pickedItemStack . getCount ( ) < pickedItemStack . getMaxStackSize ( ) ) getPlayerInventory ( ) . pullItemStacks ( pickedItemStack , i == 0 ) ; } setPickedItemStack ( pickedItemStack ) ; } else if ( ! lastShiftClicked . isEmpty ( ) ) { if ( inventory == getPlayerInventory ( ) ) { } for ( MalisisSlot s : inventory . getNonEmptySlots ( ) ) { ItemStack itemStack = s . getItemStack ( ) ; if ( s . isState ( PLAYER_EXTRACT ) && ItemUtils . areItemStacksStackable ( itemStack , lastShiftClicked ) ) { itemStack = transferSlotOutOfInventory ( inventory , s ) ; slot . setItemStack ( itemStack ) ; slot . onSlotChanged ( ) ; if ( ! itemStack . isEmpty ( ) ) return itemStack ; } } } lastShiftClicked = ItemStack . EMPTY ; return ItemStack . EMPTY ; }
Handle double clicking on a slot .
39,286
private ItemStack handlePickBlock ( MalisisSlot slot ) { if ( slot . getItemStack ( ) . isEmpty ( ) || ! pickedItemStack . isEmpty ( ) ) return ItemStack . EMPTY ; ItemStack itemStack = ItemUtils . copy ( slot . getItemStack ( ) ) ; itemStack . setCount ( itemStack . getMaxStackSize ( ) ) ; setPickedItemStack ( itemStack ) ; return itemStack ; }
Picks up the itemStack in the slot .
39,287
protected void resetDrag ( ) { if ( ! isDraggingItemStack ( ) ) return ; pickedItemStack = draggedItemStack . copy ( ) ; draggedSlots . forEach ( s -> s . setDraggedItemStack ( ItemStack . EMPTY ) ) ; draggedSlots . clear ( ) ; draggedItemStack = null ; dragType = - 1 ; }
Resets the dragging state .
39,288
public float getStringWidth ( String text , FontOptions options ) { if ( StringUtils . isEmpty ( text ) ) return 0 ; StringWalker walker = new StringWalker ( text , options ) ; walker . walkToEnd ( ) ; return walker . width ( ) ; }
Gets the rendering width of the text .
39,289
public float getStringHeight ( String text , FontOptions options ) { StringWalker walker = new StringWalker ( text , options ) ; walker . walkToEnd ( ) ; return walker . lineHeight ( ) ; }
Gets the rendering height of strings .
39,290
/**
 * Gets the maximum rendering width among the given strings.
 *
 * @param strings the strings to measure
 * @param options the font options to measure with
 * @return the widest rendered width, or 0 if the list is empty
 */
public float getMaxStringWidth(List<String> strings, FontOptions options)
{
	float max = 0;
	for (int i = 0; i < strings.size(); i++)
		max = Math.max(max, getStringWidth(strings.get(i), options));
	return max;
}
Gets the maximum rendering width of a list of strings.
39,291
/**
 * Gets the rendering height of a single character.
 *
 * @param c the character to measure
 * @param options the font options; {@code null} means a scale of 1
 * @return the scaled character height
 */
public float getCharHeight(char c, FontOptions options)
{
	float scale = options != null ? options.getFontScale() : 1;
	// Normalize the raw glyph height by the generator's font size...
	float normalized = getCharData(c).getCharHeight() / fontGeneratorOptions.fontSize;
	// ...then apply the option scale and the base size factor.
	// NOTE(review): 9 is presumably Minecraft's default font line height — confirm.
	return normalized * scale * 9;
}
Gets the rendering height of a character .
39,292
/**
 * Determines the X coordinate for a character position within a string.
 * <p>
 * NOTE(review): this implementation appears incomplete — the walker is created and
 * configured but never advanced, the {@code position} and {@code charOffset} parameters
 * are never read, and the method unconditionally returns 0. Confirm intended behavior.
 *
 * @param str the string to walk
 * @param options the font options to measure with
 * @param position the character position to locate (currently unused)
 * @param charOffset offset applied to the character index (currently unused)
 * @return always 0 in the current implementation
 */
public float getCharPosition(String str, FontOptions options, int position, int charOffset)
{
	if (StringUtils.isEmpty(str))
		return 0;

	str = processString(str, options);
	StringWalker walker = new StringWalker(str, options);
	walker.skipChars(true);
	return 0;
}
Determines the character for a given X coordinate .
39,293
/**
 * Sets the font options used for rendering.
 * Schedules a line rebuild when the new options change a metric-affecting property
 * (boldness or font scale).
 *
 * @param fontOptions the new font options; must not be {@code null}
 */
public void setFontOptions(FontOptions fontOptions)
{
	checkNotNull(fontOptions);

	// Compare against the current options before replacing them.
	boolean boldChanged = this.fontOptions.isBold() != fontOptions.isBold();
	boolean scaleChanged = this.fontOptions.getFontScale() != fontOptions.getFontScale();
	buildLines = boldChanged || scaleChanged;

	this.fontOptions = fontOptions;
}
Sets the font options to use to render .
39,294
/**
 * Resolves a parameter key to its value for substitution into the text.
 * When no parameter is registered for the key, the key itself is used
 * (translated first if translation is enabled). Resolved values have {@code $}
 * escaped so they are safe to use as regex replacement strings.
 *
 * @param key the parameter key to resolve
 * @return the resolved replacement string
 */
private String resolveParameter(String key)
{
	ICachedData<?> data = parameters.get(key);
	if (data != null)
		// Escape '$' so Matcher.appendReplacement doesn't treat it as a group reference.
		return Objects.toString(data.get()).replace("$", "\\$");

	if (translated)
		return I18n.format(key);
	return key;
}
Resolve parameter values to use in the text .
39,295
/**
 * Checks whether any registered parameter has changed since the last check.
 * <p>
 * Deliberately iterates every parameter with no early exit: each {@code ICachedData}
 * must be updated so its cached value stays in sync even after a change is found.
 *
 * @return {@code true} if at least one parameter reported a change
 */
private boolean hasParametersChanged()
{
	boolean changed = false;
	for (ICachedData<?> data : parameters.values())
	{
		data.update();
		if (data.hasChanged())
			changed = true; // was `changed |= true`, an unidiomatic no-op OR
	}
	return changed;
}
Checks whether any parameter has changed .
39,296
/**
 * Applies registered parameters to the text, replacing every {@code key}-named
 * pattern group occurrence with its resolved value, then translates the result
 * if translation is enabled.
 *
 * @param str the text to substitute parameters into
 * @return the text with parameters applied (and translated when enabled)
 */
public String applyParameters(String str)
{
	// StringBuffer is required by Matcher.appendReplacement before Java 9.
	StringBuffer resolved = new StringBuffer();
	Matcher matcher = pattern.matcher(str);
	while (matcher.find())
		matcher.appendReplacement(resolved, resolveParameter(matcher.group("key")));
	matcher.appendTail(resolved);

	String result = resolved.toString();
	if (translated)
		return I18n.format(result);
	return result;
}
Applies parameters to the text .
39,297
/**
 * Renders the text at its current screen position.
 * Updates the cached text first and renders nothing when the cache is empty.
 * When the parent is a component, its z-index is added; when the parent is
 * clipable, its clip area constrains the rendering.
 *
 * @param renderer the renderer to draw with
 */
public void render(GuiRenderer renderer)
{
	update();
	if (StringUtils.isEmpty(cache))
		return;

	int x = screenPosition.x();
	int y = screenPosition.y();
	int z = zIndex.getAsInt();
	ClipArea area = null;
	// Inherit depth from a parent component so text draws above it.
	if (parent instanceof UIComponent)
		z += parent.getZIndex();
	// Constrain drawing to the parent's clip area when it defines one.
	if (parent instanceof IClipable)
		area = ((IClipable) parent).getClipArea();

	render(renderer, x, y, z, area);
}
Renders all the text based on its set position .
39,298
/**
 * Registers an object instance to handle messages for the given mod.
 * Convenience overload: delegates with the instance's runtime class.
 *
 * @param mod the mod the handler belongs to
 * @param messageHandler the handler instance whose annotated methods receive messages
 */
public static void register(IMalisisMod mod, Object messageHandler)
{
	register(mod, messageHandler.getClass(), messageHandler);
}
Registers an object to handle mod messages .
39,299
@ SuppressWarnings ( "unchecked" ) public static < T > T message ( String modid , String messageName , Object ... data ) { if ( ! Loader . isModLoaded ( modid ) ) return null ; Collection < Pair < Object , Method > > messageList = messages . get ( modid + ":" + messageName ) ; if ( messageList . size ( ) == 0 ) { MalisisCore . log . warn ( "No message handler matching the parameters passed for {}" , modid + ":" + messageName ) ; return null ; } for ( Pair < Object , Method > message : messageList ) { if ( checkParameters ( message . getRight ( ) , data ) ) { try { return ( T ) message . getRight ( ) . invoke ( message . getLeft ( ) , data ) ; } catch ( ReflectiveOperationException e ) { MalisisCore . log . warn ( "An error happened processing the message :" , e ) ; } } } return null ; }
Sends a message to another mod.