idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
35,700
public static int getSupervisorPortNum ( Map conf , int sysCpuNum , Long physicalMemSize ) { double cpuWeight = ConfigExtension . getSupervisorSlotsPortCpuWeight ( conf ) ; int cpuPortNum = ( int ) ( sysCpuNum / cpuWeight ) ; if ( cpuPortNum < 1 ) { LOG . info ( "Invalid supervisor.slots.port.cpu.weight setting :" + cpuWeight + ", cpu cores:" + sysCpuNum ) ; cpuPortNum = 1 ; } Double memWeight = ConfigExtension . getSupervisorSlotsPortMemWeight ( conf ) ; int memPortNum = Integer . MAX_VALUE ; if ( physicalMemSize == null ) { LOG . info ( "Failed to get memory size" ) ; } else { LOG . debug ( "Get system memory size: " + physicalMemSize ) ; long workerMemSize = ConfigExtension . getMemSizePerWorker ( conf ) ; memPortNum = ( int ) ( physicalMemSize / ( workerMemSize * memWeight ) ) ; if ( memPortNum < 1 ) { LOG . info ( "System memory is too small for Jstorm" ) ; memPortNum = 1 ; } } return Math . min ( cpuPortNum , memPortNum ) ; }
calculate port number from cpu number and physical memory size
35,701
public static Map < String , Double > getMetrics ( Map conf , String topologyName , MetaType metricType , Integer window ) { NimbusClientWrapper nimbusClient = null ; Iface client = null ; Map < String , Double > summary = new HashMap < > ( ) ; try { nimbusClient = new NimbusClientWrapper ( ) ; nimbusClient . init ( conf ) ; client = nimbusClient . getClient ( ) ; String topologyId = client . getTopologyId ( topologyName ) ; if ( metricType == null ) { metricType = MetaType . TASK ; } List < MetricInfo > allTaskMetrics = client . getMetrics ( topologyId , metricType . getT ( ) ) ; if ( allTaskMetrics == null ) { throw new RuntimeException ( "Failed to get metrics" ) ; } if ( window == null || ! AsmWindow . TIME_WINDOWS . contains ( window ) ) { window = AsmWindow . M1_WINDOW ; } for ( MetricInfo taskMetrics : allTaskMetrics ) { Map < String , Map < Integer , MetricSnapshot > > metrics = taskMetrics . get_metrics ( ) ; if ( metrics == null ) { System . out . println ( "Failed to get task metrics" ) ; continue ; } for ( Entry < String , Map < Integer , MetricSnapshot > > entry : metrics . entrySet ( ) ) { String key = entry . getKey ( ) ; MetricSnapshot metric = entry . getValue ( ) . get ( window ) ; if ( metric == null ) { throw new RuntimeException ( "Failed to get one minute metrics of " + key ) ; } if ( metric . get_metricType ( ) == MetricType . COUNTER . getT ( ) ) { summary . put ( key , ( double ) metric . get_longValue ( ) ) ; } else if ( metric . get_metricType ( ) == MetricType . GAUGE . getT ( ) ) { summary . put ( key , metric . get_doubleValue ( ) ) ; } else { summary . put ( key , metric . get_mean ( ) ) ; } } } return summary ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } finally { if ( client != null ) { nimbusClient . cleanup ( ) ; } } }
Get Topology Metrics
35,702
public < T > T time ( Callable < T > event ) throws Exception { final long startTime = System . currentTimeMillis ( ) ; try { return event . call ( ) ; } finally { update ( System . currentTimeMillis ( ) - startTime ) ; } }
Times and records the duration of event .
35,703
private Map < Integer , LocalAssignment > getLocalAssign ( StormClusterState stormClusterState , String supervisorId , Map < String , Assignment > assignments ) throws Exception { Map < Integer , LocalAssignment > portToAssignment = new HashMap < > ( ) ; for ( Entry < String , Assignment > assignEntry : assignments . entrySet ( ) ) { String topologyId = assignEntry . getKey ( ) ; Assignment assignment = assignEntry . getValue ( ) ; Map < Integer , LocalAssignment > portTasks = readMyTasks ( stormClusterState , topologyId , supervisorId , assignment ) ; if ( portTasks == null ) { continue ; } for ( Entry < Integer , LocalAssignment > entry : portTasks . entrySet ( ) ) { Integer port = entry . getKey ( ) ; LocalAssignment la = entry . getValue ( ) ; if ( ! portToAssignment . containsKey ( port ) ) { portToAssignment . put ( port , la ) ; } else { throw new RuntimeException ( "Should not have multiple topologies assigned to one port" ) ; } } } return portToAssignment ; }
a port must be assigned to a topology
35,704
@ SuppressWarnings ( "unused" ) private Map < Integer , LocalAssignment > readMyTasks ( StormClusterState stormClusterState , String topologyId , String supervisorId , Assignment assignmentInfo ) throws Exception { Map < Integer , LocalAssignment > portTasks = new HashMap < > ( ) ; Set < ResourceWorkerSlot > workers = assignmentInfo . getWorkers ( ) ; if ( workers == null ) { LOG . error ( "No worker found for assignment {}!" , assignmentInfo ) ; return portTasks ; } for ( ResourceWorkerSlot worker : workers ) { if ( ! supervisorId . equals ( worker . getNodeId ( ) ) ) { continue ; } portTasks . put ( worker . getPort ( ) , new LocalAssignment ( topologyId , worker . getTasks ( ) , Common . topologyIdToName ( topologyId ) , worker . getMemSize ( ) , worker . getCpu ( ) , worker . getJvm ( ) , assignmentInfo . getTimeStamp ( ) ) ) ; } return portTasks ; }
get local node s tasks
35,705
public static Map < String , String > getTopologyCodeLocations ( Map < String , Assignment > assignments , String supervisorId ) throws Exception { Map < String , String > rtn = new HashMap < > ( ) ; for ( Entry < String , Assignment > entry : assignments . entrySet ( ) ) { String topologyId = entry . getKey ( ) ; Assignment assignmentInfo = entry . getValue ( ) ; Set < ResourceWorkerSlot > workers = assignmentInfo . getWorkers ( ) ; for ( ResourceWorkerSlot worker : workers ) { String node = worker . getNodeId ( ) ; if ( supervisorId . equals ( node ) ) { rtn . put ( topologyId , assignmentInfo . getMasterCodeDir ( ) ) ; break ; } } } return rtn ; }
get master code dir for each topology
35,706
public void emitDirect ( int taskId , String streamId , Tuple anchor , List < Object > tuple ) { emitDirect ( taskId , streamId , Arrays . asList ( anchor ) , tuple ) ; }
Emits a tuple directly to the specified task id on the specified stream . If the target bolt does not subscribe to this bolt using a direct grouping the tuple will not be sent . If the specified output stream is not declared as direct or the target bolt subscribes with a non - direct grouping an error will occur at runtime . The emitted values must be immutable .
35,707
private boolean monitorApplication ( ApplicationId appId ) throws YarnException , IOException { Integer monitorTimes = JOYConstants . MONITOR_TIMES ; while ( true ) { try { Thread . sleep ( JOYConstants . MONITOR_TIME_INTERVAL ) ; } catch ( InterruptedException e ) { LOG . debug ( "Thread sleep in monitoring loop interrupted" ) ; } ApplicationReport report = jstormClientContext . yarnClient . getApplicationReport ( appId ) ; try { File writename = new File ( JOYConstants . RPC_ADDRESS_FILE ) ; writename . createNewFile ( ) ; BufferedWriter out = new BufferedWriter ( new FileWriter ( writename ) ) ; out . write ( report . getHost ( ) + JOYConstants . NEW_LINE ) ; out . write ( report . getRpcPort ( ) + JOYConstants . NEW_LINE ) ; out . flush ( ) ; out . close ( ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } LOG . info ( "Got application report from ASM for" + ", appId=" + appId . getId ( ) + ", clientToAMToken=" + report . getClientToAMToken ( ) + ", appDiagnostics=" + report . getDiagnostics ( ) + ", appMasterHost=" + report . getHost ( ) + ", appQueue=" + report . getQueue ( ) + ", appMasterRpcPort=" + report . getRpcPort ( ) + ", appStartTime=" + report . getStartTime ( ) + ", yarnAppState=" + report . getYarnApplicationState ( ) . toString ( ) + ", distributedFinalState=" + report . getFinalApplicationStatus ( ) . toString ( ) + ", appTrackingUrl=" + report . getTrackingUrl ( ) + ", appUser=" + report . getUser ( ) ) ; YarnApplicationState state = report . getYarnApplicationState ( ) ; FinalApplicationStatus dsStatus = report . getFinalApplicationStatus ( ) ; if ( YarnApplicationState . FINISHED == state ) { if ( FinalApplicationStatus . SUCCEEDED == dsStatus ) { LOG . info ( "Application has completed successfully. Breaking monitoring loop" ) ; return true ; } else { LOG . info ( "Application did finished unsuccessfully." + " YarnState=" + state . toString ( ) + ", DSFinalStatus=" + dsStatus . toString ( ) + ". Breaking monitoring loop" ) ; return false ; } } else if ( YarnApplicationState . KILLED == state || YarnApplicationState . FAILED == state ) { LOG . info ( "Application did not finish." + " YarnState=" + state . toString ( ) + ", DSFinalStatus=" + dsStatus . toString ( ) + ". Breaking monitoring loop" ) ; return false ; } else if ( YarnApplicationState . RUNNING == state ) { LOG . info ( "Application is running successfully. Breaking monitoring loop" ) ; return true ; } else { if ( monitorTimes -- <= 0 ) { forceKillApplication ( appId ) ; return false ; } } } }
Monitor the submitted application for completion . Kill application if time expires .
35,708
public List < Long > getAllVersions ( ) throws IOException { List < Long > ret = new ArrayList < > ( ) ; for ( String s : listDir ( _root ) ) { if ( s . endsWith ( FINISHED_VERSION_SUFFIX ) ) { ret . add ( validateAndGetVersion ( s ) ) ; } } Collections . sort ( ret ) ; Collections . reverse ( ret ) ; return ret ; }
Sorted from most recent to oldest
35,709
public static void main ( String [ ] args ) { try { KeyGenerator kgen = KeyGenerator . getInstance ( "Blowfish" ) ; SecretKey skey = kgen . generateKey ( ) ; byte [ ] raw = skey . getEncoded ( ) ; String keyString = new String ( Hex . encodeHex ( raw ) ) ; System . out . println ( "storm -c " + SECRET_KEY + "=" + keyString + " -c " + Config . TOPOLOGY_TUPLE_SERIALIZER + "=" + BlowfishTupleSerializer . class . getName ( ) + " ..." ) ; } catch ( Exception ex ) { LOG . error ( ex . getMessage ( ) ) ; ex . printStackTrace ( ) ; } }
Produce a blowfish key to be used in Storm jar command
35,710
public boolean checkHeartBeat ( ) { String dataPath = executorMeta . getLocalDir ( ) ; File localstate = new File ( dataPath + "/data/" + startType + "/" + startType + ".heartbeat/" ) ; Long modefyTime = localstate . lastModified ( ) ; if ( System . currentTimeMillis ( ) - modefyTime > JOYConstants . EXECUTOR_HEARTBEAT_TIMEOUT ) { LOG . info ( "----------------------" ) ; modefyTime = localstate . lastModified ( ) ; LOG . info ( modefyTime . toString ( ) ) ; LOG . info ( Long . toString ( new Date ( ) . getTime ( ) ) ) ; LOG . info ( dataPath + "/data/" + startType + "/" + startType + ".heartbeat/" ) ; LOG . info ( "can't get heartbeat over " + String . valueOf ( JOYConstants . EXECUTOR_HEARTBEAT_TIMEOUT ) + " seconds" ) ; return false ; } else return true ; }
check supervisor s heartBeat
35,711
public boolean setJstormConf ( String key , String value ) { String line = " " + key + ": " + value ; try { Files . write ( Paths . get ( "deploy/jstorm/conf/storm.yaml" ) , line . getBytes ( ) , StandardOpenOption . APPEND ) ; } catch ( IOException e ) { LOG . error ( e ) ; return false ; } return true ; }
set local conf
35,712
private Map < String , String > generateSidToHost ( ) { Map < String , String > sidToHostname = new HashMap < > ( ) ; if ( oldAssignment != null ) { sidToHostname . putAll ( oldAssignment . getNodeHost ( ) ) ; } for ( Entry < String , SupervisorInfo > entry : cluster . entrySet ( ) ) { String supervisorId = entry . getKey ( ) ; SupervisorInfo supervisorInfo = entry . getValue ( ) ; sidToHostname . put ( supervisorId , supervisorInfo . getHostName ( ) ) ; } return sidToHostname ; }
Do we need just handle the case when type is ASSIGN_TYPE_NEW?
35,713
public static int tryPort ( int port ) throws IOException { ServerSocket socket = new ServerSocket ( port ) ; int rtn = socket . getLocalPort ( ) ; socket . close ( ) ; return rtn ; }
Check whether the port is available to bind
35,714
private Set < String > get_cleanup_ids ( StormClusterState clusterState , List < String > activeTopologies ) throws Exception { List < String > task_ids = clusterState . task_storms ( ) ; List < String > heartbeat_ids = clusterState . heartbeat_storms ( ) ; List < String > error_ids = clusterState . task_error_storms ( ) ; List < String > assignment_ids = clusterState . assignments ( null ) ; List < String > metric_ids = clusterState . get_metrics ( ) ; HashSet < String > latest_code_ids = new HashSet < > ( ) ; Set < String > code_ids = BlobStoreUtils . code_ids ( nimbusData . getBlobStore ( ) ) ; Set < String > to_cleanup_ids = new HashSet < > ( ) ; Set < String > pendingTopologies = nimbusData . getPendingSubmitTopologies ( ) . buildMap ( ) . keySet ( ) ; if ( task_ids != null ) { to_cleanup_ids . addAll ( task_ids ) ; } if ( heartbeat_ids != null ) { to_cleanup_ids . addAll ( heartbeat_ids ) ; } if ( error_ids != null ) { to_cleanup_ids . addAll ( error_ids ) ; } if ( assignment_ids != null ) { to_cleanup_ids . addAll ( assignment_ids ) ; } if ( code_ids != null ) { to_cleanup_ids . addAll ( code_ids ) ; } if ( metric_ids != null ) { to_cleanup_ids . addAll ( metric_ids ) ; } if ( activeTopologies != null ) { to_cleanup_ids . removeAll ( activeTopologies ) ; latest_code_ids . removeAll ( activeTopologies ) ; } to_cleanup_ids . removeAll ( pendingTopologies ) ; to_cleanup_ids . removeAll ( latest_code_ids ) ; LOG . info ( "Skip removing topology of " + latest_code_ids ) ; return to_cleanup_ids ; }
get topology ids that need to be cleaned up
35,715
public Assignment mkAssignment ( TopologyAssignEvent event ) throws Exception { String topologyId = event . getTopologyId ( ) ; LOG . info ( "Determining assignment for " + topologyId ) ; TopologyAssignContext context = prepareTopologyAssign ( event ) ; Set < ResourceWorkerSlot > assignments ; if ( ! StormConfig . local_mode ( nimbusData . getConf ( ) ) ) { IToplogyScheduler scheduler = schedulers . get ( DEFAULT_SCHEDULER_NAME ) ; assignments = scheduler . assignTasks ( context ) ; } else { assignments = mkLocalAssignment ( context ) ; } Assignment assignment = null ; if ( assignments != null && assignments . size ( ) > 0 ) { Map < String , String > nodeHost = getTopologyNodeHost ( context . getCluster ( ) , context . getOldAssignment ( ) , assignments ) ; Map < Integer , Integer > startTimes = getTaskStartTimes ( context , nimbusData , topologyId , context . getOldAssignment ( ) , assignments ) ; String codeDir = ( String ) nimbusData . getConf ( ) . get ( Config . STORM_LOCAL_DIR ) ; assignment = new Assignment ( codeDir , assignments , nodeHost , startTimes ) ; if ( event . isScaleTopology ( ) ) { assignment . setAssignmentType ( Assignment . AssignmentType . ScaleTopology ) ; } StormClusterState stormClusterState = nimbusData . getStormClusterState ( ) ; stormClusterState . set_assignment ( topologyId , assignment ) ; NimbusUtils . updateTaskHbStartTime ( nimbusData , assignment , topologyId ) ; NimbusUtils . updateTopologyTaskTimeout ( nimbusData , topologyId ) ; LOG . info ( "Successfully make assignment for topology id " + topologyId + ": " + assignment ) ; } return assignment ; }
make assignments for a topology The nimbus core function this function has been totally rewrite
35,716
public static Set < Integer > getNewOrChangedTaskIds ( Set < ResourceWorkerSlot > oldWorkers , Set < ResourceWorkerSlot > workers ) { Set < Integer > rtn = new HashSet < > ( ) ; HashMap < String , ResourceWorkerSlot > workerPortMap = HostPortToWorkerMap ( oldWorkers ) ; for ( ResourceWorkerSlot worker : workers ) { ResourceWorkerSlot oldWorker = workerPortMap . get ( worker . getHostPort ( ) ) ; if ( oldWorker != null ) { Set < Integer > oldTasks = oldWorker . getTasks ( ) ; for ( Integer task : worker . getTasks ( ) ) { if ( ! ( oldTasks . contains ( task ) ) ) rtn . add ( task ) ; } } else { if ( worker . getTasks ( ) != null ) { rtn . addAll ( worker . getTasks ( ) ) ; } } } return rtn ; }
get all task ids which are newly assigned or reassigned
35,717
public static List < WorkerSlot > sortSlots ( Set < WorkerSlot > allSlots , int needSlotNum ) { Map < String , List < WorkerSlot > > nodeMap = new HashMap < > ( ) ; for ( WorkerSlot np : allSlots ) { String node = np . getNodeId ( ) ; List < WorkerSlot > list = nodeMap . get ( node ) ; if ( list == null ) { list = new ArrayList < > ( ) ; nodeMap . put ( node , list ) ; } list . add ( np ) ; } for ( Entry < String , List < WorkerSlot > > entry : nodeMap . entrySet ( ) ) { List < WorkerSlot > ports = entry . getValue ( ) ; Collections . sort ( ports , new Comparator < WorkerSlot > ( ) { public int compare ( WorkerSlot first , WorkerSlot second ) { String firstNode = first . getNodeId ( ) ; String secondNode = second . getNodeId ( ) ; if ( ! firstNode . equals ( secondNode ) ) { return firstNode . compareTo ( secondNode ) ; } else { return first . getPort ( ) - second . getPort ( ) ; } } } ) ; } List < List < WorkerSlot > > splitup = new ArrayList < > ( nodeMap . values ( ) ) ; Collections . sort ( splitup , new Comparator < List < WorkerSlot > > ( ) { public int compare ( List < WorkerSlot > o1 , List < WorkerSlot > o2 ) { return o2 . size ( ) - o1 . size ( ) ; } } ) ; List < WorkerSlot > sortedFreeSlots = JStormUtils . interleave_all ( splitup ) ; if ( sortedFreeSlots . size ( ) <= needSlotNum ) { return sortedFreeSlots ; } return sortedFreeSlots . subList ( 0 , needSlotNum ) ; }
sort slots the purpose is to ensure that the tasks are assigned in balancing
35,718
public Set < Integer > getUnstoppedSlots ( Set < Integer > aliveTasks , Map < String , SupervisorInfo > supInfos , Assignment existAssignment ) { Set < Integer > ret = new HashSet < > ( ) ; Set < ResourceWorkerSlot > oldWorkers = existAssignment . getWorkers ( ) ; Set < String > aliveSupervisors = supInfos . keySet ( ) ; for ( ResourceWorkerSlot worker : oldWorkers ) { for ( Integer taskId : worker . getTasks ( ) ) { if ( ! aliveTasks . contains ( taskId ) ) { continue ; } String oldTaskSupervisorId = worker . getNodeId ( ) ; if ( ! aliveSupervisors . contains ( oldTaskSupervisorId ) ) { ret . add ( taskId ) ; } } } return ret ; }
Get unstopped slots from alive task list
35,719
public static void getFreeSlots ( Map < String , SupervisorInfo > supervisorInfos , StormClusterState stormClusterState ) throws Exception { Map < String , Assignment > assignments = Cluster . get_all_assignment ( stormClusterState , null ) ; for ( Entry < String , Assignment > entry : assignments . entrySet ( ) ) { Assignment assignment = entry . getValue ( ) ; Set < ResourceWorkerSlot > workers = assignment . getWorkers ( ) ; for ( ResourceWorkerSlot worker : workers ) { SupervisorInfo supervisorInfo = supervisorInfos . get ( worker . getNodeId ( ) ) ; if ( supervisorInfo == null ) { continue ; } supervisorInfo . getAvailableWorkerPorts ( ) . remove ( worker . getPort ( ) ) ; } } }
Get free resources
35,720
public Set < Integer > getAliveTasks ( String topologyId , Set < Integer > taskIds ) throws Exception { Set < Integer > aliveTasks = new HashSet < > ( ) ; for ( int taskId : taskIds ) { boolean isDead = NimbusUtils . isTaskDead ( nimbusData , topologyId , taskId ) ; if ( ! isDead ) { aliveTasks . add ( taskId ) ; } } return aliveTasks ; }
find all alive task ids . Do not assume that clocks are synchronized . Task heartbeat is only used so that nimbus knows when it s received a new heartbeat . All timing is done by nimbus and tracked through task - heartbeat - cache
35,721
public void backupAssignment ( Assignment assignment , TopologyAssignEvent event ) { String topologyId = event . getTopologyId ( ) ; String topologyName = event . getTopologyName ( ) ; try { StormClusterState zkClusterState = nimbusData . getStormClusterState ( ) ; Map < Integer , String > tasks = Cluster . get_all_task_component ( zkClusterState , topologyId , null ) ; Map < String , List < Integer > > componentTasks = JStormUtils . reverse_map ( tasks ) ; for ( Entry < String , List < Integer > > entry : componentTasks . entrySet ( ) ) { List < Integer > keys = entry . getValue ( ) ; Collections . sort ( keys ) ; } AssignmentBak assignmentBak = new AssignmentBak ( componentTasks , assignment ) ; zkClusterState . backup_assignment ( topologyName , assignmentBak ) ; } catch ( Exception e ) { LOG . warn ( "Failed to backup " + topologyId + " assignment " + assignment , e ) ; } }
Backup topology assignment to ZK
35,722
public BoltDeclarer setBolt ( String id , ITridentBatchBolt bolt , Integer parallelism , Set < String > committerBatches , Map < String , String > batchGroups ) { markBatchGroups ( id , batchGroups ) ; Component c = new Component ( bolt , parallelism , committerBatches ) ; _bolts . put ( id , c ) ; return new BoltDeclarerImpl ( c ) ; }
map from stream name to batch id
35,723
private void commitProgress ( FileOffset position ) { if ( position == null ) { return ; } if ( lock != null && canCommitNow ( ) ) { try { String pos = position . toString ( ) ; lock . heartbeat ( pos ) ; LOG . debug ( "{} Committed progress. {}" , spoutId , pos ) ; acksSinceLastCommit = 0 ; commitTimeElapsed . set ( false ) ; setupCommitElapseTimer ( ) ; } catch ( IOException e ) { LOG . error ( "Unable to commit progress Will retry later. Spout ID = " + spoutId , e ) ; } } }
will commit progress into lock file if commit threshold is reached
35,724
private FileLock getOldestExpiredLock ( ) throws IOException { DirLock dirlock = DirLock . tryLock ( hdfs , lockDirPath ) ; if ( dirlock == null ) { dirlock = DirLock . takeOwnershipIfStale ( hdfs , lockDirPath , lockTimeoutSec ) ; if ( dirlock == null ) { LOG . debug ( "Spout {} could not take over ownership of DirLock for {}" , spoutId , lockDirPath ) ; return null ; } LOG . debug ( "Spout {} now took over ownership of abandoned DirLock for {}" , spoutId , lockDirPath ) ; } else { LOG . debug ( "Spout {} now owns DirLock for {}" , spoutId , lockDirPath ) ; } try { if ( clocksInSync ) { return FileLock . acquireOldestExpiredLock ( hdfs , lockDirPath , lockTimeoutSec , spoutId ) ; } if ( lastExpiredLock == null ) { lastExpiredLock = FileLock . locateOldestExpiredLock ( hdfs , lockDirPath , lockTimeoutSec ) ; lastExpiredLockTime = System . currentTimeMillis ( ) ; return null ; } if ( hasExpired ( lastExpiredLockTime ) ) { return null ; } FileLock . LogEntry lastEntry = FileLock . getLastEntry ( hdfs , lastExpiredLock . getKey ( ) ) ; if ( lastEntry . equals ( lastExpiredLock . getValue ( ) ) ) { FileLock result = FileLock . takeOwnership ( hdfs , lastExpiredLock . getKey ( ) , lastEntry , spoutId ) ; lastExpiredLock = null ; return result ; } else { lastExpiredLock = null ; return null ; } } finally { dirlock . release ( ) ; LOG . debug ( "Released DirLock {}, SpoutID {} " , dirlock . getLockFile ( ) , spoutId ) ; } }
If clocks in sync then acquires the oldest expired lock Else on first call just remembers the oldest expired lock on next call check if the lock is updated . if not updated then acquires the lock
35,725
private FileReader createFileReader ( Path file ) throws IOException { if ( readerType . equalsIgnoreCase ( Configs . SEQ ) ) { return new SequenceFileReader ( this . hdfs , file , conf ) ; } if ( readerType . equalsIgnoreCase ( Configs . TEXT ) ) { return new TextFileReader ( this . hdfs , file , conf ) ; } try { Class < ? > clsType = Class . forName ( readerType ) ; Constructor < ? > constructor = clsType . getConstructor ( FileSystem . class , Path . class , Map . class ) ; return ( FileReader ) constructor . newInstance ( this . hdfs , file , conf ) ; } catch ( Exception e ) { LOG . error ( e . getMessage ( ) , e ) ; throw new RuntimeException ( "Unable to instantiate " + readerType + " reader" , e ) ; } }
Creates a reader that reads from beginning of file
35,726
private Path renameToInProgressFile ( Path file ) throws IOException { Path newFile = new Path ( file . toString ( ) + inprogress_suffix ) ; try { if ( hdfs . rename ( file , newFile ) ) { return newFile ; } throw new RenameException ( file , newFile ) ; } catch ( IOException e ) { throw new RenameException ( file , newFile , e ) ; } }
Renames files with . inprogress suffix
35,727
private Path getFileForLockFile ( Path lockFile , Path sourceDirPath ) throws IOException { String lockFileName = lockFile . getName ( ) ; Path dataFile = new Path ( sourceDirPath + Path . SEPARATOR + lockFileName + inprogress_suffix ) ; if ( hdfs . exists ( dataFile ) ) { return dataFile ; } dataFile = new Path ( sourceDirPath + Path . SEPARATOR + lockFileName ) ; if ( hdfs . exists ( dataFile ) ) { return dataFile ; } return null ; }
Returns the corresponding input file in the sourceDirPath for the specified lock file . If no such file is found then returns null
35,728
private Path renameCompletedFile ( Path file ) throws IOException { String fileName = file . toString ( ) ; String fileNameMinusSuffix = fileName . substring ( 0 , fileName . indexOf ( inprogress_suffix ) ) ; String newName = new Path ( fileNameMinusSuffix ) . getName ( ) ; Path newFile = new Path ( archiveDirPath + Path . SEPARATOR + newName ) ; LOG . info ( "Completed consuming file {}" , fileNameMinusSuffix ) ; if ( ! hdfs . rename ( file , newFile ) ) { throw new IOException ( "Rename failed for file: " + file ) ; } LOG . debug ( "Renamed file {} to {} " , file , newFile ) ; return newFile ; }
renames files and returns the new file path
35,729
public static StormZkClusterState mkStormZkClusterState ( Map conf ) throws Exception { Map realConf = getFullConf ( conf ) ; return new StormZkClusterState ( realConf ) ; }
This function bring some hacks to JStorm this isn t a good way
35,730
public void enqueue ( TaskMessage message , Channel channel ) { while ( ! bstartRec ) { LOG . info ( "check whether deserialize queues have already been created" ) ; boolean isFinishInit = true ; for ( Integer task : workerTasks ) { if ( deserializeQueues . get ( task ) == null ) { isFinishInit = false ; JStormUtils . sleepMs ( 10 ) ; break ; } } if ( isFinishInit ) { bstartRec = isFinishInit ; } } short type = message . get_type ( ) ; if ( type == TaskMessage . NORMAL_MESSAGE ) { int task = message . task ( ) ; DisruptorQueue queue = deserializeQueues . get ( task ) ; if ( queue == null ) { LOG . warn ( "Received invalid message directed at task {}. Dropping..." , task ) ; LOG . debug ( "Message data: {}" , JStormUtils . toPrintableString ( message . message ( ) ) ) ; return ; } if ( ! isBackpressureEnable ) { queue . publish ( message . message ( ) ) ; } else { flowCtrlHandler . flowCtrl ( channel , queue , task , message . message ( ) ) ; } } else if ( type == TaskMessage . CONTROL_MESSAGE ) { if ( recvControlQueue == null ) { LOG . info ( "Can not find the recvControlQueue. Dropping this control message" ) ; return ; } recvControlQueue . publish ( message ) ; } else { LOG . warn ( "Unexpected message (type={}) was received from task {}" , type , message . task ( ) ) ; } }
enqueue a received message
35,731
protected void closeChannel ( Channel channel ) { MessageDecoder . removeTransmitHistogram ( channel ) ; channel . close ( ) . awaitUninterruptibly ( ) ; allChannels . remove ( channel ) ; }
close a channel
35,732
public void close ( ) { LOG . info ( "Begin to shutdown NettyServer" ) ; if ( allChannels != null ) { new Thread ( new Runnable ( ) { public void run ( ) { try { allChannels . close ( ) . await ( 1 , TimeUnit . SECONDS ) ; LOG . info ( "Successfully close all channel" ) ; factory . releaseExternalResources ( ) ; } catch ( Exception ignored ) { } allChannels = null ; } } ) . start ( ) ; JStormUtils . sleepMs ( 1000 ) ; } LOG . info ( "Successfully shutdown NettyServer" ) ; }
close all channels and release resources
35,733
private TimeWindow mergeSessionWindows ( TimeWindow oldWindow , TimeWindow newWindow ) { if ( oldWindow . intersects ( newWindow ) ) { return oldWindow . cover ( newWindow ) ; } return newWindow ; }
merges two windows if there s an overlap between two windows return merged window ; otherwise return the new window itself .
35,734
public static String getMetricValue ( MetricSnapshot snapshot ) { if ( snapshot == null ) return null ; MetricType type = MetricType . parse ( snapshot . get_metricType ( ) ) ; switch ( type ) { case COUNTER : return format ( snapshot . get_longValue ( ) ) ; case GAUGE : return format ( snapshot . get_doubleValue ( ) ) ; case METER : return format ( snapshot . get_m1 ( ) ) ; case HISTOGRAM : return format ( snapshot . get_mean ( ) ) ; default : return "0" ; } }
get MetricSnapshot formatted value string
35,735
public static String extractGroup ( String [ ] strs ) { if ( strs . length < 6 ) return null ; return strs [ strs . length - 2 ] ; }
Extract Group from WM
35,736
public static String extractMetricName ( String [ ] strs ) { if ( strs . length < 6 ) return null ; return strs [ strs . length - 1 ] ; }
Extract MetricName from CC
35,737
public static UIComponentMetric getComponentMetric ( MetricInfo info , int window , String compName , List < ComponentSummary > componentSummaries ) { UIComponentMetric compMetric = new UIComponentMetric ( compName ) ; if ( info != null ) { for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric : info . get_metrics ( ) . entrySet ( ) ) { String name = metric . getKey ( ) ; String [ ] split_name = name . split ( "@" ) ; String componentName = UIMetricUtils . extractComponentName ( split_name ) ; if ( componentName != null && ! componentName . equals ( compName ) ) continue ; String metricName = UIMetricUtils . extractMetricName ( split_name ) ; String parentComp = null ; if ( metricName != null && metricName . contains ( "." ) ) { parentComp = metricName . split ( "\\." ) [ 0 ] ; metricName = metricName . split ( "\\." ) [ 1 ] ; } MetricSnapshot snapshot = metric . getValue ( ) . get ( window ) ; compMetric . setMetricValue ( snapshot , parentComp , metricName ) ; } } compMetric . mergeValue ( ) ; ComponentSummary summary = null ; for ( ComponentSummary cs : componentSummaries ) { if ( cs . get_name ( ) . equals ( compName ) ) { summary = cs ; break ; } } if ( summary != null ) { compMetric . setParallel ( summary . get_parallel ( ) ) ; compMetric . setType ( summary . get_type ( ) ) ; } return compMetric ; }
get the specific component metric
35,738
public static List < UITaskMetric > getTaskMetrics ( MetricInfo info , String component , int window ) { TreeMap < Integer , UITaskMetric > taskData = new TreeMap < > ( ) ; if ( info != null ) { for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric : info . get_metrics ( ) . entrySet ( ) ) { String name = metric . getKey ( ) ; String [ ] split_name = name . split ( "@" ) ; int taskId = JStormUtils . parseInt ( UIMetricUtils . extractTaskId ( split_name ) ) ; String componentName = UIMetricUtils . extractComponentName ( split_name ) ; if ( componentName != null && ! componentName . equals ( component ) ) continue ; String metricName = UIMetricUtils . extractMetricName ( split_name ) ; String parentComp = null ; if ( metricName != null && metricName . contains ( "." ) ) { parentComp = metricName . split ( "\\." ) [ 0 ] ; metricName = metricName . split ( "\\." ) [ 1 ] ; } if ( ! metric . getValue ( ) . containsKey ( window ) ) { LOG . info ( "task snapshot {} missing window:{}" , metric . getKey ( ) , window ) ; continue ; } MetricSnapshot snapshot = metric . getValue ( ) . get ( window ) ; UITaskMetric taskMetric ; if ( taskData . containsKey ( taskId ) ) { taskMetric = taskData . get ( taskId ) ; } else { taskMetric = new UITaskMetric ( component , taskId ) ; taskData . put ( taskId , taskMetric ) ; } taskMetric . setMetricValue ( snapshot , parentComp , metricName ) ; } } for ( UITaskMetric t : taskData . values ( ) ) { t . mergeValue ( ) ; } return new ArrayList < > ( taskData . values ( ) ) ; }
get all task metrics in the specific component
35,739
public static UITaskMetric getTaskMetric ( List < MetricInfo > taskStreamMetrics , String component , int id , int window ) { UITaskMetric taskMetric = new UITaskMetric ( component , id ) ; if ( taskStreamMetrics . size ( ) > 1 ) { MetricInfo info = taskStreamMetrics . get ( 0 ) ; if ( info != null ) { for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric : info . get_metrics ( ) . entrySet ( ) ) { String name = metric . getKey ( ) ; String [ ] split_name = name . split ( "@" ) ; int taskId = JStormUtils . parseInt ( UIMetricUtils . extractTaskId ( split_name ) ) ; if ( taskId != id ) continue ; String metricName = UIMetricUtils . extractMetricName ( split_name ) ; String parentComp = null ; if ( metricName != null && metricName . contains ( "." ) ) { parentComp = metricName . split ( "\\." ) [ 0 ] ; metricName = metricName . split ( "\\." ) [ 1 ] ; } MetricSnapshot snapshot = metric . getValue ( ) . get ( window ) ; taskMetric . setMetricValue ( snapshot , parentComp , metricName ) ; } } } taskMetric . mergeValue ( ) ; return taskMetric ; }
get the specific task metric
35,740
public Object remove ( K key ) { for ( Map < K , V > bucket : buckets ) { Object value = bucket . remove ( key ) ; if ( value != null ) { return value ; } } return null ; }
On the side of performance scanning from header is faster on the side of logic it should scan from the end to first .
35,741
public Principal principal ( ) { if ( _subject == null ) return null ; Set < Principal > princs = _subject . getPrincipals ( ) ; if ( princs . size ( ) == 0 ) return null ; return ( Principal ) ( princs . toArray ( ) [ 0 ] ) ; }
The primary principal associated current subject
35,742
public void init ( Cluster cluster , Map < String , Node > nodeIdToNode ) { _cluster = cluster ; _nodeIdToNode = nodeIdToNode ; }
Initialize the pool .
35,743
public void init ( ) { try { initPlugin ( ) ; } catch ( RuntimeException e ) { LOG . error ( "init metrics plugin error:" , e ) ; System . exit ( - 1 ) ; } pushRefreshEvent ( ) ; pushFlushEvent ( ) ; pushMergeEvent ( ) ; pushUploadEvent ( ) ; pushDiagnosisEvent ( ) ; LOG . info ( "Finish" ) ; }
init plugins and start event
35,744
public TopologyMetric getTopologyMetric ( String topologyId ) { long start = System . nanoTime ( ) ; try { TopologyMetric ret = new TopologyMetric ( ) ; List < MetricInfo > topologyMetrics = metricCache . getMetricData ( topologyId , MetaType . TOPOLOGY ) ; List < MetricInfo > componentMetrics = metricCache . getMetricData ( topologyId , MetaType . COMPONENT ) ; List < MetricInfo > workerMetrics = metricCache . getMetricData ( topologyId , MetaType . WORKER ) ; MetricInfo dummy = MetricUtils . mkMetricInfo ( ) ; if ( topologyMetrics . size ( ) > 0 ) { ret . set_topologyMetric ( topologyMetrics . get ( topologyMetrics . size ( ) - 1 ) ) ; } else { ret . set_topologyMetric ( dummy ) ; } if ( componentMetrics . size ( ) > 0 ) { ret . set_componentMetric ( componentMetrics . get ( 0 ) ) ; } else { ret . set_componentMetric ( dummy ) ; } if ( workerMetrics . size ( ) > 0 ) { ret . set_workerMetric ( workerMetrics . get ( 0 ) ) ; } else { ret . set_workerMetric ( dummy ) ; } ret . set_taskMetric ( dummy ) ; ret . set_streamMetric ( dummy ) ; ret . set_nettyMetric ( dummy ) ; return ret ; } finally { long end = System . nanoTime ( ) ; SimpleJStormMetric . updateNimbusHistogram ( "getTopologyMetric" , ( end - start ) / TimeUtils . NS_PER_US ) ; } }
get topology metrics note that only topology & component & worker metrics are returned
35,745
public void start ( ) { rotationTimer = new Timer ( true ) ; TimerTask task = new TimerTask ( ) { public void run ( ) { rotationTimerTriggered . set ( true ) ; } } ; rotationTimer . scheduleAtFixedRate ( task , interval , interval ) ; }
Start the timer to run at fixed intervals .
35,746
@ SuppressWarnings ( "unchecked" ) public Object commit ( long batchId , Object state ) { List < Object > stateList = ( List < Object > ) state ; if ( stateOperator != null ) { Object commitState = stateOperator . commit ( batchId , stateList ) ; stateList . add ( commitState ) ; } return stateList ; }
to topology master for persistence
35,747
public void mergeMeters ( MetricInfo metricInfo , String meta , Map < Integer , MetricSnapshot > data ) { Map < Integer , MetricSnapshot > existing = metricInfo . get_metrics ( ) . get ( meta ) ; if ( existing == null ) { metricInfo . put_to_metrics ( meta , data ) ; } else { for ( Map . Entry < Integer , MetricSnapshot > dataEntry : data . entrySet ( ) ) { Integer win = dataEntry . getKey ( ) ; MetricSnapshot snapshot = dataEntry . getValue ( ) ; MetricSnapshot old = existing . get ( win ) ; if ( old == null ) { existing . put ( win , snapshot ) ; } else { if ( snapshot . get_ts ( ) >= old . get_ts ( ) ) { old . set_ts ( snapshot . get_ts ( ) ) ; old . set_mean ( old . get_mean ( ) + snapshot . get_mean ( ) ) ; old . set_m1 ( old . get_m1 ( ) + snapshot . get_m1 ( ) ) ; old . set_m5 ( old . get_m5 ( ) + snapshot . get_m5 ( ) ) ; old . set_m15 ( old . get_m15 ( ) + snapshot . get_m15 ( ) ) ; } } } } }
meters are not sampled .
35,748
public void mergeHistograms ( MetricInfo metricInfo , String meta , Map < Integer , MetricSnapshot > data , Map < String , Integer > metaCounters , Map < String , Map < Integer , Histogram > > histograms ) { Map < Integer , MetricSnapshot > existing = metricInfo . get_metrics ( ) . get ( meta ) ; if ( existing == null ) { metricInfo . put_to_metrics ( meta , data ) ; Map < Integer , Histogram > histogramMap = new HashMap < > ( ) ; for ( Map . Entry < Integer , MetricSnapshot > dataEntry : data . entrySet ( ) ) { Histogram histogram = MetricUtils . metricSnapshot2Histogram ( dataEntry . getValue ( ) ) ; histogramMap . put ( dataEntry . getKey ( ) , histogram ) ; } histograms . put ( meta , histogramMap ) ; } else { for ( Map . Entry < Integer , MetricSnapshot > dataEntry : data . entrySet ( ) ) { Integer win = dataEntry . getKey ( ) ; MetricSnapshot snapshot = dataEntry . getValue ( ) ; MetricSnapshot old = existing . get ( win ) ; if ( old == null ) { existing . put ( win , snapshot ) ; histograms . get ( meta ) . put ( win , MetricUtils . metricSnapshot2Histogram ( snapshot ) ) ; } else { if ( snapshot . get_ts ( ) >= old . get_ts ( ) ) { old . set_ts ( snapshot . get_ts ( ) ) ; Histogram histogram = histograms . get ( meta ) . get ( win ) ; Snapshot updateSnapshot = histogram . getSnapshot ( ) ; if ( updateSnapshot instanceof JAverageSnapshot ) { sumMetricSnapshot ( ( ( JAverageSnapshot ) updateSnapshot ) . getMetricSnapshot ( ) , snapshot ) ; } else { MetricUtils . updateHistogramPoints ( histogram , snapshot . get_points ( ) , snapshot . get_pointSize ( ) ) ; } } } } } updateMetricCounters ( meta , metaCounters ) ; }
histograms are sampled but we just update points
35,749
protected void updateMetricCounters ( String metricName , Map < String , Integer > metricNameCounters ) { if ( metricNameCounters . containsKey ( metricName ) ) { metricNameCounters . put ( metricName , metricNameCounters . get ( metricName ) + 1 ) ; } else { metricNameCounters . put ( metricName , 1 ) ; } }
computes occurrences of specified metric name
35,750
private void mergeCounters ( Map < String , Map < Integer , MetricSnapshot > > newCounters , Map < String , Map < Integer , MetricSnapshot > > oldCounters ) { for ( Map . Entry < String , Map < Integer , MetricSnapshot > > entry : newCounters . entrySet ( ) ) { String metricName = entry . getKey ( ) ; Map < Integer , MetricSnapshot > snapshots = entry . getValue ( ) ; Map < Integer , MetricSnapshot > oldSnapshots = oldCounters . get ( metricName ) ; if ( oldSnapshots != null && oldSnapshots . size ( ) > 0 ) { for ( Map . Entry < Integer , MetricSnapshot > snapshotEntry : snapshots . entrySet ( ) ) { Integer win = snapshotEntry . getKey ( ) ; MetricSnapshot snapshot = snapshotEntry . getValue ( ) ; MetricSnapshot oldSnapshot = oldSnapshots . get ( win ) ; if ( oldSnapshot != null ) { snapshot . set_longValue ( snapshot . get_longValue ( ) + oldSnapshot . get_longValue ( ) ) ; } } } } }
sum old counter snapshots and new counter snapshots sums are stored in new snapshots .
35,751
private void track ( Event < T > windowEvent ) { evictionPolicy . track ( windowEvent ) ; triggerPolicy . track ( windowEvent ) ; }
feed the event to the eviction and trigger policies for bookkeeping and optionally firing the trigger .
35,752
private List < Event < T > > scanEvents ( boolean fullScan ) { LOG . debug ( "Scan events, eviction policy {}" , evictionPolicy ) ; List < T > eventsToExpire = new ArrayList < > ( ) ; List < Event < T > > eventsToProcess = new ArrayList < > ( ) ; try { lock . lock ( ) ; Iterator < Event < T > > it = queue . iterator ( ) ; while ( it . hasNext ( ) ) { Event < T > windowEvent = it . next ( ) ; Action action = evictionPolicy . evict ( windowEvent ) ; if ( action == Action . EXPIRE ) { eventsToExpire . add ( windowEvent . get ( ) ) ; it . remove ( ) ; } else if ( ! fullScan || action == Action . STOP ) { break ; } else if ( action == Action . PROCESS ) { eventsToProcess . add ( windowEvent ) ; } } expiredEvents . addAll ( eventsToExpire ) ; } finally { lock . unlock ( ) ; } eventsSinceLastExpiry . set ( 0 ) ; LOG . debug ( "[{}] events expired from window." , eventsToExpire . size ( ) ) ; if ( ! eventsToExpire . isEmpty ( ) ) { LOG . debug ( "invoking windowLifecycleListener.onExpiry" ) ; windowLifecycleListener . onExpiry ( eventsToExpire ) ; } return eventsToProcess ; }
Scan events in the queue using the expiration policy to check if the event should be evicted or not .
35,753
public long getEarliestEventTs ( long startTs , long endTs ) { long minTs = Long . MAX_VALUE ; for ( Event < T > event : queue ) { if ( event . getTimestamp ( ) > startTs && event . getTimestamp ( ) <= endTs ) { minTs = Math . min ( minTs , event . getTimestamp ( ) ) ; } } return minTs ; }
Scans the event queue and returns the next earliest event ts between the startTs and endTs
35,754
public int getEventCount ( long referenceTime ) { int count = 0 ; for ( Event < T > event : queue ) { if ( event . getTimestamp ( ) <= referenceTime ) { ++ count ; } } return count ; }
Scans the event queue and returns number of events having timestamp less than or equal to the reference time .
35,755
private void mkRefreshConfThread ( final NimbusData nimbusData ) { nimbusData . getScheduExec ( ) . scheduleAtFixedRate ( new RunnableCallback ( ) { public void run ( ) { LOG . debug ( "checking changes in storm.yaml..." ) ; Map newConf = Utils . readStormConfig ( ) ; if ( Utils . isConfigChanged ( nimbusData . getConf ( ) , newConf ) ) { LOG . warn ( "detected changes in storm.yaml, updating..." ) ; synchronized ( nimbusData . getConf ( ) ) { nimbusData . getConf ( ) . clear ( ) ; nimbusData . getConf ( ) . putAll ( newConf ) ; } RefreshableComponents . refresh ( newConf ) ; } else { LOG . debug ( "no changes detected, stay put." ) ; } } public Object getResult ( ) { return 15 ; } } , 15 , 15 , TimeUnit . SECONDS ) ; LOG . info ( "Successfully init configuration refresh thread" ) ; }
handle manual conf changes check every 15 sec
35,756
public static List < HostAndPort > splitToHostsAndPorts ( String hostPortQuorumList ) { String [ ] strings = StringUtils . getStrings ( hostPortQuorumList ) ; int len = 0 ; if ( strings != null ) { len = strings . length ; } List < HostAndPort > list = new ArrayList < HostAndPort > ( len ) ; if ( strings != null ) { for ( String s : strings ) { list . add ( HostAndPort . fromString ( s . trim ( ) ) . withDefaultPort ( DEFAULT_PORT ) ) ; } } return list ; }
Split a quorum list into a list of hostnames and ports
35,757
public static String buildHostsOnlyList ( List < HostAndPort > hostAndPorts ) { StringBuilder sb = new StringBuilder ( ) ; for ( HostAndPort hostAndPort : hostAndPorts ) { sb . append ( hostAndPort . getHostText ( ) ) . append ( "," ) ; } if ( sb . length ( ) > 0 ) { sb . delete ( sb . length ( ) - 1 , sb . length ( ) ) ; } return sb . toString ( ) ; }
Build up to a hosts only list
35,758
public static Config buildConfig ( TopologyDef topologyDef ) { Config conf = new Config ( ) ; conf . putAll ( topologyDef . getConfig ( ) ) ; return conf ; }
Given a topology definition return a populated org . apache . storm . Config instance .
35,759
public static StormTopology buildTopology ( ExecutionContext context ) throws IllegalAccessException , InstantiationException , ClassNotFoundException , NoSuchMethodException , InvocationTargetException { StormTopology topology = null ; TopologyDef topologyDef = context . getTopologyDef ( ) ; if ( ! topologyDef . validate ( ) ) { throw new IllegalArgumentException ( "Invalid topology config. Spouts, bolts and streams cannot be " + "defined in the same configuration as a topologySource." ) ; } buildComponents ( context ) ; if ( topologyDef . isDslTopology ( ) ) { LOG . info ( "Detected DSL topology..." ) ; TopologyBuilder builder = new TopologyBuilder ( ) ; buildSpouts ( context , builder ) ; buildBolts ( context ) ; buildStreamDefinitions ( context , builder ) ; topology = builder . createTopology ( ) ; } else { LOG . info ( "A topology source has been specified..." ) ; ObjectDef def = topologyDef . getTopologySource ( ) ; topology = buildExternalTopology ( def , context ) ; } return topology ; }
Given a topology definition return a Storm topology that can be run either locally or remotely .
35,760
private static void buildComponents ( ExecutionContext context ) throws ClassNotFoundException , NoSuchMethodException , IllegalAccessException , InvocationTargetException , InstantiationException { Collection < BeanDef > cDefs = context . getTopologyDef ( ) . getComponents ( ) ; if ( cDefs != null ) { for ( BeanDef bean : cDefs ) { Object obj = buildObject ( bean , context ) ; context . addComponent ( bean . getId ( ) , obj ) ; } } }
Given a topology definition resolve and instantiate all components found and return a map keyed by the component id .
35,761
private static IRichSpout buildSpout ( SpoutDef def , ExecutionContext context ) throws ClassNotFoundException , IllegalAccessException , InstantiationException , NoSuchMethodException , InvocationTargetException { return ( IRichSpout ) buildObject ( def , context ) ; }
Given a spout definition return a Storm spout implementation by attempting to find a matching constructor in the given spout class . Perform list to array conversion as necessary .
35,762
private static void buildBolts ( ExecutionContext context ) throws ClassNotFoundException , IllegalAccessException , InstantiationException , NoSuchMethodException , InvocationTargetException { for ( BoltDef def : context . getTopologyDef ( ) . getBolts ( ) ) { Class clazz = Class . forName ( def . getClassName ( ) ) ; Object bolt = buildObject ( def , context ) ; context . addBolt ( def . getId ( ) , bolt ) ; } }
Given a list of bolt definitions build a map of Storm bolts with the bolt definition id as the key . Attempt to coerce the given constructor arguments to a matching bolt constructor as much as possible .
35,763
private static Map mapifySerializations ( List sers ) { Map rtn = new HashMap ( ) ; if ( sers != null ) { int size = sers . size ( ) ; for ( int i = 0 ; i < size ; i ++ ) { if ( sers . get ( i ) instanceof Map ) { rtn . putAll ( ( Map ) sers . get ( i ) ) ; } else { rtn . put ( sers . get ( i ) , null ) ; } } } return rtn ; }
add custom KRYO serialization
35,764
public static StormTopology normalizeTopology ( Map stormConf , StormTopology topology , boolean fromConf ) { StormTopology ret = topology . deepCopy ( ) ; Map < String , Object > rawComponents = ThriftTopologyUtils . getComponents ( topology ) ; Map < String , Object > components = ThriftTopologyUtils . getComponents ( ret ) ; if ( ! rawComponents . keySet ( ) . equals ( components . keySet ( ) ) ) { String errMsg = "Failed to normalize topology binary!" ; LOG . info ( errMsg + " raw components:" + rawComponents . keySet ( ) + ", normalized " + components . keySet ( ) ) ; throw new InvalidParameterException ( errMsg ) ; } for ( Entry < String , Object > entry : components . entrySet ( ) ) { Object component = entry . getValue ( ) ; String componentName = entry . getKey ( ) ; ComponentCommon common = null ; if ( component instanceof Bolt ) { common = ( ( Bolt ) component ) . get_common ( ) ; if ( fromConf ) { Integer paraNum = ConfigExtension . getBoltParallelism ( stormConf , componentName ) ; if ( paraNum != null ) { LOG . info ( "Set " + componentName + " as " + paraNum ) ; common . set_parallelism_hint ( paraNum ) ; } } } if ( component instanceof SpoutSpec ) { common = ( ( SpoutSpec ) component ) . get_common ( ) ; if ( fromConf ) { Integer paraNum = ConfigExtension . getSpoutParallelism ( stormConf , componentName ) ; if ( paraNum != null ) { LOG . info ( "Set " + componentName + " as " + paraNum ) ; common . set_parallelism_hint ( paraNum ) ; } } } if ( component instanceof StateSpoutSpec ) { common = ( ( StateSpoutSpec ) component ) . get_common ( ) ; if ( fromConf ) { Integer paraNum = ConfigExtension . getSpoutParallelism ( stormConf , componentName ) ; if ( paraNum != null ) { LOG . info ( "Set " + componentName + " as " + paraNum ) ; common . set_parallelism_hint ( paraNum ) ; } } } Map componentMap = new HashMap ( ) ; String jsonConfString = common . get_json_conf ( ) ; if ( jsonConfString != null ) { componentMap . putAll ( ( Map ) JStormUtils . from_json ( jsonConfString ) ) ; } Integer taskNum = componentParalism ( stormConf , common ) ; componentMap . put ( Config . TOPOLOGY_TASKS , taskNum ) ; common . set_parallelism_hint ( taskNum ) ; LOG . info ( "Set " + componentName + " parallelism " + taskNum ) ; common . set_json_conf ( JStormUtils . to_json ( componentMap ) ) ; } return ret ; }
finalize component s task parallelism
35,765
public static void cleanupCorruptTopologies ( NimbusData data ) throws Exception { BlobStore blobStore = data . getBlobStore ( ) ; Set < String > code_ids = Sets . newHashSet ( BlobStoreUtils . code_ids ( blobStore . listKeys ( ) ) ) ; Set < String > active_ids = Sets . newHashSet ( data . getStormClusterState ( ) . active_storms ( ) ) ; Set < String > blobsIdsOnZk = Sets . newHashSet ( data . getStormClusterState ( ) . blobstore ( null ) ) ; Set < String > topologyIdsOnZkbyBlobs = BlobStoreUtils . code_ids ( blobsIdsOnZk . iterator ( ) ) ; Set < String > corrupt_ids = Sets . difference ( active_ids , code_ids ) ; Set < String > redundantIds = Sets . difference ( topologyIdsOnZkbyBlobs , code_ids ) ; Set < String > unionIds = Sets . union ( corrupt_ids , redundantIds ) ; for ( String corrupt : unionIds ) { LOG . info ( "Corrupt topology {} has state on zookeeper but doesn't have a local dir on nimbus. Cleaning up..." , corrupt ) ; try { cleanupTopology ( data , corrupt ) ; } catch ( Exception e ) { LOG . warn ( "Failed to cleanup topology {}, {}" , corrupt , e ) ; } } LOG . info ( "Successfully cleaned up all old topologies" ) ; }
clean the topology which is in ZK but not in local dir
35,766
public Set < String > getGroups ( String user ) throws IOException { if ( cachedGroups . containsKey ( user ) ) { return cachedGroups . get ( user ) ; } Set < String > groups = getUnixGroups ( user ) ; if ( ! groups . isEmpty ( ) ) cachedGroups . put ( user , groups ) ; return groups ; }
Returns list of groups for a user
35,767
private static Set < String > getUnixGroups ( final String user ) throws IOException { String result = "" ; try { result = ShellUtils . execCommand ( ShellUtils . getGroupsForUserCommand ( user ) ) ; } catch ( ExitCodeException e ) { LOG . warn ( "got exception trying to get groups for user " + user , e ) ; return new HashSet < String > ( ) ; } StringTokenizer tokenizer = new StringTokenizer ( result , ShellUtils . TOKEN_SEPARATOR_REGEX ) ; Set < String > groups = new HashSet < String > ( ) ; while ( tokenizer . hasMoreTokens ( ) ) { groups . add ( tokenizer . nextToken ( ) ) ; } return groups ; }
Get the current user s group list from Unix by running the command groups NOTE . For non - existing user it will return EMPTY list
35,768
public Map < String , Object > toMap ( ) { Map < String , Object > ret = new HashMap < > ( ) ; ret . put ( MetricUploader . METRIC_TIME , timestamp ) ; ret . put ( MetricUploader . METRIC_TYPE , type ) ; return ret ; }
metrics report time
35,769
public void update ( Number obj ) { if ( enable == false ) { return ; } if ( intervalCheck . check ( ) ) { flush ( ) ; } synchronized ( this ) { unflushed = updater . update ( obj , unflushed ) ; } }
In order to improve performance Do
35,770
public void onEvent ( Object event , long sequence , boolean endOfBatch ) throws Exception { if ( event == null ) { return ; } handleEvent ( event , endOfBatch ) ; }
This function need to be implements
35,771
public void refreshTopologies ( ) { TimeTicker ticker = new TimeTicker ( TimeUnit . MILLISECONDS , true ) ; try { doRefreshTopologies ( ) ; LOG . debug ( "Refresh topologies, cost:{}" , ticker . stopAndRestart ( ) ) ; if ( ! context . getNimbusData ( ) . isLeader ( ) ) { syncTopologyMetaForFollower ( ) ; LOG . debug ( "Sync topology meta, cost:{}" , ticker . stop ( ) ) ; } } catch ( Exception ex ) { LOG . error ( "handleRefreshEvent error:" , ex ) ; } }
refresh metric settings of topologies & metric meta
35,772
private void syncMetaFromCache ( String topology , TopologyMetricContext tmContext ) { if ( ! tmContext . syncMeta ( ) ) { Map < String , Long > meta = context . getMetricCache ( ) . getMeta ( topology ) ; if ( meta != null ) { tmContext . getMemMeta ( ) . putAll ( meta ) ; } tmContext . setSyncMeta ( true ) ; } }
sync metric meta from rocks db into mem cache on startup
35,773
private void syncSysMetaFromRemote ( ) { for ( String topology : JStormMetrics . SYS_TOPOLOGIES ) { if ( context . getTopologyMetricContexts ( ) . containsKey ( topology ) ) { syncMetaFromRemote ( topology , context . getTopologyMetricContexts ( ) . get ( topology ) , Lists . newArrayList ( MetaType . TOPOLOGY , MetaType . WORKER , MetaType . NIMBUS ) ) ; } } }
sync sys topologies from remote because we want to keep all historic metric data thus metric id cannot be changed .
35,774
private void tryHostLock ( String hostPath ) throws Exception { if ( registryOperations . exists ( hostPath ) ) { try { ServiceRecord host = registryOperations . resolve ( hostPath ) ; Long cTime = Long . parseLong ( host . get ( JOYConstants . CTIME , JOYConstants . DEFAULT_CTIME ) ) ; Date now = new Date ( ) ; if ( now . getTime ( ) - cTime > JOYConstants . HOST_LOCK_TIMEOUT || cTime > now . getTime ( ) ) registryOperations . delete ( hostPath , true ) ; } catch ( Exception ex ) { LOG . error ( ex ) ; } } int failedCount = JOYConstants . RETRY_TIMES ; while ( ! registryOperations . mknode ( hostPath , true ) ) { Thread . sleep ( JOYConstants . SLEEP_INTERVAL ) ; failedCount -- ; if ( failedCount <= 0 ) break ; } if ( failedCount > 0 ) { ServiceRecord sr = new ServiceRecord ( ) ; Date date = new Date ( ) ; date . getTime ( ) ; sr . set ( JOYConstants . CTIME , String . valueOf ( date . getTime ( ) ) ) ; registryOperations . bind ( hostPath , sr , BindFlags . OVERWRITE ) ; return ; } throw new Exception ( "can't get host lock" ) ; }
see if anyone is updating host s port list if not start update this host itself timeout is 45 seconds
35,775
public static List < TaskEntity > getTaskEntities ( TopologyInfo topologyInfo ) { Map < Integer , TaskEntity > tasks = new HashMap < > ( ) ; for ( TaskSummary ts : topologyInfo . get_tasks ( ) ) { tasks . put ( ts . get_taskId ( ) , new TaskEntity ( ts ) ) ; } for ( ComponentSummary cs : topologyInfo . get_components ( ) ) { String compName = cs . get_name ( ) ; String type = cs . get_type ( ) ; for ( int id : cs . get_taskIds ( ) ) { if ( tasks . containsKey ( id ) ) { tasks . get ( id ) . setComponent ( compName ) ; tasks . get ( id ) . setType ( type ) ; } else { LOG . debug ( "missing task id:{}" , id ) ; } } } return new ArrayList < > ( tasks . values ( ) ) ; }
get all task entities in the specific topology
35,776
public static List < TaskEntity > getTaskEntities ( TopologyInfo topologyInfo , String componentName ) { TreeMap < Integer , TaskEntity > tasks = new TreeMap < > ( ) ; for ( ComponentSummary cs : topologyInfo . get_components ( ) ) { String compName = cs . get_name ( ) ; String type = cs . get_type ( ) ; if ( componentName . equals ( compName ) ) { for ( int id : cs . get_taskIds ( ) ) { tasks . put ( id , new TaskEntity ( id , compName , type ) ) ; } } } for ( TaskSummary ts : topologyInfo . get_tasks ( ) ) { if ( tasks . containsKey ( ts . get_taskId ( ) ) ) { TaskEntity te = tasks . get ( ts . get_taskId ( ) ) ; te . setHost ( ts . get_host ( ) ) ; te . setPort ( ts . get_port ( ) ) ; te . setStatus ( ts . get_status ( ) ) ; te . setUptime ( ts . get_uptime ( ) ) ; te . setErrors ( ts . get_errors ( ) ) ; } } return new ArrayList < > ( tasks . values ( ) ) ; }
get the task entities in the specific component
35,777
public static TaskEntity getTaskEntity ( List < TaskSummary > tasks , int taskId ) { TaskEntity entity = null ; for ( TaskSummary task : tasks ) { if ( task . get_taskId ( ) == taskId ) { entity = new TaskEntity ( task ) ; break ; } } return entity ; }
get the specific task entity
35,778
public static Map resetZKConfig ( Map conf , String clusterName ) { ClusterConfig nimbus = clusterConfig . get ( clusterName ) ; if ( nimbus == null ) return conf ; conf . put ( Config . STORM_ZOOKEEPER_ROOT , nimbus . getZkRoot ( ) ) ; conf . put ( Config . STORM_ZOOKEEPER_SERVERS , nimbus . getZkServers ( ) ) ; conf . put ( Config . STORM_ZOOKEEPER_PORT , nimbus . getZkPort ( ) ) ; return conf ; }
to get nimbus client we should reset ZK config
35,779
public static String prettyUptime ( int secs ) { String [ ] [ ] PRETTYSECDIVIDERS = { new String [ ] { "s" , "60" } , new String [ ] { "m" , "60" } , new String [ ] { "h" , "24" } , new String [ ] { "d" , null } } ; int diversize = PRETTYSECDIVIDERS . length ; LinkedList < String > tmp = new LinkedList < > ( ) ; int div = secs ; for ( int i = 0 ; i < diversize ; i ++ ) { if ( PRETTYSECDIVIDERS [ i ] [ 1 ] != null ) { Integer d = Integer . parseInt ( PRETTYSECDIVIDERS [ i ] [ 1 ] ) ; tmp . addFirst ( div % d + PRETTYSECDIVIDERS [ i ] [ 0 ] ) ; div = div / d ; } else { tmp . addFirst ( div + PRETTYSECDIVIDERS [ i ] [ 0 ] ) ; } if ( div <= 0 ) break ; } Joiner joiner = Joiner . on ( " " ) ; return joiner . join ( tmp ) ; }
seconds to string like 30m 40s and 1d 20h 30m 40s
35,780
public static Long parseLong ( String s , long defaultValue ) { try { Long value = Long . parseLong ( s ) ; return value ; } catch ( NumberFormatException e ) { } return defaultValue ; }
return the default value instead of throw an exception
35,781
private static void fillValue2Node ( List < MetricInfo > componentMetrics , Map < String , TopologyNode > nodes ) { String NODE_DIM = MetricDef . EMMITTED_NUM ; List < String > FILTER = Arrays . asList ( MetricDef . EMMITTED_NUM , MetricDef . SEND_TPS , MetricDef . RECV_TPS ) ; for ( MetricInfo info : componentMetrics ) { if ( info == null ) continue ; for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric : info . get_metrics ( ) . entrySet ( ) ) { String name = metric . getKey ( ) ; String [ ] split_name = name . split ( "@" ) ; String metricName = UIMetricUtils . extractMetricName ( split_name ) ; String compName = UIMetricUtils . extractComponentName ( split_name ) ; TopologyNode node = nodes . get ( compName ) ; if ( node != null && FILTER . contains ( metricName ) ) { for ( Map . Entry < Integer , MetricSnapshot > winData : metric . getValue ( ) . entrySet ( ) ) { node . putMapValue ( metricName , winData . getKey ( ) , UIMetricUtils . getMetricValue ( winData . getValue ( ) ) ) ; } } if ( metricName == null || ! metricName . equals ( NODE_DIM ) ) { continue ; } MetricSnapshot snapshot = metric . getValue ( ) . get ( AsmWindow . M1_WINDOW ) ; if ( node != null ) { node . setValue ( snapshot . get_longValue ( ) ) ; nodes . get ( compName ) . setTitle ( "Emitted: " + UIMetricUtils . getMetricValue ( snapshot ) ) ; } } } }
fill emitted num to nodes
35,782
private static void fillTLCValue2Edge ( List < MetricInfo > componentMetrics , Map < String , TopologyEdge > edges ) { String EDGE_DIM = "." + MetricDef . TUPLE_LIEF_CYCLE ; for ( MetricInfo info : componentMetrics ) { if ( info == null ) continue ; for ( Map . Entry < String , Map < Integer , MetricSnapshot > > metric : info . get_metrics ( ) . entrySet ( ) ) { String name = metric . getKey ( ) ; String [ ] split_name = name . split ( "@" ) ; String metricName = UIMetricUtils . extractMetricName ( split_name ) ; if ( metricName == null || ! metricName . contains ( EDGE_DIM ) ) { continue ; } String componentId = UIMetricUtils . extractComponentName ( split_name ) ; String src = metricName . split ( "\\." ) [ 0 ] ; String key = src + ":" + componentId ; MetricSnapshot snapshot = metric . getValue ( ) . get ( AsmWindow . M1_WINDOW ) ; TopologyEdge edge = edges . get ( key ) ; if ( edge != null ) { double value = snapshot . get_mean ( ) / 1000 ; edge . setCycleValue ( value ) ; edge . appendTitle ( "TupleLifeCycle: " + UIMetricUtils . format . format ( value ) + "ms" ) ; for ( Map . Entry < Integer , MetricSnapshot > winData : metric . getValue ( ) . entrySet ( ) ) { double v = winData . getValue ( ) . get_mean ( ) / 1000 ; edge . putMapValue ( MetricDef . TUPLE_LIEF_CYCLE + "(ms)" , winData . getKey ( ) , UIMetricUtils . format . format ( v ) ) ; } } } } }
fill tuple life cycle time to edges
35,783
public BoltDeclarer setBolt ( String id , BaseWindowedBolt < Tuple > bolt , Number parallelism_hint ) throws IllegalArgumentException { boolean isEventTime = WindowAssigner . isEventTime ( bolt . getWindowAssigner ( ) ) ; if ( isEventTime && bolt . getTimestampExtractor ( ) == null ) { throw new IllegalArgumentException ( "timestamp extractor must be defined in event time!" ) ; } return setBolt ( id , new WindowedBoltExecutor ( bolt ) , parallelism_hint ) ; }
Define a new bolt in this topology . This defines a windowed bolt intended for windowing operations .
35,784
public SpoutDeclarer setSpout ( String id , IRichSpout spout , Number parallelism_hint ) throws IllegalArgumentException { validateUnusedId ( id ) ; initCommon ( id , spout , parallelism_hint ) ; _spouts . put ( id , spout ) ; return new SpoutGetter ( id ) ; }
Define a new spout in this topology with the specified parallelism . If the spout declares itself as non - distributed the parallelism_hint will be ignored and only one task will be allocated to this component .
35,785
public SpoutDeclarer setSpout ( String id , IControlSpout spout ) { return setSpout ( id , spout , null ) ; }
Define a new bolt in this topology . This defines a control spout which is a simpler to use but more restricted kind of bolt . Control spouts are intended for making sending control message more simply
35,786
public BoltDeclarer setBolt ( String id , IControlBolt bolt , Number parallelism_hint ) { return setBolt ( id , new ControlBoltExecutor ( bolt ) , parallelism_hint ) ; }
Define a new bolt in this topology . This defines a control bolt which is a simpler to use but more restricted kind of bolt . Control bolts are intended for making sending control message more simply
35,787
public void addWorkerHook ( IWorkerHook workerHook ) { if ( null == workerHook ) { throw new IllegalArgumentException ( "WorkerHook must not be null." ) ; } _workerHooks . add ( ByteBuffer . wrap ( Utils . javaSerialize ( workerHook ) ) ) ; }
Add a new worker lifecycle hook
35,788
private void maybeAddWatermarkInputs ( ComponentCommon common , IRichBolt bolt ) { if ( bolt instanceof WindowedBoltExecutor ) { Set < String > comps = new HashSet < > ( ) ; for ( GlobalStreamId globalStreamId : common . get_inputs ( ) . keySet ( ) ) { comps . add ( globalStreamId . get_componentId ( ) ) ; } for ( String comp : comps ) { common . put_to_inputs ( new GlobalStreamId ( comp , Common . WATERMARK_STREAM_ID ) , Grouping . all ( new NullStruct ( ) ) ) ; } } }
Add watermark stream to source components of window bolts
35,789
public static String getShowTimeStr ( Integer time ) { if ( time == null ) { return MINUTE_WINDOW_STR ; } else if ( time . equals ( MINUTE_WINDOW ) ) { return MINUTE_WINDOW_STR ; } else if ( time . equals ( HOUR_WINDOW ) ) { return HOUR_WINDOW_STR ; } else if ( time . equals ( DAY_WINDOW ) ) { return DAY_WINDOW_STR ; } else if ( time . equals ( ALL_TIME_WINDOW ) ) { return ALL_WINDOW_STR ; } else { return MINUTE_WINDOW_STR ; } }
Default is the latest result
35,790
public static String prettyUptimeStr ( int secs ) { int diversize = PRETTYSECDIVIDERS . length ; List < String > tmp = new ArrayList < String > ( ) ; int div = secs ; for ( int i = 0 ; i < diversize ; i ++ ) { if ( PRETTYSECDIVIDERS [ i ] [ 1 ] != null ) { Integer d = Integer . parseInt ( PRETTYSECDIVIDERS [ i ] [ 1 ] ) ; tmp . add ( div % d + PRETTYSECDIVIDERS [ i ] [ 0 ] ) ; div = div / d ; } else { tmp . add ( div + PRETTYSECDIVIDERS [ i ] [ 0 ] ) ; } } String rtn = "" ; int tmpSzie = tmp . size ( ) ; for ( int j = tmpSzie - 1 ; j > - 1 ; j -- ) { rtn += tmp . get ( j ) ; } return rtn ; }
seconds to string like 1d20h30m40s
35,791
public static < T > List < T > getParents ( DirectedGraph g , T n ) { List < IndexedEdge > incoming = new ArrayList ( g . incomingEdgesOf ( n ) ) ; Collections . sort ( incoming ) ; List < T > ret = new ArrayList ( ) ; for ( IndexedEdge e : incoming ) { ret . add ( ( T ) e . source ) ; } return ret ; }
Assumes edge contains an index
35,792
public Set < ResourceWorkerSlot > getKeepAssign ( DefaultTopologyAssignContext defaultContext , Set < Integer > needAssigns ) { Set < Integer > keepAssignIds = new HashSet < > ( ) ; keepAssignIds . addAll ( defaultContext . getAllTaskIds ( ) ) ; keepAssignIds . removeAll ( defaultContext . getUnstoppedTaskIds ( ) ) ; keepAssignIds . removeAll ( needAssigns ) ; Set < ResourceWorkerSlot > keeps = new HashSet < > ( ) ; if ( keepAssignIds . isEmpty ( ) ) { return keeps ; } Assignment oldAssignment = defaultContext . getOldAssignment ( ) ; if ( oldAssignment == null ) { return keeps ; } keeps . addAll ( defaultContext . getOldWorkers ( ) ) ; for ( ResourceWorkerSlot worker : defaultContext . getOldWorkers ( ) ) { for ( Integer task : worker . getTasks ( ) ) { if ( ! keepAssignIds . contains ( task ) ) { keeps . remove ( worker ) ; break ; } } } return keeps ; }
Get the task Map which the task is alive and will be kept only when type is ASSIGN_TYPE_MONITOR it is valid
35,793
public JStormCache putMetricData ( String topologyId , TopologyMetric tpMetric ) { Map < String , Object > batchData = new HashMap < > ( ) ; long ts = System . currentTimeMillis ( ) ; int tp = 0 , comp = 0 , compStream = 0 , task = 0 , stream = 0 , worker = 0 , netty = 0 ; if ( tpMetric . get_componentMetric ( ) . get_metrics_size ( ) > 0 ) { batchData . put ( METRIC_DATA_30M_COMPONENT + topologyId , new Object [ ] { ts , tpMetric . get_componentMetric ( ) } ) ; comp += tpMetric . get_componentMetric ( ) . get_metrics_size ( ) ; } if ( tpMetric . is_set_compStreamMetric ( ) && tpMetric . get_compStreamMetric ( ) . get_metrics_size ( ) > 0 ) { batchData . put ( METRIC_DATA_30M_COMP_STREAM + topologyId , new Object [ ] { ts , tpMetric . get_compStreamMetric ( ) } ) ; compStream += tpMetric . get_compStreamMetric ( ) . get_metrics_size ( ) ; } if ( tpMetric . get_taskMetric ( ) . get_metrics_size ( ) > 0 ) { tryCombineMetricInfo ( METRIC_DATA_30M_TASK + topologyId , tpMetric . get_taskMetric ( ) , MetaType . TASK , ts ) ; task += tpMetric . get_taskMetric ( ) . get_metrics_size ( ) ; } if ( tpMetric . get_streamMetric ( ) . get_metrics_size ( ) > 0 ) { tryCombineMetricInfo ( METRIC_DATA_30M_STREAM + topologyId , tpMetric . get_streamMetric ( ) , MetaType . STREAM , ts ) ; stream += tpMetric . get_streamMetric ( ) . get_metrics_size ( ) ; } if ( tpMetric . get_workerMetric ( ) . get_metrics_size ( ) > 0 ) { tryCombineMetricInfo ( METRIC_DATA_30M_WORKER + topologyId , tpMetric . get_workerMetric ( ) , MetaType . WORKER , ts ) ; worker += tpMetric . get_workerMetric ( ) . get_metrics_size ( ) ; } if ( tpMetric . get_nettyMetric ( ) . get_metrics_size ( ) > 0 ) { tryCombineMetricInfo ( METRIC_DATA_30M_NETTY + topologyId , tpMetric . get_nettyMetric ( ) , MetaType . NETTY , ts ) ; netty += tpMetric . get_nettyMetric ( ) . get_metrics_size ( ) ; } if ( tpMetric . get_topologyMetric ( ) . get_metrics_size ( ) > 0 ) { String keyPrefix = METRIC_DATA_30M_TOPOLOGY + topologyId + "-" ; int page = getRingAvailableIndex ( keyPrefix ) ; batchData . put ( keyPrefix + page , new Object [ ] { ts , tpMetric . get_topologyMetric ( ) } ) ; tp += tpMetric . get_topologyMetric ( ) . get_metrics_size ( ) ; } LOG . info ( "caching metric data for topology:{},tp:{},comp:{},comp_stream:{},task:{},stream:{},worker:{},netty:{},cost:{}" , topologyId , tp , comp , compStream , task , stream , worker , netty , System . currentTimeMillis ( ) - ts ) ; return putBatch ( batchData ) ; }
store 30min metric data . the metric data is stored in a ring .
35,794
public Stream setMemoryLoad ( Number onHeap , Number offHeap ) { _node . setMemoryLoad ( onHeap , offHeap ) ; return this ; }
Sets the Memory Load resources for the current operation .
35,795
public Stream project ( Fields keepFields ) { projectionValidation ( keepFields ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , keepFields , new Fields ( ) , new ProjectedProcessor ( keepFields ) ) ) ; }
Filters out fields from a stream resulting in a Stream containing only the fields specified by keepFields .
35,796
public Stream partitionAggregate ( Fields inputFields , Aggregator agg , Fields functionFields ) { projectionValidation ( inputFields ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , functionFields , functionFields , new AggregateProcessor ( inputFields , agg ) ) ) ; }
creates brand new tuples with brand new fields
35,797
public Stream map ( MapFunction function ) { projectionValidation ( getOutputFields ( ) ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , getOutputFields ( ) , getOutputFields ( ) , new MapProcessor ( getOutputFields ( ) , new MapFunctionExecutor ( function ) ) ) ) ; }
Returns a stream consisting of the result of applying the given mapping function to the values of this stream .
35,798
public Stream flatMap ( FlatMapFunction function ) { projectionValidation ( getOutputFields ( ) ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , getOutputFields ( ) , getOutputFields ( ) , new MapProcessor ( getOutputFields ( ) , new FlatMapFunctionExecutor ( function ) ) ) ) ; }
Returns a stream consisting of the results of replacing each value of this stream with the contents produced by applying the provided mapping function to each value . This has the effect of applying a one - to - many transformation to the values of the stream and then flattening the resulting elements into a new stream .
35,799
public Stream peek ( Consumer action ) { projectionValidation ( getOutputFields ( ) ) ; return _topology . addSourcedNode ( this , new ProcessorNode ( _topology . getUniqueStreamId ( ) , _name , getOutputFields ( ) , getOutputFields ( ) , new MapProcessor ( getOutputFields ( ) , new ConsumerExecutor ( action ) ) ) ) ; }
Returns a stream consisting of the trident tuples of this stream additionally performing the provided action on each trident tuple as they are consumed from the resulting stream . This is mostly useful for debugging to see the tuples as they flow past a certain point in a pipeline .