idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
35,500
public Emoji getEmoji ( String unicode ) { Node tree = root ; for ( char c : unicode . toCharArray ( ) ) { if ( ! tree . hasChild ( c ) ) { return null ; } tree = tree . getChild ( c ) ; } return tree . getEmoji ( ) ; }
Finds Emoji instance from emoji unicode
35,501
private String stringJoin ( String [ ] array , int count ) { String joined = "" ; for ( int i = 0 ; i < count ; i ++ ) joined += array [ i ] ; return joined ; }
Method to replace String . join since it was only introduced in java8
35,502
public static boolean isEmoji ( String string ) { if ( string == null ) return false ; EmojiParser . UnicodeCandidate unicodeCandidate = EmojiParser . getNextUnicodeCandidate ( string . toCharArray ( ) , 0 ) ; return unicodeCandidate != null && unicodeCandidate . getEmojiStartIndex ( ) == 0 && unicodeCandidate . getFitzpatrickEndIndex ( ) == string . length ( ) ; }
Tests if a given String is an emoji .
35,503
public BaseWindowedBolt withWindow ( Count windowLength , Duration slidingInterval ) { return withWindowLength ( windowLength ) . withSlidingInterval ( slidingInterval ) ; }
Tuple count and time duration based sliding window configuration .
35,504
public Object getStateOrCreate ( long txid , StateInitializer init ) { if ( _curr . containsKey ( txid ) ) { return _curr . get ( txid ) ; } else { getState ( txid , init ) ; return null ; } }
Returns null if it was created the value otherwise .
35,505
public CheckPointState nextState ( boolean recovering ) { CheckPointState nextState ; switch ( state ) { case PREPARING : nextState = recovering ? new CheckPointState ( txid - 1 , State . COMMITTED ) : new CheckPointState ( txid , State . COMMITTING ) ; break ; case COMMITTING : nextState = new CheckPointState ( txid , State . COMMITTED ) ; break ; case COMMITTED : nextState = recovering ? this : new CheckPointState ( txid + 1 , State . PREPARING ) ; break ; default : throw new IllegalStateException ( "Unknown state " + state ) ; } return nextState ; }
Get the next state based on this checkpoint state .
35,506
public Action nextAction ( boolean recovering ) { Action action ; switch ( state ) { case PREPARING : action = recovering ? Action . ROLLBACK : Action . PREPARE ; break ; case COMMITTING : action = Action . COMMIT ; break ; case COMMITTED : action = recovering ? Action . INITSTATE : Action . PREPARE ; break ; default : throw new IllegalStateException ( "Unknown state " + state ) ; } return action ; }
Get the next action to perform based on this checkpoint state .
35,507
public void updateKryoSerializer ( ) { WorkerTopologyContext workerTopologyContext = contextMaker . makeWorkerTopologyContext ( sysTopology ) ; KryoTupleDeserializer kryoTupleDeserializer = new KryoTupleDeserializer ( stormConf , workerTopologyContext , workerTopologyContext . getRawTopology ( ) ) ; KryoTupleSerializer kryoTupleSerializer = new KryoTupleSerializer ( stormConf , workerTopologyContext . getRawTopology ( ) ) ; atomKryoDeserializer . getAndSet ( kryoTupleDeserializer ) ; atomKryoSerializer . getAndSet ( kryoTupleSerializer ) ; }
create kryo serializer
35,508
public void rebalanceMqList ( ) throws Exception { LOG . info ( "Begin to do rebalance operation" ) ; Set < MessageQueue > newMqs = getMQ ( ) ; Set < MessageQueue > oldMqs = currentOffsets . keySet ( ) ; if ( oldMqs . equals ( newMqs ) == true ) { LOG . info ( "No change of meta queues " + newMqs ) ; return ; } Set < MessageQueue > removeMqs = new HashSet < MessageQueue > ( ) ; removeMqs . addAll ( oldMqs ) ; removeMqs . removeAll ( newMqs ) ; Set < MessageQueue > addMqs = new HashSet < MessageQueue > ( ) ; addMqs . addAll ( newMqs ) ; addMqs . removeAll ( oldMqs ) ; LOG . info ( "Remove " + removeMqs ) ; for ( MessageQueue mq : removeMqs ) { Long offset = frontOffsets . remove ( mq ) ; updateOffsetToZk ( mq , offset ) ; backendOffset . remove ( mq ) ; } LOG . info ( "Add " + addMqs ) ; for ( MessageQueue mq : addMqs ) { long offset = getOffsetFromZk ( mq ) ; frontOffsets . put ( mq , offset ) ; backendOffset . put ( mq , offset ) ; } }
rebalanceMqList must run after commit
35,509
public void addAllBolts ( List < BoltDef > bolts , boolean override ) { for ( BoltDef bolt : bolts ) { String id = bolt . getId ( ) ; if ( this . boltMap . get ( id ) == null || override ) { this . boltMap . put ( bolt . getId ( ) , bolt ) ; } else { LOG . warn ( "Ignoring attempt to create bolt '{}' with override == false." , id ) ; } } }
used by includes implementation
35,510
public TriggerResult onElement ( Object element , long timestamp , TimeWindow window , TriggerContext ctx ) { ctx . registerEventTimeTimer ( window . getEnd ( ) + ctx . getMaxLagMs ( ) , window ) ; return TriggerResult . CONTINUE ; }
If a watermark arrives we need to check all pending windows . If any of the pending window suffices we should fire immediately by registering a timer without delay . Otherwise we register a timer whose time is the window end plus max lag time
35,511
public static String drpcPids ( Map conf ) throws IOException { String ret = drpc_local_dir ( conf ) + FILE_SEPERATEOR + "pids" ; try { FileUtils . forceMkdir ( new File ( ret ) ) ; } catch ( IOException e ) { LOG . error ( "Failed to create dir " + ret , e ) ; throw e ; } return ret ; }
Return drpc s pid dir
35,512
public static Map read_supervisor_topology_conf ( Map conf , String topologyId ) throws IOException { String topologyRoot = StormConfig . supervisor_stormdist_root ( conf , topologyId ) ; String confPath = StormConfig . stormconf_path ( topologyRoot ) ; return ( Map ) readLocalObject ( topologyId , confPath ) ; }
merge storm conf into cluster conf
35,513
private int getNodesForIsolatedTop ( TopologyDetails td , Set < Node > allNodes , NodePool [ ] lesserPools , int nodesRequested ) { String topId = td . getId ( ) ; LOG . debug ( "Topology {} is isolated" , topId ) ; int nodesFromUsAvailable = nodesAvailable ( ) ; int nodesFromOthersAvailable = NodePool . nodesAvailable ( lesserPools ) ; int nodesUsed = _topologyIdToNodes . get ( topId ) . size ( ) ; int nodesNeeded = nodesRequested - nodesUsed ; LOG . debug ( "Nodes... requested {} used {} available from us {} " + "avail from other {} needed {}" , new Object [ ] { nodesRequested , nodesUsed , nodesFromUsAvailable , nodesFromOthersAvailable , nodesNeeded } ) ; if ( ( nodesNeeded - nodesFromUsAvailable ) > ( _maxNodes - _usedNodes ) ) { _cluster . setStatus ( topId , "Max Nodes(" + _maxNodes + ") for this user would be exceeded. " + ( ( nodesNeeded - nodesFromUsAvailable ) - ( _maxNodes - _usedNodes ) ) + " more nodes needed to run topology." ) ; return 0 ; } int nodesNeededFromOthers = Math . min ( Math . min ( _maxNodes - _usedNodes , nodesFromOthersAvailable ) , nodesNeeded ) ; int nodesNeededFromUs = nodesNeeded - nodesNeededFromOthers ; LOG . debug ( "Nodes... needed from us {} needed from others {}" , nodesNeededFromUs , nodesNeededFromOthers ) ; if ( nodesNeededFromUs > nodesFromUsAvailable ) { _cluster . setStatus ( topId , "Not Enough Nodes Available to Schedule Topology" ) ; return 0 ; } Collection < Node > found = NodePool . takeNodes ( nodesNeededFromOthers , lesserPools ) ; _usedNodes += found . size ( ) ; allNodes . addAll ( found ) ; Collection < Node > foundMore = takeNodes ( nodesNeededFromUs ) ; _usedNodes += foundMore . size ( ) ; allNodes . addAll ( foundMore ) ; int totalTasks = td . getExecutors ( ) . size ( ) ; int origRequest = td . getNumWorkers ( ) ; int slotsRequested = Math . min ( totalTasks , origRequest ) ; int slotsUsed = Node . countSlotsUsed ( allNodes ) ; int slotsFree = Node . countFreeSlotsAlive ( allNodes ) ; int slotsToUse = Math . min ( slotsRequested - slotsUsed , slotsFree ) ; if ( slotsToUse <= 0 ) { _cluster . setStatus ( topId , "Node has partially crashed, if this situation persists rebalance the topology." ) ; } return slotsToUse ; }
Get the nodes needed to schedule an isolated topology .
35,514
private int getNodesForNotIsolatedTop ( TopologyDetails td , Set < Node > allNodes , NodePool [ ] lesserPools ) { String topId = td . getId ( ) ; LOG . debug ( "Topology {} is not isolated" , topId ) ; int totalTasks = td . getExecutors ( ) . size ( ) ; int origRequest = td . getNumWorkers ( ) ; int slotsRequested = Math . min ( totalTasks , origRequest ) ; int slotsUsed = Node . countSlotsUsed ( topId , allNodes ) ; int slotsFree = Node . countFreeSlotsAlive ( allNodes ) ; int slotsAvailable = 0 ; if ( slotsRequested > slotsFree ) { slotsAvailable = NodePool . slotsAvailable ( lesserPools ) ; } int slotsToUse = Math . min ( slotsRequested - slotsUsed , slotsFree + slotsAvailable ) ; LOG . debug ( "Slots... requested {} used {} free {} available {} to be used {}" , new Object [ ] { slotsRequested , slotsUsed , slotsFree , slotsAvailable , slotsToUse } ) ; if ( slotsToUse <= 0 ) { _cluster . setStatus ( topId , "Not Enough Slots Available to Schedule Topology" ) ; return 0 ; } int slotsNeeded = slotsToUse - slotsFree ; int numNewNodes = NodePool . getNodeCountIfSlotsWereTaken ( slotsNeeded , lesserPools ) ; LOG . debug ( "Nodes... new {} used {} max {}" , new Object [ ] { numNewNodes , _usedNodes , _maxNodes } ) ; if ( ( numNewNodes + _usedNodes ) > _maxNodes ) { _cluster . setStatus ( topId , "Max Nodes(" + _maxNodes + ") for this user would be exceeded. " + ( numNewNodes - ( _maxNodes - _usedNodes ) ) + " more nodes needed to run topology." ) ; return 0 ; } Collection < Node > found = NodePool . takeNodesBySlot ( slotsNeeded , lesserPools ) ; _usedNodes += found . size ( ) ; allNodes . addAll ( found ) ; return slotsToUse ; }
Get the nodes needed to schedule a non - isolated topology .
35,515
boolean tryAdd ( TaskMessage taskMsg ) { if ( ( encodedLength + msgEncodeLength ( taskMsg ) ) > buffer_size ) return false ; add ( taskMsg ) ; return true ; }
try to add a TaskMessage to a batch
35,516
private void writeTaskMessage ( ChannelBufferOutputStream bout , TaskMessage message ) throws Exception { int payload_len = 0 ; if ( message . message ( ) != null ) payload_len = message . message ( ) . length ; short type = message . get_type ( ) ; bout . writeShort ( type ) ; int task_id = message . task ( ) ; if ( task_id > Short . MAX_VALUE ) throw new RuntimeException ( "Task ID should not exceed " + Short . MAX_VALUE ) ; bout . writeShort ( ( short ) task_id ) ; bout . writeInt ( payload_len ) ; if ( payload_len > 0 ) bout . write ( message . message ( ) ) ; }
write a TaskMessage into a stream
35,517
public final AtomicOutputStream createBlob ( String key , SettableBlobMeta meta ) throws KeyAlreadyExistsException { return createBlobToExtend ( key , meta ) ; }
Client facing API to create a blob .
35,518
public final void setBlobMeta ( String key , SettableBlobMeta meta ) throws AuthorizationException , KeyNotFoundException { setBlobMetaToExtend ( key , meta ) ; }
Client facing API to set the metadata for a blob .
35,519
public static boolean downloadMissingBlob ( Map conf , BlobStore blobStore , String key , Set < NimbusInfo > nimbusInfos ) throws TTransportException { NimbusClient client ; ReadableBlobMeta rbm ; ClientBlobStore remoteBlobStore ; InputStreamWithMeta in ; boolean isSuccess = false ; LOG . debug ( "Download blob NimbusInfos {}" , nimbusInfos ) ; for ( NimbusInfo nimbusInfo : nimbusInfos ) { if ( isSuccess ) { break ; } try { client = new NimbusClient ( conf , nimbusInfo . getHost ( ) , nimbusInfo . getPort ( ) , null ) ; rbm = client . getClient ( ) . getBlobMeta ( key ) ; remoteBlobStore = new NimbusBlobStore ( ) ; remoteBlobStore . setClient ( conf , client ) ; in = remoteBlobStore . getBlob ( key ) ; blobStore . createBlob ( key , in , rbm . get_settable ( ) ) ; Iterator < String > keyIterator = blobStore . listKeys ( ) ; while ( keyIterator . hasNext ( ) ) { if ( keyIterator . next ( ) . equals ( key ) ) { LOG . debug ( "Success creating key, {}" , key ) ; isSuccess = true ; break ; } } } catch ( IOException exception ) { throw new RuntimeException ( exception ) ; } catch ( KeyAlreadyExistsException kae ) { LOG . info ( "KeyAlreadyExistsException Key: {} {}" , key , kae ) ; } catch ( KeyNotFoundException knf ) { LOG . info ( "KeyNotFoundException Key: {} {}" , key , knf ) ; } catch ( Exception exp ) { LOG . error ( "Exception " , exp ) ; } } if ( ! isSuccess ) { LOG . error ( "Could not download blob with key " + key ) ; } return isSuccess ; }
Download missing blobs from potential nimbodes
35,520
public static boolean downloadUpdatedBlob ( Map conf , BlobStore blobStore , String key , Set < NimbusInfo > nimbusInfos ) throws TTransportException { NimbusClient client ; ClientBlobStore remoteBlobStore ; boolean isSuccess = false ; LOG . debug ( "Download blob NimbusInfos {}" , nimbusInfos ) ; for ( NimbusInfo nimbusInfo : nimbusInfos ) { if ( isSuccess ) { break ; } try { client = new NimbusClient ( conf , nimbusInfo . getHost ( ) , nimbusInfo . getPort ( ) , null ) ; remoteBlobStore = new NimbusBlobStore ( ) ; remoteBlobStore . setClient ( conf , client ) ; isSuccess = updateBlob ( blobStore , key , remoteBlobStore . getBlob ( key ) ) ; } catch ( IOException exception ) { throw new RuntimeException ( exception ) ; } catch ( KeyNotFoundException knf ) { LOG . info ( "KeyNotFoundException {}" , knf ) ; } catch ( Exception exp ) { LOG . error ( "Exception {}" , exp ) ; } } if ( ! isSuccess ) { LOG . error ( "Could not update the blob with key" + key ) ; } return isSuccess ; }
Download updated blobs from potential nimbodes
35,521
public static List < String > getKeyListFromBlobStore ( BlobStore blobStore ) throws Exception { Iterator < String > keys = blobStore . listKeys ( ) ; List < String > keyList = new ArrayList < String > ( ) ; if ( keys != null ) { while ( keys . hasNext ( ) ) { keyList . add ( keys . next ( ) ) ; } } LOG . debug ( "KeyList from blobstore {}" , keyList ) ; return keyList ; }
Get the list of keys from blobstore
35,522
public static < R > Set < R > filterAndListKeys ( Iterator < R > keys , KeyFilter < R > filter ) { Set < R > ret = new HashSet < R > ( ) ; while ( keys . hasNext ( ) ) { R key = keys . next ( ) ; R filtered = filter . filter ( key ) ; if ( filtered != null ) { ret . add ( filtered ) ; } } return ret ; }
Filters keys based on the KeyFilter passed as the argument .
35,523
public static void cleanup_key ( String blobKey , BlobStore blobStore , StormClusterState clusterState ) { if ( blobKey . startsWith ( JStormMetrics . NIMBUS_METRIC_KEY ) || blobKey . startsWith ( JStormMetrics . CLUSTER_METRIC_KEY ) || blobKey . startsWith ( JStormMetrics . SUPERVISOR_METRIC_KEY ) ) { return ; } try { blobStore . deleteBlob ( blobKey ) ; } catch ( Exception e ) { LOG . warn ( "cleanup blob key {} error {}" , blobKey , e ) ; } try { if ( blobStore instanceof LocalFsBlobStore ) { clusterState . remove_blobstore_key ( blobKey ) ; clusterState . remove_key_version ( blobKey ) ; } } catch ( Exception e ) { LOG . warn ( "cleanup blob key {} error {}" , blobKey , e ) ; } }
remove blob information in zk for the blobkey
35,524
public static void downloadDistributeStormCode ( Map conf , String topologyId , String masterCodeDir ) throws IOException , TException { String tmpToot = null ; try { tmpToot = StormConfig . supervisorTmpDir ( conf ) + File . separator + UUID . randomUUID ( ) . toString ( ) ; String stormRoot = StormConfig . supervisor_stormdist_root ( conf , topologyId ) ; JStormServerUtils . downloadCodeFromBlobStore ( conf , tmpToot , topologyId ) ; String localFileJarTmp = StormConfig . stormjar_path ( tmpToot ) ; JStormUtils . extractDirFromJar ( localFileJarTmp , StormConfig . RESOURCES_SUBDIR , tmpToot ) ; File srcDir = new File ( tmpToot ) ; File destDir = new File ( stormRoot ) ; try { FileUtils . moveDirectory ( srcDir , destDir ) ; } catch ( FileExistsException e ) { FileUtils . copyDirectory ( srcDir , destDir ) ; FileUtils . deleteQuietly ( srcDir ) ; } } finally { if ( tmpToot != null ) { File srcDir = new File ( tmpToot ) ; FileUtils . deleteQuietly ( srcDir ) ; } } }
no need to synchronize since EventManager will execute sequentially
35,525
public static boolean setLogBackLevel ( String loggerName , String logLevel ) { String logLevelUpper = ( logLevel == null ) ? "OFF" : logLevel . toUpperCase ( ) ; try { Package logbackPackage = Package . getPackage ( LOGBACK_CLASSIC ) ; if ( logbackPackage == null ) { LOG . warn ( "Logback is not in the classpath!" ) ; return false ; } if ( ( loggerName == null ) || loggerName . trim ( ) . isEmpty ( ) ) { loggerName = ( String ) getFieldVaulue ( LOGBACK_CLASSIC_LOGGER , "ROOT_LOGGER_NAME" ) ; } Logger loggerObtained = LoggerFactory . getLogger ( loggerName ) ; if ( loggerObtained == null ) { LOG . warn ( "No logger for the name: {}" , loggerName ) ; return false ; } Object logLevelObj = getFieldVaulue ( LOGBACK_CLASSIC_LEVEL , logLevelUpper ) ; if ( logLevelObj == null ) { LOG . warn ( "No such log level: {}" , logLevelUpper ) ; return false ; } Class < ? > [ ] paramTypes = { logLevelObj . getClass ( ) } ; Object [ ] params = { logLevelObj } ; Class < ? > clz = Class . forName ( LOGBACK_CLASSIC_LOGGER ) ; Method method = clz . getMethod ( "setLevel" , paramTypes ) ; method . invoke ( loggerObtained , params ) ; LOG . info ( "LogBack level set to {} for the logger '{}'" , logLevelUpper , loggerName ) ; return true ; } catch ( NoClassDefFoundError e ) { LOG . warn ( "Couldn't set logback level to {} for the logger '{}'" , logLevelUpper , loggerName , e ) ; return false ; } catch ( Exception e ) { LOG . warn ( "Couldn't set logback level to {} for the logger '{}'" , logLevelUpper , loggerName , e ) ; return false ; } }
Dynamically sets the logback log level for the given class to the specified level .
35,526
public static boolean setLog4jLevel ( String loggerName , String logLevel ) { String logLevelUpper = ( logLevel == null ) ? "OFF" : logLevel . toUpperCase ( ) ; try { Package log4jPackage = Package . getPackage ( LOG4J_CLASSIC ) ; if ( log4jPackage == null ) { LOG . warn ( "Log4j is not in the classpath!" ) ; return false ; } Class < ? > clz = Class . forName ( LOG4J_CLASSIC_LOGGER ) ; Object loggerObtained ; if ( ( loggerName == null ) || loggerName . trim ( ) . isEmpty ( ) || loggerName . trim ( ) . equals ( "ROOT" ) ) { Method method = clz . getMethod ( "getRootLogger" ) ; loggerObtained = method . invoke ( null ) ; loggerName = "ROOT" ; } else { Method method = clz . getMethod ( "getLogger" , String . class ) ; loggerObtained = method . invoke ( null , loggerName ) ; } if ( loggerObtained == null ) { LOG . warn ( "No logger for the name: {}" , loggerName ) ; return false ; } Object logLevelObj = getFieldVaulue ( LOG4J_CLASSIC_LEVEL , logLevelUpper ) ; if ( logLevelObj == null ) { LOG . warn ( "No such log level: {}" , logLevelUpper ) ; return false ; } Class < ? > [ ] paramTypes = { logLevelObj . getClass ( ) } ; Object [ ] params = { logLevelObj } ; Method method = clz . getMethod ( "setLevel" , paramTypes ) ; method . invoke ( loggerObtained , params ) ; LOG . info ( "Log4j level set to {} for the logger '{}'" , logLevelUpper , loggerName ) ; return true ; } catch ( NoClassDefFoundError e ) { LOG . warn ( "Couldn't set log4j level to {} for the logger '{}'" , logLevelUpper , loggerName , e ) ; return false ; } catch ( Exception e ) { LOG . warn ( "Couldn't set log4j level to {} for the logger '{}'" , logLevelUpper , loggerName ) ; return false ; } }
Dynamically sets the log4j log level for the given class to the specified level .
35,527
public static int deserializeTaskId ( byte [ ] ser ) { Input _kryoInput = new Input ( 1 ) ; _kryoInput . setBuffer ( ser ) ; return _kryoInput . readInt ( ) ; }
just get target taskId
35,528
public static boolean isPortAvailable ( int port ) { try { ServerSocket socket = new ServerSocket ( port ) ; socket . close ( ) ; return true ; } catch ( IOException e ) { return false ; } }
See if a port is available for listening on by trying to listen on it and seeing if that works or fails .
35,529
public static boolean isPortAvailable ( String host , int port ) { try { Socket socket = new Socket ( host , port ) ; socket . close ( ) ; return false ; } catch ( IOException e ) { return true ; } }
See if a port is available for listening on by trying connect to it and seeing if that works or fails
35,530
public static Configuration patchConfiguration ( Configuration conf ) { if ( conf . get ( JstormXmlConfKeys . IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH ) == null ) { conf . set ( JstormXmlConfKeys . IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH , "true" ) ; } return conf ; }
Take an existing conf and patch it for needs . Useful in Service . init & RunService methods where a shared config is being passed in
35,531
public static List < String > collectionToStringList ( Collection c ) { List < String > l = new ArrayList < String > ( c . size ( ) ) ; for ( Object o : c ) { l . add ( o . toString ( ) ) ; } return l ; }
Take a collection return a list containing the string value of every element in the collection .
35,532
public static String join ( Collection collection , String separator ) { return join ( collection , separator , true ) ; }
Join an collection of objects with a separator that appears after every instance in the list - including at the end
35,533
public static String join ( Collection collection , String separator , boolean trailing ) { StringBuilder b = new StringBuilder ( ) ; if ( collection . isEmpty ( ) ) { return trailing ? separator : "" ; } for ( Object o : collection ) { b . append ( o ) ; b . append ( separator ) ; } int length = separator . length ( ) ; String s = b . toString ( ) ; return ( trailing || s . isEmpty ( ) ) ? s : ( b . substring ( 0 , b . length ( ) - length ) ) ; }
Join an collection of objects with a separator that appears after every instance in the list - optionally at the end
35,534
public static String join ( String [ ] collection , String separator , boolean trailing ) { return join ( Arrays . asList ( collection ) , separator , trailing ) ; }
Join an array of strings with a separator that appears after every instance in the list - optionally at the end
35,535
public static String joinWithInnerSeparator ( String separator , Object ... collection ) { StringBuilder b = new StringBuilder ( ) ; boolean first = true ; for ( Object o : collection ) { if ( first ) { first = false ; } else { b . append ( separator ) ; } b . append ( o . toString ( ) ) ; b . append ( separator ) ; } return b . toString ( ) ; }
Join an array of strings with a separator that appears after every instance in the list - except at the end
35,536
public static String getSupervisorSlotPorts ( int memory , int vcores , String instanceName , String supervisorHost , RegistryOperations registryOperations ) { return join ( getSupervisorPorts ( memory , vcores , instanceName , supervisorHost , registryOperations ) , JOYConstants . COMMA , false ) ; }
this is for jstorm configuration s format
35,537
private long getNextAlignedWindowTs ( long startTs , long endTs ) { long nextTs = windowManager . getEarliestEventTs ( startTs , endTs ) ; if ( nextTs == Long . MAX_VALUE || ( nextTs % slidingIntervalMs == 0 ) ) { return nextTs ; } return nextTs + ( slidingIntervalMs - ( nextTs % slidingIntervalMs ) ) ; }
Computes the next window by scanning the events in the window and finds the next aligned window between the startTs and endTs . Return the end ts of the next aligned window i . e . the ts when the window should fire .
35,538
private long computeWaterMarkTs ( ) { long ts = 0 ; if ( streamToTs . size ( ) >= inputStreams . size ( ) ) { ts = Long . MAX_VALUE ; for ( Map . Entry < GlobalStreamId , Long > entry : streamToTs . entrySet ( ) ) { ts = Math . min ( ts , entry . getValue ( ) ) ; } } return ts - eventTsLag ; }
Computes the min ts across all streams .
35,539
public static String topologyIdToName ( String topologyId ) throws InvalidTopologyException { String ret ; int index = topologyId . lastIndexOf ( '-' ) ; if ( index != - 1 && index > 2 ) { index = topologyId . lastIndexOf ( '-' , index - 1 ) ; if ( index != - 1 && index > 0 ) ret = topologyId . substring ( 0 , index ) ; else throw new InvalidTopologyException ( topologyId + " is not a valid topologyId" ) ; } else throw new InvalidTopologyException ( topologyId + " is not a valid topologyId" ) ; return ret ; }
Convert topologyId to topologyName . TopologyId = topoloygName - counter - timeStamp
35,540
public static boolean charValidate ( String name ) { if ( name . matches ( "[0-9]+" ) || name . toLowerCase ( ) . equals ( "null" ) ) { return false ; } else { return name . matches ( "[a-zA-Z0-9-_.]+" ) ; } }
Validation of topology name chars . Only alpha char number - _ . are valid .
35,541
@ SuppressWarnings ( "unchecked" ) public static void validate_ids ( StormTopology topology , String topologyId ) throws InvalidTopologyException { String topologyName = topologyIdToName ( topologyId ) ; if ( ! charValidate ( topologyName ) ) { throw new InvalidTopologyException ( topologyName + " is not a valid topology name. " + nameErrorInfo ) ; } List < String > list = new ArrayList < > ( ) ; for ( StormTopology . _Fields field : Thrift . STORM_TOPOLOGY_FIELDS ) { Object value = topology . getFieldValue ( field ) ; if ( value != null ) { Map < String , Object > obj_map = ( Map < String , Object > ) value ; Set < String > commids = obj_map . keySet ( ) ; for ( String id : commids ) { if ( system_id ( id ) || ! charComponentValidate ( id ) ) { throw new InvalidTopologyException ( id + " is not a valid component id. " + compErrorInfo ) ; } } for ( Object obj : obj_map . values ( ) ) { validate_component ( obj ) ; } list . addAll ( commids ) ; } } List < String > offending = JStormUtils . getRepeat ( list ) ; if ( ! offending . isEmpty ( ) ) { throw new InvalidTopologyException ( "Duplicate component ids: " + offending ) ; } }
Check Whether ID of Bolt or spout is system_id
35,542
public static void add_acker ( Map stormConf , StormTopology ret ) { String key = Config . TOPOLOGY_ACKER_EXECUTORS ; Integer ackerNum = JStormUtils . parseInt ( stormConf . get ( key ) , 0 ) ; HashMap < String , StreamInfo > outputs = new HashMap < > ( ) ; ArrayList < String > fields = new ArrayList < > ( ) ; fields . add ( "id" ) ; outputs . put ( ACKER_ACK_STREAM_ID , Thrift . directOutputFields ( fields ) ) ; outputs . put ( ACKER_FAIL_STREAM_ID , Thrift . directOutputFields ( fields ) ) ; IBolt ackerbolt = new Acker ( ) ; Map < GlobalStreamId , Grouping > inputs = acker_inputs ( ret ) ; Bolt acker_bolt = Thrift . mkBolt ( inputs , ackerbolt , outputs , ackerNum ) ; for ( Entry < String , Bolt > e : ret . get_bolts ( ) . entrySet ( ) ) { Bolt bolt = e . getValue ( ) ; ComponentCommon common = bolt . get_common ( ) ; List < String > ackList = JStormUtils . mk_list ( "id" , "ack-val" ) ; common . put_to_streams ( ACKER_ACK_STREAM_ID , Thrift . outputFields ( ackList ) ) ; List < String > failList = JStormUtils . mk_list ( "id" ) ; common . put_to_streams ( ACKER_FAIL_STREAM_ID , Thrift . outputFields ( failList ) ) ; bolt . set_common ( common ) ; } for ( Entry < String , SpoutSpec > kv : ret . get_spouts ( ) . entrySet ( ) ) { SpoutSpec bolt = kv . getValue ( ) ; ComponentCommon common = bolt . get_common ( ) ; List < String > initList = JStormUtils . mk_list ( "id" , "init-val" , "spout-task" ) ; common . put_to_streams ( ACKER_INIT_STREAM_ID , Thrift . outputFields ( initList ) ) ; GlobalStreamId ack_ack = new GlobalStreamId ( ACKER_COMPONENT_ID , ACKER_ACK_STREAM_ID ) ; common . put_to_inputs ( ack_ack , Thrift . mkDirectGrouping ( ) ) ; GlobalStreamId ack_fail = new GlobalStreamId ( ACKER_COMPONENT_ID , ACKER_FAIL_STREAM_ID ) ; common . put_to_inputs ( ack_fail , Thrift . mkDirectGrouping ( ) ) ; } ret . put_to_bolts ( ACKER_COMPONENT_ID , acker_bolt ) ; }
Add acker bolt to topology
35,543
@ SuppressWarnings ( "unchecked" ) public static Map component_conf ( TopologyContext topology_context , String component_id ) { Map < Object , Object > componentConf = new HashMap < > ( ) ; String jconf = topology_context . getComponentCommon ( component_id ) . get_json_conf ( ) ; if ( jconf != null ) { componentConf = ( Map < Object , Object > ) JStormUtils . from_json ( jconf ) ; } return componentConf ; }
get component configuration
35,544
public static Object get_task_object ( StormTopology topology , String component_id , URLClassLoader loader ) { Map < String , SpoutSpec > spouts = topology . get_spouts ( ) ; Map < String , Bolt > bolts = topology . get_bolts ( ) ; Map < String , StateSpoutSpec > state_spouts = topology . get_state_spouts ( ) ; ComponentObject obj = null ; if ( spouts . containsKey ( component_id ) ) { obj = spouts . get ( component_id ) . get_spout_object ( ) ; } else if ( bolts . containsKey ( component_id ) ) { obj = bolts . get ( component_id ) . get_bolt_object ( ) ; } else if ( state_spouts . containsKey ( component_id ) ) { obj = state_spouts . get ( component_id ) . get_state_spout_object ( ) ; } if ( obj == null ) { throw new RuntimeException ( "Could not find " + component_id + " in " + topology . toString ( ) ) ; } Object componentObject = Utils . getSetComponentObject ( obj , loader ) ; Object rtn ; if ( componentObject instanceof JavaObject ) { rtn = Thrift . instantiateJavaObject ( ( JavaObject ) componentObject ) ; } else if ( componentObject instanceof ShellComponent ) { if ( spouts . containsKey ( component_id ) ) { rtn = new ShellSpout ( ( ShellComponent ) componentObject ) ; } else { rtn = new ShellBolt ( ( ShellComponent ) componentObject ) ; } } else { rtn = componentObject ; } return rtn ; }
get object of component_id
35,545
public static Map getComponentMap ( DefaultTopologyAssignContext context , Integer task ) { String componentName = context . getTaskToComponent ( ) . get ( task ) ; ComponentCommon componentCommon = ThriftTopologyUtils . getComponentCommon ( context . getSysTopology ( ) , componentName ) ; Map componentMap = ( Map ) JStormUtils . from_json ( componentCommon . get_json_conf ( ) ) ; if ( componentMap == null ) { componentMap = Maps . newHashMap ( ) ; } return componentMap ; }
get the component s configuration
35,546
public Integer get_version ( String path , boolean watch ) throws Exception { return zkObj . getVersion ( zk , path , watch ) ; }
Note that get_version doesn t use zkCache avoid to conflict with get_data
35,547
public void activate ( String topologyName ) throws TException { try { NimbusUtils . transitionName ( data , topologyName , true , StatusType . activate ) ; notifyTopologyActionListener ( topologyName , "activate" ) ; } catch ( NotAliveException e ) { String errMsg = "Activate Error, topology " + topologyName + " is not alive!" ; LOG . error ( errMsg , e ) ; throw new NotAliveException ( errMsg ) ; } catch ( Exception e ) { String errMsg = "Failed to activate topology " + topologyName ; LOG . error ( errMsg , e ) ; throw new TException ( errMsg ) ; } }
set topology status as active
35,548
public void rebalance ( String topologyName , RebalanceOptions options ) throws TException { try { checkTopologyActive ( data , topologyName , true ) ; Integer wait_amt = null ; String jsonConf = null ; Boolean reassign = false ; if ( options != null ) { if ( options . is_set_wait_secs ( ) ) wait_amt = options . get_wait_secs ( ) ; if ( options . is_set_reassign ( ) ) reassign = options . is_reassign ( ) ; if ( options . is_set_conf ( ) ) jsonConf = options . get_conf ( ) ; } LOG . info ( "Begin to rebalance " + topologyName + "wait_time:" + wait_amt + ", reassign: " + reassign + ", new worker/bolt configuration:" + jsonConf ) ; Map < Object , Object > conf = ( Map < Object , Object > ) JStormUtils . from_json ( jsonConf ) ; NimbusUtils . transitionName ( data , topologyName , true , StatusType . rebalance , wait_amt , reassign , conf ) ; notifyTopologyActionListener ( topologyName , "rebalance" ) ; } catch ( NotAliveException e ) { String errMsg = "Rebalance error, topology " + topologyName + " is not alive!" ; LOG . error ( errMsg , e ) ; throw new NotAliveException ( errMsg ) ; } catch ( Exception e ) { String errMsg = "Failed to rebalance topology " + topologyName ; LOG . error ( errMsg , e ) ; throw new TException ( errMsg ) ; } }
rebalance a topology
35,549
public String beginFileUpload ( ) throws TException { String fileLoc = null ; try { String path ; String key = UUID . randomUUID ( ) . toString ( ) ; path = StormConfig . masterInbox ( conf ) + "/" + key ; FileUtils . forceMkdir ( new File ( path ) ) ; FileUtils . cleanDirectory ( new File ( path ) ) ; fileLoc = path + "/stormjar-" + key + ".jar" ; data . getUploaders ( ) . put ( fileLoc , Channels . newChannel ( new FileOutputStream ( fileLoc ) ) ) ; LOG . info ( "Begin upload file from client to " + fileLoc ) ; return path ; } catch ( FileNotFoundException e ) { LOG . error ( "File not found: " + fileLoc , e ) ; throw new TException ( e ) ; } catch ( IOException e ) { LOG . error ( "Upload file error: " + fileLoc , e ) ; throw new TException ( e ) ; } }
prepare to upload topology jar return the file location
35,550
public void uploadChunk ( String location , ByteBuffer chunk ) throws TException { TimeCacheMap < Object , Object > uploaders = data . getUploaders ( ) ; Object obj = uploaders . get ( location ) ; if ( obj == null ) { throw new TException ( "File for that location does not exist (or timed out) " + location ) ; } try { if ( obj instanceof WritableByteChannel ) { WritableByteChannel channel = ( WritableByteChannel ) obj ; channel . write ( chunk ) ; uploaders . put ( location , channel ) ; } else { throw new TException ( "Object isn't WritableByteChannel for " + location ) ; } } catch ( IOException e ) { String errMsg = " WritableByteChannel write filed when uploadChunk " + location ; LOG . error ( errMsg ) ; throw new TException ( e ) ; } }
upload topology jar data
35,551
public ClusterSummary getClusterInfo ( ) throws TException { long start = System . nanoTime ( ) ; try { StormClusterState stormClusterState = data . getStormClusterState ( ) ; Map < String , Assignment > assignments = new HashMap < > ( ) ; List < TopologySummary > topologySummaries = NimbusUtils . getTopologySummary ( stormClusterState , assignments ) ; Map < String , SupervisorInfo > supervisorInfos = Cluster . get_all_SupervisorInfo ( stormClusterState , null ) ; List < SupervisorSummary > supervisorSummaries = NimbusUtils . mkSupervisorSummaries ( supervisorInfos , assignments ) ; NimbusSummary nimbusSummary = NimbusUtils . getNimbusSummary ( stormClusterState , supervisorSummaries , data ) ; return new ClusterSummary ( nimbusSummary , supervisorSummaries , topologySummaries ) ; } catch ( TException e ) { LOG . info ( "Failed to get ClusterSummary " , e ) ; throw e ; } catch ( Exception e ) { LOG . info ( "Failed to get ClusterSummary " , e ) ; throw new TException ( e ) ; } finally { long end = System . nanoTime ( ) ; SimpleJStormMetric . updateNimbusHistogram ( "getClusterInfo" , ( end - start ) / TimeUtils . NS_PER_US ) ; } }
get cluster s summary it will contain SupervisorSummary and TopologySummary
35,552
public String getTopologyConf ( String id ) throws TException { String rtn ; try { Map < Object , Object > topologyConf = StormConfig . read_nimbus_topology_conf ( id , data . getBlobStore ( ) ) ; rtn = JStormUtils . to_json ( topologyConf ) ; } catch ( IOException e ) { LOG . info ( "Failed to get configuration of " + id , e ) ; throw new TException ( e ) ; } return rtn ; }
get topology configuration
35,553
public StormTopology getTopology ( String id ) throws TException { StormTopology topology ; try { StormTopology stormtopology = StormConfig . read_nimbus_topology_code ( id , data . getBlobStore ( ) ) ; if ( stormtopology == null ) { throw new NotAliveException ( "No topology of " + id ) ; } Map < Object , Object > topologyConf = ( Map < Object , Object > ) StormConfig . read_nimbus_topology_conf ( id , data . getBlobStore ( ) ) ; topology = Common . system_topology ( topologyConf , stormtopology ) ; } catch ( Exception e ) { LOG . error ( "Failed to get topology " + id + "," , e ) ; throw new TException ( "Failed to get system_topology" ) ; } return topology ; }
get StormTopology throw deserialize local files
35,554
public void checkTopologyActive ( NimbusData nimbus , String topologyName , boolean bActive ) throws Exception { if ( isTopologyActive ( nimbus . getStormClusterState ( ) , topologyName ) != bActive ) { if ( bActive ) { throw new NotAliveException ( topologyName + " is not alive" ) ; } else { throw new AlreadyAliveException ( topologyName + " is already alive" ) ; } } }
check whether the topology is bActive?
35,555
public boolean isTopologyActive ( StormClusterState stormClusterState , String topologyName ) throws Exception { boolean rtn = false ; if ( Cluster . get_topology_id ( stormClusterState , topologyName ) != null ) { rtn = true ; } return rtn ; }
whether the topology is active by topology name
35,556
public static ArrayList < Path > listFilesByModificationTime ( FileSystem fs , Path directory , long olderThan ) throws IOException { ArrayList < LocatedFileStatus > fstats = new ArrayList < > ( ) ; RemoteIterator < LocatedFileStatus > itr = fs . listFiles ( directory , false ) ; while ( itr . hasNext ( ) ) { LocatedFileStatus fileStatus = itr . next ( ) ; if ( olderThan > 0 ) { if ( fileStatus . getModificationTime ( ) <= olderThan ) fstats . add ( fileStatus ) ; } else { fstats . add ( fileStatus ) ; } } Collections . sort ( fstats , new ModifTimeComparator ( ) ) ; ArrayList < Path > result = new ArrayList < > ( fstats . size ( ) ) ; for ( LocatedFileStatus fstat : fstats ) { result . add ( fstat . getPath ( ) ) ; } return result ; }
list files sorted by modification time that have not been modified since olderThan . if olderThan is < = 0 then the filtering is disabled
35,557
public static FSDataOutputStream tryCreateFile ( FileSystem fs , Path file ) throws IOException { try { FSDataOutputStream os = fs . create ( file , false ) ; return os ; } catch ( FileAlreadyExistsException e ) { return null ; } catch ( RemoteException e ) { if ( e . unwrapRemoteException ( ) instanceof AlreadyBeingCreatedException ) { return null ; } else { throw e ; } } }
Returns null if file already exists . throws if there was unexpected problem
35,558
public SpoutDeclarer setSpoutWithAck ( String id , IRichSpout spout , Number parallelismHint ) { return setSpout ( id , new AckTransactionSpout ( spout ) , parallelismHint ) ; }
Build spout to provide the compatibility with Storm s ack mechanism
35,559
public BoltDeclarer setBoltWithAck ( String id , IRichBolt bolt , Number parallelismHint ) { return setBolt ( id , new AckTransactionBolt ( bolt ) , parallelismHint ) ; }
Build bolt to provide the compatibility with Storm s ack mechanism
35,560
@ SuppressWarnings ( "rawtypes" ) public void prepare ( Map stormConf ) { this . stormConf = stormConf ; int maxWorkers = Utils . getInt ( stormConf . get ( Config . STORM_MESSAGING_NETTY_CLIENT_WORKER_THREADS ) ) ; ThreadFactory bossFactory = new NettyRenameThreadFactory ( MetricDef . NETTY_CLI + "boss" ) ; ThreadFactory workerFactory = new NettyRenameThreadFactory ( MetricDef . NETTY_CLI + "worker" ) ; if ( maxWorkers > 0 ) { clientChannelFactory = new NioClientSocketChannelFactory ( Executors . newCachedThreadPool ( bossFactory ) , Executors . newCachedThreadPool ( workerFactory ) , maxWorkers ) ; } else { clientChannelFactory = new NioClientSocketChannelFactory ( Executors . newCachedThreadPool ( bossFactory ) , Executors . newCachedThreadPool ( workerFactory ) ) ; } reconnector = new ReconnectRunnable ( ) ; new AsyncLoopThread ( reconnector , true , Thread . MIN_PRIORITY , true ) ; }
initialization per Storm configuration
35,561
public void assign ( WorkerSlot slot , Collection < ExecutorDetails > executors ) { for ( ExecutorDetails executor : executors ) { this . executorToSlot . put ( executor , slot ) ; } }
Assign the slot to executors .
35,562
public void unassignBySlot ( WorkerSlot slot ) { List < ExecutorDetails > executors = new ArrayList < > ( ) ; for ( ExecutorDetails executor : this . executorToSlot . keySet ( ) ) { WorkerSlot ws = this . executorToSlot . get ( executor ) ; if ( ws . equals ( slot ) ) { executors . add ( executor ) ; } } for ( ExecutorDetails executor : executors ) { this . executorToSlot . remove ( executor ) ; } }
Release the slot occupied by this assignment .
35,563
private void handleWaterMarkEvent ( Event < T > waterMarkEvent ) { long watermarkTs = waterMarkEvent . getTimestamp ( ) ; List < Long > eventTs = windowManager . getSlidingCountTimestamps ( lastProcessedTs , watermarkTs , count ) ; for ( long ts : eventTs ) { evictionPolicy . setContext ( ts ) ; handler . onTrigger ( ) ; lastProcessedTs = ts ; } }
Triggers all the pending windows up to the waterMarkEvent timestamp based on the sliding interval count .
35,564
public TTransport connect ( TTransport transport , String serverHost , String asUser ) throws TTransportException { int maxBufferSize = type . getMaxBufferSize ( storm_conf ) ; TTransport conn = new TFramedTransport ( transport , maxBufferSize ) ; conn . open ( ) ; LOG . debug ( "Simple client transport has been established" ) ; return conn ; }
Connect to the specified server via framed transport
35,565
public synchronized void registerSignal ( int signalNumber , Runnable callback , boolean replace ) { String signalName = signalMap . get ( signalNumber ) ; if ( signalName == null ) { LOG . warn ( "Invalid signalNumber " + signalNumber ) ; return ; } LOG . info ( "Begin to register signal of {}" , signalName ) ; try { SignalHandler oldHandler = Signal . handle ( new Signal ( signalName ) , this ) ; LOG . info ( "Successfully register {} handler" , signalName ) ; Runnable old = signalHandlers . put ( signalNumber , callback ) ; if ( old != null ) { if ( ! replace ) { oldSignalHandlers . put ( signalNumber , oldHandler ) ; } else { LOG . info ( "Successfully old {} handler will be replaced" , signalName ) ; } } LOG . info ( "Successfully register signal of {}" , signalName ) ; } catch ( Exception e ) { LOG . error ( "Failed to register " + signalName + ":" + signalNumber + ", Signal already used by VM or OS: SIGILL" ) ; } }
Register signal to system if callback is null then the current process will ignore this signal
35,566
public List < Integer > get ( Integer out_task_id , String stream , List < Object > tuple , Collection < Tuple > anchors , Object root_id ) { if ( isDebug ( anchors , root_id ) ) { LOG . info ( debugIdStr + stream + " to " + out_task_id + ":" + tuple ) ; } taskStats . send_tuple ( stream , 1 ) ; List < Integer > out_tasks = new ArrayList < > ( ) ; out_tasks . add ( out_task_id ) ; return out_tasks ; }
direct send tuple to special task
35,567
public List < Integer > get ( String stream , List < Object > tuple , Collection < Tuple > anchors , Object rootId ) { List < Integer > outTasks = new ArrayList < > ( ) ; Map < String , MkGrouper > componentCrouping = streamComponentGrouper . get ( stream ) ; if ( componentCrouping == null ) { LOG . debug ( "Failed to get Grouper of " + stream + " when " + debugIdStr ) ; return outTasks ; } for ( Entry < String , MkGrouper > ee : componentCrouping . entrySet ( ) ) { String targetComponent = ee . getKey ( ) ; MkGrouper g = ee . getValue ( ) ; if ( GrouperType . direct . equals ( g . gettype ( ) ) ) { throw new IllegalArgumentException ( "Cannot do regular emit to direct stream" ) ; } outTasks . addAll ( g . grouper ( tuple ) ) ; } if ( isDebug ( anchors , rootId ) ) { LOG . info ( debugIdStr + stream + " to " + outTasks + ":" + tuple . toString ( ) ) ; } int num_out_tasks = outTasks . size ( ) ; taskStats . send_tuple ( stream , num_out_tasks ) ; return outTasks ; }
send tuple according to grouping
35,568
public static AsmMetric find ( String name ) { for ( AsmMetricRegistry registry : allRegistries ) { AsmMetric metric = registry . getMetric ( name ) ; if ( metric != null ) { return metric ; } } return null ; }
reserve for debug purposes
35,569
public static AsmHistogram registerWorkerHistogram ( String topologyId , String name , AsmHistogram histogram ) { return ( AsmHistogram ) registerWorkerMetric ( MetricUtils . workerMetricName ( topologyId , host , 0 , name , MetricType . HISTOGRAM ) , histogram ) ; }
simplified helper method to register a worker histogram
35,570
public static AsmGauge registerWorkerGauge ( String topologyId , String name , AsmGauge gauge ) { return ( AsmGauge ) registerWorkerMetric ( MetricUtils . workerMetricName ( topologyId , host , 0 , name , MetricType . GAUGE ) , gauge ) ; }
simplified helper method to register a worker gauge
35,571
public static AsmMeter registerWorkerMeter ( String topologyId , String name , AsmMeter meter ) { return ( AsmMeter ) registerWorkerMetric ( MetricUtils . workerMetricName ( topologyId , host , 0 , name , MetricType . METER ) , meter ) ; }
simplified helper method to register a worker meter
35,572
public static AsmCounter registerWorkerCounter ( String topologyId , String name , AsmCounter counter ) { return ( AsmCounter ) registerWorkerMetric ( MetricUtils . workerMetricName ( topologyId , host , 0 , name , MetricType . COUNTER ) , counter ) ; }
simplified helper method to register a worker counter
35,573
public static void killProcess ( String pid ) { synchronized ( lock ) { LOG . info ( "Begin to kill process " + pid ) ; WorkerShutdown shutdownHandle = getProcessHandle ( pid ) ; if ( shutdownHandle != null ) { shutdownHandle . shutdown ( ) ; } processMap . remove ( pid ) ; LOG . info ( "Successfully killed process " + pid ) ; } }
Kill a process
35,574
private void loginHadoopUser ( Subject subject ) { Class < ? > ugi = null ; try { ugi = Class . forName ( "org.apache.hadoop.security.UserGroupInformation" ) ; } catch ( ClassNotFoundException e ) { LOG . info ( "Hadoop was not found on the class path" ) ; return ; } try { Method isSecEnabled = ugi . getMethod ( "isSecurityEnabled" ) ; if ( ! ( ( Boolean ) isSecEnabled . invoke ( null ) ) ) { LOG . warn ( "Hadoop is on the classpath but not configured for " + "security, if you want security you need to be sure that " + "hadoop.security.authentication=kerberos in core-site.xml " + "in your jar" ) ; return ; } try { Method login = ugi . getMethod ( "loginUserFromSubject" , Subject . class ) ; login . invoke ( null , subject ) ; } catch ( NoSuchMethodException me ) { String name = getTGT ( subject ) . getClient ( ) . toString ( ) ; LOG . warn ( "The Hadoop client does not have loginUserFromSubject, Trying to hack around it. This may not work..." ) ; Class < ? > confClass = Class . forName ( "org.apache.hadoop.conf.Configuration" ) ; Constructor confCons = confClass . getConstructor ( ) ; Object conf = confCons . newInstance ( ) ; Class < ? > hknClass = Class . forName ( "org.apache.hadoop.security.HadoopKerberosName" ) ; Method hknSetConf = hknClass . getMethod ( "setConfiguration" , confClass ) ; hknSetConf . invoke ( null , conf ) ; Class < ? > authMethodClass = Class . forName ( "org.apache.hadoop.security.UserGroupInformation$AuthenticationMethod" ) ; Object kerbAuthMethod = null ; for ( Object authMethod : authMethodClass . getEnumConstants ( ) ) { if ( "KERBEROS" . equals ( authMethod . toString ( ) ) ) { kerbAuthMethod = authMethod ; break ; } } Class < ? > userClass = Class . forName ( "org.apache.hadoop.security.User" ) ; Constructor userCons = userClass . getConstructor ( String . class , authMethodClass , LoginContext . class ) ; userCons . setAccessible ( true ) ; Object user = userCons . newInstance ( name , kerbAuthMethod , null ) ; subject . getPrincipals ( ) . add ( ( Principal ) user ) ; } } catch ( Exception e ) { LOG . warn ( "Something went wrong while trying to initialize Hadoop through reflection. This version of hadoop may not be compatible." , e ) ; } }
Hadoop does not just go off of a TGT it needs a bit more . This should fill in the rest .
35,575
public void update ( Map conf ) { for ( IUpdater updater : updaters ) { try { updater . update ( conf ) ; } catch ( Exception e ) { LOG . error ( e . getMessage ( ) , e ) ; } } }
trigger all updaters update action
35,576
public static String stream2taskName ( String old ) { String [ ] parts = old . split ( DELIM ) ; if ( parts . length >= 7 ) { parts [ 0 ] = MetaType . TASK . getV ( ) + parts [ 0 ] . charAt ( 1 ) ; parts [ parts . length - 3 ] = EMPTY ; String metricName = getMergeMetricName ( parts [ parts . length - 1 ] ) ; parts [ parts . length - 1 ] = metricName ; } return concat ( parts ) ; }
make streamId empty remain other parts the same
35,577
public static String task2compName ( String old ) { String [ ] parts = old . split ( DELIM ) ; if ( parts . length >= 7 ) { parts [ 0 ] = MetaType . COMPONENT . getV ( ) + parts [ 0 ] . charAt ( 1 ) ; parts [ parts . length - 3 ] = EMPTY ; parts [ parts . length - 4 ] = "0" ; } return concat ( parts ) ; }
make taskId = 0 and streamId empty .
35,578
public static String stream2compStreamName ( String old ) { String [ ] parts = old . split ( DELIM ) ; if ( parts . length >= 7 ) { parts [ 0 ] = MetaType . COMPONENT_STREAM . getV ( ) + parts [ 0 ] . charAt ( 1 ) ; parts [ parts . length - 4 ] = "0" ; parts [ parts . length - 1 ] = getMergeMetricName ( parts [ parts . length - 1 ] ) ; } return concat ( parts ) ; }
converts a task metric name to a component - level stream metric name
35,579
public static String task2MergeCompName ( String old ) { String [ ] parts = old . split ( DELIM ) ; if ( parts . length >= 7 ) { parts [ 0 ] = MetaType . COMPONENT . getV ( ) + parts [ 0 ] . charAt ( 1 ) ; parts [ parts . length - 3 ] = EMPTY ; parts [ parts . length - 4 ] = "0" ; String metricName = getMergeMetricName ( parts [ parts . length - 1 ] ) ; parts [ parts . length - 1 ] = metricName ; } return concat ( parts ) ; }
make taskId = 0 and streamId empty and metricName remain the string after . .
35,580
public static String comp2topologyName ( String old ) { String [ ] parts = old . split ( DELIM ) ; parts [ 0 ] = MetaType . TOPOLOGY . getV ( ) + parts [ 0 ] . charAt ( 1 ) ; return concat ( parts [ 0 ] , parts [ 1 ] , EMPTY , "0" , parts [ 5 ] , parts [ 6 ] ) ; }
change component metric name to topology metric name
35,581
public static Configuration GetConfiguration ( Map storm_conf ) { Configuration login_conf = null ; String loginConfigurationFile = ( String ) storm_conf . get ( "java.security.auth.login.config" ) ; if ( ( loginConfigurationFile != null ) && ( loginConfigurationFile . length ( ) > 0 ) ) { File config_file = new File ( loginConfigurationFile ) ; if ( ! config_file . canRead ( ) ) { throw new RuntimeException ( "File " + loginConfigurationFile + " cannot be read." ) ; } try { URI config_uri = config_file . toURI ( ) ; login_conf = Configuration . getInstance ( "JavaLoginConfig" , new URIParameter ( config_uri ) ) ; } catch ( Exception ex ) { throw new RuntimeException ( ex ) ; } } return login_conf ; }
Construct a JAAS configuration object per storm configuration file
35,582
public static IPrincipalToLocal GetPrincipalToLocalPlugin ( Map storm_conf ) { IPrincipalToLocal ptol = null ; try { String ptol_klassName = ( String ) storm_conf . get ( Config . STORM_PRINCIPAL_TO_LOCAL_PLUGIN ) ; Class klass = Class . forName ( ptol_klassName ) ; ptol = ( IPrincipalToLocal ) klass . newInstance ( ) ; ptol . prepare ( storm_conf ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } return ptol ; }
Construct a principal to local plugin
35,583
public static IGroupMappingServiceProvider GetGroupMappingServiceProviderPlugin ( Map storm_conf ) { IGroupMappingServiceProvider gmsp = null ; try { String gmsp_klassName = ( String ) storm_conf . get ( Config . STORM_GROUP_MAPPING_SERVICE_PROVIDER_PLUGIN ) ; Class klass = Class . forName ( gmsp_klassName ) ; gmsp = ( IGroupMappingServiceProvider ) klass . newInstance ( ) ; gmsp . prepare ( storm_conf ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } return gmsp ; }
Construct a group mapping service provider plugin
35,584
public static Collection < ICredentialsRenewer > GetCredentialRenewers ( Map conf ) { try { Set < ICredentialsRenewer > ret = new HashSet < ICredentialsRenewer > ( ) ; Collection < String > clazzes = ( Collection < String > ) conf . get ( Config . NIMBUS_CREDENTIAL_RENEWERS ) ; if ( clazzes != null ) { for ( String clazz : clazzes ) { ICredentialsRenewer inst = ( ICredentialsRenewer ) Class . forName ( clazz ) . newInstance ( ) ; inst . prepare ( conf ) ; ret . add ( inst ) ; } } return ret ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
Get all of the configured Credential Renwer Plugins .
35,585
public static Collection < INimbusCredentialPlugin > getNimbusAutoCredPlugins ( Map conf ) { try { Set < INimbusCredentialPlugin > ret = new HashSet < INimbusCredentialPlugin > ( ) ; Collection < String > clazzes = ( Collection < String > ) conf . get ( Config . NIMBUS_AUTO_CRED_PLUGINS ) ; if ( clazzes != null ) { for ( String clazz : clazzes ) { INimbusCredentialPlugin inst = ( INimbusCredentialPlugin ) Class . forName ( clazz ) . newInstance ( ) ; inst . prepare ( conf ) ; ret . add ( inst ) ; } } return ret ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
Get all the Nimbus Auto cred plugins .
35,586
public static Collection < IAutoCredentials > GetAutoCredentials ( Map storm_conf ) { try { Set < IAutoCredentials > autos = new HashSet < IAutoCredentials > ( ) ; Collection < String > clazzes = ( Collection < String > ) storm_conf . get ( Config . TOPOLOGY_AUTO_CREDENTIALS ) ; if ( clazzes != null ) { for ( String clazz : clazzes ) { IAutoCredentials a = ( IAutoCredentials ) Class . forName ( clazz ) . newInstance ( ) ; a . prepare ( storm_conf ) ; autos . add ( a ) ; } } LOG . info ( "Got AutoCreds " + autos ) ; return autos ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
Get all of the configured AutoCredential Plugins .
35,587
public static Subject populateSubject ( Subject subject , Collection < IAutoCredentials > autos , Map < String , String > credentials ) { try { if ( subject == null ) { subject = new Subject ( ) ; } for ( IAutoCredentials autoCred : autos ) { autoCred . populateSubject ( subject , credentials ) ; } return subject ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
Populate a subject from credentials using the IAutoCredentials .
35,588
public static ITransportPlugin GetTransportPlugin ( ThriftConnectionType type , Map storm_conf , Configuration login_conf ) { ITransportPlugin transportPlugin = null ; try { String transport_plugin_klassName = type . getTransportPlugin ( storm_conf ) ; Class klass = Class . forName ( transport_plugin_klassName ) ; transportPlugin = ( ITransportPlugin ) klass . newInstance ( ) ; transportPlugin . prepare ( type , storm_conf , login_conf ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } return transportPlugin ; }
Construct a transport plugin per storm configuration
35,589
public static IHttpCredentialsPlugin GetUiHttpCredentialsPlugin ( Map conf ) { String klassName = ( String ) conf . get ( Config . UI_HTTP_CREDS_PLUGIN ) ; return AuthUtils . GetHttpCredentialsPlugin ( conf , klassName ) ; }
Construct an HttpServletRequest credential plugin specified by the UI storm configuration
35,590
public static IHttpCredentialsPlugin GetDrpcHttpCredentialsPlugin ( Map conf ) { String klassName = ( String ) conf . get ( Config . DRPC_HTTP_CREDS_PLUGIN ) ; return AuthUtils . GetHttpCredentialsPlugin ( conf , klassName ) ; }
Construct an HttpServletRequest credential plugin specified by the DRPC storm configuration
35,591
private boolean increment ( TridentCollector [ ] lengths , int [ ] indices , int j ) { if ( j == - 1 ) return false ; indices [ j ] ++ ; CaptureCollector capturer = ( CaptureCollector ) lengths [ j ] ; if ( indices [ j ] >= capturer . captured . size ( ) ) { indices [ j ] = 0 ; return increment ( lengths , indices , j - 1 ) ; } return true ; }
return false if can t increment anymore
35,592
public void doReconnect ( ) { if ( channelRef . get ( ) != null ) { return ; } if ( isClosed ( ) ) { return ; } if ( isConnecting . getAndSet ( true ) ) { LOG . info ( "Connect twice {}" , name ( ) ) ; return ; } long sleepMs = getSleepTimeMs ( ) ; LOG . info ( "Reconnect ... [{}], {}, sleep {}ms" , retries . get ( ) , name , sleepMs ) ; ChannelFuture future = bootstrap . connect ( remoteAddr ) ; future . addListener ( new ChannelFutureListener ( ) { public void operationComplete ( ChannelFuture future ) throws Exception { isConnecting . set ( false ) ; Channel channel = future . getChannel ( ) ; if ( future . isSuccess ( ) ) { LOG . info ( "Connection established, channel = :{}" , channel ) ; setChannel ( channel ) ; BATCH_THRESHOLD_WARN = ConfigExtension . getNettyBufferThresholdSize ( stormConf ) ; } else { if ( ! isClosed ( ) ) { LOG . info ( "Failed to reconnect ... [{}], {}, channel = {}, cause = {}" , retries . get ( ) , name , channel , future . getCause ( ) ) ; reconnect ( ) ; } } } } ) ; JStormUtils . sleepMs ( sleepMs ) ; }
The function can t be synchronized otherwise it will cause deadlock
35,593
void closeChannel ( final Channel channel ) { synchronized ( channelClosing ) { if ( closingChannel . contains ( channel ) ) { LOG . info ( channel . toString ( ) + " has already been closed" ) ; return ; } closingChannel . add ( channel ) ; } LOG . debug ( channel . toString ( ) + " begin to close" ) ; ChannelFuture closeFuture = channel . close ( ) ; closeFuture . addListener ( new ChannelFutureListener ( ) { public void operationComplete ( ChannelFuture future ) throws Exception { synchronized ( channelClosing ) { closingChannel . remove ( channel ) ; } LOG . debug ( channel . toString ( ) + " closed." ) ; } } ) ; }
Avoid channel double close
35,594
public void onEvent ( Object event , long sequence , boolean endOfBatch ) throws Exception { try { if ( event == null ) { return ; } Runnable runnable = null ; if ( event instanceof Tuple ) { if ( ( ( TupleExt ) event ) . isBatchTuple ( ) ) { List < Object > values = ( ( Tuple ) event ) . getValues ( ) ; for ( Object value : values ) { Pair < MessageId , List < Object > > val = ( Pair < MessageId , List < Object > > ) value ; TupleImplExt tuple = new TupleImplExt ( sysTopologyCtx , val . getSecond ( ) , val . getFirst ( ) , ( ( TupleImplExt ) event ) ) ; runnable = processTupleEvent ( tuple ) ; if ( runnable != null ) { runnable . run ( ) ; runnable = null ; } } } else { runnable = processTupleEvent ( ( Tuple ) event ) ; } } else if ( event instanceof TimerTrigger . TimerEvent ) { processTimerEvent ( ( TimerTrigger . TimerEvent ) event ) ; return ; } else if ( event instanceof IAckMsg ) { runnable = ( Runnable ) event ; } else if ( event instanceof Runnable ) { runnable = ( Runnable ) event ; } else { LOG . warn ( "Receive one unknown event-" + event . toString ( ) + " " + idStr ) ; return ; } if ( runnable != null ) runnable . run ( ) ; } catch ( Throwable e ) { if ( ! taskStatus . isShutdown ( ) ) { LOG . info ( "Unknown exception " , e ) ; reportError . report ( e ) ; } } }
Handle acker message
35,595
private KeyValueState < String , CheckPointState > loadCheckpointState ( Map conf , TopologyContext ctx ) { String namespace = ctx . getThisComponentId ( ) + "-" + ctx . getThisTaskId ( ) ; KeyValueState < String , CheckPointState > state = ( KeyValueState < String , CheckPointState > ) StateFactory . getState ( namespace , conf , ctx ) ; if ( state . get ( TX_STATE_KEY ) == null ) { CheckPointState txState = new CheckPointState ( - 1 , CheckPointState . State . COMMITTED ) ; state . put ( TX_STATE_KEY , txState ) ; state . commit ( ) ; LOG . debug ( "Initialized checkpoint spout state with txState {}" , txState ) ; } else { LOG . debug ( "Got checkpoint spout state {}" , state . get ( TX_STATE_KEY ) ) ; } return state ; }
Loads the last saved checkpoint state the from persistent storage .
35,596
@ SuppressWarnings ( "unchecked" ) private void checkNeedUpdateTopologies ( Map < String , StateHeartbeat > localWorkerStats , Map < Integer , LocalAssignment > localAssignments ) throws Exception { Set < String > topologies = new HashSet < > ( ) ; for ( Map . Entry < Integer , LocalAssignment > entry : localAssignments . entrySet ( ) ) { topologies . add ( entry . getValue ( ) . getTopologyId ( ) ) ; } for ( StateHeartbeat stateHb : localWorkerStats . values ( ) ) { State state = stateHb . getState ( ) ; if ( ! state . equals ( State . notStarted ) ) { String topologyId = stateHb . getHeartbeat ( ) . getTopologyId ( ) ; topologies . remove ( topologyId ) ; } } long currTime = System . currentTimeMillis ( ) ; Set < String > needRemoveTopologies = new HashSet < > ( ) ; for ( String topologyId : topologies ) { try { long lastModifyTime = StormConfig . get_supervisor_topology_Bianrymodify_time ( conf , topologyId ) ; if ( ( currTime - lastModifyTime ) / 1000 < ( JStormUtils . MIN_1 * 2 ) ) { LOG . debug ( "less than 2 minute, removing " + topologyId ) ; needRemoveTopologies . add ( topologyId ) ; } } catch ( Exception e ) { LOG . error ( "Failed to get last modified time for topology" + topologyId , e ) ; needRemoveTopologies . add ( topologyId ) ; } } topologies . removeAll ( needRemoveTopologies ) ; if ( topologies . size ( ) > 0 ) { LOG . debug ( "Following topologies are going to re-download jars, " + topologies ) ; } needDownloadTopologies . set ( topologies ) ; }
check all workers is failed or not
35,597
public void markAllNewWorkers ( Map < Integer , String > workerIds ) { int startTime = TimeUtils . current_time_secs ( ) ; for ( Entry < Integer , String > entry : workerIds . entrySet ( ) ) { String oldWorkerIds = portToWorkerId . get ( entry . getKey ( ) ) ; if ( oldWorkerIds != null ) { workerIdToStartTimeAndPort . remove ( oldWorkerIds ) ; LOG . info ( "A port is still occupied by an old worker, remove useless " + oldWorkerIds + " from workerIdToStartTimeAndPort" ) ; } portToWorkerId . put ( entry . getKey ( ) , entry . getValue ( ) ) ; workerIdToStartTimeAndPort . put ( entry . getValue ( ) , new Pair < > ( startTime , entry . getKey ( ) ) ) ; } }
mark all new Workers like 52b11418 - 7474 - 446d - bff5 - 0ecd68f4954f
35,598
public void checkNewWorkers ( Map conf ) throws IOException , InterruptedException { Set < String > workers = new HashSet < > ( ) ; for ( Entry < String , Pair < Integer , Integer > > entry : workerIdToStartTimeAndPort . entrySet ( ) ) { String workerId = entry . getKey ( ) ; int startTime = entry . getValue ( ) . getFirst ( ) ; LocalState ls = StormConfig . worker_state ( conf , workerId ) ; WorkerHeartbeat whb = ( WorkerHeartbeat ) ls . get ( Common . LS_WORKER_HEARTBEAT ) ; if ( whb == null ) { if ( ( TimeUtils . current_time_secs ( ) - startTime ) < JStormUtils . parseInt ( conf . get ( Config . SUPERVISOR_WORKER_START_TIMEOUT_SECS ) ) ) { LOG . info ( workerId + " still hasn't started" ) ; } else { LOG . error ( "Failed to start Worker " + workerId ) ; workers . add ( workerId ) ; } } else { LOG . info ( "Successfully started worker " + workerId ) ; workers . add ( workerId ) ; } } for ( String workerId : workers ) { Integer port = this . workerIdToStartTimeAndPort . get ( workerId ) . getSecond ( ) ; this . workerIdToStartTimeAndPort . remove ( workerId ) ; this . portToWorkerId . remove ( port ) ; } }
check whether timestamp of new workers is not > SUPERVISOR_WORKER_START_TIMEOUT_SECS otherwise mark as failed
35,599
public boolean matchesAssignment ( WorkerHeartbeat whb , Map < Integer , LocalAssignment > assignedTasks ) { boolean isMatch = true ; LocalAssignment localAssignment = assignedTasks . get ( whb . getPort ( ) ) ; if ( localAssignment == null ) { LOG . debug ( "Following worker has been removed, port=" + whb . getPort ( ) + ", assignedTasks=" + assignedTasks ) ; isMatch = false ; } else if ( ! whb . getTopologyId ( ) . equals ( localAssignment . getTopologyId ( ) ) ) { LOG . info ( "topology id not equal whb=" + whb . getTopologyId ( ) + ",localAssignment=" + localAssignment . getTopologyId ( ) ) ; isMatch = false ; } return isMatch ; }
check whether the worker heartbeat is allowed in the assignedTasks