idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
15,300 | public static SlotProfile preferredLocality ( ResourceProfile resourceProfile , Collection < TaskManagerLocation > preferredLocations ) { return new SlotProfile ( resourceProfile , preferredLocations , Collections . emptyList ( ) ) ; } | Returns a slot profile for the given resource profile and the preferred locations . |
15,301 | public static SlotProfile priorAllocation ( ResourceProfile resourceProfile , Collection < AllocationID > priorAllocations ) { return new SlotProfile ( resourceProfile , Collections . emptyList ( ) , priorAllocations ) ; } | Returns a slot profile for the given resource profile and the prior allocations . |
15,302 | public final void sendCombinedMessage ( Message combinedMessage ) { outValue . f1 = Either . Right ( combinedMessage ) ; out . collect ( outValue ) ; } | Sends the combined message to the target vertex . |
15,303 | public void aggregate ( T value ) { if ( value == null ) { nullCount ++ ; } else if ( isNan ( value ) ) { nanCount ++ ; } else if ( isInfinite ( value ) ) { infinityCount ++ ; } else { nonMissingCount ++ ; min . aggregate ( value ) ; max . aggregate ( value ) ; sum . aggregate ( value ) ; double doubleValue = value . doubleValue ( ) ; double delta = doubleValue - mean . value ( ) ; mean = mean . add ( delta / nonMissingCount ) ; m2 = m2 . add ( delta * ( doubleValue - mean . value ( ) ) ) ; } } | Add a value to the current aggregation . |
15,304 | public void combine ( Aggregator < T , NumericColumnSummary < T > > otherSameType ) { NumericSummaryAggregator < T > other = ( NumericSummaryAggregator < T > ) otherSameType ; nullCount += other . nullCount ; nanCount += other . nanCount ; infinityCount += other . infinityCount ; if ( nonMissingCount == 0 ) { nonMissingCount = other . nonMissingCount ; min = other . min ; max = other . max ; sum = other . sum ; mean = other . mean ; m2 = other . m2 ; } else if ( other . nonMissingCount != 0 ) { long combinedCount = nonMissingCount + other . nonMissingCount ; min . combine ( other . min ) ; max . combine ( other . max ) ; sum . combine ( other . sum ) ; double deltaMean = other . mean . value ( ) - mean . value ( ) ; mean = mean . add ( deltaMean * other . nonMissingCount / combinedCount ) ; m2 = m2 . add ( other . m2 ) . add ( deltaMean * deltaMean * nonMissingCount * other . nonMissingCount / combinedCount ) ; nonMissingCount = combinedCount ; } } | combine two aggregations . |
15,305 | public CassandraSink < IN > name ( String name ) { if ( useDataStreamSink ) { getSinkTransformation ( ) . setName ( name ) ; } else { getStreamTransformation ( ) . setName ( name ) ; } return this ; } | Sets the name of this sink . This name is used by the visualization and logging during runtime . |
15,306 | public CassandraSink < IN > uid ( String uid ) { if ( useDataStreamSink ) { getSinkTransformation ( ) . setUid ( uid ) ; } else { getStreamTransformation ( ) . setUid ( uid ) ; } return this ; } | Sets an ID for this operator . |
15,307 | public CassandraSink < IN > setParallelism ( int parallelism ) { if ( useDataStreamSink ) { getSinkTransformation ( ) . setParallelism ( parallelism ) ; } else { getStreamTransformation ( ) . setParallelism ( parallelism ) ; } return this ; } | Sets the parallelism for this sink . The degree must be higher than zero . |
15,308 | public CassandraSink < IN > slotSharingGroup ( String slotSharingGroup ) { if ( useDataStreamSink ) { getSinkTransformation ( ) . setSlotSharingGroup ( slotSharingGroup ) ; } else { getStreamTransformation ( ) . setSlotSharingGroup ( slotSharingGroup ) ; } return this ; } | Sets the slot sharing group of this operation . Parallel instances of operations that are in the same slot sharing group will be co - located in the same TaskManager slot if possible . |
15,309 | public final V put ( K key , V value ) { final int hash = hash ( key ) ; final int slot = indexOf ( hash ) ; for ( Entry < K , V > e = table [ slot ] ; e != null ; e = e . next ) { Object k ; if ( e . hashCode == hash && ( ( k = e . key ) == key || key . equals ( k ) ) ) { V old = e . value ; e . value = value ; return old ; } } insertNewEntry ( hash , key , value , slot ) ; return null ; } | Inserts the given value mapped under the given key . If the table already contains a value for the key the value is replaced and returned . If no value is contained yet the function returns null . |
15,310 | public final V putIfAbsent ( K key , LazyFactory < V > factory ) { final int hash = hash ( key ) ; final int slot = indexOf ( hash ) ; for ( Entry < K , V > entry = table [ slot ] ; entry != null ; entry = entry . next ) { if ( entry . hashCode == hash && entry . key . equals ( key ) ) { return entry . value ; } } V value = factory . create ( ) ; insertNewEntry ( hash , key , value , slot ) ; return value ; } | Inserts a value for the given key if no value is yet contained for that key . Otherwise returns the value currently contained for the key . |
15,311 | public V get ( K key ) { final int hash = hash ( key ) ; final int slot = indexOf ( hash ) ; for ( Entry < K , V > entry = table [ slot ] ; entry != null ; entry = entry . next ) { if ( entry . hashCode == hash && entry . key . equals ( key ) ) { return entry . value ; } } return null ; } | Looks up the value mapped under the given key . Returns null if no value is mapped under this key . |
15,312 | public Iterator < Entry < K , V > > iterator ( ) { return new Iterator < Entry < K , V > > ( ) { private final Entry < K , V > [ ] tab = KeyMap . this . table ; private Entry < K , V > nextEntry ; private int nextPos = 0 ; public boolean hasNext ( ) { if ( nextEntry != null ) { return true ; } else { while ( nextPos < tab . length ) { Entry < K , V > e = tab [ nextPos ++ ] ; if ( e != null ) { nextEntry = e ; return true ; } } return false ; } } public Entry < K , V > next ( ) { if ( nextEntry != null || hasNext ( ) ) { Entry < K , V > e = nextEntry ; nextEntry = nextEntry . next ; return e ; } else { throw new NoSuchElementException ( ) ; } } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; } | Creates an iterator over the entries of this map . |
15,313 | public static < K , V > void traverseMaps ( final KeyMap < K , V > [ ] maps , final TraversalEvaluator < K , V > visitor , final long touchedTag ) throws Exception { Arrays . sort ( maps , CapacityDescendingComparator . INSTANCE ) ; final int [ ] shifts = new int [ maps . length ] ; final int [ ] lowBitsMask = new int [ maps . length ] ; final int numSlots = maps [ 0 ] . table . length ; final int numTables = maps . length ; for ( int i = 0 ; i < numTables ; i ++ ) { shifts [ i ] = maps [ 0 ] . log2size - maps [ i ] . log2size ; lowBitsMask [ i ] = ( 1 << shifts [ i ] ) - 1 ; } for ( int pos = 0 ; pos < numSlots ; pos ++ ) { int mask ; for ( int rootTable = 0 ; rootTable < numTables && ( ( mask = lowBitsMask [ rootTable ] ) & pos ) == mask ; rootTable ++ ) { Entry < K , V > entry = maps [ rootTable ] . table [ pos >> shifts [ rootTable ] ] ; while ( entry != null ) { if ( entry . touchedTag < touchedTag ) { entry . touchedTag = touchedTag ; final K key = entry . key ; final int hashCode = entry . hashCode ; visitor . startNewKey ( key ) ; visitor . nextValue ( entry . value ) ; addEntriesFromChain ( entry . next , visitor , key , touchedTag , hashCode ) ; for ( int followupTable = rootTable + 1 ; followupTable < numTables ; followupTable ++ ) { Entry < K , V > followupEntry = maps [ followupTable ] . table [ pos >> shifts [ followupTable ] ] ; if ( followupEntry != null ) { addEntriesFromChain ( followupEntry , visitor , key , touchedTag , hashCode ) ; } } visitor . keyDone ( ) ; } entry = entry . next ; } } } } | Performs a traversal about logical the multi - map that results from the union of the given maps . This method does not actually build a union of the map but traverses the hash maps together . |
15,314 | public void initializeBufferMetrics ( Task task ) { final MetricGroup buffers = addGroup ( "buffers" ) ; buffers . gauge ( "inputQueueLength" , new InputBuffersGauge ( task ) ) ; buffers . gauge ( "outputQueueLength" , new OutputBuffersGauge ( task ) ) ; buffers . gauge ( "inPoolUsage" , new InputBufferPoolUsageGauge ( task ) ) ; buffers . gauge ( "outPoolUsage" , new OutputBufferPoolUsageGauge ( task ) ) ; } | Initialize Buffer Metrics for a task . |
15,315 | public KeyGroupRange getIntersection ( KeyGroupRange other ) { int start = Math . max ( startKeyGroup , other . startKeyGroup ) ; int end = Math . min ( endKeyGroup , other . endKeyGroup ) ; return start <= end ? new KeyGroupRange ( start , end ) : EMPTY_KEY_GROUP_RANGE ; } | Create a range that represent the intersection between this range and the given range . |
15,316 | public static KeyGroupRange of ( int startKeyGroup , int endKeyGroup ) { return startKeyGroup <= endKeyGroup ? new KeyGroupRange ( startKeyGroup , endKeyGroup ) : EMPTY_KEY_GROUP_RANGE ; } | Factory method that also handles creation of empty key - groups . |
15,317 | protected Collector < OT > createSolutionSetUpdateOutputCollector ( Collector < OT > delegate ) { Broker < Object > solutionSetBroker = SolutionSetBroker . instance ( ) ; Object ss = solutionSetBroker . get ( brokerKey ( ) ) ; if ( ss instanceof CompactingHashTable ) { @ SuppressWarnings ( "unchecked" ) CompactingHashTable < OT > solutionSet = ( CompactingHashTable < OT > ) ss ; return new SolutionSetUpdateOutputCollector < OT > ( solutionSet , delegate ) ; } else if ( ss instanceof JoinHashMap ) { @ SuppressWarnings ( "unchecked" ) JoinHashMap < OT > map = ( JoinHashMap < OT > ) ss ; return new SolutionSetObjectsUpdateOutputCollector < OT > ( map , delegate ) ; } else { throw new RuntimeException ( "Unrecognized solution set handle: " + ss ) ; } } | Creates a new solution set update output collector . |
15,318 | public static void terminateRpcService ( RpcService rpcService , Time timeout ) throws InterruptedException , ExecutionException , TimeoutException { rpcService . stopService ( ) . get ( timeout . toMilliseconds ( ) , TimeUnit . MILLISECONDS ) ; } | Shuts the given rpc service down and waits for its termination . |
15,319 | public static void terminateRpcServices ( Time timeout , RpcService ... rpcServices ) throws InterruptedException , ExecutionException , TimeoutException { final Collection < CompletableFuture < ? > > terminationFutures = new ArrayList < > ( rpcServices . length ) ; for ( RpcService service : rpcServices ) { if ( service != null ) { terminationFutures . add ( service . stopService ( ) ) ; } } FutureUtils . waitForAll ( terminationFutures ) . get ( timeout . toMilliseconds ( ) , TimeUnit . MILLISECONDS ) ; } | Shuts the given rpc services down and waits for their termination . |
15,320 | public void setBroadcastInputs ( List < NamedChannel > broadcastInputs ) { if ( broadcastInputs != null ) { this . broadcastInputs = broadcastInputs ; for ( NamedChannel nc : broadcastInputs ) { PlanNode source = nc . getSource ( ) ; mergeBranchPlanMaps ( branchPlan , source . branchPlan ) ; } } if ( this . template . hasUnclosedBranches ( ) ) { if ( this . branchPlan == null ) { throw new CompilerException ( "Branching and rejoining logic did not find a candidate for the branching point." ) ; } for ( UnclosedBranchDescriptor uc : this . template . getOpenBranches ( ) ) { OptimizerNode brancher = uc . getBranchingNode ( ) ; if ( this . branchPlan . get ( brancher ) == null ) { throw new CompilerException ( "Branching and rejoining logic did not find a candidate for the branching point." ) ; } } } } | Sets a list of all broadcast inputs attached to this node . |
15,321 | private void stopResources ( boolean waitForShutdown ) throws InterruptedException { emitter . stop ( ) ; emitterThread . interrupt ( ) ; executor . shutdown ( ) ; if ( waitForShutdown ) { try { if ( ! executor . awaitTermination ( 365L , TimeUnit . DAYS ) ) { executor . shutdownNow ( ) ; } } catch ( InterruptedException e ) { executor . shutdownNow ( ) ; Thread . currentThread ( ) . interrupt ( ) ; } if ( Thread . holdsLock ( checkpointingLock ) ) { while ( emitterThread . isAlive ( ) ) { checkpointingLock . wait ( 100L ) ; } } emitterThread . join ( ) ; } else { executor . shutdownNow ( ) ; } } | Close the operator s resources . They include the emitter thread and the executor to run the queue s complete operation . |
15,322 | private < T > void addAsyncBufferEntry ( StreamElementQueueEntry < T > streamElementQueueEntry ) throws InterruptedException { assert ( Thread . holdsLock ( checkpointingLock ) ) ; pendingStreamElementQueueEntry = streamElementQueueEntry ; while ( ! queue . tryPut ( streamElementQueueEntry ) ) { checkpointingLock . wait ( ) ; } pendingStreamElementQueueEntry = null ; } | Add the given stream element queue entry to the operator s stream element queue . This operation blocks until the element has been added . |
15,323 | private static void assertNotEndOfInput ( final JsonParser p , final JsonToken jsonToken ) { checkState ( jsonToken != null , "Unexpected end of input at %s" , p . getCurrentLocation ( ) ) ; } | Asserts that the provided JsonToken is not null i . e . not at the end of the input . |
15,324 | public void setFirstInputs ( List < Operator < IN1 > > inputs ) { this . input1 = Operator . createUnionCascade ( inputs ) ; } | Sets the first input to the union of the given operators . |
15,325 | public void setSecondInputs ( List < Operator < IN2 > > inputs ) { this . input2 = Operator . createUnionCascade ( inputs ) ; } | Sets the second input to the union of the given operators . |
15,326 | private static void updateJobOverview ( File webOverviewDir , File webDir ) { try ( JsonGenerator gen = jacksonFactory . createGenerator ( HistoryServer . createOrGetFile ( webDir , JobsOverviewHeaders . URL ) ) ) { File [ ] overviews = new File ( webOverviewDir . getPath ( ) ) . listFiles ( ) ; if ( overviews != null ) { Collection < JobDetails > allJobs = new ArrayList < > ( overviews . length ) ; for ( File overview : overviews ) { MultipleJobsDetails subJobs = mapper . readValue ( overview , MultipleJobsDetails . class ) ; allJobs . addAll ( subJobs . getJobs ( ) ) ; } mapper . writeValue ( gen , new MultipleJobsDetails ( allJobs ) ) ; } } catch ( IOException ioe ) { LOG . error ( "Failed to update job overview." , ioe ) ; } } | This method replicates the JSON response that would be given by the JobsOverviewHandler when listing both running and finished jobs . |
15,327 | public Optional < Tuple2 < String , UUID > > getLeaderNow ( ) throws Exception { CompletableFuture < Tuple2 < String , UUID > > leaderFuture = this . atomicLeaderFuture . get ( ) ; if ( leaderFuture != null ) { if ( leaderFuture . isDone ( ) ) { return Optional . of ( leaderFuture . get ( ) ) ; } else { return Optional . empty ( ) ; } } else { return Optional . empty ( ) ; } } | Returns the current leader information if available . Otherwise it returns an empty optional . |
15,328 | public static < E extends Enum < E > > TypeInformation < E > ENUM ( Class < E > enumType ) { return new EnumTypeInfo < > ( enumType ) ; } | Returns type information for Java enumerations . Null values are not supported . |
15,329 | public JoinOperatorSetsPredicateBase where ( String ... fields ) { return new JoinOperatorSetsPredicateBase ( new Keys . ExpressionKeys < > ( fields , input1 . getType ( ) ) ) ; } | Continues a Join transformation . |
15,330 | public InstanceID registerTaskManager ( TaskManagerGateway taskManagerGateway , TaskManagerLocation taskManagerLocation , HardwareDescription resources , int numberOfSlots ) { synchronized ( this . lock ) { if ( this . isShutdown ) { throw new IllegalStateException ( "InstanceManager is shut down." ) ; } Instance prior = registeredHostsByResource . get ( taskManagerLocation . getResourceID ( ) ) ; if ( prior != null ) { throw new IllegalStateException ( "Registration attempt from TaskManager at " + taskManagerLocation . addressString ( ) + ". This connection is already registered under ID " + prior . getId ( ) ) ; } boolean wasDead = this . deadHosts . remove ( taskManagerLocation . getResourceID ( ) ) ; if ( wasDead ) { LOG . info ( "Registering TaskManager at " + taskManagerLocation . addressString ( ) + " which was marked as dead earlier because of a heart-beat timeout." ) ; } InstanceID instanceID = new InstanceID ( ) ; Instance host = new Instance ( taskManagerGateway , taskManagerLocation , instanceID , resources , numberOfSlots ) ; registeredHostsById . put ( instanceID , host ) ; registeredHostsByResource . put ( taskManagerLocation . getResourceID ( ) , host ) ; totalNumberOfAliveTaskSlots += numberOfSlots ; if ( LOG . isInfoEnabled ( ) ) { LOG . info ( String . format ( "Registered TaskManager at %s (%s) as %s. " + "Current number of registered hosts is %d. " + "Current number of alive task slots is %d." , taskManagerLocation . getHostname ( ) , taskManagerGateway . getAddress ( ) , instanceID , registeredHostsById . size ( ) , totalNumberOfAliveTaskSlots ) ) ; } host . reportHeartBeat ( ) ; notifyNewInstance ( host ) ; return instanceID ; } } | Registers a task manager . Registration of a task manager makes it available to be used for the job execution . |
15,331 | public void unregisterAllTaskManagers ( ) { for ( Instance instance : registeredHostsById . values ( ) ) { deadHosts . add ( instance . getTaskManagerID ( ) ) ; instance . markDead ( ) ; totalNumberOfAliveTaskSlots -= instance . getTotalNumberOfSlots ( ) ; notifyDeadInstance ( instance ) ; } registeredHostsById . clear ( ) ; registeredHostsByResource . clear ( ) ; } | Unregisters all currently registered TaskManagers from the InstanceManager . |
15,332 | public static Row of ( Object ... values ) { Row row = new Row ( values . length ) ; for ( int i = 0 ; i < values . length ; i ++ ) { row . setField ( i , values [ i ] ) ; } return row ; } | Creates a new Row and assigns the given values to the Row s fields . This is more convenient than using the constructor . |
15,333 | public static Row copy ( Row row ) { final Row newRow = new Row ( row . fields . length ) ; System . arraycopy ( row . fields , 0 , newRow . fields , 0 , row . fields . length ) ; return newRow ; } | Creates a new Row which copied from another row . This method does not perform a deep copy . |
15,334 | public static Row project ( Row row , int [ ] fields ) { final Row newRow = new Row ( fields . length ) ; for ( int i = 0 ; i < fields . length ; i ++ ) { newRow . fields [ i ] = row . fields [ fields [ i ] ] ; } return newRow ; } | Creates a new Row with projected fields from another row . This method does not perform a deep copy . |
15,335 | public static void copyToUnsafe ( MemorySegment [ ] segments , int offset , Object target , int pointer , int numBytes ) { if ( inFirstSegment ( segments , offset , numBytes ) ) { segments [ 0 ] . copyToUnsafe ( offset , target , pointer , numBytes ) ; } else { copyMultiSegmentsToUnsafe ( segments , offset , target , pointer , numBytes ) ; } } | Copy segments to target unsafe pointer . |
15,336 | public static byte [ ] getBytes ( MemorySegment [ ] segments , int baseOffset , int sizeInBytes ) { if ( segments . length == 1 ) { byte [ ] heapMemory = segments [ 0 ] . getHeapMemory ( ) ; if ( baseOffset == 0 && heapMemory != null && heapMemory . length == sizeInBytes ) { return heapMemory ; } else { byte [ ] bytes = new byte [ sizeInBytes ] ; segments [ 0 ] . get ( baseOffset , bytes , 0 , sizeInBytes ) ; return bytes ; } } else { byte [ ] bytes = new byte [ sizeInBytes ] ; copyMultiSegmentsToBytes ( segments , baseOffset , bytes , 0 , sizeInBytes ) ; return bytes ; } } | Maybe not copied if want copy please use copyTo . |
15,337 | public static int hashByWords ( MemorySegment [ ] segments , int offset , int numBytes ) { if ( inFirstSegment ( segments , offset , numBytes ) ) { return MurmurHashUtil . hashBytesByWords ( segments [ 0 ] , offset , numBytes ) ; } else { return hashMultiSegByWords ( segments , offset , numBytes ) ; } } | hash segments to int numBytes must be aligned to 4 bytes . |
15,338 | public static int hash ( MemorySegment [ ] segments , int offset , int numBytes ) { if ( inFirstSegment ( segments , offset , numBytes ) ) { return MurmurHashUtil . hashBytes ( segments [ 0 ] , offset , numBytes ) ; } else { return hashMultiSeg ( segments , offset , numBytes ) ; } } | hash segments to int . |
15,339 | public static void bitUnSet ( MemorySegment segment , int baseOffset , int index ) { int offset = baseOffset + ( ( index & BIT_BYTE_POSITION_MASK ) >>> 3 ) ; byte current = segment . get ( offset ) ; current &= ~ ( 1 << ( index & BIT_BYTE_INDEX_MASK ) ) ; segment . put ( offset , current ) ; } | unset bit . |
15,340 | public static boolean bitGet ( MemorySegment segment , int baseOffset , int index ) { int offset = baseOffset + ( ( index & BIT_BYTE_POSITION_MASK ) >>> 3 ) ; byte current = segment . get ( offset ) ; return ( current & ( 1 << ( index & BIT_BYTE_INDEX_MASK ) ) ) != 0 ; } | read bit . |
15,341 | public static void bitUnSet ( MemorySegment [ ] segments , int baseOffset , int index ) { if ( segments . length == 1 ) { MemorySegment segment = segments [ 0 ] ; int offset = baseOffset + ( ( index & BIT_BYTE_POSITION_MASK ) >>> 3 ) ; byte current = segment . get ( offset ) ; current &= ~ ( 1 << ( index & BIT_BYTE_INDEX_MASK ) ) ; segment . put ( offset , current ) ; } else { bitUnSetMultiSegments ( segments , baseOffset , index ) ; } } | unset bit from segments . |
15,342 | public static void bitSet ( MemorySegment [ ] segments , int baseOffset , int index ) { if ( segments . length == 1 ) { int offset = baseOffset + ( ( index & BIT_BYTE_POSITION_MASK ) >>> 3 ) ; MemorySegment segment = segments [ 0 ] ; byte current = segment . get ( offset ) ; current |= ( 1 << ( index & BIT_BYTE_INDEX_MASK ) ) ; segment . put ( offset , current ) ; } else { bitSetMultiSegments ( segments , baseOffset , index ) ; } } | set bit from segments . |
15,343 | public static boolean bitGet ( MemorySegment [ ] segments , int baseOffset , int index ) { int offset = baseOffset + ( ( index & BIT_BYTE_POSITION_MASK ) >>> 3 ) ; byte current = getByte ( segments , offset ) ; return ( current & ( 1 << ( index & BIT_BYTE_INDEX_MASK ) ) ) != 0 ; } | read bit from segments . |
15,344 | public static boolean getBoolean ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 1 ) ) { return segments [ 0 ] . getBoolean ( offset ) ; } else { return getBooleanMultiSegments ( segments , offset ) ; } } | get boolean from segments . |
15,345 | public static void setBoolean ( MemorySegment [ ] segments , int offset , boolean value ) { if ( inFirstSegment ( segments , offset , 1 ) ) { segments [ 0 ] . putBoolean ( offset , value ) ; } else { setBooleanMultiSegments ( segments , offset , value ) ; } } | set boolean from segments . |
15,346 | public static byte getByte ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 1 ) ) { return segments [ 0 ] . get ( offset ) ; } else { return getByteMultiSegments ( segments , offset ) ; } } | get byte from segments . |
15,347 | public static void setByte ( MemorySegment [ ] segments , int offset , byte value ) { if ( inFirstSegment ( segments , offset , 1 ) ) { segments [ 0 ] . put ( offset , value ) ; } else { setByteMultiSegments ( segments , offset , value ) ; } } | set byte from segments . |
15,348 | public static int getInt ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 4 ) ) { return segments [ 0 ] . getInt ( offset ) ; } else { return getIntMultiSegments ( segments , offset ) ; } } | get int from segments . |
15,349 | public static void setInt ( MemorySegment [ ] segments , int offset , int value ) { if ( inFirstSegment ( segments , offset , 4 ) ) { segments [ 0 ] . putInt ( offset , value ) ; } else { setIntMultiSegments ( segments , offset , value ) ; } } | set int from segments . |
15,350 | public static long getLong ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 8 ) ) { return segments [ 0 ] . getLong ( offset ) ; } else { return getLongMultiSegments ( segments , offset ) ; } } | get long from segments . |
15,351 | public static void setLong ( MemorySegment [ ] segments , int offset , long value ) { if ( inFirstSegment ( segments , offset , 8 ) ) { segments [ 0 ] . putLong ( offset , value ) ; } else { setLongMultiSegments ( segments , offset , value ) ; } } | set long from segments . |
15,352 | public static short getShort ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 2 ) ) { return segments [ 0 ] . getShort ( offset ) ; } else { return getShortMultiSegments ( segments , offset ) ; } } | get short from segments . |
15,353 | public static void setShort ( MemorySegment [ ] segments , int offset , short value ) { if ( inFirstSegment ( segments , offset , 2 ) ) { segments [ 0 ] . putShort ( offset , value ) ; } else { setShortMultiSegments ( segments , offset , value ) ; } } | set short from segments . |
15,354 | public static float getFloat ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 4 ) ) { return segments [ 0 ] . getFloat ( offset ) ; } else { return getFloatMultiSegments ( segments , offset ) ; } } | get float from segments . |
15,355 | public static void setFloat ( MemorySegment [ ] segments , int offset , float value ) { if ( inFirstSegment ( segments , offset , 4 ) ) { segments [ 0 ] . putFloat ( offset , value ) ; } else { setFloatMultiSegments ( segments , offset , value ) ; } } | set float from segments . |
15,356 | public static double getDouble ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 8 ) ) { return segments [ 0 ] . getDouble ( offset ) ; } else { return getDoubleMultiSegments ( segments , offset ) ; } } | get double from segments . |
15,357 | public static void setDouble ( MemorySegment [ ] segments , int offset , double value ) { if ( inFirstSegment ( segments , offset , 8 ) ) { segments [ 0 ] . putDouble ( offset , value ) ; } else { setDoubleMultiSegments ( segments , offset , value ) ; } } | set double from segments . |
15,358 | public static char getChar ( MemorySegment [ ] segments , int offset ) { if ( inFirstSegment ( segments , offset , 2 ) ) { return segments [ 0 ] . getChar ( offset ) ; } else { return getCharMultiSegments ( segments , offset ) ; } } | get char from segments . |
15,359 | public static void setChar ( MemorySegment [ ] segments , int offset , char value ) { if ( inFirstSegment ( segments , offset , 2 ) ) { segments [ 0 ] . putChar ( offset , value ) ; } else { setCharMultiSegments ( segments , offset , value ) ; } } | set char from segments . |
15,360 | public static int find ( MemorySegment [ ] segments1 , int offset1 , int numBytes1 , MemorySegment [ ] segments2 , int offset2 , int numBytes2 ) { if ( numBytes2 == 0 ) { return offset1 ; } if ( inFirstSegment ( segments1 , offset1 , numBytes1 ) && inFirstSegment ( segments2 , offset2 , numBytes2 ) ) { byte first = segments2 [ 0 ] . get ( offset2 ) ; int end = numBytes1 - numBytes2 + offset1 ; for ( int i = offset1 ; i <= end ; i ++ ) { if ( segments1 [ 0 ] . get ( i ) == first && segments1 [ 0 ] . equalTo ( segments2 [ 0 ] , i , offset2 , numBytes2 ) ) { return i ; } } return - 1 ; } else { return findInMultiSegments ( segments1 , offset1 , numBytes1 , segments2 , offset2 , numBytes2 ) ; } } | Find equal segments2 in segments1 . |
15,361 | public static void initialize ( Configuration config , PluginManager pluginManager ) throws IllegalConfigurationException { LOCK . lock ( ) ; try { CACHE . clear ( ) ; FS_FACTORIES . clear ( ) ; Collection < Supplier < Iterator < FileSystemFactory > > > factorySuppliers = new ArrayList < > ( 2 ) ; factorySuppliers . add ( ( ) -> ServiceLoader . load ( FileSystemFactory . class ) . iterator ( ) ) ; if ( pluginManager != null ) { factorySuppliers . add ( ( ) -> pluginManager . load ( FileSystemFactory . class ) ) ; } final List < FileSystemFactory > fileSystemFactories = loadFileSystemFactories ( factorySuppliers ) ; for ( FileSystemFactory factory : fileSystemFactories ) { factory . configure ( config ) ; String scheme = factory . getScheme ( ) ; FileSystemFactory fsf = ConnectionLimitingFactory . decorateIfLimited ( factory , scheme , config ) ; FS_FACTORIES . put ( scheme , fsf ) ; } FALLBACK_FACTORY . configure ( config ) ; final String stringifiedUri = config . getString ( CoreOptions . DEFAULT_FILESYSTEM_SCHEME , null ) ; if ( stringifiedUri == null ) { defaultScheme = null ; } else { try { defaultScheme = new URI ( stringifiedUri ) ; } catch ( URISyntaxException e ) { throw new IllegalConfigurationException ( "The default file system scheme ('" + CoreOptions . DEFAULT_FILESYSTEM_SCHEME + "') is invalid: " + stringifiedUri , e ) ; } } } finally { LOCK . unlock ( ) ; } } | Initializes the shared file system settings . |
15,362 | public boolean exists ( final Path f ) throws IOException { try { return ( getFileStatus ( f ) != null ) ; } catch ( FileNotFoundException e ) { return false ; } } | Check if exists . |
15,363 | private static FileSystemFactory loadHadoopFsFactory ( ) { final ClassLoader cl = FileSystem . class . getClassLoader ( ) ; final Class < ? extends FileSystemFactory > factoryClass ; try { factoryClass = Class . forName ( "org.apache.flink.runtime.fs.hdfs.HadoopFsFactory" , false , cl ) . asSubclass ( FileSystemFactory . class ) ; } catch ( ClassNotFoundException e ) { LOG . info ( "No Flink runtime dependency present. " + "The extended set of supported File Systems via Hadoop is not available." ) ; return new UnsupportedSchemeFactory ( "Flink runtime classes missing in classpath/dependencies." ) ; } catch ( Exception | LinkageError e ) { LOG . warn ( "Flink's Hadoop file system factory could not be loaded" , e ) ; return new UnsupportedSchemeFactory ( "Flink's Hadoop file system factory could not be loaded" , e ) ; } try { Class . forName ( "org.apache.hadoop.conf.Configuration" , false , cl ) ; Class . forName ( "org.apache.hadoop.fs.FileSystem" , false , cl ) ; } catch ( ClassNotFoundException e ) { LOG . info ( "Hadoop is not in the classpath/dependencies. " + "The extended set of supported File Systems via Hadoop is not available." ) ; return new UnsupportedSchemeFactory ( "Hadoop is not in the classpath/dependencies." ) ; } try { return factoryClass . newInstance ( ) ; } catch ( Exception | LinkageError e ) { LOG . warn ( "Flink's Hadoop file system factory could not be created" , e ) ; return new UnsupportedSchemeFactory ( "Flink's Hadoop file system factory could not be created" , e ) ; } } | Utility loader for the Hadoop file system factory . We treat the Hadoop FS factory in a special way because we use it as a catch all for file systems schemes not supported directly in Flink . |
15,364 | @ SuppressWarnings ( "unchecked" ) public void startRegistration ( ) { if ( canceled ) { return ; } try { final CompletableFuture < G > rpcGatewayFuture ; if ( FencedRpcGateway . class . isAssignableFrom ( targetType ) ) { rpcGatewayFuture = ( CompletableFuture < G > ) rpcService . connect ( targetAddress , fencingToken , targetType . asSubclass ( FencedRpcGateway . class ) ) ; } else { rpcGatewayFuture = rpcService . connect ( targetAddress , targetType ) ; } CompletableFuture < Void > rpcGatewayAcceptFuture = rpcGatewayFuture . thenAcceptAsync ( ( G rpcGateway ) -> { log . info ( "Resolved {} address, beginning registration" , targetName ) ; register ( rpcGateway , 1 , retryingRegistrationConfiguration . getInitialRegistrationTimeoutMillis ( ) ) ; } , rpcService . getExecutor ( ) ) ; rpcGatewayAcceptFuture . whenCompleteAsync ( ( Void v , Throwable failure ) -> { if ( failure != null && ! canceled ) { final Throwable strippedFailure = ExceptionUtils . stripCompletionException ( failure ) ; if ( log . isDebugEnabled ( ) ) { log . debug ( "Could not resolve {} address {}, retrying in {} ms." , targetName , targetAddress , retryingRegistrationConfiguration . getErrorDelayMillis ( ) , strippedFailure ) ; } else { log . info ( "Could not resolve {} address {}, retrying in {} ms: {}." , targetName , targetAddress , retryingRegistrationConfiguration . getErrorDelayMillis ( ) , strippedFailure . getMessage ( ) ) ; } startRegistrationLater ( retryingRegistrationConfiguration . getErrorDelayMillis ( ) ) ; } } , rpcService . getExecutor ( ) ) ; } catch ( Throwable t ) { completionFuture . completeExceptionally ( t ) ; cancel ( ) ; } } | This method resolves the target address to a callable gateway and starts the registration after that . |
15,365 | @ SuppressWarnings ( "unchecked" ) private void register ( final G gateway , final int attempt , final long timeoutMillis ) { if ( canceled ) { return ; } try { log . info ( "Registration at {} attempt {} (timeout={}ms)" , targetName , attempt , timeoutMillis ) ; CompletableFuture < RegistrationResponse > registrationFuture = invokeRegistration ( gateway , fencingToken , timeoutMillis ) ; CompletableFuture < Void > registrationAcceptFuture = registrationFuture . thenAcceptAsync ( ( RegistrationResponse result ) -> { if ( ! isCanceled ( ) ) { if ( result instanceof RegistrationResponse . Success ) { S success = ( S ) result ; completionFuture . complete ( Tuple2 . of ( gateway , success ) ) ; } else { if ( result instanceof RegistrationResponse . Decline ) { RegistrationResponse . Decline decline = ( RegistrationResponse . Decline ) result ; log . info ( "Registration at {} was declined: {}" , targetName , decline . getReason ( ) ) ; } else { log . error ( "Received unknown response to registration attempt: {}" , result ) ; } log . info ( "Pausing and re-attempting registration in {} ms" , retryingRegistrationConfiguration . getRefusedDelayMillis ( ) ) ; registerLater ( gateway , 1 , retryingRegistrationConfiguration . getInitialRegistrationTimeoutMillis ( ) , retryingRegistrationConfiguration . getRefusedDelayMillis ( ) ) ; } } } , rpcService . getExecutor ( ) ) ; registrationAcceptFuture . whenCompleteAsync ( ( Void v , Throwable failure ) -> { if ( failure != null && ! isCanceled ( ) ) { if ( ExceptionUtils . stripCompletionException ( failure ) instanceof TimeoutException ) { if ( log . isDebugEnabled ( ) ) { log . debug ( "Registration at {} ({}) attempt {} timed out after {} ms" , targetName , targetAddress , attempt , timeoutMillis ) ; } long newTimeoutMillis = Math . min ( 2 * timeoutMillis , retryingRegistrationConfiguration . getMaxRegistrationTimeoutMillis ( ) ) ; register ( gateway , attempt + 1 , newTimeoutMillis ) ; } else { log . 
error ( "Registration at {} failed due to an error" , targetName , failure ) ; log . info ( "Pausing and re-attempting registration in {} ms" , retryingRegistrationConfiguration . getErrorDelayMillis ( ) ) ; registerLater ( gateway , 1 , retryingRegistrationConfiguration . getInitialRegistrationTimeoutMillis ( ) , retryingRegistrationConfiguration . getErrorDelayMillis ( ) ) ; } } } , rpcService . getExecutor ( ) ) ; } catch ( Throwable t ) { completionFuture . completeExceptionally ( t ) ; cancel ( ) ; } } | This method performs a registration attempt and triggers either a success notification or a retry depending on the result . |
15,366 | public void exceptionCaught ( ChannelHandlerContext ctx , Throwable cause ) throws Exception { if ( cause instanceof TransportException ) { notifyAllChannelsOfErrorAndClose ( cause ) ; } else { final SocketAddress remoteAddr = ctx . channel ( ) . remoteAddress ( ) ; final TransportException tex ; if ( cause instanceof IOException && cause . getMessage ( ) . equals ( "Connection reset by peer" ) ) { tex = new RemoteTransportException ( "Lost connection to task manager '" + remoteAddr + "'. This indicates " + "that the remote task manager was lost." , remoteAddr , cause ) ; } else { SocketAddress localAddr = ctx . channel ( ) . localAddress ( ) ; tex = new LocalTransportException ( String . format ( "%s (connection to '%s')" , cause . getMessage ( ) , remoteAddr ) , localAddr , cause ) ; } notifyAllChannelsOfErrorAndClose ( tex ) ; } } | Called on exceptions in the client handler pipeline . |
15,367 | public static Type extractTypeFromLambda ( Class < ? > baseClass , LambdaExecutable exec , int [ ] lambdaTypeArgumentIndices , int paramLen , int baseParametersLen ) { Type output = exec . getParameterTypes ( ) [ paramLen - baseParametersLen + lambdaTypeArgumentIndices [ 0 ] ] ; for ( int i = 1 ; i < lambdaTypeArgumentIndices . length ; i ++ ) { validateLambdaType ( baseClass , output ) ; output = extractTypeArgument ( output , lambdaTypeArgumentIndices [ i ] ) ; } validateLambdaType ( baseClass , output ) ; return output ; } | Extracts type from given index from lambda . It supports nested types . |
15,368 | public static Type extractTypeArgument ( Type t , int index ) throws InvalidTypesException { if ( t instanceof ParameterizedType ) { Type [ ] actualTypeArguments = ( ( ParameterizedType ) t ) . getActualTypeArguments ( ) ; if ( index < 0 || index >= actualTypeArguments . length ) { throw new InvalidTypesException ( "Cannot extract the type argument with index " + index + " because the type has only " + actualTypeArguments . length + " type arguments." ) ; } else { return actualTypeArguments [ index ] ; } } else { throw new InvalidTypesException ( "The given type " + t + " is not a parameterized type." ) ; } } | This method extracts the n - th type argument from the given type . An InvalidTypesException is thrown if the type does not have any type arguments or if the index exceeds the number of type arguments . |
15,369 | public static List < Method > getAllDeclaredMethods ( Class < ? > clazz ) { List < Method > result = new ArrayList < > ( ) ; while ( clazz != null ) { Method [ ] methods = clazz . getDeclaredMethods ( ) ; Collections . addAll ( result , methods ) ; clazz = clazz . getSuperclass ( ) ; } return result ; } | Returns all declared methods of a class including methods of superclasses . |
15,370 | public static Class < ? > typeToClass ( Type t ) { if ( t instanceof Class ) { return ( Class < ? > ) t ; } else if ( t instanceof ParameterizedType ) { return ( ( Class < ? > ) ( ( ParameterizedType ) t ) . getRawType ( ) ) ; } throw new IllegalArgumentException ( "Cannot convert type to class" ) ; } | Convert ParameterizedType or Class to a Class . |
15,371 | public static boolean sameTypeVars ( Type t1 , Type t2 ) { return t1 instanceof TypeVariable && t2 instanceof TypeVariable && ( ( TypeVariable < ? > ) t1 ) . getName ( ) . equals ( ( ( TypeVariable < ? > ) t2 ) . getName ( ) ) && ( ( TypeVariable < ? > ) t1 ) . getGenericDeclaration ( ) . equals ( ( ( TypeVariable < ? > ) t2 ) . getGenericDeclaration ( ) ) ; } | Checks whether two types are type variables describing the same . |
15,372 | public static Type getTypeHierarchy ( List < Type > typeHierarchy , Type t , Class < ? > stopAtClass ) { while ( ! ( isClassType ( t ) && typeToClass ( t ) . equals ( stopAtClass ) ) ) { typeHierarchy . add ( t ) ; t = typeToClass ( t ) . getGenericSuperclass ( ) ; if ( t == null ) { break ; } } return t ; } | Traverses the type hierarchy of a type up until a certain stop class is found . |
15,373 | public static boolean hasSuperclass ( Class < ? > clazz , String superClassName ) { List < Type > hierarchy = new ArrayList < > ( ) ; getTypeHierarchy ( hierarchy , clazz , Object . class ) ; for ( Type t : hierarchy ) { if ( isClassType ( t ) && typeToClass ( t ) . getName ( ) . equals ( superClassName ) ) { return true ; } } return false ; } | Returns true if the given class has a superclass of given name . |
15,374 | public static Class < ? > getRawClass ( Type t ) { if ( isClassType ( t ) ) { return typeToClass ( t ) ; } else if ( t instanceof GenericArrayType ) { Type component = ( ( GenericArrayType ) t ) . getGenericComponentType ( ) ; return Array . newInstance ( getRawClass ( component ) , 0 ) . getClass ( ) ; } return Object . class ; } | Returns the raw class of both parameterized types and generic arrays . Returns java . lang . Object for all other types . |
15,375 | public static void validateLambdaType ( Class < ? > baseClass , Type t ) { if ( ! ( t instanceof Class ) ) { return ; } final Class < ? > clazz = ( Class < ? > ) t ; if ( clazz . getTypeParameters ( ) . length > 0 ) { throw new InvalidTypesException ( "The generic type parameters of '" + clazz . getSimpleName ( ) + "' are missing. " + "In many cases lambda methods don't provide enough information for automatic type extraction when Java generics are involved. " + "An easy workaround is to use an (anonymous) class instead that implements the '" + baseClass . getName ( ) + "' interface. " + "Otherwise the type has to be specified explicitly using type information." ) ; } } | Checks whether the given type has the generic parameters declared in the class definition . |
15,376 | public void clear ( ) { streamNodes = new HashMap < > ( ) ; virtualSelectNodes = new HashMap < > ( ) ; virtualSideOutputNodes = new HashMap < > ( ) ; virtualPartitionNodes = new HashMap < > ( ) ; vertexIDtoBrokerID = new HashMap < > ( ) ; vertexIDtoLoopTimeout = new HashMap < > ( ) ; iterationSourceSinkPairs = new HashSet < > ( ) ; sources = new HashSet < > ( ) ; sinks = new HashSet < > ( ) ; } | Remove all registered nodes etc . |
15,377 | public void addVirtualSelectNode ( Integer originalId , Integer virtualId , List < String > selectedNames ) { if ( virtualSelectNodes . containsKey ( virtualId ) ) { throw new IllegalStateException ( "Already has virtual select node with id " + virtualId ) ; } virtualSelectNodes . put ( virtualId , new Tuple2 < Integer , List < String > > ( originalId , selectedNames ) ) ; } | Adds a new virtual node that is used to connect a downstream vertex to only the outputs with the selected names . |
15,378 | public void addVirtualPartitionNode ( Integer originalId , Integer virtualId , StreamPartitioner < ? > partitioner ) { if ( virtualPartitionNodes . containsKey ( virtualId ) ) { throw new IllegalStateException ( "Already has virtual partition node with id " + virtualId ) ; } virtualPartitionNodes . put ( virtualId , new Tuple2 < Integer , StreamPartitioner < ? > > ( originalId , partitioner ) ) ; } | Adds a new virtual node that is used to connect a downstream vertex to an input with a certain partitioning . |
15,379 | public String getSlotSharingGroup ( Integer id ) { if ( virtualSideOutputNodes . containsKey ( id ) ) { Integer mappedId = virtualSideOutputNodes . get ( id ) . f0 ; return getSlotSharingGroup ( mappedId ) ; } else if ( virtualSelectNodes . containsKey ( id ) ) { Integer mappedId = virtualSelectNodes . get ( id ) . f0 ; return getSlotSharingGroup ( mappedId ) ; } else if ( virtualPartitionNodes . containsKey ( id ) ) { Integer mappedId = virtualPartitionNodes . get ( id ) . f0 ; return getSlotSharingGroup ( mappedId ) ; } else { StreamNode node = getStreamNode ( id ) ; return node . getSlotSharingGroup ( ) ; } } | Determines the slot sharing group of an operation across virtual nodes . |
15,380 | public boolean isCheckpointingEnabled ( ) { if ( snapshotSettings == null ) { return false ; } long checkpointInterval = snapshotSettings . getCheckpointCoordinatorConfiguration ( ) . getCheckpointInterval ( ) ; return checkpointInterval > 0 && checkpointInterval < Long . MAX_VALUE ; } | Checks if checkpointing is enabled for this job graph .
15,381 | public int getMaximumParallelism ( ) { int maxParallelism = - 1 ; for ( JobVertex vertex : taskVertices . values ( ) ) { maxParallelism = Math . max ( vertex . getParallelism ( ) , maxParallelism ) ; } return maxParallelism ; } | Gets the maximum parallelism of all operations in this job graph . |
15,382 | public void addJar ( Path jar ) { if ( jar == null ) { throw new IllegalArgumentException ( ) ; } if ( ! userJars . contains ( jar ) ) { userJars . add ( jar ) ; } } | Adds the path of a JAR file required to run the job on a task manager . |
15,383 | public void addUserArtifact ( String name , DistributedCache . DistributedCacheEntry file ) { if ( file == null ) { throw new IllegalArgumentException ( ) ; } userArtifacts . putIfAbsent ( name , file ) ; } | Adds the path of a custom file required to run the job on a task manager . |
15,384 | public void addUserJarBlobKey ( PermanentBlobKey key ) { if ( key == null ) { throw new IllegalArgumentException ( ) ; } if ( ! userJarBlobKeys . contains ( key ) ) { userJarBlobKeys . add ( key ) ; } } | Adds the BLOB referenced by the key to the JobGraph s dependencies . |
15,385 | public static < C > ProgramTargetDescriptor of ( C clusterId , JobID jobId , String webInterfaceUrl ) { String clusterIdString ; try { clusterId . getClass ( ) . getDeclaredMethod ( "toString" ) ; clusterIdString = clusterId . toString ( ) ; } catch ( NoSuchMethodException e ) { clusterIdString = clusterId . getClass ( ) . getSimpleName ( ) ; } return new ProgramTargetDescriptor ( clusterIdString , jobId . toString ( ) , webInterfaceUrl ) ; } | Creates a program target description from deployment classes . |
15,386 | public KeyGroupRangeOffsets getIntersection ( KeyGroupRange keyGroupRange ) { Preconditions . checkNotNull ( keyGroupRange ) ; KeyGroupRange intersection = this . keyGroupRange . getIntersection ( keyGroupRange ) ; long [ ] subOffsets = new long [ intersection . getNumberOfKeyGroups ( ) ] ; if ( subOffsets . length > 0 ) { System . arraycopy ( offsets , computeKeyGroupIndex ( intersection . getStartKeyGroup ( ) ) , subOffsets , 0 , subOffsets . length ) ; } return new KeyGroupRangeOffsets ( intersection , subOffsets ) ; } | Returns a key - group range with offsets which is the intersection of the internal key - group range with the given key - group range . |
15,387 | public static Decimal fromBigDecimal ( BigDecimal bd , int precision , int scale ) { bd = bd . setScale ( scale , RoundingMode . HALF_UP ) ; if ( bd . precision ( ) > precision ) { return null ; } long longVal = - 1 ; if ( precision <= MAX_COMPACT_PRECISION ) { longVal = bd . movePointRight ( scale ) . longValueExact ( ) ; } return new Decimal ( precision , scale , longVal , bd ) ; } | Sets the scale first , then checks the precision . If the precision overflows , returns null .
15,388 | public Decimal floor ( ) { BigDecimal bd = toBigDecimal ( ) . setScale ( 0 , RoundingMode . FLOOR ) ; return fromBigDecimal ( bd , bd . precision ( ) , 0 ) ; } | Note that the result may exceed the original precision .
15,389 | public static long castToIntegral ( Decimal dec ) { BigDecimal bd = dec . toBigDecimal ( ) ; bd = bd . setScale ( 0 , RoundingMode . DOWN ) ; return bd . longValue ( ) ; } | When casting to floats , overflow cannot happen because precision <= 38 .
15,390 | public < ACC , R > SingleOutputStreamOperator < R > aggregate ( AggregateFunction < T , ACC , R > function , TypeInformation < ACC > accumulatorType , TypeInformation < R > resultType ) { checkNotNull ( function , "function" ) ; checkNotNull ( accumulatorType , "accumulatorType" ) ; checkNotNull ( resultType , "resultType" ) ; if ( function instanceof RichFunction ) { throw new UnsupportedOperationException ( "This aggregation function cannot be a RichFunction." ) ; } return aggregate ( function , new PassThroughWindowFunction < K , W , R > ( ) , accumulatorType , resultType ) ; } | Applies the given aggregation function to each window . The aggregation function is called for each element aggregating values incrementally and keeping the state to one accumulator per key and window . |
15,391 | public void handIn ( String key , V obj ) { if ( ! retrieveSharedQueue ( key ) . offer ( obj ) ) { throw new RuntimeException ( "Could not register the given element, broker slot is already occupied." ) ; } } | Hand in the object to share . |
15,392 | private FinalApplicationStatus getYarnStatus ( ApplicationStatus status ) { if ( status == null ) { return FinalApplicationStatus . UNDEFINED ; } else { switch ( status ) { case SUCCEEDED : return FinalApplicationStatus . SUCCEEDED ; case FAILED : return FinalApplicationStatus . FAILED ; case CANCELED : return FinalApplicationStatus . KILLED ; default : return FinalApplicationStatus . UNDEFINED ; } } } | Converts a Flink application status enum to a YARN application status enum . |
15,393 | protected void writeLeaderInformation ( UUID leaderSessionID ) { try { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Write leader information: Leader={}, session ID={}." , leaderContender . getAddress ( ) , leaderSessionID ) ; } ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; ObjectOutputStream oos = new ObjectOutputStream ( baos ) ; oos . writeUTF ( leaderContender . getAddress ( ) ) ; oos . writeObject ( leaderSessionID ) ; oos . close ( ) ; boolean dataWritten = false ; while ( ! dataWritten && leaderLatch . hasLeadership ( ) ) { Stat stat = client . checkExists ( ) . forPath ( leaderPath ) ; if ( stat != null ) { long owner = stat . getEphemeralOwner ( ) ; long sessionID = client . getZookeeperClient ( ) . getZooKeeper ( ) . getSessionId ( ) ; if ( owner == sessionID ) { try { client . setData ( ) . forPath ( leaderPath , baos . toByteArray ( ) ) ; dataWritten = true ; } catch ( KeeperException . NoNodeException noNode ) { } } else { try { client . delete ( ) . forPath ( leaderPath ) ; } catch ( KeeperException . NoNodeException noNode ) { } } } else { try { client . create ( ) . creatingParentsIfNeeded ( ) . withMode ( CreateMode . EPHEMERAL ) . forPath ( leaderPath , baos . toByteArray ( ) ) ; dataWritten = true ; } catch ( KeeperException . NodeExistsException nodeExists ) { } } } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Successfully wrote leader information: Leader={}, session ID={}." , leaderContender . getAddress ( ) , leaderSessionID ) ; } } catch ( Exception e ) { leaderContender . handleError ( new Exception ( "Could not write leader address and leader session ID to " + "ZooKeeper." , e ) ) ; } } | Writes the current leader 's address as well as the given leader session ID to ZooKeeper .
15,394 | public static MesosConfiguration createMesosSchedulerConfiguration ( Configuration flinkConfig , String hostname ) { Protos . FrameworkInfo . Builder frameworkInfo = Protos . FrameworkInfo . newBuilder ( ) . setHostname ( hostname ) ; Protos . Credential . Builder credential = null ; if ( ! flinkConfig . contains ( MesosOptions . MASTER_URL ) ) { throw new IllegalConfigurationException ( MesosOptions . MASTER_URL . key ( ) + " must be configured." ) ; } String masterUrl = flinkConfig . getString ( MesosOptions . MASTER_URL ) ; Duration failoverTimeout = FiniteDuration . apply ( flinkConfig . getInteger ( MesosOptions . FAILOVER_TIMEOUT_SECONDS ) , TimeUnit . SECONDS ) ; frameworkInfo . setFailoverTimeout ( failoverTimeout . toSeconds ( ) ) ; frameworkInfo . setName ( flinkConfig . getString ( MesosOptions . RESOURCEMANAGER_FRAMEWORK_NAME ) ) ; frameworkInfo . setRole ( flinkConfig . getString ( MesosOptions . RESOURCEMANAGER_FRAMEWORK_ROLE ) ) ; frameworkInfo . setUser ( flinkConfig . getString ( MesosOptions . RESOURCEMANAGER_FRAMEWORK_USER ) ) ; if ( flinkConfig . contains ( MesosOptions . RESOURCEMANAGER_FRAMEWORK_PRINCIPAL ) ) { frameworkInfo . setPrincipal ( flinkConfig . getString ( MesosOptions . RESOURCEMANAGER_FRAMEWORK_PRINCIPAL ) ) ; credential = Protos . Credential . newBuilder ( ) ; credential . setPrincipal ( frameworkInfo . getPrincipal ( ) ) ; if ( flinkConfig . contains ( MesosOptions . RESOURCEMANAGER_FRAMEWORK_SECRET ) ) { credential . setSecret ( flinkConfig . getString ( MesosOptions . RESOURCEMANAGER_FRAMEWORK_SECRET ) ) ; } } MesosConfiguration mesos = new MesosConfiguration ( masterUrl , frameworkInfo , scala . Option . apply ( credential ) ) ; return mesos ; } | Loads and validates the Mesos scheduler configuration . |
15,395 | public static void applyOverlays ( Configuration configuration , ContainerSpecification containerSpec ) throws IOException { CompositeContainerOverlay overlay = new CompositeContainerOverlay ( FlinkDistributionOverlay . newBuilder ( ) . fromEnvironment ( configuration ) . build ( ) , HadoopConfOverlay . newBuilder ( ) . fromEnvironment ( configuration ) . build ( ) , HadoopUserOverlay . newBuilder ( ) . fromEnvironment ( configuration ) . build ( ) , KeytabOverlay . newBuilder ( ) . fromEnvironment ( configuration ) . build ( ) , Krb5ConfOverlay . newBuilder ( ) . fromEnvironment ( configuration ) . build ( ) , SSLStoreOverlay . newBuilder ( ) . fromEnvironment ( configuration ) . build ( ) ) ; overlay . configure ( containerSpec ) ; } | Applies the standard platform overlays to the given container specification .
15,396 | public static Configuration loadConfiguration ( Configuration dynamicProperties , Logger log ) { Configuration configuration = GlobalConfiguration . loadConfigurationWithDynamicProperties ( dynamicProperties ) ; final Map < String , String > envs = System . getenv ( ) ; final String tmpDirs = envs . get ( MesosConfigKeys . ENV_FLINK_TMP_DIR ) ; BootstrapTools . updateTmpDirectoriesInConfiguration ( configuration , tmpDirs ) ; return configuration ; } | Loads the global configuration adds the given dynamic properties configuration and sets the temp directory paths . |
15,397 | @ SuppressWarnings ( "unchecked" ) public static < T , R > Aggregator < T , R > create ( Class < T > type ) { if ( type == Long . class ) { return ( Aggregator < T , R > ) new LongSummaryAggregator ( ) ; } else if ( type == LongValue . class ) { return ( Aggregator < T , R > ) new ValueSummaryAggregator . LongValueSummaryAggregator ( ) ; } else if ( type == Integer . class ) { return ( Aggregator < T , R > ) new IntegerSummaryAggregator ( ) ; } else if ( type == IntValue . class ) { return ( Aggregator < T , R > ) new ValueSummaryAggregator . IntegerValueSummaryAggregator ( ) ; } else if ( type == Double . class ) { return ( Aggregator < T , R > ) new DoubleSummaryAggregator ( ) ; } else if ( type == DoubleValue . class ) { return ( Aggregator < T , R > ) new ValueSummaryAggregator . DoubleValueSummaryAggregator ( ) ; } else if ( type == Float . class ) { return ( Aggregator < T , R > ) new FloatSummaryAggregator ( ) ; } else if ( type == FloatValue . class ) { return ( Aggregator < T , R > ) new ValueSummaryAggregator . FloatValueSummaryAggregator ( ) ; } else if ( type == Short . class ) { return ( Aggregator < T , R > ) new ShortSummaryAggregator ( ) ; } else if ( type == ShortValue . class ) { return ( Aggregator < T , R > ) new ValueSummaryAggregator . ShortValueSummaryAggregator ( ) ; } else if ( type == Boolean . class ) { return ( Aggregator < T , R > ) new BooleanSummaryAggregator ( ) ; } else if ( type == BooleanValue . class ) { return ( Aggregator < T , R > ) new ValueSummaryAggregator . BooleanValueSummaryAggregator ( ) ; } else if ( type == String . class ) { return ( Aggregator < T , R > ) new StringSummaryAggregator ( ) ; } else if ( type == StringValue . class ) { return ( Aggregator < T , R > ) new ValueSummaryAggregator . StringValueSummaryAggregator ( ) ; } else { return ( Aggregator < T , R > ) new ObjectSummaryAggregator ( ) ; } } | Create a SummaryAggregator for the supplied type . |
15,398 | public static Configuration loadConfiguration ( final String configDir , final Configuration dynamicProperties ) { if ( configDir == null ) { throw new IllegalArgumentException ( "Given configuration directory is null, cannot load configuration" ) ; } final File confDirFile = new File ( configDir ) ; if ( ! ( confDirFile . exists ( ) ) ) { throw new IllegalConfigurationException ( "The given configuration directory name '" + configDir + "' (" + confDirFile . getAbsolutePath ( ) + ") does not describe an existing directory." ) ; } final File yamlConfigFile = new File ( confDirFile , FLINK_CONF_FILENAME ) ; if ( ! yamlConfigFile . exists ( ) ) { throw new IllegalConfigurationException ( "The Flink config file '" + yamlConfigFile + "' (" + confDirFile . getAbsolutePath ( ) + ") does not exist." ) ; } Configuration configuration = loadYAMLResource ( yamlConfigFile ) ; if ( dynamicProperties != null ) { configuration . addAll ( dynamicProperties ) ; } return configuration ; } | Loads the configuration files from the specified directory . If the dynamic properties configuration is not null then it is added to the loaded configuration . |
15,399 | public static Configuration loadConfigurationWithDynamicProperties ( Configuration dynamicProperties ) { final String configDir = System . getenv ( ConfigConstants . ENV_FLINK_CONF_DIR ) ; if ( configDir == null ) { return new Configuration ( dynamicProperties ) ; } return loadConfiguration ( configDir , dynamicProperties ) ; } | Loads the global configuration and adds the given dynamic properties configuration . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.