idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
15,000
/**
 * Sets a custom endpoint initializer.
 *
 * @param initializer the initializer to use; must be non-null and serializable
 */
public void setCustomEndpointInitializer(EndpointInitializer initializer) {
    Objects.requireNonNull(initializer, "Initializer has to be set");
    // Fail fast if the initializer cannot be shipped to the cluster.
    ClosureCleaner.ensureSerializable(initializer);
    this.initializer = initializer;
}
Set a custom endpoint initializer .
15,001
/**
 * Atomically removes the given output stream from the set of currently open
 * output streams and signals waiters that a new stream may now be opened.
 */
void unregisterOutputStream(OutStream stream) {
    lock.lock();
    try {
        // Only release a reservation if the stream was actually registered.
        if (openOutputStreams.remove(stream)) {
            numReservedOutputStreams--;
            available.signalAll();
        }
    } finally {
        lock.unlock();
    }
}
Atomically removes the given output stream from the set of currently open output streams and signals that a new stream can now be opened.
15,002
/**
 * Atomically removes the given input stream from the set of currently open
 * input streams and signals waiters that a new stream may now be opened.
 */
void unregisterInputStream(InStream stream) {
    lock.lock();
    try {
        // Only release a reservation if the stream was actually registered.
        if (openInputStreams.remove(stream)) {
            numReservedInputStreams--;
            available.signalAll();
        }
    } finally {
        lock.unlock();
    }
}
Atomically removes the given input stream from the set of currently open input streams and signals that a new stream can now be opened.
15,003
/**
 * Sets the maximum parallelism for this stream transformation.
 *
 * @param maxParallelism the maximum parallelism; must lie in
 *     [1, StreamGraphGenerator.UPPER_BOUND_MAX_PARALLELISM]
 */
public void setMaxParallelism(int maxParallelism) {
    Preconditions.checkArgument(
        maxParallelism > 0 && maxParallelism <= StreamGraphGenerator.UPPER_BOUND_MAX_PARALLELISM,
        "Maximum parallelism must be between 1 and "
            + StreamGraphGenerator.UPPER_BOUND_MAX_PARALLELISM
            + ". Found: " + maxParallelism);
    this.maxParallelism = maxParallelism;
}
Sets the maximum parallelism for this stream transformation .
15,004
/**
 * Gets information about URL class loaders. The returned string lists all URLs
 * of the class loader; for file URLs it additionally reports whether the
 * referenced file exists, is a valid JAR file, or is a directory.
 *
 * @param loader the class loader to describe
 * @return a human-readable description of the class loader's URLs
 */
public static String getUserCodeClassLoaderInfo(ClassLoader loader) {
    if (!(loader instanceof URLClassLoader)) {
        return "No user code ClassLoader";
    }
    URLClassLoader cl = (URLClassLoader) loader;
    try {
        StringBuilder bld = new StringBuilder();
        if (cl == ClassLoader.getSystemClassLoader()) {
            bld.append("System ClassLoader: ");
        } else {
            bld.append("URL ClassLoader:");
        }
        for (URL url : cl.getURLs()) {
            bld.append("\n ");
            if (url == null) {
                bld.append("(null)");
            } else if ("file".equals(url.getProtocol())) {
                String filePath = url.getPath();
                File fileFile = new File(filePath);
                bld.append("file: '").append(filePath).append('\'');
                if (fileFile.exists()) {
                    if (fileFile.isDirectory()) {
                        bld.append(" (directory)");
                    } else {
                        // try-with-resources replaces the original manual
                        // close-in-finally; the JarFile is always released.
                        try (JarFile jar = new JarFile(filePath)) {
                            bld.append(" (valid JAR)");
                        } catch (Exception e) {
                            bld.append(" (invalid JAR: ").append(e.getMessage()).append(')');
                        }
                    }
                } else {
                    bld.append(" (missing)");
                }
            } else {
                bld.append("url: ").append(url);
            }
        }
        return bld.toString();
    } catch (Throwable t) {
        // Defensive: never let diagnostics themselves fail the caller.
        return "Cannot access classloader info due to an exception.\n"
            + ExceptionUtils.stringifyException(t);
    }
}
Gets information about URL class loaders . The returned info string contains all URLs of the class loader . For file URLs it contains in addition whether the referenced file exists is a valid JAR file or is a directory .
15,005
/**
 * Checks whether the class that was not found in the given exception can be
 * resolved through the given class loader.
 *
 * <p>Relies on the exception message carrying the class name, which is the
 * convention of {@link Class#forName} and class loaders.
 *
 * @param cnfe the exception whose message names the missing class
 * @param cl the class loader to attempt resolution with
 * @return true if the class can be resolved through {@code cl}, false otherwise
 */
public static boolean validateClassLoadable(ClassNotFoundException cnfe, ClassLoader cl) {
    try {
        String className = cnfe.getMessage();
        // Resolve without initializing the class.
        Class.forName(className, false, cl);
        return true;
    } catch (Exception e) {
        // Merged the original's two identical catch blocks (ClassNotFoundException
        // and Exception both returned false) into one; also covers a null or
        // garbled message (NullPointerException from Class.forName).
        return false;
    }
}
Checks whether the class that was not found in the given exception can be resolved through the given class loader .
15,006
/**
 * Projects the aggregate's used input fields on the RelBuilder and returns the
 * index mapping from the aggregate's input fields to the new projected fields.
 */
private Mapping extractProjectsAndMapping(Aggregate aggregate, RelNode input, RelBuilder relBuilder) {
    final ImmutableBitSet.Builder inputFieldsUsed = getInputFieldUsed(aggregate, input);

    final List<RexNode> projects = new ArrayList<>();
    final Mapping mapping = Mappings.create(
        MappingType.INVERSE_SURJECTION,
        aggregate.getInput().getRowType().getFieldCount(),
        inputFieldsUsed.cardinality());

    // Project every used field and record old-index -> new-index.
    int newIdx = 0;
    for (int i : inputFieldsUsed.build()) {
        projects.add(relBuilder.field(i));
        mapping.set(i, newIdx++);
    }

    // Force a fresh project node unless the input already is a Project.
    if (input instanceof Project) {
        relBuilder.project(projects);
    } else {
        relBuilder.project(projects, Collections.emptyList(), true);
    }
    return mapping;
}
Extracts projects from the Aggregate and returns the index mapping between the new projects and its input.
15,007
/**
 * Computes which input fields the aggregate uses: all grouping fields, every
 * aggregate-call argument and filter field, and - for window aggregates - the
 * window time field.
 */
private ImmutableBitSet.Builder getInputFieldUsed(Aggregate aggregate, RelNode input) {
    final ImmutableBitSet.Builder usedFields = aggregate.getGroupSet().rebuild();
    for (AggregateCall call : aggregate.getAggCallList()) {
        for (int argIdx : call.getArgList()) {
            usedFields.set(argIdx);
        }
        if (call.filterArg >= 0) {
            usedFields.set(call.filterArg);
        }
    }
    if (aggregate instanceof LogicalWindowAggregate) {
        usedFields.set(getWindowTimeFieldIndex((LogicalWindowAggregate) aggregate, input));
    }
    return usedFields;
}
Compute which input fields are used by the aggregate .
15,008
/**
 * Computes the parameters used to start a TaskManager Java process from the
 * total container memory and the configuration.
 */
public static ContaineredTaskManagerParameters create(
        Configuration config, long containerMemoryMB, int numSlots) {
    // Split the container memory into cutoff, JVM heap, and off-heap parts.
    final long cutoffMB = calculateCutoffMB(config, containerMemoryMB);
    final long heapSizeMB =
        TaskManagerServices.calculateHeapSizeMB(containerMemoryMB - cutoffMB, config);
    final long offHeapSizeMB = containerMemoryMB - heapSizeMB;

    // Collect environment variables declared with the containerized-TM prefix.
    final HashMap<String, String> envVars = new HashMap<>();
    final String prefix = ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX;
    for (String key : config.keySet()) {
        if (key.startsWith(prefix) && key.length() > prefix.length()) {
            envVars.put(key.substring(prefix.length()), config.getString(key, null));
        }
    }

    return new ContaineredTaskManagerParameters(
        containerMemoryMB, heapSizeMB, offHeapSizeMB, numSlots, envVars);
}
Computes the parameters to be used to start a TaskManager Java process .
15,009
/**
 * Reads the semantic annotations of a single-input user-defined function
 * class. At most one of ForwardedFields / NonForwardedFields may be present.
 *
 * @return the set of found annotations, or null if none were present
 * @throws InvalidProgramException if both forwarded and non-forwarded
 *     annotations are present
 */
public static Set<Annotation> readSingleForwardAnnotations(Class<?> udfClass) {
    ForwardedFields forwarded = udfClass.getAnnotation(ForwardedFields.class);
    NonForwardedFields nonForwarded = udfClass.getAnnotation(NonForwardedFields.class);
    ReadFields read = udfClass.getAnnotation(ReadFields.class);

    Set<Annotation> result = new HashSet<Annotation>();
    if (forwarded != null) {
        result.add(forwarded);
    }
    if (nonForwarded != null) {
        // Forwarded and non-forwarded annotations are mutually exclusive.
        if (!result.isEmpty()) {
            throw new InvalidProgramException("Either " + ForwardedFields.class.getSimpleName()
                + " or " + NonForwardedFields.class.getSimpleName()
                + " can be annotated to a function, not both.");
        }
        result.add(nonForwarded);
    }
    if (read != null) {
        result.add(read);
    }
    return result.isEmpty() ? null : result;
}
Reads the annotations of a user defined function with one input and returns semantic properties according to the forwarded fields annotated .
15,010
/**
 * Reads the semantic annotations of a dual-input user-defined function class.
 * For each input, at most one of the forwarded / non-forwarded annotations may
 * be present.
 *
 * @return the set of found annotations, or null if none were present
 * @throws InvalidProgramException if both forwarded and non-forwarded
 *     annotations are present for the same input
 */
public static Set<Annotation> readDualForwardAnnotations(Class<?> udfClass) {
    ForwardedFieldsFirst ff1 = udfClass.getAnnotation(ForwardedFieldsFirst.class);
    ForwardedFieldsSecond ff2 = udfClass.getAnnotation(ForwardedFieldsSecond.class);
    NonForwardedFieldsFirst nff1 = udfClass.getAnnotation(NonForwardedFieldsFirst.class);
    NonForwardedFieldsSecond nff2 = udfClass.getAnnotation(NonForwardedFieldsSecond.class);
    ReadFieldsFirst rf1 = udfClass.getAnnotation(ReadFieldsFirst.class);
    ReadFieldsSecond rf2 = udfClass.getAnnotation(ReadFieldsSecond.class);

    Set<Annotation> result = new HashSet<Annotation>();

    // First input: forwarded and non-forwarded are mutually exclusive.
    if (nff1 != null && ff1 != null) {
        throw new InvalidProgramException("Either " + ForwardedFieldsFirst.class.getSimpleName()
            + " or " + NonForwardedFieldsFirst.class.getSimpleName()
            + " can be annotated to a function, not both.");
    } else if (ff1 != null) {
        result.add(ff1);
    } else if (nff1 != null) {
        result.add(nff1);
    }

    // Second input: same exclusivity rule.
    if (ff2 != null && nff2 != null) {
        throw new InvalidProgramException("Either " + ForwardedFieldsSecond.class.getSimpleName()
            + " or " + NonForwardedFieldsSecond.class.getSimpleName()
            + " can be annotated to a function, not both.");
    } else if (ff2 != null) {
        result.add(ff2);
    } else if (nff2 != null) {
        result.add(nff2);
    }

    if (rf1 != null) {
        result.add(rf1);
    }
    if (rf2 != null) {
        result.add(rf2);
    }

    return result.isEmpty() ? null : result;
}
Reads the annotations of a user defined function with two inputs and returns semantic properties according to the forwarded fields annotated .
15,011
/**
 * Creates a BinaryString consisting of {@code length} space characters.
 */
public static BinaryString blankString(int length) {
    byte[] blanks = new byte[length];
    Arrays.fill(blanks, (byte) ' ');
    return fromBytes(blanks);
}
Creates a BinaryString that contains the given number of spaces.
15,012
/**
 * Compares two strings whose data spans multiple memory segments. First
 * locates the segment/offset where each string's data starts, then compares
 * raw bytes (as unsigned values) chunk by chunk across segment boundaries.
 * If all common bytes are equal, the longer string compares greater.
 */
private int compareMultiSegments(BinaryString other) {
    if (sizeInBytes == 0 || other.sizeInBytes == 0) {
        return sizeInBytes - other.sizeInBytes;
    }

    int remaining = Math.min(sizeInBytes, other.sizeInBytes);

    MemorySegment seg1 = segments[0];
    MemorySegment seg2 = other.segments[0];
    int segmentSize = segments[0].size();
    int otherSegmentSize = other.segments[0].size();

    // Number of this/other string's bytes in its first relevant segment.
    int sizeOfFirst1 = segmentSize - offset;
    int sizeOfFirst2 = otherSegmentSize - other.offset;

    int varSegIndex1 = 1;
    int varSegIndex2 = 1;

    // Skip whole segments that lie entirely before the data start.
    while (sizeOfFirst1 <= 0) {
        sizeOfFirst1 += segmentSize;
        seg1 = segments[varSegIndex1++];
    }
    while (sizeOfFirst2 <= 0) {
        sizeOfFirst2 += otherSegmentSize;
        seg2 = other.segments[varSegIndex2++];
    }

    int offset1 = segmentSize - sizeOfFirst1;
    int offset2 = otherSegmentSize - sizeOfFirst2;

    int needCompare = Math.min(Math.min(sizeOfFirst1, sizeOfFirst2), remaining);
    while (needCompare > 0) {
        // Compare the overlapping chunk of both current segments byte by byte.
        for (int i = 0; i < needCompare; i++) {
            int res = (seg1.get(offset1 + i) & 0xFF) - (seg2.get(offset2 + i) & 0xFF);
            if (res != 0) {
                return res;
            }
        }
        if (needCompare == remaining) {
            break;
        }
        remaining -= needCompare;

        // Advance whichever side exhausted its current segment.
        if (sizeOfFirst1 < sizeOfFirst2) {
            seg1 = segments[varSegIndex1++];
            offset1 = 0;
            offset2 += needCompare;
            sizeOfFirst1 = segmentSize;
            sizeOfFirst2 -= needCompare;
        } else if (sizeOfFirst1 > sizeOfFirst2) {
            seg2 = other.segments[varSegIndex2++];
            offset2 = 0;
            offset1 += needCompare;
            sizeOfFirst2 = otherSegmentSize;
            sizeOfFirst1 -= needCompare;
        } else {
            seg1 = segments[varSegIndex1++];
            seg2 = other.segments[varSegIndex2++];
            offset1 = 0;
            offset2 = 0;
            sizeOfFirst1 = segmentSize;
            sizeOfFirst2 = otherSegmentSize;
        }
        needCompare = Math.min(Math.min(sizeOfFirst1, sizeOfFirst2), remaining);
    }

    checkArgument(needCompare == remaining);
    // All common bytes equal: order by length.
    return sizeInBytes - other.sizeInBytes;
}
Find the boundaries of segments and then compare MemorySegment .
15,013
/**
 * Concatenates the given strings into a single string; null entries are skipped.
 */
public static BinaryString concat(Iterable<BinaryString> inputs) {
    // First pass: materialize inputs and compute the total byte length.
    int totalLength = 0;
    for (BinaryString s : inputs) {
        if (s != null) {
            s.ensureMaterialized();
            totalLength += s.getSizeInBytes();
        }
    }

    // Second pass: copy every input's bytes into the result buffer.
    final byte[] result = new byte[totalLength];
    int writePos = 0;
    for (BinaryString s : inputs) {
        if (s != null) {
            int len = s.sizeInBytes;
            SegmentsUtil.copyToBytes(s.segments, s.offset, result, writePos, len);
            writePos += len;
        }
    }
    return fromBytes(result);
}
Concatenates input strings together into a single string .
15,014
/**
 * Returns whether this string contains the given substring (equivalent to
 * SQL LIKE '%substring%'). The empty substring always matches.
 */
public boolean contains(final BinaryString substring) {
    ensureMaterialized();
    substring.ensureMaterialized();
    if (substring.sizeInBytes == 0) {
        return true;
    }
    return SegmentsUtil.find(
        segments, offset, sizeInBytes,
        substring.segments, substring.offset, substring.sizeInBytes) != -1;
}
Returns whether this string contains the given substring, equivalent to SQL LIKE '%substring%'.
15,015
/**
 * Returns whether this string ends with the given suffix (equivalent to
 * SQL LIKE '%suffix').
 */
public boolean endsWith(final BinaryString suffix) {
    ensureMaterialized();
    suffix.ensureMaterialized();
    // Match the suffix against the tail of this string.
    return matchAt(suffix, sizeInBytes - suffix.sizeInBytes);
}
Returns whether this string ends with the given suffix, equivalent to SQL LIKE '%suffix'.
15,016
/**
 * Removes from both ends every leading/trailing character that occurs in
 * trimStr and returns the resulting substring.
 *
 * @return the trimmed string, or null if trimStr is null
 */
public BinaryString trim(BinaryString trimStr) {
    return trimStr == null ? null : trimLeft(trimStr).trimRight(trimStr);
}
Walk each character of current string from both ends remove the character if it is in trim string . Return the new substring which both ends trim characters have been removed .
15,017
/**
 * Walks characters from the left end, removing each one that occurs in
 * trimStr, and stops at the first character not in trimStr.
 *
 * @return the trimmed string, or null if trimStr is null
 */
public BinaryString trimLeft(BinaryString trimStr) {
    ensureMaterialized();
    if (trimStr == null) {
        return null;
    }
    trimStr.ensureMaterialized();
    if (trimStr.isSpaceString()) {
        // Trimming only spaces has a dedicated fast path.
        return trimLeft();
    }
    if (!inFirstSegment()) {
        // Data spans multiple segments: use the slow path.
        return trimLeftSlow(trimStr);
    }
    int searchIdx = 0;
    while (searchIdx < this.sizeInBytes) {
        // Decode one UTF-8 character at a time.
        int charBytes = numBytesForFirstByte(getByteOneSegment(searchIdx));
        BinaryString currentChar = copyBinaryStringInOneSeg(searchIdx, searchIdx + charBytes - 1);
        if (!trimStr.contains(currentChar)) {
            break;
        }
        searchIdx += charBytes;
    }
    return searchIdx >= sizeInBytes
        ? EMPTY_UTF8
        : copyBinaryStringInOneSeg(searchIdx, sizeInBytes - 1);
}
Walk each character of current string from left end remove the character if it is in trim string . Stops at the first character which is not in trim string . Return the new substring .
15,018
/**
 * Walks characters from the right end, removing each one that occurs in
 * trimStr, and stops at the first character not in trimStr.
 *
 * @return the trimmed string, or null if trimStr is null
 */
public BinaryString trimRight(BinaryString trimStr) {
    ensureMaterialized();
    if (trimStr == null) {
        return null;
    }
    trimStr.ensureMaterialized();
    if (trimStr.isSpaceString()) {
        // Trimming only spaces has a dedicated fast path.
        return trimRight();
    }
    if (!inFirstSegment()) {
        return trimRightSlow(trimStr);
    }

    // Index every character's start position and byte length so we can walk backwards.
    int charIdx = 0;
    int byteIdx = 0;
    int[] charLens = new int[sizeInBytes];
    int[] charStartPos = new int[sizeInBytes];
    while (byteIdx < sizeInBytes) {
        charStartPos[charIdx] = byteIdx;
        charLens[charIdx] = numBytesForFirstByte(getByteOneSegment(byteIdx));
        byteIdx += charLens[charIdx];
        charIdx++;
    }

    // Walk from the last character towards the front, shrinking the end index.
    int searchIdx = sizeInBytes - 1;
    charIdx -= 1;
    while (charIdx >= 0) {
        BinaryString currentChar = copyBinaryStringInOneSeg(
            charStartPos[charIdx], charStartPos[charIdx] + charLens[charIdx] - 1);
        if (!trimStr.contains(currentChar)) {
            break;
        }
        searchIdx -= charLens[charIdx];
        charIdx--;
    }
    return searchIdx < 0 ? EMPTY_UTF8 : copyBinaryStringInOneSeg(0, searchIdx);
}
Walk each character of current string from right end remove the character if it is in trim string . Stops at the first character which is not in trim string . Return the new substring .
15,019
/**
 * Returns the character position of the first occurrence of subStr at or
 * after the given character position, or -1 if not found. An empty subStr
 * matches at position 0.
 */
public int indexOf(BinaryString subStr, int start) {
    ensureMaterialized();
    subStr.ensureMaterialized();
    if (subStr.sizeInBytes == 0) {
        return 0;
    }
    if (!inFirstSegment()) {
        return indexOfSlow(subStr, start);
    }

    // Advance to the byte offset of character index 'start'.
    int byteIdx = 0;
    int charIdx = 0;
    while (byteIdx < sizeInBytes && charIdx < start) {
        byteIdx += numBytesForFirstByte(getByteOneSegment(byteIdx));
        charIdx++;
    }

    // Try a match at every subsequent character boundary.
    do {
        if (byteIdx + subStr.sizeInBytes > sizeInBytes) {
            return -1;
        }
        if (SegmentsUtil.equals(segments, offset + byteIdx,
                subStr.segments, subStr.offset, subStr.sizeInBytes)) {
            return charIdx;
        }
        byteIdx += numBytesForFirstByte(getByteOneSegment(byteIdx));
        charIdx++;
    } while (byteIdx < sizeInBytes);
    return -1;
}
Returns the position of the first occurrence of substr in the current string, starting from the given position.
15,020
/**
 * Reverses the string character-wise; multi-byte UTF-8 characters keep their
 * internal byte order.
 */
public BinaryString reverse() {
    ensureMaterialized();
    if (!inFirstSegment()) {
        return reverseSlow();
    }
    byte[] result = new byte[this.sizeInBytes];
    int byteIdx = 0;
    while (byteIdx < sizeInBytes) {
        int charBytes = numBytesForFirstByte(getByteOneSegment(byteIdx));
        // Copy this character to the mirrored position from the end.
        segments[0].get(offset + byteIdx, result, result.length - byteIdx - charBytes, charBytes);
        byteIdx += charBytes;
    }
    return BinaryString.fromBytes(result);
}
Reverse each character in current string .
15,021
/**
 * Parses this BinaryString to a Long.
 *
 * <p>Accepts an optional sign, decimal digits, and an optional fractional part
 * after '.' (the fraction's digits are validated but ignored). The value is
 * accumulated negatively so that Long.MIN_VALUE can be represented.
 *
 * @return the parsed value, or null if the string is empty, malformed, or overflows
 */
public Long toLong() {
    ensureMaterialized();
    if (sizeInBytes == 0) {
        return null;
    }
    int size = segments[0].size();
    SegmentAndOffset segmentAndOffset = startSegmentAndOffset(size);
    int totalOffset = 0;

    byte b = segmentAndOffset.value();
    final boolean negative = b == '-';
    if (negative || b == '+') {
        segmentAndOffset.nextByte(size);
        totalOffset++;
        if (sizeInBytes == 1) {
            // A bare sign is not a number.
            return null;
        }
    }

    long result = 0;
    final byte separator = '.';
    final int radix = 10;
    // Smallest value that can still be multiplied by radix without overflow.
    final long stopValue = Long.MIN_VALUE / radix;
    while (totalOffset < this.sizeInBytes) {
        b = segmentAndOffset.value();
        totalOffset++;
        segmentAndOffset.nextByte(size);
        if (b == separator) {
            // Integer part done; only validate the fraction digits below.
            break;
        }
        int digit;
        if (b >= '0' && b <= '9') {
            digit = b - '0';
        } else {
            return null;
        }
        if (result < stopValue) {
            return null; // multiplying would overflow
        }
        result = result * radix - digit;
        if (result > 0) {
            return null; // overflowed past Long.MIN_VALUE
        }
    }

    // Everything after the separator must still be a digit.
    while (totalOffset < sizeInBytes) {
        byte currentByte = segmentAndOffset.value();
        if (currentByte < '0' || currentByte > '9') {
            return null;
        }
        totalOffset++;
        segmentAndOffset.nextByte(size);
    }

    if (!negative) {
        result = -result;
        if (result < 0) {
            return null; // Long.MIN_VALUE has no positive counterpart
        }
    }
    return result;
}
Parses this BinaryString to Long .
15,022
/**
 * Returns the upper case of this string. The fast path handles pure-ASCII
 * content in the first segment; any multi-byte or non-ASCII character falls
 * back to the string-based slow path.
 */
public BinaryString toUpperCase() {
    if (javaObject != null) {
        return toUpperCaseSlow();
    }
    if (sizeInBytes == 0) {
        return EMPTY_UTF8;
    }
    int size = segments[0].size();
    SegmentAndOffset segmentAndOffset = startSegmentAndOffset(size);
    byte[] bytes = new byte[sizeInBytes];
    // Fix: the original wrote Character.toTitleCase of the first byte into
    // bytes[0] here. That store was dead - the loop below overwrites bytes[0]
    // on its first iteration (the cursor had not advanced) - so it is removed.
    for (int i = 0; i < sizeInBytes; i++) {
        byte b = segmentAndOffset.value();
        if (numBytesForFirstByte(b) != 1) {
            // Multi-byte (non-ASCII) character: delegate to the slow path.
            return toUpperCaseSlow();
        }
        int upper = Character.toUpperCase((int) b);
        if (upper > 127) {
            return toUpperCaseSlow();
        }
        bytes[i] = (byte) upper;
        segmentAndOffset.nextByte(size);
    }
    return fromBytes(bytes);
}
Returns the upper case of this string .
15,023
/**
 * Returns the lower case of this string. The fast path handles pure-ASCII
 * content in the first segment; any multi-byte or non-ASCII character falls
 * back to the string-based slow path.
 */
public BinaryString toLowerCase() {
    if (javaObject != null) {
        return toLowerCaseSlow();
    }
    if (sizeInBytes == 0) {
        return EMPTY_UTF8;
    }
    int size = segments[0].size();
    SegmentAndOffset segmentAndOffset = startSegmentAndOffset(size);
    byte[] bytes = new byte[sizeInBytes];
    // Fix: the original wrote Character.toTitleCase of the first byte into
    // bytes[0] (an apparent copy-paste from toUpperCase). That store was dead -
    // the loop below overwrites bytes[0] on its first iteration - so it is removed.
    for (int i = 0; i < sizeInBytes; i++) {
        byte b = segmentAndOffset.value();
        if (numBytesForFirstByte(b) != 1) {
            // Multi-byte (non-ASCII) character: delegate to the slow path.
            return toLowerCaseSlow();
        }
        int lower = Character.toLowerCase((int) b);
        if (lower > 127) {
            return toLowerCaseSlow();
        }
        bytes[i] = (byte) lower;
        segmentAndOffset.nextByte(size);
    }
    return fromBytes(bytes);
}
Returns the lower case of this string .
15,024
/**
 * Decides the SQL boolean representation of this string.
 *
 * @return TRUE/FALSE if the lower-cased string is a recognized boolean
 *     literal, null otherwise
 */
public Boolean toBooleanSQL() {
    // Hoisted: the original computed toLowerCase() twice (once per lookup).
    BinaryString lowerCased = toLowerCase();
    if (TRUE_STRINGS.contains(lowerCased)) {
        return true;
    } else if (FALSE_STRINGS.contains(lowerCased)) {
        return false;
    } else {
        return null;
    }
}
Decide boolean representation of a string .
15,025
/**
 * Sets the parallelism for this operator.
 *
 * @param parallelism the parallelism; must be 1 for non-parallel operators
 * @return this operator, for call chaining
 */
public SingleOutputStreamOperator<T> setParallelism(int parallelism) {
    Preconditions.checkArgument(canBeParallel() || parallelism == 1,
        "The parallelism of non parallel operator must be 1.");
    transformation.setParallelism(parallelism);
    return this;
}
Sets the parallelism for this operator .
15,026
/**
 * Sets the maximum parallelism of this operator.
 *
 * @param maxParallelism must be positive; must be 1 for non-parallel operators
 * @return this operator, for call chaining
 */
public SingleOutputStreamOperator<T> setMaxParallelism(int maxParallelism) {
    Preconditions.checkArgument(maxParallelism > 0,
        "The maximum parallelism must be greater than 0.");
    Preconditions.checkArgument(canBeParallel() || maxParallelism == 1,
        "The maximum parallelism of non parallel operator must be 1.");
    transformation.setMaxParallelism(maxParallelism);
    return this;
}
Sets the maximum parallelism of this operator .
15,027
/**
 * Sets the resources for this operator; minimum and preferred resources are
 * the same by default.
 */
private SingleOutputStreamOperator<T> setResources(ResourceSpec resources) {
    Preconditions.checkNotNull(resources, "The resources must be not null.");
    Preconditions.checkArgument(resources.isValid(),
        "The values in resources must be not less than 0.");
    // Use the same spec as both the minimum and the preferred resources.
    transformation.setResources(resources, resources);
    return this;
}
Sets the resources for this operator the minimum and preferred resources are the same by default .
15,028
/**
 * Pins this operator to a parallelism and maximum parallelism of one, and
 * marks it so the parallelism can no longer be set to a value other than 1.
 */
public SingleOutputStreamOperator<T> forceNonParallel() {
    transformation.setParallelism(1);
    transformation.setMaxParallelism(1);
    nonParallel = true;
    return this;
}
Sets the parallelism and maximum parallelism of this operator to one . And mark this operator cannot set a non - 1 degree of parallelism .
15,029
/**
 * Finalizes the build phase for this partition: records the final write
 * position, closes the build-side write buffer, and - if the partition was
 * spilled - closes the build channel and creates the probe-side output view.
 *
 * @return 1 if a probe-side channel was created (partition spilled), 0 otherwise
 */
int finalizeBuildPhase(IOManager ioAccess, FileIOChannel.Enumerator probeChannelEnumerator)
        throws IOException {
    this.finalBufferLimit = this.buildSideWriteBuffer.getCurrentPositionInSegment();
    this.partitionBuffers = this.buildSideWriteBuffer.close();

    if (isInMemory()) {
        return 0;
    }

    // Spilled partition: finish the build-side file and prepare probe-side spilling.
    this.buildSideChannel.close();
    this.probeSideBuffer = FileChannelUtil.createOutputView(
        ioAccess,
        probeChannelEnumerator.next(),
        compressionEnable,
        compressionCodecFactory,
        compressionBlockSize,
        memorySegmentSize);
    return 1;
}
Finalizes the partition after the build phase; returns 1 if a probe-side channel was created for a spilled partition, and 0 if the partition stayed in memory.
15,030
/**
 * Records the given error and interrupts the target thread, but only if no
 * other error was recorded first (compare-and-set on the exception holder).
 */
public void reportError(Throwable t) {
    if (t == null) {
        return;
    }
    // Only the first reported error wins; later ones are ignored.
    if (exception.compareAndSet(null, t) && toInterrupt != null) {
        toInterrupt.interrupt();
    }
}
Sets the exception and interrupts the target thread if no other exception has occurred so far .
15,031
/**
 * Emits the aggregation result of the given window. When retraction is
 * enabled, a changed result first retracts the previously emitted row before
 * emitting the new one; an unchanged result emits nothing.
 */
private void emitWindowResult(W window) throws Exception {
    BaseRow aggResult = windowFunction.getWindowAggregationResult(window);
    if (sendRetraction) {
        previousState.setCurrentNamespace(window);
        BaseRow previousAggResult = previousState.value();
        if (previousAggResult != null) {
            // A result was emitted for this window before: only act on change.
            if (!equaliser.equalsWithoutHeader(aggResult, previousAggResult)) {
                // Retract the previously emitted row.
                reuseOutput.replace((BaseRow) getCurrentKey(), previousAggResult);
                BaseRowUtil.setRetract(reuseOutput);
                collector.collect(reuseOutput);
                // Emit the new row and remember it for future retractions.
                reuseOutput.replace((BaseRow) getCurrentKey(), aggResult);
                BaseRowUtil.setAccumulate(reuseOutput);
                collector.collect(reuseOutput);
                previousState.update(aggResult);
            }
        } else {
            // First result for this window.
            reuseOutput.replace((BaseRow) getCurrentKey(), aggResult);
            BaseRowUtil.setAccumulate(reuseOutput);
            collector.collect(reuseOutput);
            previousState.update(aggResult);
        }
    } else {
        // No retraction: emit unconditionally without tracking previous results.
        reuseOutput.replace((BaseRow) getCurrentKey(), aggResult);
        collector.collect(reuseOutput);
    }
}
Emits the window result of the given window .
15,032
/**
 * Registers a timer (event-time or processing-time, depending on the window
 * assigner) to clean up the window contents; windows that never expire are
 * skipped.
 */
private void registerCleanupTimer(W window) {
    long cleanupTime = cleanupTime(window);
    if (cleanupTime == Long.MAX_VALUE) {
        // No cleanup: this window effectively lives forever.
        return;
    }
    if (windowAssigner.isEventTime()) {
        triggerContext.registerEventTimeTimer(cleanupTime);
    } else {
        triggerContext.registerProcessingTimeTimer(cleanupTime);
    }
}
Registers a timer to cleanup the content of the window .
15,033
/**
 * Sets the parallelism for this data sink; the degree must be 1 or more
 * (or the default-parallelism marker).
 */
public DataSink<T> setParallelism(int parallelism) {
    Preconditions.checkArgument(parallelism > 0 || parallelism == ExecutionConfig.PARALLELISM_DEFAULT,
        "The parallelism of an operator must be at least 1.");
    this.parallelism = parallelism;
    return this;
}
Sets the parallelism for this data sink . The degree must be 1 or more .
15,034
/**
 * Sets the minimum and preferred resources for this data sink. Both specs
 * must be valid and the minimum must not exceed the preferred resources.
 */
private DataSink<T> setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
    Preconditions.checkNotNull(minResources, "The min resources must be not null.");
    Preconditions.checkNotNull(preferredResources, "The preferred resources must be not null.");
    Preconditions.checkArgument(
        minResources.isValid()
            && preferredResources.isValid()
            && minResources.lessThanOrEqual(preferredResources),
        "The values in resources must be not less than 0 and the preferred resources must be greater than the min resources.");
    this.minResources = minResources;
    this.preferredResources = preferredResources;
    return this;
}
Sets the minimum and preferred resources for this data sink. The lower and upper resource limits will be considered by the resource-resizing feature planned for the future.
15,035
/**
 * Sets the resources for this data sink; minimum and preferred resources are
 * the same by default.
 */
private DataSink<T> setResources(ResourceSpec resources) {
    Preconditions.checkNotNull(resources, "The resources must be not null.");
    Preconditions.checkArgument(resources.isValid(),
        "The values in resources must be not less than 0.");
    // Single spec serves as both the minimum and the preferred resources.
    this.minResources = resources;
    this.preferredResources = resources;
    return this;
}
Sets the resources for this data sink and the minimum and preferred resources are the same by default .
15,036
/**
 * Restores this partition's buffers from disk. Called every time a multi-match
 * hash map is opened again for a new probe input.
 */
void restorePartitionBuffers(IOManager ioManager, List<MemorySegment> availableMemory)
        throws IOException {
    final BulkBlockChannelReader reader = ioManager.createBulkBlockChannelReader(
        this.initialBuildSideChannel, availableMemory, this.initialPartitionBuffersCount);
    // NOTE(review): close() is invoked before getFullSegments(); presumably
    // close() blocks until all pending read requests have completed - confirm
    // against the BulkBlockChannelReader contract.
    reader.close();
    final List<MemorySegment> partitionBuffersFromDisk = reader.getFullSegments();
    this.partitionBuffers = (MemorySegment[]) partitionBuffersFromDisk
        .toArray(new MemorySegment[partitionBuffersFromDisk.size()]);

    // Reset the overflow bookkeeping and mark the partition as restored.
    this.overflowSegments = new MemorySegment[2];
    this.numOverflowSegments = 0;
    this.nextOverflowBucket = 0;
    this.isRestored = true;
}
This method is called every time a multi-match hash map is opened again for a new probe input.
15,037
/**
 * Adds a required parameter with the given name.
 *
 * @return the newly created Option
 * @throws RequiredParametersException if an option with that name already exists
 */
public Option add(String name) throws RequiredParametersException {
    if (this.data.containsKey(name)) {
        throw new RequiredParametersException("Option with key " + name + " already exists.");
    }
    Option option = new Option(name);
    this.data.put(name, option);
    return option;
}
Add a parameter based on its name .
15,038
/**
 * Applies the option's default (or alternative-name) value to the data map,
 * or throws if the parameter has neither a passed value nor a default.
 */
private void checkAndApplyDefaultValue(Option o, Map<String, String> data)
        throws RequiredParametersException {
    if (hasNoDefaultValueAndNoValuePassedOnAlternativeName(o, data)) {
        throw new RequiredParametersException("No default value for undefined parameter " + o.getName());
    }
}
Applies the option's default value to the parameters if one is available; otherwise throws an exception.
15,039
/**
 * Fills in the option's value from its alternative name or its default value.
 *
 * @return true if the parameter is really missing (no alternative value and
 *     no default), false if a value could be supplied
 */
private boolean hasNoDefaultValueAndNoValuePassedOnAlternativeName(Option o, Map<String, String> data)
        throws RequiredParametersException {
    if (o.hasAlt() && data.containsKey(o.getAlt())) {
        // A value was passed under the alternative name: mirror it onto the primary name.
        data.put(o.getName(), data.get(o.getAlt()));
        return false;
    }
    if (o.hasDefaultValue()) {
        // Apply the default under both names so later lookups succeed either way.
        data.put(o.getName(), o.getDefaultValue());
        if (o.hasAlt()) {
            data.put(o.getAlt(), o.getDefaultValue());
        }
        return false;
    }
    return true;
}
Returns true to indicate the parameter is really missing; otherwise applies the alternative-name or default value and returns false.
15,040
/**
 * Builds a help text listing all defined required parameters.
 */
public String getHelp() {
    // Pre-size the builder with a rough per-parameter estimate.
    StringBuilder sb = new StringBuilder(data.size() * HELP_TEXT_LENGTH_PER_PARAM);
    sb.append("Required Parameters:");
    sb.append(HELP_TEXT_LINE_DELIMITER);
    for (Option o : data.values()) {
        sb.append(this.helpText(o));
    }
    sb.append(HELP_TEXT_LINE_DELIMITER);
    return sb.toString();
}
Build a help text for the defined parameters .
15,041
/**
 * Overrides the default group size for the quadratic expansion of neighbor
 * pairs. Small groups generate more data, whereas large groups distribute
 * computation less evenly among tasks.
 */
public JaccardIndex<K, VV, EV> setGroupSize(int groupSize) {
    Preconditions.checkArgument(groupSize > 0, "Group size must be greater than zero");
    this.groupSize = groupSize;
    return this;
}
Override the default group size for the quadratic expansion of neighbor pairs . Small groups generate more data whereas large groups distribute computation less evenly among tasks .
15,042
/**
 * Filters out Jaccard Index scores less than the given minimum fraction
 * (numerator / denominator).
 */
public JaccardIndex<K, VV, EV> setMinimumScore(int numerator, int denominator) {
    Preconditions.checkArgument(numerator >= 0, "Minimum score numerator must be non-negative");
    Preconditions.checkArgument(denominator > 0, "Minimum score denominator must be greater than zero");
    Preconditions.checkArgument(numerator <= denominator,
        "Minimum score fraction must be less than or equal to one");
    this.unboundedScores = false;
    this.minimumScoreNumerator = numerator;
    this.minimumScoreDenominator = denominator;
    return this;
}
Filter out Jaccard Index scores less than the given minimum fraction .
15,043
/**
 * Filters out Jaccard Index scores greater than the given maximum fraction
 * (numerator / denominator).
 */
public JaccardIndex<K, VV, EV> setMaximumScore(int numerator, int denominator) {
    Preconditions.checkArgument(numerator >= 0, "Maximum score numerator must be non-negative");
    Preconditions.checkArgument(denominator > 0, "Maximum score denominator must be greater than zero");
    Preconditions.checkArgument(numerator <= denominator,
        "Maximum score fraction must be less than or equal to one");
    this.unboundedScores = false;
    this.maximumScoreNumerator = numerator;
    this.maximumScoreDenominator = denominator;
    return this;
}
Filter out Jaccard Index scores greater than the given maximum fraction .
15,044
/**
 * Closes the delta iteration; this defines the end of the iteration's step
 * function.
 *
 * @param solutionSetDelta the delta applied to the solution set each iteration
 * @param newWorkset the workset for the next iteration
 * @return the iteration result set
 */
public DataSet<ST> closeWith(DataSet<ST> solutionSetDelta, DataSet<WT> newWorkset) {
    return new DeltaIterationResultSet<ST, WT>(
        initialSolutionSet.getExecutionEnvironment(),
        initialSolutionSet.getType(),
        initialWorkset.getType(),
        this,
        solutionSetDelta,
        newWorkset,
        keys,
        maxIterations);
}
Closes the delta iteration . This method defines the end of the delta iteration s function .
15,045
/**
 * Sets the parallelism for the iteration.
 *
 * @param parallelism a positive value, or ExecutionConfig.PARALLELISM_DEFAULT
 */
public DeltaIteration<ST, WT> parallelism(int parallelism) {
    Preconditions.checkArgument(parallelism > 0 || parallelism == ExecutionConfig.PARALLELISM_DEFAULT,
        "The parallelism must be positive, or ExecutionConfig.PARALLELISM_DEFAULT (use default).");
    this.parallelism = parallelism;
    return this;
}
Sets the parallelism for the iteration .
15,046
/**
 * Sets the minimum and preferred resources for the iteration, overriding the
 * defaults. Both specs must be valid and the minimum must not exceed the
 * preferred resources.
 */
private DeltaIteration<ST, WT> setResources(ResourceSpec minResources, ResourceSpec preferredResources) {
    Preconditions.checkNotNull(minResources, "The min resources must be not null.");
    Preconditions.checkNotNull(preferredResources, "The preferred resources must be not null.");
    Preconditions.checkArgument(
        minResources.isValid()
            && preferredResources.isValid()
            && minResources.lessThanOrEqual(preferredResources),
        "The values in resources must be not less than 0 and the preferred resources must be greater than the min resources.");
    this.minResources = minResources;
    this.preferredResources = preferredResources;
    return this;
}
Sets the minimum and preferred resources for the iteration . This overrides the default resources . The lower and upper resource limits will be considered in dynamic resource resize feature for future plan .
15,047
/**
 * Sets the resources for the iteration; minimum and preferred resources are
 * the same by default.
 */
private DeltaIteration<ST, WT> setResources(ResourceSpec resources) {
    Preconditions.checkNotNull(resources, "The resources must be not null.");
    Preconditions.checkArgument(resources.isValid(),
        "The values in resources must be not less than 0.");
    // Single spec serves as both the minimum and the preferred resources.
    this.minResources = resources;
    this.preferredResources = resources;
    return this;
}
Sets the resources for the iteration and the minimum and preferred resources are the same by default . The lower and upper resource limits will be considered in dynamic resource resize feature for future plan .
15,048
/**
 * Requests, for each given partition, the latest offset before the given time and
 * applies the results to the partition states.
 *
 * @param consumer the Kafka consumer used for the offset request
 * @param partitions the partition states to query and update
 * @param whichTime the timestamp to request offsets before
 * @throws IOException if the offsets cannot be retrieved
 */
private static void requestAndSetSpecificTimeOffsetsFromKafka(
        SimpleConsumer consumer,
        List<KafkaTopicPartitionState<TopicAndPartition>> partitions,
        long whichTime) throws IOException {

    final Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    for (KafkaTopicPartitionState<TopicAndPartition> partition : partitions) {
        // Ask for at most one offset (the latest before whichTime) per partition.
        requestInfo.put(partition.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(whichTime, 1));
    }
    requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo);
}
Request offsets before a specific time for a set of partitions via a Kafka consumer .
15,049
/**
 * Requests offsets from Kafka for the given per-partition request infos and applies the
 * results to the internal partition states.
 *
 * <p>The request is retried up to 3 times if the broker reports per-partition errors; on the
 * third failure an {@link IOException} carrying all partition error details is thrown.
 * For each partition that was part of the request, the state's offset is set to the returned
 * offset minus one (the returned value is the next offset to read, the state stores the last
 * consumed offset).
 *
 * @param consumer the Kafka consumer used to issue the offset request
 * @param partitionStates the partition states to update from the response
 * @param partitionToRequestInfo per-partition offset request information; only partitions
 *     present in this map have their state updated
 * @throws IOException if the offsets could not be obtained after 3 attempts
 */
private static void requestAndSetOffsetsFromKafka ( SimpleConsumer consumer , List < KafkaTopicPartitionState < TopicAndPartition > > partitionStates , Map < TopicAndPartition , PartitionOffsetRequestInfo > partitionToRequestInfo ) throws IOException { int retries = 0 ; OffsetResponse response ; while ( true ) { kafka . javaapi . OffsetRequest request = new kafka . javaapi . OffsetRequest ( partitionToRequestInfo , kafka . api . OffsetRequest . CurrentVersion ( ) , consumer . clientId ( ) ) ; response = consumer . getOffsetsBefore ( request ) ; if ( response . hasError ( ) ) { StringBuilder exception = new StringBuilder ( ) ; for ( KafkaTopicPartitionState < TopicAndPartition > part : partitionStates ) { short code ; if ( ( code = response . errorCode ( part . getTopic ( ) , part . getPartition ( ) ) ) != ErrorMapping . NoError ( ) ) { exception . append ( "\nException for topic=" ) . append ( part . getTopic ( ) ) . append ( " partition=" ) . append ( part . getPartition ( ) ) . append ( ": " ) . append ( ExceptionUtils . stringifyException ( ErrorMapping . exceptionFor ( code ) ) ) ; } } if ( ++ retries >= 3 ) { throw new IOException ( "Unable to get last offset for partitions " + partitionStates + ": " + exception . toString ( ) ) ; } else { LOG . warn ( "Unable to get last offset for partitions: Exception(s): {}" , exception ) ; } } else { break ; } } for ( KafkaTopicPartitionState < TopicAndPartition > part : partitionStates ) { if ( partitionToRequestInfo . containsKey ( part . getKafkaPartitionHandle ( ) ) ) { final long offset = response . offsets ( part . getTopic ( ) , part . getPartition ( ) ) [ 0 ] ; part . setOffset ( offset - 1 ) ; } } }
Request offsets from Kafka with a specified set of partitions' offset request information. The returned offsets are used to set the internal partition states.
15,050
/**
 * Retries the given asynchronous operation the given number of times in case of a failure.
 *
 * @param operation supplier producing a new operation future per attempt
 * @param retries number of retries after the initial attempt
 * @param executor executor on which completions are handled
 * @param <T> type of the operation result
 * @return future completed with the first successful result, or exceptionally once retries are exhausted
 */
public static <T> CompletableFuture<T> retry(
        final Supplier<CompletableFuture<T>> operation,
        final int retries,
        final Executor executor) {

    final CompletableFuture<T> resultFuture = new CompletableFuture<>();
    // Delegates to the recursive helper which decrements the retry budget on each failure.
    retryOperation(resultFuture, operation, retries, executor);
    return resultFuture;
}
Retry the given operation the given number of times in case of a failure .
15,051
/**
 * Recursive helper which retries the provided operation in case of a failure.
 *
 * <p>Each attempt runs the supplied operation; on failure (other than cancellation) the
 * method re-invokes itself with a decremented retry budget until retries are exhausted,
 * at which point the result future is failed with a {@code RetryException}. A cancelled
 * operation fails the result immediately. Completing the result future (e.g. by
 * cancellation from the caller) cancels the in-flight operation future.
 *
 * @param resultFuture future to complete with the final outcome
 * @param operation supplier producing a new operation future per attempt
 * @param retries remaining number of retries
 * @param executor executor on which completion callbacks run
 * @param <T> type of the operation result
 */
private static < T > void retryOperation ( final CompletableFuture < T > resultFuture , final Supplier < CompletableFuture < T > > operation , final int retries , final Executor executor ) { if ( ! resultFuture . isDone ( ) ) { final CompletableFuture < T > operationFuture = operation . get ( ) ; operationFuture . whenCompleteAsync ( ( t , throwable ) -> { if ( throwable != null ) { if ( throwable instanceof CancellationException ) { resultFuture . completeExceptionally ( new RetryException ( "Operation future was cancelled." , throwable ) ) ; } else { if ( retries > 0 ) { retryOperation ( resultFuture , operation , retries - 1 , executor ) ; } else { resultFuture . completeExceptionally ( new RetryException ( "Could not complete the operation. Number of retries " + "has been exhausted." , throwable ) ) ; } } } else { resultFuture . complete ( t ) ; } } , executor ) ; resultFuture . whenComplete ( ( t , throwable ) -> operationFuture . cancel ( false ) ) ; } }
Helper method which retries the provided operation in case of a failure .
15,052
/**
 * Retries the given operation with the given delay between successful completions whose
 * result does not satisfy the acceptance predicate, until the deadline expires.
 *
 * @param operation supplier producing a new operation future per attempt
 * @param retryDelay delay between attempts
 * @param deadline deadline after which no further attempts are made
 * @param acceptancePredicate predicate an acceptable result must satisfy
 * @param scheduledExecutor executor used to schedule delayed retries
 * @param <T> type of the operation result
 * @return future completed with the first accepted result
 */
public static <T> CompletableFuture<T> retrySuccessfulWithDelay(
        final Supplier<CompletableFuture<T>> operation,
        final Time retryDelay,
        final Deadline deadline,
        final Predicate<T> acceptancePredicate,
        final ScheduledExecutor scheduledExecutor) {

    final CompletableFuture<T> resultFuture = new CompletableFuture<>();
    retrySuccessfulOperationWithDelay(
        resultFuture, operation, retryDelay, deadline, acceptancePredicate, scheduledExecutor);
    return resultFuture;
}
Retry the given operation with the given delay in between successful completions where the result does not match a given predicate .
15,053
/**
 * Runs the given asynchronous action after the completion of the given future, regardless
 * of whether the future completed normally or exceptionally.
 *
 * @param future future whose completion triggers the action
 * @param composedAction action to run afterwards
 * @return future completed once both the input future and the action have completed; if
 *     the input future failed, its exception is propagated (with the action's exception,
 *     if any, attached)
 */
public static CompletableFuture<Void> composeAfterwards(
        CompletableFuture<?> future, Supplier<CompletableFuture<?>> composedAction) {

    final CompletableFuture<Void> result = new CompletableFuture<>();
    future.whenComplete((Object ignoredValue, Throwable inputThrowable) -> {
        final CompletableFuture<?> actionFuture = composedAction.get();
        actionFuture.whenComplete((Object ignoredActionValue, Throwable actionThrowable) -> {
            if (actionThrowable != null) {
                // Keep the original failure primary; the action's failure is attached.
                result.completeExceptionally(
                    ExceptionUtils.firstOrSuppressed(actionThrowable, inputThrowable));
            } else if (inputThrowable != null) {
                result.completeExceptionally(inputThrowable);
            } else {
                result.complete(null);
            }
        });
    });
    return result;
}
Run the given asynchronous action after the completion of the given future. The given future can be completed normally or exceptionally. In case of an exceptional completion, the asynchronous action's exception will be added to the initial exception.
15,054
/**
 * Entry point of the Kafka data producer: creates one collector per partition and
 * hands them to the standalone generator.
 *
 * @param args ignored
 * @throws Exception if the generator fails
 */
public static void main(String[] args) throws Exception {
    final KafkaCollector[] partitionCollectors = new KafkaCollector[NUM_PARTITIONS];
    for (int partition = 0; partition < partitionCollectors.length; partition++) {
        partitionCollectors[partition] = new KafkaCollector(BROKER_ADDRESS, TOPIC, partition);
    }
    StandaloneThreadedGenerator.runGenerator(partitionCollectors);
}
Entry point to the kafka data producer .
15,055
/**
 * Suspends this pool, meaning it loses its authority to accept and distribute slots:
 * all pending slot requests are cancelled at the resource manager and the internal
 * state is cleared.
 */
public void suspend() {
    componentMainThreadExecutor.assertRunningInMainThread();
    log.info("Suspending SlotPool.");
    // Cancel every slot request that is still pending at the resource manager.
    for (AllocationID pendingAllocationId : pendingRequests.keySetB()) {
        resourceManagerGateway.cancelSlotRequest(pendingAllocationId);
    }
    jobMasterId = null;
    resourceManagerGateway = null;
    clear();
}
Suspends this pool, meaning it has lost its authority to accept and distribute slots.
15,056
/**
 * Checks whether a pending request with the given slot request id exists and removes it
 * from the internal data structures.
 *
 * @param requestId id of the request to remove
 * @return the removed request, or {@code null} if none was found
 */
private PendingRequest removePendingRequest(SlotRequestId requestId) {
    final PendingRequest requestWaitingForRm = waitingForResourceManager.remove(requestId);
    if (requestWaitingForRm == null) {
        // Not waiting for the resource manager; it may be a request already sent to it.
        return pendingRequests.removeKeyA(requestId);
    }
    assert !pendingRequests.containsKeyA(requestId) : "A pending requests should only be part of either " + "the pendingRequests or waitingForResourceManager but not both.";
    return requestWaitingForRm;
}
Checks whether there exists a pending request with the given slot request id and removes it from the internal data structures .
15,057
/**
 * Tries to fulfill a pending slot request with the given allocated slot, or adds the slot
 * to the set of available slots if no matching request exists.
 *
 * @param allocatedSlot slot to hand out or park; must not be in use
 */
private void tryFulfillSlotRequestOrMakeAvailable(AllocatedSlot allocatedSlot) {
    Preconditions.checkState(!allocatedSlot.isUsed(), "Provided slot is still in use.");
    final PendingRequest matchingRequest = pollMatchingPendingRequest(allocatedSlot);
    if (matchingRequest == null) {
        log.debug("Adding returned slot [{}] to available slots", allocatedSlot.getAllocationId());
        availableSlots.add(allocatedSlot, clock.relativeTimeMillis());
    } else {
        log.debug("Fulfilling pending slot request [{}] early with returned slot [{}]",
            matchingRequest.getSlotRequestId(), allocatedSlot.getAllocationId());
        allocatedSlots.add(matchingRequest.getSlotRequestId(), allocatedSlot);
        matchingRequest.getAllocatedSlotFuture().complete(allocatedSlot);
    }
}
Tries to fulfill with the given allocated slot a pending slot request or add the allocated slot to the set of available slots if no matching request is available .
15,058
/**
 * Fails the specified allocation: a still-pending request is failed directly; otherwise
 * the already-allocated slot (if any) is failed.
 *
 * @param allocationID allocation to fail
 * @param cause reason for the failure
 * @return the resource id of an empty task executor, if failing the allocated slot left it empty
 */
public Optional<ResourceID> failAllocation(final AllocationID allocationID, final Exception cause) {
    componentMainThreadExecutor.assertRunningInMainThread();
    final PendingRequest failedRequest = pendingRequests.removeKeyB(allocationID);
    if (failedRequest == null) {
        // No pending request; the allocation may already be backed by a slot.
        return tryFailingAllocatedSlot(allocationID, cause);
    }
    failPendingRequest(failedRequest, cause);
    return Optional.empty();
}
Fail the specified allocation and release the corresponding slot if we have one. This may be triggered by the JobManager when some slot allocation failed with rpcTimeout, or by the TaskManager when it finds out something went wrong with the slot and decides to take it back.
15,059
/**
 * Registers a TaskManager with this pool; only slots from registered TaskManagers are
 * considered valid.
 *
 * @param resourceID resource id of the TaskExecutor to register
 * @return {@code true} if the TaskExecutor was not registered before
 */
public boolean registerTaskManager(final ResourceID resourceID) {
    componentMainThreadExecutor.assertRunningInMainThread();
    log.debug("Register new TaskExecutor {}.", resourceID);
    return registeredTaskManagers.add(resourceID);
}
Register a TaskManager with this pool; only slots coming from a registered TaskManager will be considered valid. It also provides a way for us to keep dead or abnormal TaskManagers out of this pool.
15,060
/**
 * Unregisters a TaskManager from this pool, releasing all related slots and canceling tasks.
 *
 * @param resourceId resource id of the TaskExecutor to release
 * @param cause reason for the release
 * @return {@code true} if the TaskExecutor was registered and has been released
 */
public boolean releaseTaskManager(final ResourceID resourceId, final Exception cause) {
    componentMainThreadExecutor.assertRunningInMainThread();
    if (!registeredTaskManagers.remove(resourceId)) {
        return false;
    }
    releaseTaskManagerInternal(resourceId, cause);
    return true;
}
Unregister a TaskManager from this pool; all related slots will be released and tasks canceled. Called when we find that a TaskManager has become dead or abnormal and we decide to stop using slots from it.
15,061
/**
 * Checks the available slots and releases those that have been idle longer than
 * {@code idleSlotTimeout}.
 *
 * <p>For each expired slot that can still be removed from the available set, a free-slot
 * RPC is sent to the owning TaskExecutor. If that RPC fails and the TaskExecutor is still
 * registered, the slot is put back into circulation via
 * {@code tryFulfillSlotRequestOrMakeAvailable}; if the TaskExecutor is no longer
 * registered, the slot is discarded. Finally, this check re-schedules itself to run again
 * after {@code idleSlotTimeout}.
 */
private void checkIdleSlot ( ) { final long currentRelativeTimeMillis = clock . relativeTimeMillis ( ) ; final List < AllocatedSlot > expiredSlots = new ArrayList < > ( availableSlots . size ( ) ) ; for ( SlotAndTimestamp slotAndTimestamp : availableSlots . availableSlots . values ( ) ) { if ( currentRelativeTimeMillis - slotAndTimestamp . timestamp > idleSlotTimeout . toMilliseconds ( ) ) { expiredSlots . add ( slotAndTimestamp . slot ) ; } } final FlinkException cause = new FlinkException ( "Releasing idle slot." ) ; for ( AllocatedSlot expiredSlot : expiredSlots ) { final AllocationID allocationID = expiredSlot . getAllocationId ( ) ; if ( availableSlots . tryRemove ( allocationID ) != null ) { log . info ( "Releasing idle slot [{}]." , allocationID ) ; final CompletableFuture < Acknowledge > freeSlotFuture = expiredSlot . getTaskManagerGateway ( ) . freeSlot ( allocationID , cause , rpcTimeout ) ; FutureUtils . whenCompleteAsyncIfNotDone ( freeSlotFuture , componentMainThreadExecutor , ( Acknowledge ignored , Throwable throwable ) -> { if ( throwable != null ) { if ( registeredTaskManagers . contains ( expiredSlot . getTaskManagerId ( ) ) ) { log . debug ( "Releasing slot [{}] of registered TaskExecutor {} failed. " + "Trying to fulfill a different slot request." , allocationID , expiredSlot . getTaskManagerId ( ) , throwable ) ; tryFulfillSlotRequestOrMakeAvailable ( expiredSlot ) ; } else { log . debug ( "Releasing slot [{}] failed and owning TaskExecutor {} is no " + "longer registered. Discarding slot." , allocationID , expiredSlot . getTaskManagerId ( ) ) ; } } } ) ; } } scheduleRunAsync ( this :: checkIdleSlot , idleSlotTimeout ) ; }
Check the available slots and release those that have been idle for a long time.
15,062
/**
 * Clears the internal state of the SlotPool: available and allocated slots, both kinds of
 * pending requests, and the set of registered TaskManagers.
 */
private void clear ( ) { availableSlots . clear ( ) ; allocatedSlots . clear ( ) ; pendingRequests . clear ( ) ; waitingForResourceManager . clear ( ) ; registeredTaskManagers . clear ( ) ; }
Clear the internal state of the SlotPool .
15,063
/**
 * Initializes and starts the {@code MetricQueryService} on the given RPC service.
 *
 * <p>Must not be called after the registry was shut down. A failure to start the service
 * is logged as a warning and otherwise swallowed (metrics will simply not be available to
 * the web interface).
 *
 * @param rpcService the RPC service to run the query service on
 * @param resourceID the resource id used when creating the query service
 */
public void startQueryService ( RpcService rpcService , ResourceID resourceID ) { synchronized ( lock ) { Preconditions . checkState ( ! isShutdown ( ) , "The metric registry has already been shut down." ) ; try { metricQueryServiceRpcService = rpcService ; queryService = MetricQueryService . createMetricQueryService ( rpcService , resourceID , maximumFramesize ) ; queryService . start ( ) ; } catch ( Exception e ) { LOG . warn ( "Could not start MetricDumpActor. No metrics will be submitted to the WebInterface." , e ) ; } } }
Initializes the MetricQueryService .
15,064
/**
 * Creates and returns the task manager services from the given configuration.
 *
 * @param taskManagerServicesConfiguration configuration describing the services to build
 * @param taskManagerMetricGroup metric group of the task manager
 * @param resourceID resource id of the task manager
 * @param taskIOExecutor executor used for local state store I/O
 * @param freeHeapMemoryWithDefrag free heap memory after defragmentation (GC)
 * @param maxJvmHeapMemory maximum JVM heap size
 * @return the assembled {@code TaskManagerServices}
 * @throws Exception if any of the services cannot be created or started
 */
public static TaskManagerServices fromConfiguration(
        TaskManagerServicesConfiguration taskManagerServicesConfiguration,
        TaskManagerMetricGroup taskManagerMetricGroup,
        ResourceID resourceID,
        Executor taskIOExecutor,
        long freeHeapMemoryWithDefrag,
        long maxJvmHeapMemory) throws Exception {

    // Fail early if any configured temp directory is unusable.
    checkTempDirs(taskManagerServicesConfiguration.getTmpDirPaths());

    final TaskEventDispatcher taskEventDispatcher = new TaskEventDispatcher();

    final NetworkEnvironment network = new NetworkEnvironment(
        taskManagerServicesConfiguration.getNetworkConfig(),
        taskEventDispatcher,
        taskManagerMetricGroup);
    network.start();

    final KvStateService kvStateService =
        KvStateService.fromConfiguration(taskManagerServicesConfiguration);
    kvStateService.start();

    final TaskManagerLocation taskManagerLocation = new TaskManagerLocation(
        resourceID,
        taskManagerServicesConfiguration.getTaskManagerAddress(),
        network.getConnectionManager().getDataPort());

    final MemoryManager memoryManager = createMemoryManager(
        taskManagerServicesConfiguration, freeHeapMemoryWithDefrag, maxJvmHeapMemory);

    final IOManager ioManager =
        new IOManagerAsync(taskManagerServicesConfiguration.getTmpDirPaths());

    final BroadcastVariableManager broadcastVariableManager = new BroadcastVariableManager();

    // One generic resource profile per configured task slot.
    final List<ResourceProfile> resourceProfiles =
        new ArrayList<>(taskManagerServicesConfiguration.getNumberOfSlots());
    for (int i = 0; i < taskManagerServicesConfiguration.getNumberOfSlots(); i++) {
        resourceProfiles.add(ResourceProfile.ANY);
    }

    final TimerService<AllocationID> timerService = new TimerService<>(
        new ScheduledThreadPoolExecutor(1),
        taskManagerServicesConfiguration.getTimerServiceShutdownTimeout());

    final TaskSlotTable taskSlotTable = new TaskSlotTable(resourceProfiles, timerService);

    final JobManagerTable jobManagerTable = new JobManagerTable();

    final JobLeaderService jobLeaderService = new JobLeaderService(
        taskManagerLocation,
        taskManagerServicesConfiguration.getRetryingRegistrationConfiguration());

    // Resolve the root directories used for local state recovery.
    final String[] stateRootDirectoryStrings =
        taskManagerServicesConfiguration.getLocalRecoveryStateRootDirectories();
    final File[] stateRootDirectoryFiles = new File[stateRootDirectoryStrings.length];
    for (int i = 0; i < stateRootDirectoryStrings.length; ++i) {
        stateRootDirectoryFiles[i] =
            new File(stateRootDirectoryStrings[i], LOCAL_STATE_SUB_DIRECTORY_ROOT);
    }

    final TaskExecutorLocalStateStoresManager taskStateManager =
        new TaskExecutorLocalStateStoresManager(
            taskManagerServicesConfiguration.isLocalRecoveryEnabled(),
            stateRootDirectoryFiles,
            taskIOExecutor);

    return new TaskManagerServices(
        taskManagerLocation,
        memoryManager,
        ioManager,
        network,
        kvStateService,
        broadcastVariableManager,
        taskSlotTable,
        jobManagerTable,
        jobLeaderService,
        taskStateManager,
        taskEventDispatcher);
}
Creates and returns the task manager services .
15,065
/**
 * Creates an external catalog from the given descriptor.
 *
 * @param descriptor descriptor carrying the catalog properties
 * @return the external catalog created by the matching factory
 */
public static ExternalCatalog findAndCreateExternalCatalog(Descriptor descriptor) {
    final Map<String, String> properties = descriptor.toProperties();
    final ExternalCatalogFactory factory =
        TableFactoryService.find(ExternalCatalogFactory.class, properties);
    return factory.createExternalCatalog(properties);
}
Returns an external catalog .
15,066
/**
 * Creates a table source matching the given descriptor.
 *
 * @param descriptor descriptor carrying the source properties
 * @param <T> type produced by the source
 * @return the created table source
 * @throws TableException if the lookup or creation fails
 */
public static <T> TableSource<T> findAndCreateTableSource(Descriptor descriptor) {
    final Map<String, String> properties = descriptor.toProperties();
    try {
        return TableFactoryService
            .find(TableSourceFactory.class, properties)
            .createTableSource(properties);
    } catch (Throwable t) {
        throw new TableException("findAndCreateTableSource failed.", t);
    }
}
Returns a table source matching the descriptor .
15,067
/**
 * Creates a table sink matching the given descriptor.
 *
 * @param descriptor descriptor carrying the sink properties
 * @param <T> type consumed by the sink
 * @return the created table sink
 * @throws TableException if the lookup or creation fails
 */
public static <T> TableSink<T> findAndCreateTableSink(Descriptor descriptor) {
    final Map<String, String> properties = descriptor.toProperties();
    try {
        return TableFactoryService
            .find(TableSinkFactory.class, properties)
            .createTableSink(properties);
    } catch (Throwable t) {
        throw new TableException("findAndCreateTableSink failed.", t);
    }
}
Returns a table sink matching the descriptor .
15,068
/**
 * Binds the given inputs as the broadcast variables of this operator, discarding any
 * previously registered broadcast inputs.
 *
 * @param inputs map from broadcast variable name to producing operator
 * @param <T> type of the broadcast inputs
 */
public <T> void setBroadcastVariables(Map<String, Operator<T>> inputs) {
    // Replace, rather than merge with, the existing broadcast inputs.
    this.broadcastInputs.clear();
    this.broadcastInputs.putAll(inputs);
}
Clears all previous broadcast inputs and binds the given inputs as broadcast variables of this operator .
15,069
/**
 * Wraps a single class object into a one-element array of that class type.
 *
 * @param clazz the class object to wrap
 * @param <U> the type represented by the class object
 * @return a new array containing only {@code clazz}
 */
protected static <U> Class<U>[] asArray(Class<U> clazz) {
    @SuppressWarnings("unchecked")
    final Class<U>[] wrapped = (Class<U>[]) new Class<?>[] {clazz};
    return wrapped;
}
Generic utility function that wraps a single class object into an array of that class type .
15,070
/**
 * Returns an empty class array of the requested element type.
 *
 * @param <U> the type represented by the class objects
 * @return a zero-length {@code Class} array
 */
protected static <U> Class<U>[] emptyClassArray() {
    @SuppressWarnings("unchecked")
    final Class<U>[] empty = (Class<U>[]) new Class<?>[0];
    return empty;
}
Generic utility function that returns an empty class array .
15,071
/**
 * Signals that the metrics are still in use; triggers a fetch if at least
 * {@code updateInterval} milliseconds have passed since the last one.
 */
public void update() {
    synchronized (this) {
        final long now = System.currentTimeMillis();
        final boolean intervalElapsed = now - lastUpdateTime > updateInterval;
        if (intervalElapsed) {
            lastUpdateTime = now;
            fetchMetrics();
        }
    }
}
This method can be used to signal this MetricFetcher that the metrics are still in use and should be updated .
15,072
/**
 * Retrieves the metric query service gateway at the given address and, on success,
 * queries its metrics.
 *
 * @param queryServiceAddress address of the query service to contact
 */
private void retrieveAndQueryMetrics(String queryServiceAddress) {
    LOG.debug("Retrieve metric query service gateway for {}", queryServiceAddress);
    final CompletableFuture<MetricQueryServiceGateway> gatewayFuture =
        queryServiceRetriever.retrieveService(queryServiceAddress);
    gatewayFuture.whenCompleteAsync(
        (MetricQueryServiceGateway gateway, Throwable failure) -> {
            if (failure == null) {
                queryMetrics(gateway);
            } else {
                // Best effort: a missing gateway only means no metrics this round.
                LOG.debug("Could not retrieve QueryServiceGateway.", failure);
            }
        },
        executor);
}
Retrieves and queries the specified QueryServiceGateway .
15,073
/**
 * Queries the metrics from the given query service gateway and stores the deserialized
 * dumps.
 *
 * @param queryServiceGateway gateway to query
 */
private void queryMetrics(final MetricQueryServiceGateway queryServiceGateway) {
    LOG.debug("Query metrics for {}.", queryServiceGateway.getAddress());
    queryServiceGateway
        .queryMetrics(timeout)
        .whenCompleteAsync(
            (MetricDumpSerialization.MetricSerializationResult result, Throwable failure) -> {
                if (failure == null) {
                    metrics.addAll(deserializer.deserialize(result));
                } else {
                    // Best effort: failed fetches are only logged.
                    LOG.debug("Fetching metrics failed.", failure);
                }
            },
            executor);
}
Query the metrics from the given QueryServiceGateway .
15,074
/**
 * Creates a cluster with a fresh correlation-id counter and an empty map of named relations.
 *
 * @param planner the planner driving the cluster
 * @param rexBuilder builder whose type factory the cluster uses
 * @return the new cluster
 */
public static RelOptCluster create(RelOptPlanner planner, RexBuilder rexBuilder) {
    return new RelOptCluster(
        planner,
        rexBuilder.getTypeFactory(),
        rexBuilder,
        new AtomicInteger(0),
        new HashMap<String, RelNode>());
}
Creates a cluster .
15,075
/**
 * Builds the result expression of the average: when all inputs were null the count is 0,
 * so NULL is returned instead of dividing by zero.
 *
 * @return expression yielding {@code sum / count}, or NULL when {@code count == 0}
 */
public Expression getValueExpression() {
    final Expression noInput = equalTo(count, literal(0L));
    return ifThenElse(noInput, nullOf(getResultType()), div(sum, count));
}
If all input are nulls count will be 0 and we will get null after the division .
15,076
/**
 * Closes the current part file and moves it from the in-progress state to the pending state.
 *
 * @param bucketState state of the bucket whose part file is closed
 * @throws Exception if closing the writer or renaming the file fails
 */
private void closeCurrentPartFile(BucketState<T> bucketState) throws Exception {
    if (bucketState.isWriterOpen) {
        bucketState.writer.close();
        bucketState.isWriterOpen = false;
    }
    final String currentFile = bucketState.currentFile;
    if (currentFile != null) {
        final Path partPath = new Path(currentFile);
        final Path inProgressPath = getInProgressPathFor(partPath);
        final Path pendingPath = getPendingPathFor(partPath);
        // Rename in-progress -> pending; the pending file is committed on checkpoint.
        fs.rename(inProgressPath, pendingPath);
        LOG.debug("Moving in-progress bucket {} to pending file {}", inProgressPath, pendingPath);
        bucketState.pendingFiles.add(partPath.toString());
        bucketState.currentFile = null;
    }
}
Closes the current part file and moves it from the in - progress state to the pending state .
15,077
/**
 * Tries to add the checkpoint to the cache. Null checkpoints, a disabled cache, and
 * checkpoints that are still in progress are ignored.
 *
 * @param checkpoint checkpoint statistics to cache, may be {@code null}
 */
public void tryAdd(AbstractCheckpointStats checkpoint) {
    if (cache == null || checkpoint == null) {
        return;
    }
    // In-progress checkpoints still change; only cache settled ones.
    if (!checkpoint.getStatus().isInProgress()) {
        cache.put(checkpoint.getCheckpointId(), checkpoint);
    }
}
Try to add the checkpoint to the cache .
15,078
/**
 * Executes the given supplier with the execution context's classloader installed as the
 * thread context classloader, restoring the previous one afterwards.
 *
 * @param supplier computation to run
 * @param <R> result type
 * @return the supplier's result
 */
public <R> R wrapClassLoader(Supplier<R> supplier) {
    try (TemporaryClassLoaderContext context = new TemporaryClassLoaderContext(classLoader)) {
        return supplier.get();
    }
}
Executes the given supplier using the execution context's classloader as the thread classloader.
15,079
/**
 * Opens this function: starts the data streamer and broadcasts the configured variables
 * to it.
 *
 * @param config the task configuration
 * @throws IOException if the streamer cannot be opened or the broadcast fails
 */
public void open ( Configuration config ) throws IOException { streamer . open ( ) ; streamer . sendBroadCastVariables ( config ) ; }
Opens this function .
15,080
/**
 * Creates a snapshot of this accumulator registry.
 *
 * @return the snapshot, or {@code null} if the accumulators could not be serialized
 */
public AccumulatorSnapshot getSnapshot() {
    try {
        return new AccumulatorSnapshot(jobID, taskID, userAccumulators);
    } catch (Throwable failure) {
        // Serialization problems must not fail the task; report no snapshot instead.
        LOG.warn("Failed to serialize accumulators for task.", failure);
        return null;
    }
}
Creates a snapshot of this accumulator registry .
15,081
/**
 * Registers the KvState instance at the KvStateRegistry and remembers the registration
 * so it can later be undone.
 *
 * @param keyGroupRange key group range the state is responsible for
 * @param registrationName name under which the state is registered
 * @param kvState the state instance to register
 */
public void registerKvState(KeyGroupRange keyGroupRange, String registrationName, InternalKvState<?, ?, ?> kvState) {
    final KvStateID newStateId =
        registry.registerKvState(jobId, jobVertexId, keyGroupRange, registrationName, kvState);
    registeredKvStates.add(new KvStateInfo(keyGroupRange, registrationName, newStateId));
}
Registers the KvState instance at the KvStateRegistry .
15,082
/**
 * Unregisters every previously registered KvState instance from the KvStateRegistry.
 */
public void unregisterAll() {
    for (KvStateInfo registered : registeredKvStates) {
        registry.unregisterKvState(
            jobId,
            jobVertexId,
            registered.keyGroupRange,
            registered.registrationName,
            registered.kvStateId);
    }
}
Unregisters all registered KvState instances from the KvStateRegistry .
15,083
/**
 * Creates a new config option using this option's key and default value, with the given
 * fallback keys prepended to the existing alternative keys.
 *
 * @param fallbackKeys fallback keys to add, checked before the current alternatives
 * @return a new option with the merged fallback keys
 */
public ConfigOption<T> withFallbackKeys(String... fallbackKeys) {
    // New fallback keys come first, followed by the already-configured alternatives.
    final FallbackKey[] merged = Stream.concat(
            Arrays.stream(fallbackKeys).map(FallbackKey::createFallbackKey),
            Arrays.stream(this.fallbackKeys))
        .toArray(FallbackKey[]::new);
    return new ConfigOption<>(key, description, defaultValue, merged);
}
Creates a new config option, using this option's key and default value, and adding the given fallback keys.
15,084
/**
 * Creates a new config option using this option's key and default value, with the given
 * deprecated keys appended after the existing alternative keys.
 *
 * @param deprecatedKeys deprecated keys to add, checked after the current alternatives
 * @return a new option with the merged alternative keys
 */
public ConfigOption<T> withDeprecatedKeys(String... deprecatedKeys) {
    // Existing alternatives come first; the new deprecated keys are appended.
    final FallbackKey[] merged = Stream.concat(
            Arrays.stream(this.fallbackKeys),
            Arrays.stream(deprecatedKeys).map(FallbackKey::createDeprecatedKey))
        .toArray(FallbackKey[]::new);
    return new ConfigOption<>(key, description, defaultValue, merged);
}
Creates a new config option, using this option's key and default value, and adding the given deprecated keys.
15,085
/**
 * Gets the fallback keys in the order in which they are checked.
 *
 * @return the fallback keys, or an empty collection if none are configured
 */
public Iterable<FallbackKey> fallbackKeys() {
    if (fallbackKeys == EMPTY) {
        return Collections.emptyList();
    }
    return Arrays.asList(fallbackKeys);
}
Gets the fallback keys in the order to be checked .
15,086
/**
 * Computes an aggregated execution state for a job vertex from per-state subtask counts.
 *
 * @param verticesPerState number of subtasks per {@code ExecutionState} ordinal; must have
 *     exactly one entry per execution state
 * @param parallelism the vertex parallelism, used to decide whether FINISHED is complete
 * @return the aggregated state
 * @throws IllegalArgumentException if the array is null or of the wrong length
 */
public static ExecutionState getAggregateJobVertexState(int[] verticesPerState, int parallelism) {
    if (verticesPerState == null || verticesPerState.length != ExecutionState.values().length) {
        throw new IllegalArgumentException("Must provide an array as large as there are execution states.");
    }
    // Priority order: FAILED > CANCELING > CANCELED > RUNNING > FINISHED > CREATED.
    if (verticesPerState[ExecutionState.FAILED.ordinal()] > 0) {
        return ExecutionState.FAILED;
    }
    if (verticesPerState[ExecutionState.CANCELING.ordinal()] > 0) {
        return ExecutionState.CANCELING;
    }
    if (verticesPerState[ExecutionState.CANCELED.ordinal()] > 0) {
        return ExecutionState.CANCELED;
    }
    if (verticesPerState[ExecutionState.RUNNING.ordinal()] > 0) {
        return ExecutionState.RUNNING;
    }
    if (verticesPerState[ExecutionState.FINISHED.ordinal()] > 0) {
        // Only FINISHED when every subtask finished; otherwise some still have to run.
        return verticesPerState[ExecutionState.FINISHED.ordinal()] == parallelism
            ? ExecutionState.FINISHED
            : ExecutionState.RUNNING;
    }
    return ExecutionState.CREATED;
}
A utility function that computes an aggregated state for the vertex .
15,087
/**
 * Indicates whether the current user holds an HDFS delegation token.
 *
 * @return {@code true} if a token of kind {@code HDFS_DELEGATION_TOKEN_KIND} is present
 * @throws Exception if the current user cannot be determined
 */
public static boolean hasHDFSDelegationToken() throws Exception {
    final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    for (Token<? extends TokenIdentifier> token : currentUser.getTokens()) {
        if (HDFS_DELEGATION_TOKEN_KIND.equals(token.getKind())) {
            return true;
        }
    }
    return false;
}
Indicates whether the current user has an HDFS delegation token .
15,088
/**
 * Checks whether the Hadoop dependency on the classpath is at least of the given version.
 *
 * @param major required major version
 * @param minor required minor version
 * @return {@code true} if the detected Hadoop version is {@code >= major.minor}
 * @throws FlinkRuntimeException if the Hadoop version string cannot be parsed
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
    String versionString = VersionInfo.getVersion();
    String[] versionParts = versionString.split("\\.");
    if (versionParts.length < 2) {
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString);
    }
    try {
        int maj = Integer.parseInt(versionParts[0]);
        int min = Integer.parseInt(versionParts[1]);
        return maj > major || (maj == major && min >= minor);
    } catch (NumberFormatException e) {
        // Some distributions ship non-numeric version components; surface the declared
        // FlinkRuntimeException with context instead of a bare NumberFormatException.
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString, e);
    }
}
Checks if the Hadoop dependency is at least of the given version .
15,089
/**
 * Creates a {@code ParquetWriterFactory} for an Avro specific type. The Parquet writers
 * use the schema of that specific type to build and write the columnar data.
 *
 * @param type the specific-record class to derive the schema from
 * @param <T> the specific-record type
 * @return a writer factory for the given type
 */
public static <T extends SpecificRecordBase> ParquetWriterFactory<T> forSpecificRecord(Class<T> type) {
    // The schema is captured as a string so the lambda stays serializable.
    final String schemaString = SpecificData.get().getSchema(type).toString();
    final ParquetBuilder<T> writerBuilder =
        out -> createAvroParquetWriter(schemaString, SpecificData.get(), out);
    return new ParquetWriterFactory<>(writerBuilder);
}
Creates a ParquetWriterFactory for an Avro specific type . The Parquet writers will use the schema of that specific type to build and write the columnar data .
15,090
/**
 * Creates a {@code ParquetWriterFactory} that accepts and writes Avro generic records
 * using the given schema.
 *
 * @param schema the Avro schema of the records to write
 * @return a writer factory for generic records
 */
public static ParquetWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
    // The schema is captured as a string so the lambda stays serializable.
    final String schemaString = schema.toString();
    final ParquetBuilder<GenericRecord> writerBuilder =
        out -> createAvroParquetWriter(schemaString, GenericData.get(), out);
    return new ParquetWriterFactory<>(writerBuilder);
}
Creates a ParquetWriterFactory that accepts and writes Avro generic types . The Parquet writers will use the given schema to build and write the columnar data .
15,091
/**
 * Creates a {@code ParquetWriterFactory} for the given type, deriving the Avro schema
 * reflectively from the class.
 *
 * @param type the class to derive the schema from via reflection
 * @param <T> the record type
 * @return a writer factory for the given type
 */
public static <T> ParquetWriterFactory<T> forReflectRecord(Class<T> type) {
    // The schema is captured as a string so the lambda stays serializable.
    final String schemaString = ReflectData.get().getSchema(type).toString();
    final ParquetBuilder<T> writerBuilder =
        out -> createAvroParquetWriter(schemaString, ReflectData.get(), out);
    return new ParquetWriterFactory<>(writerBuilder);
}
Creates a ParquetWriterFactory for the given type . The Parquet writers will use Avro to reflectively create a schema for the type and use that schema to write the columnar data .
15,092
/**
 * Gets the persisted Mesos framework ID from ZooKeeper.
 *
 * @return the framework ID, or {@code Option.empty()} if none was persisted yet
 * @throws Exception if the stored value cannot be read
 */
public Option<Protos.FrameworkID> getFrameworkID() throws Exception {
    synchronized (startStopLock) {
        verifyIsRunning();
        final byte[] rawValue = frameworkIdInZooKeeper.getValue();
        if (rawValue.length == 0) {
            // An empty node means no framework ID has been stored.
            return Option.empty();
        }
        final String id = new String(rawValue, ConfigConstants.DEFAULT_CHARSET);
        return Option.apply(Protos.FrameworkID.newBuilder().setValue(id).build());
    }
}
Get the persisted framework ID .
15,093
/**
 * Updates the persisted Mesos framework ID in ZooKeeper. An undefined option stores an
 * empty value, which is read back as "no framework ID".
 *
 * @param frameworkID the framework ID to persist, or {@code Option.empty()} to clear it
 * @throws Exception if the value cannot be written
 */
public void setFrameworkID(Option<Protos.FrameworkID> frameworkID) throws Exception {
    synchronized (startStopLock) {
        verifyIsRunning();
        final byte[] value;
        if (frameworkID.isDefined()) {
            value = frameworkID.get().getValue().getBytes(ConfigConstants.DEFAULT_CHARSET);
        } else {
            value = new byte[0];
        }
        frameworkIdInZooKeeper.setValue(value);
    }
}
Update the persisted framework ID .
15,094
/**
 * Generates a new, unique Mesos task ID by atomically incrementing a counter stored in
 * ZooKeeper (optimistic compare-and-set, retried until it succeeds).
 *
 * @return the newly generated task ID
 * @throws Exception if the counter cannot be read or updated
 */
public Protos.TaskID newTaskID() throws Exception {
    synchronized (startStopLock) {
        verifyIsRunning();
        int nextCount;
        while (true) {
            final ZooKeeperVersionedValue<Integer> currentCount =
                totalTaskCountInZooKeeper.getVersionedValue();
            nextCount = currentCount.getValue() + 1;
            if (totalTaskCountInZooKeeper.trySetCount(currentCount, nextCount)) {
                break;
            }
            // Lost the race against a concurrent update; re-read and retry.
        }
        return Protos.TaskID.newBuilder().setValue(TASKID_FORMAT.format(nextCount)).build();
    }
}
Generates a new task ID .
15,095
/**
 * Shuts the memory manager down, trying to release all memory it managed: every
 * still-allocated segment is freed and the internal pool is cleared. Memory does not
 * necessarily become reclaimable by the garbage collector, because code that allocated
 * segments may still hold references to them. Idempotent: repeated calls are no-ops.
 */
public void shutdown ( ) { synchronized ( lock ) { if ( ! isShutDown ) { isShutDown = true ; numNonAllocatedPages = 0 ; for ( Set < MemorySegment > segments : allocatedSegments . values ( ) ) { for ( MemorySegment seg : segments ) { seg . free ( ) ; } } memoryPool . clear ( ) ; } } }
Shuts the memory manager down trying to release all the memory it managed . Depending on implementation details the memory does not necessarily become reclaimable by the garbage collector because there might still be references to allocated segments in the code that allocated them from the memory manager .
15,096
/**
 * Tries to release the memory for the specified segment. Requests for {@code null}
 * segments, segments without an owner, or already-freed segments are silently ignored.
 *
 * <p>The segment is removed from its owner's book-keeping set (the owner entry itself is
 * dropped once empty); then the segment is either returned to the pool (pre-allocated
 * mode) or freed, in which case the count of non-allocated pages grows.
 *
 * @param segment the segment to release
 * @throws IllegalStateException if the memory manager has been shut down
 * @throws RuntimeException if the book-keeping reference cannot be removed
 */
public void release ( MemorySegment segment ) { if ( segment == null || segment . getOwner ( ) == null ) { return ; } final Object owner = segment . getOwner ( ) ; synchronized ( lock ) { if ( segment . isFreed ( ) ) { return ; } if ( isShutDown ) { throw new IllegalStateException ( "Memory manager has been shut down." ) ; } try { Set < MemorySegment > segsForOwner = this . allocatedSegments . get ( owner ) ; if ( segsForOwner != null ) { segsForOwner . remove ( segment ) ; if ( segsForOwner . isEmpty ( ) ) { this . allocatedSegments . remove ( owner ) ; } } if ( isPreAllocated ) { memoryPool . returnSegmentToPool ( segment ) ; } else { segment . free ( ) ; numNonAllocatedPages ++ ; } } catch ( Throwable t ) { throw new RuntimeException ( "Error removing book-keeping reference to allocated memory segment." , t ) ; } } }
Tries to release the memory for the specified segment . If the segment has already been released or is null the request is simply ignored .
15,097
/**
 * Tries to release many memory segments together. Null collections are ignored; null or
 * already-freed elements within the collection are skipped.
 *
 * <p>Each segment is removed from its owner's book-keeping set (caching the set while
 * consecutive segments share an owner) and then returned to the pool or freed, analogous
 * to the single-segment release. The whole pass is retried if the collection is modified
 * concurrently while iterating; on success the collection is cleared.
 *
 * @param segments the segments to release; cleared on success
 * @throws IllegalStateException if the memory manager has been shut down
 * @throws RuntimeException if a book-keeping reference cannot be removed
 */
public void release ( Collection < MemorySegment > segments ) { if ( segments == null ) { return ; } synchronized ( lock ) { if ( isShutDown ) { throw new IllegalStateException ( "Memory manager has been shut down." ) ; } boolean successfullyReleased = false ; do { final Iterator < MemorySegment > segmentsIterator = segments . iterator ( ) ; Object lastOwner = null ; Set < MemorySegment > segsForOwner = null ; try { while ( segmentsIterator . hasNext ( ) ) { final MemorySegment seg = segmentsIterator . next ( ) ; if ( seg == null || seg . isFreed ( ) ) { continue ; } final Object owner = seg . getOwner ( ) ; try { if ( lastOwner != owner ) { lastOwner = owner ; segsForOwner = this . allocatedSegments . get ( owner ) ; } if ( segsForOwner != null ) { segsForOwner . remove ( seg ) ; if ( segsForOwner . isEmpty ( ) ) { this . allocatedSegments . remove ( owner ) ; } } if ( isPreAllocated ) { memoryPool . returnSegmentToPool ( seg ) ; } else { seg . free ( ) ; numNonAllocatedPages ++ ; } } catch ( Throwable t ) { throw new RuntimeException ( "Error removing book-keeping reference to allocated memory segment." , t ) ; } } segments . clear ( ) ; successfullyReleased = true ; } catch ( ConcurrentModificationException | NoSuchElementException e ) { } } while ( ! successfullyReleased ) ; } }
Tries to release many memory segments together .
15,098
/**
 * Releases all memory segments registered for the given owner: the owner's entry is
 * removed from the book-keeping map and every segment is returned to the pool
 * (pre-allocated mode) or freed, increasing the count of non-allocated pages. A
 * {@code null} owner or an owner without segments is a no-op.
 *
 * @param owner the owner whose segments are released
 * @throws IllegalStateException if the memory manager has been shut down
 */
public void releaseAll ( Object owner ) { if ( owner == null ) { return ; } synchronized ( lock ) { if ( isShutDown ) { throw new IllegalStateException ( "Memory manager has been shut down." ) ; } final Set < MemorySegment > segments = allocatedSegments . remove ( owner ) ; if ( segments == null || segments . isEmpty ( ) ) { return ; } if ( isPreAllocated ) { for ( MemorySegment seg : segments ) { memoryPool . returnSegmentToPool ( seg ) ; } } else { for ( MemorySegment seg : segments ) { seg . free ( ) ; } numNonAllocatedPages += segments . size ( ) ; } segments . clear ( ) ; } }
Releases all memory segments for the given owner .
15,099
/**
 * Gives all fields of the underlying inputs, in the order of those inputs and the order
 * of the fields within each input.
 *
 * @return the flattened list of field references
 */
public List<FieldReferenceExpression> getAllInputFields() {
    return fieldReferences.stream()
        .flatMap(inputFields -> inputFields.values().stream())
        .collect(toList());
}
Gives all fields of underlying inputs in order of those inputs and order of fields within input .