idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
14,900 | public < M > Graph < K , VV , EV > runGatherSumApplyIteration ( org . apache . flink . graph . gsa . GatherFunction < VV , EV , M > gatherFunction , SumFunction < VV , EV , M > sumFunction , ApplyFunction < K , VV , M > applyFunction , int maximumNumberOfIterations , GSAConfiguration parameters ) { GatherSumApplyIteration < K , VV , EV , M > iteration = GatherSumApplyIteration . withEdges ( edges , gatherFunction , sumFunction , applyFunction , maximumNumberOfIterations ) ; iteration . configure ( parameters ) ; DataSet < Vertex < K , VV > > newVertices = vertices . runOperation ( iteration ) ; return new Graph < > ( newVertices , this . edges , this . context ) ; } | Runs a Gather - Sum - Apply iteration on the graph with configuration options . |
14,901 | public CompletableFuture < Void > shutdown ( ) { final CompletableFuture < Void > newShutdownFuture = new CompletableFuture < > ( ) ; if ( clientShutdownFuture . compareAndSet ( null , newShutdownFuture ) ) { final List < CompletableFuture < Void > > connectionFutures = new ArrayList < > ( ) ; for ( Map . Entry < InetSocketAddress , EstablishedConnection > conn : establishedConnections . entrySet ( ) ) { if ( establishedConnections . remove ( conn . getKey ( ) , conn . getValue ( ) ) ) { connectionFutures . add ( conn . getValue ( ) . close ( ) ) ; } } for ( Map . Entry < InetSocketAddress , PendingConnection > conn : pendingConnections . entrySet ( ) ) { if ( pendingConnections . remove ( conn . getKey ( ) ) != null ) { connectionFutures . add ( conn . getValue ( ) . close ( ) ) ; } } CompletableFuture . allOf ( connectionFutures . toArray ( new CompletableFuture < ? > [ connectionFutures . size ( ) ] ) ) . whenComplete ( ( result , throwable ) -> { if ( throwable != null ) { LOG . warn ( "Problem while shutting down the connections at the {}: {}" , clientName , throwable ) ; } if ( bootstrap != null ) { EventLoopGroup group = bootstrap . group ( ) ; if ( group != null && ! group . isShutdown ( ) ) { group . shutdownGracefully ( 0L , 0L , TimeUnit . MILLISECONDS ) . addListener ( finished -> { if ( finished . isSuccess ( ) ) { newShutdownFuture . complete ( null ) ; } else { newShutdownFuture . completeExceptionally ( finished . cause ( ) ) ; } } ) ; } else { newShutdownFuture . complete ( null ) ; } } else { newShutdownFuture . complete ( null ) ; } } ) ; return newShutdownFuture ; } return clientShutdownFuture . get ( ) ; } | Shuts down the client and closes all connections . |
14,902 | public static Long toTimestamp ( String dateStr , String format , TimeZone tz ) { SimpleDateFormat formatter = FORMATTER_CACHE . get ( format ) ; formatter . setTimeZone ( tz ) ; try { return formatter . parse ( dateStr ) . getTime ( ) ; } catch ( ParseException e ) { return null ; } } | Parse date time string to timestamp based on the given time zone and format . Returns null if parsing failed . |
14,903 | public static Long toTimestampTz ( String dateStr , String format , String tzStr ) { TimeZone tz = TIMEZONE_CACHE . get ( tzStr ) ; return toTimestamp ( dateStr , format , tz ) ; } | Parse date time string to timestamp based on the given time zone string and format . Returns null if parsing failed . |
14,904 | public static int strToDate ( String dateStr , String fromFormat ) { long ts = parseToTimeMillis ( dateStr , fromFormat , TimeZone . getTimeZone ( "UTC" ) ) ; ZoneId zoneId = ZoneId . of ( "UTC" ) ; Instant instant = Instant . ofEpochMilli ( ts ) ; ZonedDateTime zdt = ZonedDateTime . ofInstant ( instant , zoneId ) ; return DateTimeUtils . ymdToUnixDate ( zdt . getYear ( ) , zdt . getMonthValue ( ) , zdt . getDayOfMonth ( ) ) ; } | Returns the epoch days since 1970 - 01 - 01 . |
14,905 | public static String dateFormat ( long ts , String format , TimeZone tz ) { SimpleDateFormat formatter = FORMATTER_CACHE . get ( format ) ; formatter . setTimeZone ( tz ) ; Date dateTime = new Date ( ts ) ; return formatter . format ( dateTime ) ; } | Format a timestamp as specific . |
14,906 | public static String dateFormat ( String dateStr , String fromFormat , String toFormat , TimeZone tz ) { SimpleDateFormat fromFormatter = FORMATTER_CACHE . get ( fromFormat ) ; fromFormatter . setTimeZone ( tz ) ; SimpleDateFormat toFormatter = FORMATTER_CACHE . get ( toFormat ) ; toFormatter . setTimeZone ( tz ) ; try { return toFormatter . format ( fromFormatter . parse ( dateStr ) ) ; } catch ( ParseException e ) { LOG . error ( "Exception when formatting: '" + dateStr + "' from: '" + fromFormat + "' to: '" + toFormat + "'" , e ) ; return null ; } } | Format a string datetime as specific . |
14,907 | public static String convertTz ( String dateStr , String format , String tzFrom , String tzTo ) { return dateFormatTz ( toTimestampTz ( dateStr , format , tzFrom ) , tzTo ) ; } | Convert datetime string from a time zone to another time zone . |
14,908 | public static String timestampToString ( long ts , int precision , TimeZone tz ) { int p = ( precision <= 3 && precision >= 0 ) ? precision : 3 ; String format = DEFAULT_DATETIME_FORMATS [ p ] ; return dateFormat ( ts , format , tz ) ; } | Convert a timestamp to string . |
14,909 | private static int getMillis ( String dateStr ) { int length = dateStr . length ( ) ; if ( length == 19 ) { return 0 ; } else if ( length == 21 ) { return Integer . parseInt ( dateStr . substring ( 20 ) ) * 100 ; } else if ( length == 22 ) { return Integer . parseInt ( dateStr . substring ( 20 ) ) * 10 ; } else if ( length >= 23 && length <= 26 ) { return Integer . parseInt ( dateStr . substring ( 20 , 23 ) ) * 10 ; } else { return 0 ; } } | Returns the milli second part of the datetime . |
14,910 | public static long timestampCeil ( TimeUnitRange range , long ts , TimeZone tz ) { long offset = tz . getOffset ( ts ) ; long utcTs = ts + offset ; switch ( range ) { case HOUR : return ceil ( utcTs , MILLIS_PER_HOUR ) - offset ; case DAY : return ceil ( utcTs , MILLIS_PER_DAY ) - offset ; case MONTH : case YEAR : case QUARTER : int days = ( int ) ( utcTs / MILLIS_PER_DAY + EPOCH_JULIAN ) ; return julianDateFloor ( range , days , false ) * MILLIS_PER_DAY - offset ; default : throw new AssertionError ( range ) ; } } | Keep the algorithm consistent with Calcite DateTimeUtils . julianDateFloor but here we take time zone into account . |
14,911 | public boolean validate ( Graph < K , VV , EV > graph ) throws Exception { DataSet < Tuple1 < K > > edgeIds = graph . getEdges ( ) . flatMap ( new MapEdgeIds < > ( ) ) . distinct ( ) ; DataSet < K > invalidIds = graph . getVertices ( ) . coGroup ( edgeIds ) . where ( 0 ) . equalTo ( 0 ) . with ( new GroupInvalidIds < > ( ) ) . first ( 1 ) ; return invalidIds . map ( new KToTupleMap < > ( ) ) . count ( ) == 0 ; } | Checks that the edge set input contains valid vertex Ids i . e . that they also exist in the vertex input set . |
14,912 | public HCatInputFormatBase < T > getFields ( String ... fields ) throws IOException { ArrayList < HCatFieldSchema > fieldSchemas = new ArrayList < HCatFieldSchema > ( fields . length ) ; for ( String field : fields ) { fieldSchemas . add ( this . outputSchema . get ( field ) ) ; } this . outputSchema = new HCatSchema ( fieldSchemas ) ; configuration . set ( "mapreduce.lib.hcat.output.schema" , HCatUtil . serialize ( outputSchema ) ) ; return this ; } | Specifies the fields which are returned by the InputFormat and their order . |
14,913 | static byte [ ] readBinaryFieldFromSegments ( MemorySegment [ ] segments , int baseOffset , int fieldOffset , long variablePartOffsetAndLen ) { long mark = variablePartOffsetAndLen & HIGHEST_FIRST_BIT ; if ( mark == 0 ) { final int subOffset = ( int ) ( variablePartOffsetAndLen >> 32 ) ; final int len = ( int ) variablePartOffsetAndLen ; return SegmentsUtil . copyToBytes ( segments , baseOffset + subOffset , len ) ; } else { int len = ( int ) ( ( variablePartOffsetAndLen & HIGHEST_SECOND_TO_EIGHTH_BIT ) >>> 56 ) ; if ( SegmentsUtil . LITTLE_ENDIAN ) { return SegmentsUtil . copyToBytes ( segments , fieldOffset , len ) ; } else { return SegmentsUtil . copyToBytes ( segments , fieldOffset + 1 , len ) ; } } } | Get binary if len less than 8 will be include in variablePartOffsetAndLen . |
14,914 | static BinaryString readBinaryStringFieldFromSegments ( MemorySegment [ ] segments , int baseOffset , int fieldOffset , long variablePartOffsetAndLen ) { long mark = variablePartOffsetAndLen & HIGHEST_FIRST_BIT ; if ( mark == 0 ) { final int subOffset = ( int ) ( variablePartOffsetAndLen >> 32 ) ; final int len = ( int ) variablePartOffsetAndLen ; return new BinaryString ( segments , baseOffset + subOffset , len ) ; } else { int len = ( int ) ( ( variablePartOffsetAndLen & HIGHEST_SECOND_TO_EIGHTH_BIT ) >>> 56 ) ; if ( SegmentsUtil . LITTLE_ENDIAN ) { return new BinaryString ( segments , fieldOffset , len ) ; } else { return new BinaryString ( segments , fieldOffset + 1 , len ) ; } } } | Get binary string if len less than 8 will be include in variablePartOffsetAndLen . |
14,915 | public static String getVersion ( ) { String version = EnvironmentInformation . class . getPackage ( ) . getImplementationVersion ( ) ; return version != null ? version : UNKNOWN ; } | Returns the version of the code as String . If version == null then the JobManager does not run from a Maven build . An example is a source code checkout compile and run from inside an IDE . |
14,916 | public static String getHadoopUser ( ) { try { Class < ? > ugiClass = Class . forName ( "org.apache.hadoop.security.UserGroupInformation" , false , EnvironmentInformation . class . getClassLoader ( ) ) ; Method currentUserMethod = ugiClass . getMethod ( "getCurrentUser" ) ; Method shortUserNameMethod = ugiClass . getMethod ( "getShortUserName" ) ; Object ugi = currentUserMethod . invoke ( null ) ; return ( String ) shortUserNameMethod . invoke ( ugi ) ; } catch ( ClassNotFoundException e ) { return "<no hadoop dependency found>" ; } catch ( LinkageError e ) { LOG . debug ( "Cannot determine user/group information using Hadoop utils. " + "Hadoop classes not loaded or compatible" , e ) ; } catch ( Throwable t ) { LOG . warn ( "Error while accessing user/group information via Hadoop utils." , t ) ; } return UNKNOWN ; } | Gets the name of the user that is running the JVM . |
14,917 | public static long getMaxJvmHeapMemory ( ) { final long maxMemory = Runtime . getRuntime ( ) . maxMemory ( ) ; if ( maxMemory != Long . MAX_VALUE ) { return maxMemory ; } else { final long physicalMemory = Hardware . getSizeOfPhysicalMemory ( ) ; if ( physicalMemory != - 1 ) { return physicalMemory / 4 ; } else { throw new RuntimeException ( "Could not determine the amount of free memory.\n" + "Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes." ) ; } } } | The maximum JVM heap size in bytes . |
14,918 | public static void logEnvironmentInfo ( Logger log , String componentName , String [ ] commandLineArgs ) { if ( log . isInfoEnabled ( ) ) { RevisionInformation rev = getRevisionInformation ( ) ; String version = getVersion ( ) ; String jvmVersion = getJvmVersion ( ) ; String [ ] options = getJvmStartupOptionsArray ( ) ; String javaHome = System . getenv ( "JAVA_HOME" ) ; long maxHeapMegabytes = getMaxJvmHeapMemory ( ) >>> 20 ; log . info ( "--------------------------------------------------------------------------------" ) ; log . info ( " Starting " + componentName + " (Version: " + version + ", " + "Rev:" + rev . commitId + ", " + "Date:" + rev . commitDate + ")" ) ; log . info ( " OS current user: " + System . getProperty ( "user.name" ) ) ; log . info ( " Current Hadoop/Kerberos user: " + getHadoopUser ( ) ) ; log . info ( " JVM: " + jvmVersion ) ; log . info ( " Maximum heap size: " + maxHeapMegabytes + " MiBytes" ) ; log . info ( " JAVA_HOME: " + ( javaHome == null ? "(not set)" : javaHome ) ) ; String hadoopVersionString = getHadoopVersionString ( ) ; if ( hadoopVersionString != null ) { log . info ( " Hadoop version: " + hadoopVersionString ) ; } else { log . info ( " No Hadoop Dependency available" ) ; } if ( options . length == 0 ) { log . info ( " JVM Options: (none)" ) ; } else { log . info ( " JVM Options:" ) ; for ( String s : options ) { log . info ( " " + s ) ; } } if ( commandLineArgs == null || commandLineArgs . length == 0 ) { log . info ( " Program Arguments: (none)" ) ; } else { log . info ( " Program Arguments:" ) ; for ( String s : commandLineArgs ) { log . info ( " " + s ) ; } } log . info ( " Classpath: " + System . getProperty ( "java.class.path" ) ) ; log . info ( "--------------------------------------------------------------------------------" ) ; } } | Logs information about the environment like code revision current user Java version and JVM parameters . |
14,919 | public Json schema ( TypeInformation < Row > schemaType ) { Preconditions . checkNotNull ( schemaType ) ; this . schema = TypeStringUtils . writeTypeInfo ( schemaType ) ; this . jsonSchema = null ; this . deriveSchema = null ; return this ; } | Sets the schema using type information . |
14,920 | public void open ( ) { synchronized ( stateLock ) { if ( ! closed ) { throw new IllegalStateException ( "currently not closed." ) ; } closed = false ; } final int partitionFanOut = getPartitioningFanOutNoEstimates ( this . availableMemory . size ( ) ) ; createPartitions ( partitionFanOut ) ; final int numBuckets = getInitialTableSize ( this . availableMemory . size ( ) , this . segmentSize , partitionFanOut , this . avgRecordLen ) ; initTable ( numBuckets , ( byte ) partitionFanOut ) ; } | Initialize the hash table |
14,921 | public void close ( ) { synchronized ( this . stateLock ) { if ( this . closed ) { return ; } this . closed = true ; } LOG . debug ( "Closing hash table and releasing resources." ) ; releaseTable ( ) ; clearPartitions ( ) ; } | Closes the hash table . This effectively releases all internal structures and closes all open files and removes them . The call to this method is valid both as a cleanup after the complete inputs were properly processed and as an cancellation call which cleans up all resources that are currently held by the hash join . If another process still access the hash table after close has been called no operations will be performed . |
14,922 | private long getSize ( ) { long numSegments = 0 ; numSegments += this . availableMemory . size ( ) ; numSegments += this . buckets . length ; for ( InMemoryPartition < T > p : this . partitions ) { numSegments += p . getBlockCount ( ) ; numSegments += p . numOverflowSegments ; } numSegments += this . compactionMemory . getBlockCount ( ) ; return numSegments * this . segmentSize ; } | Size of all memory segments owned by this hash table |
14,923 | private long getPartitionSize ( ) { long numSegments = 0 ; for ( InMemoryPartition < T > p : this . partitions ) { numSegments += p . getBlockCount ( ) ; } return numSegments * this . segmentSize ; } | Size of all memory segments owned by the partitions of this hash table excluding the compaction partition |
14,924 | private void tryDeleteEmptyParentZNodes ( ) throws Exception { String remainingPath = getParentPath ( getNormalizedPath ( client . getNamespace ( ) ) ) ; final CuratorFramework nonNamespaceClient = client . usingNamespace ( null ) ; while ( ! isRootPath ( remainingPath ) ) { try { nonNamespaceClient . delete ( ) . forPath ( remainingPath ) ; } catch ( KeeperException . NotEmptyException ignored ) { break ; } remainingPath = getParentPath ( remainingPath ) ; } } | Tries to delete empty parent znodes . |
14,925 | public int numKeyValueStateEntries ( Object namespace ) { int sum = 0 ; for ( StateTable < ? , ? , ? > state : registeredKVStates . values ( ) ) { sum += state . sizeOfNamespace ( namespace ) ; } return sum ; } | Returns the total number of state entries across all keys for the given namespace . |
14,926 | static void skipSerializedStates ( DataInputView in ) throws IOException { TypeSerializer < String > nameSerializer = StringSerializer . INSTANCE ; TypeSerializer < State . StateType > stateTypeSerializer = new EnumSerializer < > ( State . StateType . class ) ; TypeSerializer < StateTransitionAction > actionSerializer = new EnumSerializer < > ( StateTransitionAction . class ) ; final int noOfStates = in . readInt ( ) ; for ( int i = 0 ; i < noOfStates ; i ++ ) { nameSerializer . deserialize ( in ) ; stateTypeSerializer . deserialize ( in ) ; } for ( int i = 0 ; i < noOfStates ; i ++ ) { String srcName = nameSerializer . deserialize ( in ) ; int noOfTransitions = in . readInt ( ) ; for ( int j = 0 ; j < noOfTransitions ; j ++ ) { String src = nameSerializer . deserialize ( in ) ; Preconditions . checkState ( src . equals ( srcName ) , "Source Edge names do not match (" + srcName + " - " + src + ")." ) ; nameSerializer . deserialize ( in ) ; actionSerializer . deserialize ( in ) ; try { skipCondition ( in ) ; } catch ( ClassNotFoundException e ) { e . printStackTrace ( ) ; } } } } | Skips bytes corresponding to serialized states . In flink 1 . 6 + the states are no longer kept in state . |
14,927 | public static boolean isInFixedLengthPart ( InternalType type ) { if ( type instanceof DecimalType ) { return ( ( DecimalType ) type ) . precision ( ) <= DecimalType . MAX_COMPACT_PRECISION ; } else { return MUTABLE_FIELD_TYPES . contains ( type ) ; } } | If it is a fixed - length field we can call this BinaryRow s setXX method for in - place updates . If it is variable - length field can t use this method because the underlying data is stored continuously . |
14,928 | public boolean anyNull ( ) { if ( ( segments [ 0 ] . getLong ( 0 ) & FIRST_BYTE_ZERO ) != 0 ) { return true ; } for ( int i = 8 ; i < nullBitsSizeInBytes ; i += 8 ) { if ( segments [ 0 ] . getLong ( i ) != 0 ) { return true ; } } return false ; } | The bit is 1 when the field is null . Default is 0 . |
14,929 | private static < R extends JarRequestBody , M extends MessageParameters > List < String > getProgramArgs ( HandlerRequest < R , M > request , Logger log ) throws RestHandlerException { JarRequestBody requestBody = request . getRequestBody ( ) ; @ SuppressWarnings ( "deprecation" ) List < String > programArgs = tokenizeArguments ( fromRequestBodyOrQueryParameter ( emptyToNull ( requestBody . getProgramArguments ( ) ) , ( ) -> getQueryParameter ( request , ProgramArgsQueryParameter . class ) , null , log ) ) ; List < String > programArgsList = fromRequestBodyOrQueryParameter ( requestBody . getProgramArgumentsList ( ) , ( ) -> request . getQueryParameter ( ProgramArgQueryParameter . class ) , null , log ) ; if ( ! programArgsList . isEmpty ( ) ) { if ( ! programArgs . isEmpty ( ) ) { throw new RestHandlerException ( "Confusing request: programArgs and programArgsList are specified, please, use only programArgsList" , HttpResponseStatus . BAD_REQUEST ) ; } return programArgsList ; } else { return programArgs ; } } | Parse program arguments in jar run or plan request . |
14,930 | static List < String > tokenizeArguments ( final String args ) { if ( args == null ) { return Collections . emptyList ( ) ; } final Matcher matcher = ARGUMENTS_TOKENIZE_PATTERN . matcher ( args ) ; final List < String > tokens = new ArrayList < > ( ) ; while ( matcher . find ( ) ) { tokens . add ( matcher . group ( ) . trim ( ) . replace ( "\"" , "" ) . replace ( "\'" , "" ) ) ; } return tokens ; } | Takes program arguments as a single string and splits them into a list of string . |
14,931 | public Map < String , OptionalFailure < Accumulator < ? , ? > > > aggregateUserAccumulators ( ) { Map < String , OptionalFailure < Accumulator < ? , ? > > > userAccumulators = new HashMap < > ( ) ; for ( ExecutionVertex vertex : getAllExecutionVertices ( ) ) { Map < String , Accumulator < ? , ? > > next = vertex . getCurrentExecutionAttempt ( ) . getUserAccumulators ( ) ; if ( next != null ) { AccumulatorHelper . mergeInto ( userAccumulators , next ) ; } } return userAccumulators ; } | Merges all accumulator results from the tasks previously executed in the Executions . |
14,932 | public Map < String , SerializedValue < OptionalFailure < Object > > > getAccumulatorsSerialized ( ) { return aggregateUserAccumulators ( ) . entrySet ( ) . stream ( ) . collect ( Collectors . toMap ( Map . Entry :: getKey , entry -> serializeAccumulator ( entry . getKey ( ) , entry . getValue ( ) ) ) ) ; } | Gets a serialized accumulator map . |
14,933 | public StringifiedAccumulatorResult [ ] getAccumulatorResultsStringified ( ) { Map < String , OptionalFailure < Accumulator < ? , ? > > > accumulatorMap = aggregateUserAccumulators ( ) ; return StringifiedAccumulatorResult . stringifyAccumulatorResults ( accumulatorMap ) ; } | Returns the a stringified version of the user - defined accumulators . |
14,934 | public void suspend ( Throwable suspensionCause ) { assertRunningInJobMasterMainThread ( ) ; if ( state . isTerminalState ( ) ) { return ; } else if ( transitionState ( state , JobStatus . SUSPENDED , suspensionCause ) ) { initFailureCause ( suspensionCause ) ; incrementGlobalModVersion ( ) ; if ( schedulingFuture != null ) { schedulingFuture . cancel ( false ) ; } final ArrayList < CompletableFuture < Void > > executionJobVertexTerminationFutures = new ArrayList < > ( verticesInCreationOrder . size ( ) ) ; for ( ExecutionJobVertex ejv : verticesInCreationOrder ) { executionJobVertexTerminationFutures . add ( ejv . suspend ( ) ) ; } final ConjunctFuture < Void > jobVerticesTerminationFuture = FutureUtils . waitForAll ( executionJobVertexTerminationFutures ) ; checkState ( jobVerticesTerminationFuture . isDone ( ) , "Suspend needs to happen atomically" ) ; jobVerticesTerminationFuture . whenComplete ( ( Void ignored , Throwable throwable ) -> { if ( throwable != null ) { LOG . debug ( "Could not properly suspend the execution graph." , throwable ) ; } onTerminalState ( state ) ; LOG . info ( "Job {} has been suspended." , getJobID ( ) ) ; } ) ; } else { throw new IllegalStateException ( String . format ( "Could not suspend because transition from %s to %s failed." , state , JobStatus . SUSPENDED ) ) ; } } | Suspends the current ExecutionGraph . |
14,935 | public void failGlobal ( Throwable t ) { assertRunningInJobMasterMainThread ( ) ; while ( true ) { JobStatus current = state ; if ( current == JobStatus . FAILING || current == JobStatus . SUSPENDED || current . isGloballyTerminalState ( ) ) { return ; } else if ( transitionState ( current , JobStatus . FAILING , t ) ) { initFailureCause ( t ) ; final long globalVersionForRestart = incrementGlobalModVersion ( ) ; final CompletableFuture < Void > ongoingSchedulingFuture = schedulingFuture ; if ( ongoingSchedulingFuture != null ) { ongoingSchedulingFuture . cancel ( false ) ; } final ArrayList < CompletableFuture < ? > > futures = new ArrayList < > ( verticesInCreationOrder . size ( ) ) ; for ( ExecutionJobVertex ejv : verticesInCreationOrder ) { futures . add ( ejv . cancelWithFuture ( ) ) ; } final ConjunctFuture < Void > allTerminal = FutureUtils . waitForAll ( futures ) ; allTerminal . whenComplete ( ( Void ignored , Throwable throwable ) -> { if ( throwable != null ) { transitionState ( JobStatus . FAILING , JobStatus . FAILED , new FlinkException ( "Could not cancel all execution job vertices properly." , throwable ) ) ; } else { allVerticesInTerminalState ( globalVersionForRestart ) ; } } ) ; return ; } } } | Fails the execution graph globally . This failure will not be recovered by a specific failover strategy but results in a full restart of all tasks . |
14,936 | public boolean updateState ( TaskExecutionState state ) { assertRunningInJobMasterMainThread ( ) ; final Execution attempt = currentExecutions . get ( state . getID ( ) ) ; if ( attempt != null ) { try { Map < String , Accumulator < ? , ? > > accumulators ; switch ( state . getExecutionState ( ) ) { case RUNNING : return attempt . switchToRunning ( ) ; case FINISHED : accumulators = deserializeAccumulators ( state ) ; attempt . markFinished ( accumulators , state . getIOMetrics ( ) ) ; return true ; case CANCELED : accumulators = deserializeAccumulators ( state ) ; attempt . completeCancelling ( accumulators , state . getIOMetrics ( ) ) ; return true ; case FAILED : accumulators = deserializeAccumulators ( state ) ; attempt . markFailed ( state . getError ( userClassLoader ) , accumulators , state . getIOMetrics ( ) ) ; return true ; default : attempt . fail ( new Exception ( "TaskManager sent illegal state update: " + state . getExecutionState ( ) ) ) ; return false ; } } catch ( Throwable t ) { ExceptionUtils . rethrowIfFatalErrorOrOOM ( t ) ; failGlobal ( t ) ; return false ; } } else { return false ; } } | Updates the state of one of the ExecutionVertex s Execution attempts . If the new status if FINISHED this also updates the accumulators . |
14,937 | private Map < String , Accumulator < ? , ? > > deserializeAccumulators ( TaskExecutionState state ) { AccumulatorSnapshot serializedAccumulators = state . getAccumulators ( ) ; if ( serializedAccumulators != null ) { try { return serializedAccumulators . deserializeUserAccumulators ( userClassLoader ) ; } catch ( Throwable t ) { LOG . error ( "Failed to deserialize final accumulator results." , t ) ; } } return null ; } | Deserializes accumulators from a task state update . |
14,938 | public void scheduleOrUpdateConsumers ( ResultPartitionID partitionId ) throws ExecutionGraphException { assertRunningInJobMasterMainThread ( ) ; final Execution execution = currentExecutions . get ( partitionId . getProducerId ( ) ) ; if ( execution == null ) { throw new ExecutionGraphException ( "Cannot find execution for execution Id " + partitionId . getPartitionId ( ) + '.' ) ; } else if ( execution . getVertex ( ) == null ) { throw new ExecutionGraphException ( "Execution with execution Id " + partitionId . getPartitionId ( ) + " has no vertex assigned." ) ; } else { execution . getVertex ( ) . scheduleOrUpdateConsumers ( partitionId ) ; } } | Schedule or updates consumers of the given result partition . |
14,939 | public void updateAccumulators ( AccumulatorSnapshot accumulatorSnapshot ) { Map < String , Accumulator < ? , ? > > userAccumulators ; try { userAccumulators = accumulatorSnapshot . deserializeUserAccumulators ( userClassLoader ) ; ExecutionAttemptID execID = accumulatorSnapshot . getExecutionAttemptID ( ) ; Execution execution = currentExecutions . get ( execID ) ; if ( execution != null ) { execution . setAccumulators ( userAccumulators ) ; } else { LOG . debug ( "Received accumulator result for unknown execution {}." , execID ) ; } } catch ( Exception e ) { LOG . error ( "Cannot update accumulators for job {}." , getJobID ( ) , e ) ; } } | Updates the accumulators during the runtime of a job . Final accumulator results are transferred through the UpdateTaskExecutionState message . |
14,940 | private Set < AllocationID > computeAllPriorAllocationIds ( ) { HashSet < AllocationID > allPreviousAllocationIds = new HashSet < > ( getNumberOfExecutionJobVertices ( ) ) ; for ( ExecutionVertex executionVertex : getAllExecutionVertices ( ) ) { AllocationID latestPriorAllocation = executionVertex . getLatestPriorAllocation ( ) ; if ( latestPriorAllocation != null ) { allPreviousAllocationIds . add ( latestPriorAllocation ) ; } } return allPreviousAllocationIds ; } | Computes and returns a set with the prior allocation ids from all execution vertices in the graph . |
14,941 | public static void clean ( Object func , boolean checkSerializable ) { if ( func == null ) { return ; } final Class < ? > cls = func . getClass ( ) ; boolean closureAccessed = false ; for ( Field f : cls . getDeclaredFields ( ) ) { if ( f . getName ( ) . startsWith ( "this$" ) ) { closureAccessed |= cleanThis0 ( func , cls , f . getName ( ) ) ; } } if ( checkSerializable ) { try { InstantiationUtil . serializeObject ( func ) ; } catch ( Exception e ) { String functionType = getSuperClassOrInterfaceName ( func . getClass ( ) ) ; String msg = functionType == null ? ( func + " is not serializable." ) : ( "The implementation of the " + functionType + " is not serializable." ) ; if ( closureAccessed ) { msg += " The implementation accesses fields of its enclosing class, which is " + "a common reason for non-serializability. " + "A common solution is to make the function a proper (non-inner) class, or " + "a static inner class." ; } else { msg += " The object probably contains or references non serializable fields." ; } throw new InvalidProgramException ( msg , e ) ; } } } | Tries to clean the closure of the given object if the object is a non - static inner class . |
14,942 | public static < T > void applyToAllWhileSuppressingExceptions ( Iterable < T > inputs , ThrowingConsumer < T , ? extends Exception > throwingConsumer ) throws Exception { if ( inputs != null && throwingConsumer != null ) { Exception exception = null ; for ( T input : inputs ) { if ( input != null ) { try { throwingConsumer . accept ( input ) ; } catch ( Exception ex ) { exception = ExceptionUtils . firstOrSuppressed ( ex , exception ) ; } } } if ( exception != null ) { throw exception ; } } } | This method supplies all elements from the input to the consumer . Exceptions that happen on elements are suppressed until all elements are processed . If exceptions happened for one or more of the inputs they are reported in a combining suppressed exception . |
14,943 | private ExecutorService createQueryExecutor ( ) { ThreadFactory threadFactory = new ThreadFactoryBuilder ( ) . setDaemon ( true ) . setNameFormat ( "Flink " + getServerName ( ) + " Thread %d" ) . build ( ) ; return Executors . newFixedThreadPool ( numQueryThreads , threadFactory ) ; } | Creates a thread pool for the query execution . |
14,944 | private boolean attemptToBind ( final int port ) throws Throwable { log . debug ( "Attempting to start {} on port {}." , serverName , port ) ; this . queryExecutor = createQueryExecutor ( ) ; this . handler = initializeHandler ( ) ; final NettyBufferPool bufferPool = new NettyBufferPool ( numEventLoopThreads ) ; final ThreadFactory threadFactory = new ThreadFactoryBuilder ( ) . setDaemon ( true ) . setNameFormat ( "Flink " + serverName + " EventLoop Thread %d" ) . build ( ) ; final NioEventLoopGroup nioGroup = new NioEventLoopGroup ( numEventLoopThreads , threadFactory ) ; this . bootstrap = new ServerBootstrap ( ) . localAddress ( bindAddress , port ) . group ( nioGroup ) . channel ( NioServerSocketChannel . class ) . option ( ChannelOption . ALLOCATOR , bufferPool ) . childOption ( ChannelOption . ALLOCATOR , bufferPool ) . childHandler ( new ServerChannelInitializer < > ( handler ) ) ; final int defaultHighWaterMark = 64 * 1024 ; if ( LOW_WATER_MARK > defaultHighWaterMark ) { bootstrap . childOption ( ChannelOption . WRITE_BUFFER_HIGH_WATER_MARK , HIGH_WATER_MARK ) ; bootstrap . childOption ( ChannelOption . WRITE_BUFFER_LOW_WATER_MARK , LOW_WATER_MARK ) ; } else { bootstrap . childOption ( ChannelOption . WRITE_BUFFER_LOW_WATER_MARK , LOW_WATER_MARK ) ; bootstrap . childOption ( ChannelOption . WRITE_BUFFER_HIGH_WATER_MARK , HIGH_WATER_MARK ) ; } try { final ChannelFuture future = bootstrap . bind ( ) . sync ( ) ; if ( future . isSuccess ( ) ) { final InetSocketAddress localAddress = ( InetSocketAddress ) future . channel ( ) . localAddress ( ) ; serverAddress = new InetSocketAddress ( localAddress . getAddress ( ) , localAddress . getPort ( ) ) ; return true ; } throw future . cause ( ) ; } catch ( BindException e ) { log . debug ( "Failed to start {} on port {}: {}." , serverName , port , e . getMessage ( ) ) ; try { shutdownServer ( ) . whenComplete ( ( ignoredV , ignoredT ) -> serverShutdownFuture . getAndSet ( null ) ) . get ( ) ; } catch ( Exception r ) { log . warn ( "Problem while shutting down {}: {}" , serverName , r . getMessage ( ) ) ; } } return false ; } | Tries to start the server at the provided port . |
14,945 | public CompletableFuture < Void > shutdownServer ( ) { CompletableFuture < Void > shutdownFuture = new CompletableFuture < > ( ) ; if ( serverShutdownFuture . compareAndSet ( null , shutdownFuture ) ) { log . info ( "Shutting down {} @ {}" , serverName , serverAddress ) ; final CompletableFuture < Void > groupShutdownFuture = new CompletableFuture < > ( ) ; if ( bootstrap != null ) { EventLoopGroup group = bootstrap . group ( ) ; if ( group != null && ! group . isShutdown ( ) ) { group . shutdownGracefully ( 0L , 0L , TimeUnit . MILLISECONDS ) . addListener ( finished -> { if ( finished . isSuccess ( ) ) { groupShutdownFuture . complete ( null ) ; } else { groupShutdownFuture . completeExceptionally ( finished . cause ( ) ) ; } } ) ; } else { groupShutdownFuture . complete ( null ) ; } } else { groupShutdownFuture . complete ( null ) ; } final CompletableFuture < Void > handlerShutdownFuture = new CompletableFuture < > ( ) ; if ( handler == null ) { handlerShutdownFuture . complete ( null ) ; } else { handler . shutdown ( ) . whenComplete ( ( result , throwable ) -> { if ( throwable != null ) { handlerShutdownFuture . completeExceptionally ( throwable ) ; } else { handlerShutdownFuture . complete ( null ) ; } } ) ; } final CompletableFuture < Void > queryExecShutdownFuture = CompletableFuture . runAsync ( ( ) -> { if ( queryExecutor != null ) { ExecutorUtils . gracefulShutdown ( 10L , TimeUnit . MINUTES , queryExecutor ) ; } } ) ; CompletableFuture . allOf ( queryExecShutdownFuture , groupShutdownFuture , handlerShutdownFuture ) . whenComplete ( ( result , throwable ) -> { if ( throwable != null ) { shutdownFuture . completeExceptionally ( throwable ) ; } else { shutdownFuture . complete ( null ) ; } } ) ; } return serverShutdownFuture . get ( ) ; } | Shuts down the server and all related thread pools . |
14,946 | public static MetricQueryService createMetricQueryService ( RpcService rpcService , ResourceID resourceID , long maximumFrameSize ) { String endpointId = resourceID == null ? METRIC_QUERY_SERVICE_NAME : METRIC_QUERY_SERVICE_NAME + "_" + resourceID . getResourceIdString ( ) ; return new MetricQueryService ( rpcService , endpointId , maximumFrameSize ) ; } | Starts the MetricQueryService actor in the given actor system . |
14,947 | public int size ( ) { int ret = anyMethodRouter . size ( ) ; for ( MethodlessRouter < T > router : routers . values ( ) ) { ret += router . size ( ) ; } return ret ; } | Returns the number of routes in this router . |
14,948 | public Router < T > addRoute ( HttpMethod method , String pathPattern , T target ) { getMethodlessRouter ( method ) . addRoute ( pathPattern , target ) ; return this ; } | Add route . |
14,949 | public Set < HttpMethod > allowedMethods ( String uri ) { QueryStringDecoder decoder = new QueryStringDecoder ( uri ) ; String [ ] tokens = PathPattern . removeSlashesAtBothEnds ( decoder . path ( ) ) . split ( "/" ) ; if ( anyMethodRouter . anyMatched ( tokens ) ) { return allAllowedMethods ( ) ; } Set < HttpMethod > ret = new HashSet < HttpMethod > ( routers . size ( ) ) ; for ( Map . Entry < HttpMethod , MethodlessRouter < T > > entry : routers . entrySet ( ) ) { MethodlessRouter < T > router = entry . getValue ( ) ; if ( router . anyMatched ( tokens ) ) { HttpMethod method = entry . getKey ( ) ; ret . add ( method ) ; } } return ret ; } | Returns allowed methods for a specific URI . |
14,950 | public QueryableStateStream < KEY , T > asQueryableState ( String queryableStateName ) { ValueStateDescriptor < T > valueStateDescriptor = new ValueStateDescriptor < T > ( UUID . randomUUID ( ) . toString ( ) , getType ( ) ) ; return asQueryableState ( queryableStateName , valueStateDescriptor ) ; } | Publishes the keyed stream as queryable ValueState instance . |
14,951 | public QueryableStateStream < KEY , T > asQueryableState ( String queryableStateName , ValueStateDescriptor < T > stateDescriptor ) { transform ( "Queryable state: " + queryableStateName , getType ( ) , new QueryableValueStateOperator < > ( queryableStateName , stateDescriptor ) ) ; stateDescriptor . initializeSerializerUnlessSet ( getExecutionConfig ( ) ) ; return new QueryableStateStream < > ( queryableStateName , stateDescriptor , getKeyType ( ) . createSerializer ( getExecutionConfig ( ) ) ) ; } | Publishes the keyed stream as a queryable ValueState instance . |
14,952 | public < ACC > QueryableStateStream < KEY , ACC > asQueryableState ( String queryableStateName , FoldingStateDescriptor < T , ACC > stateDescriptor ) { transform ( "Queryable state: " + queryableStateName , getType ( ) , new QueryableAppendingStateOperator < > ( queryableStateName , stateDescriptor ) ) ; stateDescriptor . initializeSerializerUnlessSet ( getExecutionConfig ( ) ) ; return new QueryableStateStream < > ( queryableStateName , stateDescriptor , getKeyType ( ) . createSerializer ( getExecutionConfig ( ) ) ) ; } | Publishes the keyed stream as a queryable FoldingState instance . |
14,953 | public QueryableStateStream < KEY , T > asQueryableState ( String queryableStateName , ReducingStateDescriptor < T > stateDescriptor ) { transform ( "Queryable state: " + queryableStateName , getType ( ) , new QueryableAppendingStateOperator < > ( queryableStateName , stateDescriptor ) ) ; stateDescriptor . initializeSerializerUnlessSet ( getExecutionConfig ( ) ) ; return new QueryableStateStream < > ( queryableStateName , stateDescriptor , getKeyType ( ) . createSerializer ( getExecutionConfig ( ) ) ) ; } | Publishes the keyed stream as a queryable ReducingState instance . |
14,954 | public static Environment parse ( URL url ) throws IOException { try { return new ConfigUtil . LowerCaseYamlMapper ( ) . readValue ( url , Environment . class ) ; } catch ( JsonMappingException e ) { throw new SqlClientException ( "Could not parse environment file. Cause: " + e . getMessage ( ) ) ; } } | Parses an environment file from an URL . |
14,955 | public static Environment parse ( String content ) throws IOException { try { return new ConfigUtil . LowerCaseYamlMapper ( ) . readValue ( content , Environment . class ) ; } catch ( JsonMappingException e ) { throw new SqlClientException ( "Could not parse environment file. Cause: " + e . getMessage ( ) ) ; } } | Parses an environment file from an String . |
14,956 | public static Environment merge ( Environment env1 , Environment env2 ) { final Environment mergedEnv = new Environment ( ) ; final Map < String , TableEntry > tables = new LinkedHashMap < > ( env1 . getTables ( ) ) ; tables . putAll ( env2 . getTables ( ) ) ; mergedEnv . tables = tables ; final Map < String , FunctionEntry > functions = new HashMap < > ( env1 . getFunctions ( ) ) ; functions . putAll ( env2 . getFunctions ( ) ) ; mergedEnv . functions = functions ; mergedEnv . execution = ExecutionEntry . merge ( env1 . getExecution ( ) , env2 . getExecution ( ) ) ; mergedEnv . deployment = DeploymentEntry . merge ( env1 . getDeployment ( ) , env2 . getDeployment ( ) ) ; return mergedEnv ; } | Merges two environments . The properties of the first environment might be overwritten by the second one . |
14,957 | void add ( long value ) { if ( value >= 0 ) { if ( count > 0 ) { min = Math . min ( min , value ) ; max = Math . max ( max , value ) ; } else { min = value ; max = value ; } count ++ ; sum += value ; } } | Adds the value to the stats if it is > = 0 . |
14,958 | public void clear ( ) { final int arrayOffset = getHeadElementIndex ( ) ; Arrays . fill ( queue , arrayOffset , arrayOffset + size , null ) ; size = 0 ; } | Clears the queue . |
14,959 | private void setMapForKeyGroup ( int keyGroupId , Map < N , Map < K , S > > map ) { try { state [ indexToOffset ( keyGroupId ) ] = map ; } catch ( ArrayIndexOutOfBoundsException e ) { throw new IllegalArgumentException ( "Key group index " + keyGroupId + " is out of range of key group " + "range [" + keyGroupOffset + ", " + ( keyGroupOffset + state . length ) + ")." ) ; } } | Sets the given map for the given key - group . |
14,960 | public void randomEmit ( T record ) throws IOException , InterruptedException { emit ( record , rng . nextInt ( numberOfChannels ) ) ; } | This is used to send LatencyMarks to a random target channel . |
14,961 | private void notifyFlusherException ( Throwable t ) { if ( flusherException == null ) { LOG . error ( "An exception happened while flushing the outputs" , t ) ; flusherException = t ; } } | Notifies the writer that the output flusher thread encountered an exception . |
14,962 | private void handleRpcInvocation ( RpcInvocation rpcInvocation ) { Method rpcMethod = null ; try { String methodName = rpcInvocation . getMethodName ( ) ; Class < ? > [ ] parameterTypes = rpcInvocation . getParameterTypes ( ) ; rpcMethod = lookupRpcMethod ( methodName , parameterTypes ) ; } catch ( ClassNotFoundException e ) { log . error ( "Could not load method arguments." , e ) ; RpcConnectionException rpcException = new RpcConnectionException ( "Could not load method arguments." , e ) ; getSender ( ) . tell ( new Status . Failure ( rpcException ) , getSelf ( ) ) ; } catch ( IOException e ) { log . error ( "Could not deserialize rpc invocation message." , e ) ; RpcConnectionException rpcException = new RpcConnectionException ( "Could not deserialize rpc invocation message." , e ) ; getSender ( ) . tell ( new Status . Failure ( rpcException ) , getSelf ( ) ) ; } catch ( final NoSuchMethodException e ) { log . error ( "Could not find rpc method for rpc invocation." , e ) ; RpcConnectionException rpcException = new RpcConnectionException ( "Could not find rpc method for rpc invocation." , e ) ; getSender ( ) . tell ( new Status . Failure ( rpcException ) , getSelf ( ) ) ; } if ( rpcMethod != null ) { try { rpcMethod . setAccessible ( true ) ; if ( rpcMethod . getReturnType ( ) . equals ( Void . TYPE ) ) { rpcMethod . invoke ( rpcEndpoint , rpcInvocation . getArgs ( ) ) ; } else { final Object result ; try { result = rpcMethod . invoke ( rpcEndpoint , rpcInvocation . getArgs ( ) ) ; } catch ( InvocationTargetException e ) { log . debug ( "Reporting back error thrown in remote procedure {}" , rpcMethod , e ) ; getSender ( ) . tell ( new Status . Failure ( e . getTargetException ( ) ) , getSelf ( ) ) ; return ; } final String methodName = rpcMethod . getName ( ) ; if ( result instanceof CompletableFuture ) { final CompletableFuture < ? > responseFuture = ( CompletableFuture < ? 
> ) result ; sendAsyncResponse ( responseFuture , methodName ) ; } else { sendSyncResponse ( result , methodName ) ; } } } catch ( Throwable e ) { log . error ( "Error while executing remote procedure call {}." , rpcMethod , e ) ; getSender ( ) . tell ( new Status . Failure ( e ) , getSelf ( ) ) ; } } } | Handle rpc invocations by looking up the rpc method on the rpc endpoint and calling this method with the provided method arguments . If the method has a return value it is returned to the sender of the call . |
14,963 | protected void sendErrorIfSender ( Throwable throwable ) { if ( ! getSender ( ) . equals ( ActorRef . noSender ( ) ) ) { getSender ( ) . tell ( new Status . Failure ( throwable ) , getSelf ( ) ) ; } } | Send throwable to sender if the sender is specified . |
14,964 | private void stop ( RpcEndpointTerminationResult rpcEndpointTerminationResult ) { if ( rpcEndpointStopped . compareAndSet ( false , true ) ) { this . rpcEndpointTerminationResult = rpcEndpointTerminationResult ; getContext ( ) . stop ( getSelf ( ) ) ; } } | Stop the actor immediately . |
14,965 | private static void printCustomCliOptions ( Collection < CustomCommandLine < ? > > customCommandLines , HelpFormatter formatter , boolean runOptions ) { for ( CustomCommandLine cli : customCommandLines ) { formatter . setSyntaxPrefix ( " Options for " + cli . getId ( ) + " mode:" ) ; Options customOpts = new Options ( ) ; cli . addGeneralOptions ( customOpts ) ; if ( runOptions ) { cli . addRunOptions ( customOpts ) ; } formatter . printHelp ( " " , customOpts ) ; System . out . println ( ) ; } } | Prints custom cli options . |
14,966 | public static void deleteFileOrDirectory ( File file ) throws IOException { checkNotNull ( file , "file" ) ; guardIfWindows ( FileUtils :: deleteFileOrDirectoryInternal , file ) ; } | Removes the given file or directory recursively . |
14,967 | public static void deleteDirectory ( File directory ) throws IOException { checkNotNull ( directory , "directory" ) ; guardIfWindows ( FileUtils :: deleteDirectoryInternal , directory ) ; } | Deletes the given directory recursively . |
14,968 | public static void cleanDirectory ( File directory ) throws IOException { checkNotNull ( directory , "directory" ) ; guardIfWindows ( FileUtils :: cleanDirectoryInternal , directory ) ; } | Removes all files contained within a directory without removing the directory itself . |
14,969 | public static void copy ( Path sourcePath , Path targetPath , boolean executable ) throws IOException { FileSystem sFS = FileSystem . getUnguardedFileSystem ( sourcePath . toUri ( ) ) ; FileSystem tFS = FileSystem . getUnguardedFileSystem ( targetPath . toUri ( ) ) ; if ( ! tFS . exists ( targetPath ) ) { if ( sFS . getFileStatus ( sourcePath ) . isDir ( ) ) { internalCopyDirectory ( sourcePath , targetPath , executable , sFS , tFS ) ; } else { internalCopyFile ( sourcePath , targetPath , executable , sFS , tFS ) ; } } } | Copies all files from source to target and sets executable flag . Paths might be on different systems . |
14,970 | public void releasePayload ( Throwable cause ) { final Payload payload = payloadReference . get ( ) ; if ( payload != null ) { payload . release ( cause ) ; payloadReference . set ( null ) ; } } | Triggers the release of the assigned payload . If the payload could be released then it is removed from the slot . |
14,971 | public static Protos . Environment . Variable variable ( String name , String value ) { checkNotNull ( name ) ; return Protos . Environment . Variable . newBuilder ( ) . setName ( name ) . setValue ( value ) . build ( ) ; } | Construct a Mesos environment variable . |
14,972 | public static List < Protos . Resource > resources ( Protos . Resource ... resources ) { checkNotNull ( resources ) ; return Arrays . asList ( resources ) ; } | Construct a list of resources . |
14,973 | public static Protos . Resource scalar ( String name , String role , double value ) { checkNotNull ( name ) ; checkNotNull ( role ) ; checkNotNull ( value ) ; return Protos . Resource . newBuilder ( ) . setName ( name ) . setType ( Protos . Value . Type . SCALAR ) . setScalar ( Protos . Value . Scalar . newBuilder ( ) . setValue ( value ) ) . setRole ( role ) . build ( ) ; } | Construct a scalar resource . |
14,974 | public static Protos . Value . Range range ( long begin , long end ) { return Protos . Value . Range . newBuilder ( ) . setBegin ( begin ) . setEnd ( end ) . build ( ) ; } | Construct a range value . |
14,975 | public static Protos . Resource ranges ( String name , String role , Protos . Value . Range ... ranges ) { checkNotNull ( name ) ; checkNotNull ( role ) ; checkNotNull ( ranges ) ; return Protos . Resource . newBuilder ( ) . setName ( name ) . setType ( Protos . Value . Type . RANGES ) . setRanges ( Protos . Value . Ranges . newBuilder ( ) . addAllRange ( Arrays . asList ( ranges ) ) . build ( ) ) . setRole ( role ) . build ( ) ; } | Construct a range resource . |
14,976 | public static LongStream rangeValues ( Collection < Protos . Resource > resources ) { checkNotNull ( resources ) ; return resources . stream ( ) . filter ( Protos . Resource :: hasRanges ) . flatMap ( r -> r . getRanges ( ) . getRangeList ( ) . stream ( ) ) . flatMapToLong ( Utils :: rangeValues ) ; } | Gets a stream of values from a collection of range resources . |
14,977 | public static LongStream rangeValues ( Protos . Value . Range range ) { checkNotNull ( range ) ; return LongStream . rangeClosed ( range . getBegin ( ) , range . getEnd ( ) ) ; } | Gets a stream of values from a range . |
14,978 | public Optional < TypeInformation < ? > > getFieldType ( int fieldIndex ) { if ( fieldIndex < 0 || fieldIndex >= fieldTypes . length ) { return Optional . empty ( ) ; } return Optional . of ( fieldTypes [ fieldIndex ] ) ; } | Returns the specified type information for the given field index . |
14,979 | public Optional < TypeInformation < ? > > getFieldType ( String fieldName ) { if ( fieldNameToIndex . containsKey ( fieldName ) ) { return Optional . of ( fieldTypes [ fieldNameToIndex . get ( fieldName ) ] ) ; } return Optional . empty ( ) ; } | Returns the specified type information for the given field name . |
14,980 | public Optional < String > getFieldName ( int fieldIndex ) { if ( fieldIndex < 0 || fieldIndex >= fieldNames . length ) { return Optional . empty ( ) ; } return Optional . of ( fieldNames [ fieldIndex ] ) ; } | Returns the specified name for the given field index . |
14,981 | public void deploy ( ) throws JobException { assertRunningInJobMasterMainThread ( ) ; final LogicalSlot slot = assignedResource ; checkNotNull ( slot , "In order to deploy the execution we first have to assign a resource via tryAssignResource." ) ; if ( ! slot . isAlive ( ) ) { throw new JobException ( "Target slot (TaskManager) for deployment is no longer alive." ) ; } ExecutionState previous = this . state ; if ( previous == SCHEDULED || previous == CREATED ) { if ( ! transitionState ( previous , DEPLOYING ) ) { throw new IllegalStateException ( "Cannot deploy task: Concurrent deployment call race." ) ; } } else { throw new IllegalStateException ( "The vertex must be in CREATED or SCHEDULED state to be deployed. Found state " + previous ) ; } if ( this != slot . getPayload ( ) ) { throw new IllegalStateException ( String . format ( "The execution %s has not been assigned to the assigned slot." , this ) ) ; } try { if ( this . state != DEPLOYING ) { slot . releaseSlot ( new FlinkException ( "Actual state of execution " + this + " (" + state + ") does not match expected state DEPLOYING." ) ) ; return ; } if ( LOG . isInfoEnabled ( ) ) { LOG . info ( String . format ( "Deploying %s (attempt #%d) to %s" , vertex . getTaskNameWithSubtaskIndex ( ) , attemptNumber , getAssignedResourceLocation ( ) ) ) ; } final TaskDeploymentDescriptor deployment = vertex . createDeploymentDescriptor ( attemptId , slot , taskRestore , attemptNumber ) ; taskRestore = null ; final TaskManagerGateway taskManagerGateway = slot . getTaskManagerGateway ( ) ; final ComponentMainThreadExecutor jobMasterMainThreadExecutor = vertex . getExecutionGraph ( ) . getJobMasterMainThreadExecutor ( ) ; CompletableFuture . supplyAsync ( ( ) -> taskManagerGateway . submitTask ( deployment , rpcTimeout ) , executor ) . thenCompose ( Function . identity ( ) ) . whenCompleteAsync ( ( ack , failure ) -> { if ( failure != null ) { if ( failure instanceof TimeoutException ) { String taskname = vertex . 
getTaskNameWithSubtaskIndex ( ) + " (" + attemptId + ')' ; markFailed ( new Exception ( "Cannot deploy task " + taskname + " - TaskManager (" + getAssignedResourceLocation ( ) + ") not responding after a rpcTimeout of " + rpcTimeout , failure ) ) ; } else { markFailed ( failure ) ; } } } , jobMasterMainThreadExecutor ) ; } catch ( Throwable t ) { markFailed ( t ) ; ExceptionUtils . rethrow ( t ) ; } } | Deploys the execution to the previously assigned resource . |
14,982 | public CompletableFuture < StackTraceSampleResponse > requestStackTraceSample ( int sampleId , int numSamples , Time delayBetweenSamples , int maxStackTraceDepth , Time timeout ) { final LogicalSlot slot = assignedResource ; if ( slot != null ) { final TaskManagerGateway taskManagerGateway = slot . getTaskManagerGateway ( ) ; return taskManagerGateway . requestStackTraceSample ( attemptId , sampleId , numSamples , delayBetweenSamples , maxStackTraceDepth , timeout ) ; } else { return FutureUtils . completedExceptionally ( new Exception ( "The execution has no slot assigned." ) ) ; } } | Request a stack trace sample from the task of this execution . |
14,983 | public void notifyCheckpointComplete ( long checkpointId , long timestamp ) { final LogicalSlot slot = assignedResource ; if ( slot != null ) { final TaskManagerGateway taskManagerGateway = slot . getTaskManagerGateway ( ) ; taskManagerGateway . notifyCheckpointComplete ( attemptId , getVertex ( ) . getJobId ( ) , checkpointId , timestamp ) ; } else { LOG . debug ( "The execution has no slot assigned. This indicates that the execution is " + "no longer running." ) ; } } | Notify the task of this execution about a completed checkpoint . |
14,984 | private void sendCancelRpcCall ( int numberRetries ) { final LogicalSlot slot = assignedResource ; if ( slot != null ) { final TaskManagerGateway taskManagerGateway = slot . getTaskManagerGateway ( ) ; final ComponentMainThreadExecutor jobMasterMainThreadExecutor = getVertex ( ) . getExecutionGraph ( ) . getJobMasterMainThreadExecutor ( ) ; CompletableFuture < Acknowledge > cancelResultFuture = FutureUtils . retry ( ( ) -> taskManagerGateway . cancelTask ( attemptId , rpcTimeout ) , numberRetries , jobMasterMainThreadExecutor ) ; cancelResultFuture . whenComplete ( ( ack , failure ) -> { if ( failure != null ) { fail ( new Exception ( "Task could not be canceled." , failure ) ) ; } } ) ; } } | This method sends a CancelTask message to the instance of the assigned slot . |
14,985 | private void sendUpdatePartitionInfoRpcCall ( final Iterable < PartitionInfo > partitionInfos ) { final LogicalSlot slot = assignedResource ; if ( slot != null ) { final TaskManagerGateway taskManagerGateway = slot . getTaskManagerGateway ( ) ; final TaskManagerLocation taskManagerLocation = slot . getTaskManagerLocation ( ) ; CompletableFuture < Acknowledge > updatePartitionsResultFuture = taskManagerGateway . updatePartitions ( attemptId , partitionInfos , rpcTimeout ) ; updatePartitionsResultFuture . whenCompleteAsync ( ( ack , failure ) -> { if ( failure != null ) { fail ( new IllegalStateException ( "Update task on TaskManager " + taskManagerLocation + " failed due to:" , failure ) ) ; } } , getVertex ( ) . getExecutionGraph ( ) . getJobMasterMainThreadExecutor ( ) ) ; } } | Update the partition infos on the assigned resource . |
14,986 | public CompletableFuture < Collection < TaskManagerLocation > > calculatePreferredLocations ( LocationPreferenceConstraint locationPreferenceConstraint ) { final Collection < CompletableFuture < TaskManagerLocation > > preferredLocationFutures = getVertex ( ) . getPreferredLocations ( ) ; final CompletableFuture < Collection < TaskManagerLocation > > preferredLocationsFuture ; switch ( locationPreferenceConstraint ) { case ALL : preferredLocationsFuture = FutureUtils . combineAll ( preferredLocationFutures ) ; break ; case ANY : final ArrayList < TaskManagerLocation > completedTaskManagerLocations = new ArrayList < > ( preferredLocationFutures . size ( ) ) ; for ( CompletableFuture < TaskManagerLocation > preferredLocationFuture : preferredLocationFutures ) { if ( preferredLocationFuture . isDone ( ) && ! preferredLocationFuture . isCompletedExceptionally ( ) ) { final TaskManagerLocation taskManagerLocation = preferredLocationFuture . getNow ( null ) ; if ( taskManagerLocation == null ) { throw new FlinkRuntimeException ( "TaskManagerLocationFuture was completed with null. This indicates a programming bug." ) ; } completedTaskManagerLocations . add ( taskManagerLocation ) ; } } preferredLocationsFuture = CompletableFuture . completedFuture ( completedTaskManagerLocations ) ; break ; default : throw new RuntimeException ( "Unknown LocationPreferenceConstraint " + locationPreferenceConstraint + '.' ) ; } return preferredLocationsFuture ; } | Calculates the preferred locations based on the location preference constraint . |
14,987 | public static < T > void writeVersionAndSerialize ( SimpleVersionedSerializer < T > serializer , T datum , DataOutputView out ) throws IOException { checkNotNull ( serializer , "serializer" ) ; checkNotNull ( datum , "datum" ) ; checkNotNull ( out , "out" ) ; final byte [ ] data = serializer . serialize ( datum ) ; out . writeInt ( serializer . getVersion ( ) ) ; out . writeInt ( data . length ) ; out . write ( data ) ; } | Serializes the version and datum into a stream . |
14,988 | public static < T > T readVersionAndDeSerialize ( SimpleVersionedSerializer < T > serializer , DataInputView in ) throws IOException { checkNotNull ( serializer , "serializer" ) ; checkNotNull ( in , "in" ) ; final int version = in . readInt ( ) ; final int length = in . readInt ( ) ; final byte [ ] data = new byte [ length ] ; in . readFully ( data ) ; return serializer . deserialize ( version , data ) ; } | Deserializes the version and datum from a stream . |
14,989 | public boolean cleanup ( ) throws IOException { return ! state . compareAndSet ( State . ONGOING , State . DELETED ) || fileSystem . delete ( directory , true ) ; } | Calling this method will attempt delete the underlying snapshot directory recursively if the state is ongoing . In this case the state will be set to deleted as a result of this call . |
14,990 | void initializeState ( final ListState < byte [ ] > bucketStates , final ListState < Long > partCounterState ) throws Exception { initializePartCounter ( partCounterState ) ; LOG . info ( "Subtask {} initializing its state (max part counter={})." , subtaskIndex , maxPartCounter ) ; initializeActiveBuckets ( bucketStates ) ; } | Initializes the state after recovery from a failure . |
14,991 | private boolean shouldRoll ( ) throws IOException { boolean shouldRoll = false ; int subtaskIndex = getRuntimeContext ( ) . getIndexOfThisSubtask ( ) ; if ( ! isWriterOpen ) { shouldRoll = true ; LOG . debug ( "RollingSink {} starting new initial bucket. " , subtaskIndex ) ; } if ( bucketer . shouldStartNewBucket ( new Path ( basePath ) , currentBucketDirectory ) ) { shouldRoll = true ; LOG . debug ( "RollingSink {} starting new bucket because {} said we should. " , subtaskIndex , bucketer ) ; partCounter = 0 ; } if ( isWriterOpen ) { long writePosition = writer . getPos ( ) ; if ( isWriterOpen && writePosition > batchSize ) { shouldRoll = true ; LOG . debug ( "RollingSink {} starting new bucket because file position {} is above batch size {}." , subtaskIndex , writePosition , batchSize ) ; } } return shouldRoll ; } | Determines whether we should change the bucket file we are writing to . |
14,992 | private void openNewPartFile ( ) throws Exception { closeCurrentPartFile ( ) ; Path newBucketDirectory = bucketer . getNextBucketPath ( new Path ( basePath ) ) ; if ( ! newBucketDirectory . equals ( currentBucketDirectory ) ) { currentBucketDirectory = newBucketDirectory ; try { if ( fs . mkdirs ( currentBucketDirectory ) ) { LOG . debug ( "Created new bucket directory: {}" , currentBucketDirectory ) ; } } catch ( IOException e ) { throw new RuntimeException ( "Could not create base path for new rolling file." , e ) ; } } int subtaskIndex = getRuntimeContext ( ) . getIndexOfThisSubtask ( ) ; currentPartPath = new Path ( currentBucketDirectory , partPrefix + "-" + subtaskIndex + "-" + partCounter ) ; while ( fs . exists ( currentPartPath ) || fs . exists ( getPendingPathFor ( currentPartPath ) ) || fs . exists ( getInProgressPathFor ( currentPartPath ) ) ) { partCounter ++ ; currentPartPath = new Path ( currentBucketDirectory , partPrefix + "-" + subtaskIndex + "-" + partCounter ) ; } partCounter ++ ; LOG . debug ( "Next part path is {}" , currentPartPath . toString ( ) ) ; Path inProgressPath = getInProgressPathFor ( currentPartPath ) ; writer . open ( fs , inProgressPath ) ; isWriterOpen = true ; } | Opens a new part file . |
14,993 | private void closeCurrentPartFile ( ) throws Exception { if ( isWriterOpen ) { writer . close ( ) ; isWriterOpen = false ; } if ( currentPartPath != null ) { Path inProgressPath = getInProgressPathFor ( currentPartPath ) ; Path pendingPath = getPendingPathFor ( currentPartPath ) ; fs . rename ( inProgressPath , pendingPath ) ; LOG . debug ( "Moving in-progress bucket {} to pending file {}" , inProgressPath , pendingPath ) ; this . bucketState . pendingFiles . add ( currentPartPath . toString ( ) ) ; } } | Closes the current part file . |
14,994 | public GridGraph addDimension ( long size , boolean wrapEndpoints ) { Preconditions . checkArgument ( size >= 2 , "Dimension size must be at least 2" ) ; vertexCount = Math . multiplyExact ( vertexCount , size ) ; if ( size == 2 ) { wrapEndpoints = false ; } dimensions . add ( new Tuple2 < > ( size , wrapEndpoints ) ) ; return this ; } | Required configuration for each dimension of the graph . |
14,995 | public < T > TypeSerializer < T > getRestoredNestedSerializer ( int pos ) { checkArgument ( pos < nestedSnapshots . length ) ; @ SuppressWarnings ( "unchecked" ) TypeSerializerSnapshot < T > snapshot = ( TypeSerializerSnapshot < T > ) nestedSnapshots [ pos ] ; return snapshot . restoreSerializer ( ) ; } | Creates the restore serializer from the pos - th config snapshot . |
14,996 | public < T > TypeSerializerSchemaCompatibility < T > resolveCompatibilityWithNested ( TypeSerializerSchemaCompatibility < ? > outerCompatibility , TypeSerializer < ? > ... newNestedSerializers ) { checkArgument ( newNestedSerializers . length == nestedSnapshots . length , "Different number of new serializers and existing serializer configuration snapshots" ) ; if ( outerCompatibility . isIncompatible ( ) ) { return TypeSerializerSchemaCompatibility . incompatible ( ) ; } boolean nestedSerializerRequiresMigration = false ; for ( int i = 0 ; i < nestedSnapshots . length ; i ++ ) { TypeSerializerSchemaCompatibility < ? > compatibility = resolveCompatibility ( newNestedSerializers [ i ] , nestedSnapshots [ i ] ) ; if ( compatibility . isIncompatible ( ) ) { return TypeSerializerSchemaCompatibility . incompatible ( ) ; } if ( compatibility . isCompatibleAfterMigration ( ) ) { nestedSerializerRequiresMigration = true ; } } return ( nestedSerializerRequiresMigration || ! outerCompatibility . isCompatibleAsIs ( ) ) ? TypeSerializerSchemaCompatibility . compatibleAfterMigration ( ) : TypeSerializerSchemaCompatibility . compatibleAsIs ( ) ; } | Resolves the compatibility of the nested serializer snapshots with the nested serializers of the new outer serializer . |
14,997 | public final void writeNestedSerializerSnapshots ( DataOutputView out ) throws IOException { out . writeInt ( MAGIC_NUMBER ) ; out . writeInt ( VERSION ) ; out . writeInt ( nestedSnapshots . length ) ; for ( TypeSerializerSnapshot < ? > snap : nestedSnapshots ) { TypeSerializerSnapshot . writeVersionedSnapshot ( out , snap ) ; } } | Writes the composite snapshot of all the contained serializers . |
14,998 | public static NestedSerializersSnapshotDelegate readNestedSerializerSnapshots ( DataInputView in , ClassLoader cl ) throws IOException { final int magicNumber = in . readInt ( ) ; if ( magicNumber != MAGIC_NUMBER ) { throw new IOException ( String . format ( "Corrupt data, magic number mismatch. Expected %8x, found %8x" , MAGIC_NUMBER , magicNumber ) ) ; } final int version = in . readInt ( ) ; if ( version != VERSION ) { throw new IOException ( "Unrecognized version: " + version ) ; } final int numSnapshots = in . readInt ( ) ; final TypeSerializerSnapshot < ? > [ ] nestedSnapshots = new TypeSerializerSnapshot < ? > [ numSnapshots ] ; for ( int i = 0 ; i < numSnapshots ; i ++ ) { nestedSnapshots [ i ] = TypeSerializerSnapshot . readVersionedSnapshot ( in , cl ) ; } return new NestedSerializersSnapshotDelegate ( nestedSnapshots ) ; } | Reads the composite snapshot of all the contained serializers . |
14,999 | @ SuppressWarnings ( "unchecked" ) private static < E > TypeSerializerSchemaCompatibility < E > resolveCompatibility ( TypeSerializer < ? > serializer , TypeSerializerSnapshot < ? > snapshot ) { TypeSerializer < E > typedSerializer = ( TypeSerializer < E > ) serializer ; TypeSerializerSnapshot < E > typedSnapshot = ( TypeSerializerSnapshot < E > ) snapshot ; return typedSnapshot . resolveSchemaCompatibility ( typedSerializer ) ; } | Utility method to conjure up a new scope for the generic parameters . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.