idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
14,500 | public void open ( Context < K , W > ctx ) throws Exception { this . ctx = ctx ; this . windowAssigner . open ( ctx ) ; } | Initialization method for the function . It is called before the actual working methods . |
14,501 | public SingleOutputStreamOperator < T > sum ( int positionToSum ) { return aggregate ( new SumAggregator < > ( positionToSum , input . getType ( ) , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that sums every window of the data stream at the given position . |
14,502 | public SingleOutputStreamOperator < T > sum ( String field ) { return aggregate ( new SumAggregator < > ( field , input . getType ( ) , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that sums every window of the pojo data stream at the given field for every window . |
14,503 | public SingleOutputStreamOperator < T > min ( int positionToMin ) { return aggregate ( new ComparableAggregator < > ( positionToMin , input . getType ( ) , AggregationFunction . AggregationType . MIN , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that that gives the minimum value of every window of the data stream at the given position . |
14,504 | public SingleOutputStreamOperator < T > min ( String field ) { return aggregate ( new ComparableAggregator < > ( field , input . getType ( ) , AggregationFunction . AggregationType . MIN , false , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that that gives the minimum value of the pojo data stream at the given field expression for every window . |
14,505 | public SingleOutputStreamOperator < T > max ( int positionToMax ) { return aggregate ( new ComparableAggregator < > ( positionToMax , input . getType ( ) , AggregationFunction . AggregationType . MAX , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that gives the maximum value of every window of the data stream at the given position . |
14,506 | public SortedGrouping < T > withPartitioner ( Partitioner < ? > partitioner ) { Preconditions . checkNotNull ( partitioner ) ; getKeys ( ) . validateCustomPartitioner ( partitioner , null ) ; this . customPartitioner = partitioner ; return this ; } | Uses a custom partitioner for the grouping . |
14,507 | public static byte [ ] encodeUTF8 ( String str ) { byte [ ] bytes = allocateReuseBytes ( str . length ( ) * MAX_BYTES_PER_CHAR ) ; int len = encodeUTF8 ( str , bytes ) ; return Arrays . copyOf ( bytes , len ) ; } | This method must have the same result with JDK s String . getBytes . |
14,508 | public MethodlessRouter < T > addRoute ( String pathPattern , T target ) { PathPattern p = new PathPattern ( pathPattern ) ; if ( routes . containsKey ( p ) ) { return this ; } routes . put ( p , target ) ; return this ; } | This method does nothing if the path pattern has already been added . A path pattern can only point to one target . |
14,509 | public boolean anyMatched ( String [ ] requestPathTokens ) { Map < String , String > pathParams = new HashMap < > ( ) ; for ( PathPattern pattern : routes . keySet ( ) ) { if ( pattern . match ( requestPathTokens , pathParams ) ) { return true ; } pathParams . clear ( ) ; } return false ; } | Checks if there s any matching route . |
14,510 | private void initialize ( String scheme , String authority , String path ) { try { this . uri = new URI ( scheme , authority , normalizePath ( path ) , null , null ) . normalize ( ) ; } catch ( URISyntaxException e ) { throw new IllegalArgumentException ( e ) ; } } | Initializes a path object given the scheme authority and path string . |
14,511 | public boolean isAbsolute ( ) { final int start = hasWindowsDrive ( uri . getPath ( ) , true ) ? 3 : 0 ; return uri . getPath ( ) . startsWith ( SEPARATOR , start ) ; } | Checks if the directory of this path is absolute . |
14,512 | public int depth ( ) { String path = uri . getPath ( ) ; int depth = 0 ; int slash = path . length ( ) == 1 && path . charAt ( 0 ) == '/' ? - 1 : 0 ; while ( slash != - 1 ) { depth ++ ; slash = path . indexOf ( SEPARATOR , slash + 1 ) ; } return depth ; } | Returns the number of elements in this path . |
14,513 | public Avro recordClass ( Class < ? extends SpecificRecord > recordClass ) { Preconditions . checkNotNull ( recordClass ) ; this . recordClass = recordClass ; return this ; } | Sets the class of the Avro specific record . |
14,514 | public static void readFully ( final InputStream in , final byte [ ] buf , int off , final int len ) throws IOException { int toRead = len ; while ( toRead > 0 ) { final int ret = in . read ( buf , off , toRead ) ; if ( ret < 0 ) { throw new IOException ( "Premeture EOF from inputStream" ) ; } toRead -= ret ; off += ret ; } } | Reads len bytes in a loop . |
14,515 | private static String getAlgorithmsListing ( ) { StrBuilder strBuilder = new StrBuilder ( ) ; strBuilder . appendNewLine ( ) . appendln ( "Select an algorithm to view usage: flink run examples/flink-gelly-examples_<version>.jar --algorithm <algorithm>" ) . appendNewLine ( ) . appendln ( "Available algorithms:" ) ; for ( Driver algorithm : driverFactory ) { strBuilder . append ( " " ) . appendFixedWidthPadRight ( algorithm . getName ( ) , 30 , ' ' ) . append ( algorithm . getShortDescription ( ) ) . appendNewLine ( ) ; } return strBuilder . toString ( ) ; } | List available algorithms . This is displayed to the user when no valid algorithm is given in the program parameterization . |
14,516 | private static String getAlgorithmUsage ( String algorithmName ) { StrBuilder strBuilder = new StrBuilder ( ) ; Driver algorithm = driverFactory . get ( algorithmName ) ; strBuilder . appendNewLine ( ) . appendNewLine ( ) . appendln ( algorithm . getLongDescription ( ) ) . appendNewLine ( ) . append ( "usage: flink run examples/flink-gelly-examples_<version>.jar --algorithm " ) . append ( algorithmName ) . append ( " [algorithm options] --input <input> [input options] --output <output> [output options]" ) . appendNewLine ( ) . appendNewLine ( ) . appendln ( "Available inputs:" ) ; for ( Input input : inputFactory ) { strBuilder . append ( " --input " ) . append ( input . getName ( ) ) . append ( " " ) . appendln ( input . getUsage ( ) ) ; } String algorithmParameterization = algorithm . getUsage ( ) ; if ( algorithmParameterization . length ( ) > 0 ) { strBuilder . appendNewLine ( ) . appendln ( "Algorithm configuration:" ) . append ( " " ) . appendln ( algorithm . getUsage ( ) ) ; } strBuilder . appendNewLine ( ) . appendln ( "Available outputs:" ) ; for ( Output output : outputFactory ) { strBuilder . append ( " --output " ) . append ( output . getName ( ) ) . append ( " " ) . appendln ( output . getUsage ( ) ) ; } return strBuilder . appendNewLine ( ) . toString ( ) ; } | Display the usage for the given algorithm . This includes options for all compatible inputs the selected algorithm and outputs implemented by the selected algorithm . |
14,517 | private void execute ( ) throws Exception { if ( result == null ) { env . execute ( executionName ) ; } else { output . write ( executionName . toString ( ) , System . out , result ) ; } System . out . println ( ) ; algorithm . printAnalytics ( System . out ) ; if ( jobDetailsPath . getValue ( ) != null ) { writeJobDetails ( env , jobDetailsPath . getValue ( ) ) ; } } | Execute the Flink job . |
14,518 | protected void startThreads ( ) { if ( this . readThread != null ) { this . readThread . start ( ) ; } if ( this . sortThread != null ) { this . sortThread . start ( ) ; } if ( this . spillThread != null ) { this . spillThread . start ( ) ; } } | Starts all the threads that are used by this sort - merger . |
14,519 | protected final void setResultIteratorException ( IOException ioex ) { synchronized ( this . iteratorLock ) { if ( this . iteratorException == null ) { this . iteratorException = ioex ; this . iteratorLock . notifyAll ( ) ; } } } | Reports an exception to all threads that are waiting for the result iterator . |
14,520 | protected static < T > CircularElement < T > endMarker ( ) { @ SuppressWarnings ( "unchecked" ) CircularElement < T > c = ( CircularElement < T > ) EOF_MARKER ; return c ; } | Gets the element that is passed as marker for the end of data . |
14,521 | protected static < T > CircularElement < T > spillingMarker ( ) { @ SuppressWarnings ( "unchecked" ) CircularElement < T > c = ( CircularElement < T > ) SPILLING_MARKER ; return c ; } | Gets the element that is passed as marker for signal beginning of spilling . |
14,522 | public void createResource ( ) throws Exception { cluster = builder . getCluster ( ) ; session = cluster . connect ( ) ; session . execute ( String . format ( "CREATE KEYSPACE IF NOT EXISTS %s with replication={'class':'SimpleStrategy', 'replication_factor':1};" , keySpace ) ) ; session . execute ( String . format ( "CREATE TABLE IF NOT EXISTS %s.%s (sink_id text, sub_id int, checkpoint_id bigint, PRIMARY KEY (sink_id, sub_id));" , keySpace , table ) ) ; try { session . close ( ) ; } catch ( Exception e ) { LOG . error ( "Error while closing session." , e ) ; } try { cluster . close ( ) ; } catch ( Exception e ) { LOG . error ( "Error while closing cluster." , e ) ; } } | Generates the necessary tables to store information . |
14,523 | public void jarDir ( File dirOrFile2Jar , File destJar ) throws IOException { if ( dirOrFile2Jar == null || destJar == null ) { throw new IllegalArgumentException ( ) ; } mDestJarName = destJar . getCanonicalPath ( ) ; FileOutputStream fout = new FileOutputStream ( destJar ) ; JarOutputStream jout = new JarOutputStream ( fout ) ; try { jarDir ( dirOrFile2Jar , jout , null ) ; } catch ( IOException ioe ) { throw ioe ; } finally { jout . close ( ) ; fout . close ( ) ; } } | Jars a given directory or single file into a JarOutputStream . |
14,524 | public void unjarDir ( File jarFile , File destDir ) throws IOException { BufferedOutputStream dest = null ; FileInputStream fis = new FileInputStream ( jarFile ) ; unjar ( fis , destDir ) ; } | Unjars a given jar file into a given directory . |
14,525 | public void unjar ( InputStream in , File destDir ) throws IOException { BufferedOutputStream dest = null ; JarInputStream jis = new JarInputStream ( in ) ; JarEntry entry ; while ( ( entry = jis . getNextJarEntry ( ) ) != null ) { if ( entry . isDirectory ( ) ) { File dir = new File ( destDir , entry . getName ( ) ) ; dir . mkdir ( ) ; if ( entry . getTime ( ) != - 1 ) { dir . setLastModified ( entry . getTime ( ) ) ; } continue ; } int count ; byte [ ] data = new byte [ BUFFER_SIZE ] ; File destFile = new File ( destDir , entry . getName ( ) ) ; if ( mVerbose ) { System . out . println ( "unjarring " + destFile + " from " + entry . getName ( ) ) ; } FileOutputStream fos = new FileOutputStream ( destFile ) ; dest = new BufferedOutputStream ( fos , BUFFER_SIZE ) ; try { while ( ( count = jis . read ( data , 0 , BUFFER_SIZE ) ) != - 1 ) { dest . write ( data , 0 , count ) ; } dest . flush ( ) ; } finally { dest . close ( ) ; } if ( entry . getTime ( ) != - 1 ) { destFile . setLastModified ( entry . getTime ( ) ) ; } } jis . close ( ) ; } | Given an InputStream on a jar file unjars the contents into the given directory . |
14,526 | private void jarDir ( File dirOrFile2jar , JarOutputStream jos , String path ) throws IOException { if ( mVerbose ) { System . out . println ( "checking " + dirOrFile2jar ) ; } if ( dirOrFile2jar . isDirectory ( ) ) { String [ ] dirList = dirOrFile2jar . list ( ) ; String subPath = ( path == null ) ? "" : ( path + dirOrFile2jar . getName ( ) + SEP ) ; if ( path != null ) { JarEntry je = new JarEntry ( subPath ) ; je . setTime ( dirOrFile2jar . lastModified ( ) ) ; jos . putNextEntry ( je ) ; jos . flush ( ) ; jos . closeEntry ( ) ; } for ( int i = 0 ; i < dirList . length ; i ++ ) { File f = new File ( dirOrFile2jar , dirList [ i ] ) ; jarDir ( f , jos , subPath ) ; } } else if ( dirOrFile2jar . exists ( ) ) { if ( dirOrFile2jar . getCanonicalPath ( ) . equals ( mDestJarName ) ) { if ( mVerbose ) { System . out . println ( "skipping " + dirOrFile2jar . getPath ( ) ) ; } return ; } if ( mVerbose ) { System . out . println ( "adding " + dirOrFile2jar . getPath ( ) ) ; } FileInputStream fis = new FileInputStream ( dirOrFile2jar ) ; try { JarEntry entry = new JarEntry ( path + dirOrFile2jar . getName ( ) ) ; entry . setTime ( dirOrFile2jar . lastModified ( ) ) ; jos . putNextEntry ( entry ) ; while ( ( mByteCount = fis . read ( mBuffer ) ) != - 1 ) { jos . write ( mBuffer , 0 , mByteCount ) ; if ( mVerbose ) { System . out . println ( "wrote " + mByteCount + " bytes" ) ; } } jos . flush ( ) ; jos . closeEntry ( ) ; } catch ( IOException ioe ) { throw ioe ; } finally { fis . close ( ) ; } } } | Recursively jars up the given path under the given directory . |
14,527 | private boolean ensureBatch ( ) throws IOException { if ( nextRow >= rowsInBatch ) { nextRow = 0 ; boolean moreRows = orcRowsReader . nextBatch ( rowBatch ) ; if ( moreRows ) { rowsInBatch = fillRows ( rows , schema , rowBatch , selectedFields ) ; } return moreRows ; } return true ; } | Checks if there is at least one row left in the batch to return . If no more row are available it reads another batch of rows . |
14,528 | public JobSubmissionResult run ( JobWithJars jobWithJars , int parallelism , SavepointRestoreSettings savepointSettings ) throws CompilerException , ProgramInvocationException { ClassLoader classLoader = jobWithJars . getUserCodeClassLoader ( ) ; if ( classLoader == null ) { throw new IllegalArgumentException ( "The given JobWithJars does not provide a usercode class loader." ) ; } OptimizedPlan optPlan = getOptimizedPlan ( compiler , jobWithJars , parallelism ) ; return run ( optPlan , jobWithJars . getJarFiles ( ) , jobWithJars . getClasspaths ( ) , classLoader , savepointSettings ) ; } | Runs a program on the Flink cluster to which this client is connected . The call blocks until the execution is complete and returns afterwards . |
14,529 | public Map < String , OptionalFailure < Object > > getAccumulators ( JobID jobID ) throws Exception { return getAccumulators ( jobID , ClassLoader . getSystemClassLoader ( ) ) ; } | Requests and returns the accumulators for the given job identifier . Accumulators can be requested while a is running or after it has finished . The default class loader is used to deserialize the incoming accumulator results . |
14,530 | private static OptimizedPlan getOptimizedPlan ( Optimizer compiler , JobWithJars prog , int parallelism ) throws CompilerException , ProgramInvocationException { return getOptimizedPlan ( compiler , prog . getPlan ( ) , parallelism ) ; } | Creates the optimized plan for a given program using this client s compiler . |
14,531 | public static String generateRuntimeName ( Class < ? > clazz , String [ ] fields ) { return TableConnectorUtils . generateRuntimeName ( clazz , fields ) ; } | Returns the table connector name used for log and web UI . |
14,532 | public ChannelFuture requestSubpartition ( final ResultPartitionID partitionId , final int subpartitionIndex , final RemoteInputChannel inputChannel , int delayMs ) throws IOException { checkNotClosed ( ) ; LOG . debug ( "Requesting subpartition {} of partition {} with {} ms delay." , subpartitionIndex , partitionId , delayMs ) ; clientHandler . addInputChannel ( inputChannel ) ; final PartitionRequest request = new PartitionRequest ( partitionId , subpartitionIndex , inputChannel . getInputChannelId ( ) , inputChannel . getInitialCredit ( ) ) ; final ChannelFutureListener listener = new ChannelFutureListener ( ) { public void operationComplete ( ChannelFuture future ) throws Exception { if ( ! future . isSuccess ( ) ) { clientHandler . removeInputChannel ( inputChannel ) ; SocketAddress remoteAddr = future . channel ( ) . remoteAddress ( ) ; inputChannel . onError ( new LocalTransportException ( String . format ( "Sending the partition request to '%s' failed." , remoteAddr ) , future . channel ( ) . localAddress ( ) , future . cause ( ) ) ) ; } } } ; if ( delayMs == 0 ) { ChannelFuture f = tcpChannel . writeAndFlush ( request ) ; f . addListener ( listener ) ; return f ; } else { final ChannelFuture [ ] f = new ChannelFuture [ 1 ] ; tcpChannel . eventLoop ( ) . schedule ( new Runnable ( ) { public void run ( ) { f [ 0 ] = tcpChannel . writeAndFlush ( request ) ; f [ 0 ] . addListener ( listener ) ; } } , delayMs , TimeUnit . MILLISECONDS ) ; return f [ 0 ] ; } } | Requests a remote intermediate result partition queue . |
14,533 | public void selectFields ( String [ ] fieldNames ) { checkNotNull ( fieldNames , "fieldNames" ) ; this . fieldNames = fieldNames ; RowTypeInfo rowTypeInfo = ( RowTypeInfo ) ParquetSchemaConverter . fromParquetType ( expectedFileSchema ) ; TypeInformation [ ] selectFieldTypes = new TypeInformation [ fieldNames . length ] ; for ( int i = 0 ; i < fieldNames . length ; i ++ ) { try { selectFieldTypes [ i ] = rowTypeInfo . getTypeAt ( fieldNames [ i ] ) ; } catch ( IndexOutOfBoundsException e ) { throw new IllegalArgumentException ( String . format ( "Fail to access Field %s , " + "which is not contained in the file schema" , fieldNames [ i ] ) , e ) ; } } this . fieldTypes = selectFieldTypes ; } | Configures the fields to be read and returned by the ParquetInputFormat . Selected fields must be present in the configured schema . |
14,534 | private MessageType getReadSchema ( MessageType fileSchema , Path filePath ) { RowTypeInfo fileTypeInfo = ( RowTypeInfo ) ParquetSchemaConverter . fromParquetType ( fileSchema ) ; List < Type > types = new ArrayList < > ( ) ; for ( int i = 0 ; i < fieldNames . length ; ++ i ) { String readFieldName = fieldNames [ i ] ; TypeInformation < ? > readFieldType = fieldTypes [ i ] ; if ( fileTypeInfo . getFieldIndex ( readFieldName ) < 0 ) { if ( ! skipWrongSchemaFileSplit ) { throw new IllegalArgumentException ( "Field " + readFieldName + " cannot be found in schema of " + " Parquet file: " + filePath + "." ) ; } else { this . skipThisSplit = true ; return fileSchema ; } } if ( ! readFieldType . equals ( fileTypeInfo . getTypeAt ( readFieldName ) ) ) { if ( ! skipWrongSchemaFileSplit ) { throw new IllegalArgumentException ( "Expecting type " + readFieldType + " for field " + readFieldName + " but found type " + fileTypeInfo . getTypeAt ( readFieldName ) + " in Parquet file: " + filePath + "." ) ; } else { this . skipThisSplit = true ; return fileSchema ; } } types . add ( fileSchema . getType ( readFieldName ) ) ; } return new MessageType ( fileSchema . getName ( ) , types ) ; } | Generates and returns the read schema based on the projected fields for a given file . |
14,535 | private long calExpirationTime ( long operatorTime , long relativeSize ) { if ( operatorTime < Long . MAX_VALUE ) { return operatorTime - relativeSize - allowedLateness - 1 ; } else { return Long . MAX_VALUE ; } } | Calculate the expiration time with the given operator time and relative window size . |
14,536 | private void registerCleanUpTimer ( Context ctx , long rowTime , boolean leftRow ) throws IOException { if ( leftRow ) { long cleanUpTime = rowTime + leftRelativeSize + minCleanUpInterval + allowedLateness + 1 ; registerTimer ( ctx , cleanUpTime ) ; rightTimerState . update ( cleanUpTime ) ; } else { long cleanUpTime = rowTime + rightRelativeSize + minCleanUpInterval + allowedLateness + 1 ; registerTimer ( ctx , cleanUpTime ) ; leftTimerState . update ( cleanUpTime ) ; } } | Register a timer for cleaning up rows in a specified time . |
14,537 | private void removeExpiredRows ( Collector < BaseRow > collector , long expirationTime , MapState < Long , List < Tuple2 < BaseRow , Boolean > > > rowCache , ValueState < Long > timerState , OnTimerContext ctx , boolean removeLeft ) throws Exception { Iterator < Map . Entry < Long , List < Tuple2 < BaseRow , Boolean > > > > iterator = rowCache . iterator ( ) ; long earliestTimestamp = - 1L ; while ( iterator . hasNext ( ) ) { Map . Entry < Long , List < Tuple2 < BaseRow , Boolean > > > entry = iterator . next ( ) ; Long rowTime = entry . getKey ( ) ; if ( rowTime <= expirationTime ) { if ( removeLeft && joinType . isLeftOuter ( ) ) { List < Tuple2 < BaseRow , Boolean > > rows = entry . getValue ( ) ; rows . forEach ( ( Tuple2 < BaseRow , Boolean > tuple ) -> { if ( ! tuple . f1 ) { collector . collect ( paddingUtil . padLeft ( tuple . f0 ) ) ; } } ) ; } else if ( ! removeLeft && joinType . isRightOuter ( ) ) { List < Tuple2 < BaseRow , Boolean > > rows = entry . getValue ( ) ; rows . forEach ( ( Tuple2 < BaseRow , Boolean > tuple ) -> { if ( ! tuple . f1 ) { collector . collect ( paddingUtil . padRight ( tuple . f0 ) ) ; } } ) ; } iterator . remove ( ) ; } else { if ( rowTime < earliestTimestamp || earliestTimestamp < 0 ) { earliestTimestamp = rowTime ; } } } if ( earliestTimestamp > 0 ) { registerCleanUpTimer ( ctx , earliestTimestamp , removeLeft ) ; } else { timerState . clear ( ) ; rowCache . clear ( ) ; } } | Remove the expired rows . Register a new timer if the cache still holds valid rows after the cleaning up . |
14,538 | public < T > DynamicResult < T > createResult ( Environment env , TableSchema schema , ExecutionConfig config ) { final RowTypeInfo outputType = new RowTypeInfo ( schema . getFieldTypes ( ) , schema . getFieldNames ( ) ) ; if ( env . getExecution ( ) . isStreamingExecution ( ) ) { final InetAddress gatewayAddress = getGatewayAddress ( env . getDeployment ( ) ) ; final int gatewayPort = getGatewayPort ( env . getDeployment ( ) ) ; if ( env . getExecution ( ) . isChangelogMode ( ) ) { return new ChangelogCollectStreamResult < > ( outputType , config , gatewayAddress , gatewayPort ) ; } else { return new MaterializedCollectStreamResult < > ( outputType , config , gatewayAddress , gatewayPort , env . getExecution ( ) . getMaxTableResultRows ( ) ) ; } } else { if ( ! env . getExecution ( ) . isTableMode ( ) ) { throw new SqlExecutionException ( "Results of batch queries can only be served in table mode." ) ; } return new MaterializedCollectBatchResult < > ( outputType , config ) ; } } | Creates a result . Might start threads or opens sockets so every created result must be closed . |
14,539 | public void lazyDestroy ( ) { synchronized ( availableMemorySegments ) { if ( ! isDestroyed ) { MemorySegment segment ; while ( ( segment = availableMemorySegments . poll ( ) ) != null ) { returnMemorySegment ( segment ) ; } BufferListener listener ; while ( ( listener = registeredListeners . poll ( ) ) != null ) { listener . notifyBufferDestroyed ( ) ; } isDestroyed = true ; } } try { networkBufferPool . destroyBufferPool ( this ) ; } catch ( IOException e ) { ExceptionUtils . rethrow ( e ) ; } } | Destroy is called after the produce or consume phase of a task finishes . |
14,540 | public static < KT , KB , VVT , VVB , EV > BipartiteGraph < KT , KB , VVT , VVB , EV > fromDataSet ( DataSet < Vertex < KT , VVT > > topVertices , DataSet < Vertex < KB , VVB > > bottomVertices , DataSet < BipartiteEdge < KT , KB , EV > > edges , ExecutionEnvironment context ) { return new BipartiteGraph < > ( topVertices , bottomVertices , edges , context ) ; } | Create bipartite graph from datasets . |
14,541 | public Graph < KT , VVT , Tuple2 < EV , EV > > projectionTopSimple ( ) { DataSet < Edge < KT , Tuple2 < EV , EV > > > newEdges = edges . join ( edges ) . where ( 1 ) . equalTo ( 1 ) . with ( new ProjectionTopSimple < > ( ) ) . name ( "Simple top projection" ) ; return Graph . fromDataSet ( topVertices , newEdges , context ) ; } | Convert a bipartite graph into an undirected graph that contains only top vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains a bottom vertex they are both connected to . |
14,542 | public Graph < KB , VVB , Tuple2 < EV , EV > > projectionBottomSimple ( ) { DataSet < Edge < KB , Tuple2 < EV , EV > > > newEdges = edges . join ( edges ) . where ( 0 ) . equalTo ( 0 ) . with ( new ProjectionBottomSimple < > ( ) ) . name ( "Simple bottom projection" ) ; return Graph . fromDataSet ( bottomVertices , newEdges , context ) ; } | Convert a bipartite graph into an undirected graph that contains only bottom vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains a top vertex they are both connected to . |
14,543 | public Graph < KT , VVT , Projection < KB , VVB , VVT , EV > > projectionTopFull ( ) { DataSet < Tuple5 < KT , KB , EV , VVT , VVB > > edgesWithVertices = joinEdgeWithVertices ( ) ; DataSet < Edge < KT , Projection < KB , VVB , VVT , EV > > > newEdges = edgesWithVertices . join ( edgesWithVertices ) . where ( 1 ) . equalTo ( 1 ) . with ( new ProjectionTopFull < > ( ) ) . name ( "Full top projection" ) ; return Graph . fromDataSet ( topVertices , newEdges , context ) ; } | Convert a bipartite graph into a graph that contains only top vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains at least one bottom vertex they both connect to . |
14,544 | public Graph < KB , VVB , Projection < KT , VVT , VVB , EV > > projectionBottomFull ( ) { DataSet < Tuple5 < KT , KB , EV , VVT , VVB > > edgesWithVertices = joinEdgeWithVertices ( ) ; DataSet < Edge < KB , Projection < KT , VVT , VVB , EV > > > newEdges = edgesWithVertices . join ( edgesWithVertices ) . where ( 0 ) . equalTo ( 0 ) . with ( new ProjectionBottomFull < > ( ) ) . name ( "Full bottom projection" ) ; return Graph . fromDataSet ( bottomVertices , newEdges , context ) ; } | Convert a bipartite graph into a graph that contains only bottom vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains at least one top vertex they both connect to . |
14,545 | public JobExecutionResult execute ( String jobName ) throws Exception { StreamGraph streamGraph = getStreamGraph ( ) ; streamGraph . setJobName ( jobName ) ; JobGraph jobGraph = streamGraph . getJobGraph ( ) ; jobGraph . setAllowQueuedScheduling ( true ) ; Configuration configuration = new Configuration ( ) ; configuration . addAll ( jobGraph . getJobConfiguration ( ) ) ; configuration . setString ( TaskManagerOptions . MANAGED_MEMORY_SIZE , "0" ) ; configuration . addAll ( this . configuration ) ; if ( ! configuration . contains ( RestOptions . BIND_PORT ) ) { configuration . setString ( RestOptions . BIND_PORT , "0" ) ; } int numSlotsPerTaskManager = configuration . getInteger ( TaskManagerOptions . NUM_TASK_SLOTS , jobGraph . getMaximumParallelism ( ) ) ; MiniClusterConfiguration cfg = new MiniClusterConfiguration . Builder ( ) . setConfiguration ( configuration ) . setNumSlotsPerTaskManager ( numSlotsPerTaskManager ) . build ( ) ; if ( LOG . isInfoEnabled ( ) ) { LOG . info ( "Running job on local embedded Flink mini cluster" ) ; } MiniCluster miniCluster = new MiniCluster ( cfg ) ; try { miniCluster . start ( ) ; configuration . setInteger ( RestOptions . PORT , miniCluster . getRestAddress ( ) . get ( ) . getPort ( ) ) ; return miniCluster . executeJobBlocking ( jobGraph ) ; } finally { transformations . clear ( ) ; miniCluster . close ( ) ; } } | Executes the JobGraph of the on a mini cluster of CLusterUtil with a user specified name . |
14,546 | public static KvStateService fromConfiguration ( TaskManagerServicesConfiguration taskManagerServicesConfiguration ) { KvStateRegistry kvStateRegistry = new KvStateRegistry ( ) ; QueryableStateConfiguration qsConfig = taskManagerServicesConfiguration . getQueryableStateConfig ( ) ; KvStateClientProxy kvClientProxy = null ; KvStateServer kvStateServer = null ; if ( qsConfig != null ) { int numProxyServerNetworkThreads = qsConfig . numProxyServerThreads ( ) == 0 ? taskManagerServicesConfiguration . getNumberOfSlots ( ) : qsConfig . numProxyServerThreads ( ) ; int numProxyServerQueryThreads = qsConfig . numProxyQueryThreads ( ) == 0 ? taskManagerServicesConfiguration . getNumberOfSlots ( ) : qsConfig . numProxyQueryThreads ( ) ; kvClientProxy = QueryableStateUtils . createKvStateClientProxy ( taskManagerServicesConfiguration . getTaskManagerAddress ( ) , qsConfig . getProxyPortRange ( ) , numProxyServerNetworkThreads , numProxyServerQueryThreads , new DisabledKvStateRequestStats ( ) ) ; int numStateServerNetworkThreads = qsConfig . numStateServerThreads ( ) == 0 ? taskManagerServicesConfiguration . getNumberOfSlots ( ) : qsConfig . numStateServerThreads ( ) ; int numStateServerQueryThreads = qsConfig . numStateQueryThreads ( ) == 0 ? taskManagerServicesConfiguration . getNumberOfSlots ( ) : qsConfig . numStateQueryThreads ( ) ; kvStateServer = QueryableStateUtils . createKvStateServer ( taskManagerServicesConfiguration . getTaskManagerAddress ( ) , qsConfig . getStateServerPortRange ( ) , numStateServerNetworkThreads , numStateServerQueryThreads , kvStateRegistry , new DisabledKvStateRequestStats ( ) ) ; } return new KvStateService ( kvStateRegistry , kvStateServer , kvClientProxy ) ; } | Creates and returns the KvState service . |
14,547 | public CompensatedSum add ( CompensatedSum other ) { double correctedSum = other . value ( ) + ( delta + other . delta ( ) ) ; double updatedValue = value + correctedSum ; double updatedDelta = correctedSum - ( updatedValue - value ) ; return new CompensatedSum ( updatedValue , updatedDelta ) ; } | Increments the Kahan sum by adding two sums and updating the correction term for reducing numeric errors . |
14,548 | public SortPartitionOperator < T > sortPartition ( int field , Order order ) { if ( useKeySelector ) { throw new InvalidProgramException ( "Expression keys cannot be appended after a KeySelector" ) ; } ensureSortableKey ( field ) ; keys . add ( new Keys . ExpressionKeys < > ( field , getType ( ) ) ) ; orders . add ( order ) ; return this ; } | Appends an additional sort order with the specified field in the specified order to the local partition sorting of the DataSet . |
14,549 | public Option < Long > getNumberOfAllocatedBytes ( ) throws NoSuchFieldException , IllegalAccessException { if ( directArenas != null ) { long numChunks = 0 ; for ( Object arena : directArenas ) { numChunks += getNumberOfAllocatedChunks ( arena , "qInit" ) ; numChunks += getNumberOfAllocatedChunks ( arena , "q000" ) ; numChunks += getNumberOfAllocatedChunks ( arena , "q025" ) ; numChunks += getNumberOfAllocatedChunks ( arena , "q050" ) ; numChunks += getNumberOfAllocatedChunks ( arena , "q075" ) ; numChunks += getNumberOfAllocatedChunks ( arena , "q100" ) ; } long allocatedBytes = numChunks * chunkSize ; return Option . apply ( allocatedBytes ) ; } else { return Option . empty ( ) ; } } | Returns the number of currently allocated bytes . |
14,550 | private long getNumberOfAllocatedChunks ( Object arena , String chunkListFieldName ) throws NoSuchFieldException , IllegalAccessException { Field chunkListField = arena . getClass ( ) . getSuperclass ( ) . getDeclaredField ( chunkListFieldName ) ; chunkListField . setAccessible ( true ) ; Object chunkList = chunkListField . get ( arena ) ; Field headChunkField = chunkList . getClass ( ) . getDeclaredField ( "head" ) ; headChunkField . setAccessible ( true ) ; Object headChunk = headChunkField . get ( chunkList ) ; if ( headChunk == null ) { return 0 ; } else { int numChunks = 0 ; Object current = headChunk ; while ( current != null ) { Field nextChunkField = headChunk . getClass ( ) . getDeclaredField ( "next" ) ; nextChunkField . setAccessible ( true ) ; current = nextChunkField . get ( current ) ; numChunks ++ ; } return numChunks ; } } | Returns the number of allocated bytes of the given arena and chunk list . |
14,551 | private void validateKeyTypes ( int [ ] keyFieldIndices ) { final TypeInformation < ? > [ ] types = getFieldTypes ( ) ; for ( int keyFieldIndex : keyFieldIndices ) { final TypeInformation < ? > type = types [ keyFieldIndex ] ; if ( ! TypeCheckUtils . isSimpleStringRepresentation ( type ) ) { throw new ValidationException ( "Only simple types that can be safely converted into a string representation " + "can be used as keys. But was: " + type ) ; } } } | Validate the types that are used for conversion to string . |
14,552 | protected void addPathRecursively ( final File sourcePath , final Path targetPath , final ContainerSpecification env ) throws IOException { final java . nio . file . Path sourceRoot = sourcePath . toPath ( ) . getParent ( ) ; Files . walkFileTree ( sourcePath . toPath ( ) , new SimpleFileVisitor < java . nio . file . Path > ( ) { public FileVisitResult visitFile ( java . nio . file . Path file , BasicFileAttributes attrs ) throws IOException { java . nio . file . Path relativePath = sourceRoot . relativize ( file ) ; ContainerSpecification . Artifact . Builder artifact = ContainerSpecification . Artifact . newBuilder ( ) . setSource ( new Path ( file . toUri ( ) ) ) . setDest ( new Path ( targetPath , relativePath . toString ( ) ) ) . setExecutable ( Files . isExecutable ( file ) ) . setCachable ( true ) . setExtract ( false ) ; env . getArtifacts ( ) . add ( artifact . build ( ) ) ; return super . visitFile ( file , attrs ) ; } } ) ; } | Add a path recursively to the container specification . |
14,553 | public void subscribeToEvent ( ResultPartitionID partitionId , EventListener < TaskEvent > eventListener , Class < ? extends TaskEvent > eventType ) { checkNotNull ( partitionId ) ; checkNotNull ( eventListener ) ; checkNotNull ( eventType ) ; TaskEventHandler taskEventHandler ; synchronized ( registeredHandlers ) { taskEventHandler = registeredHandlers . get ( partitionId ) ; } if ( taskEventHandler == null ) { throw new IllegalStateException ( "Partition " + partitionId + " not registered at task event dispatcher." ) ; } taskEventHandler . subscribe ( eventListener , eventType ) ; } | Subscribes a listener to this dispatcher for events on a partition . |
14,554 | public static PyObject adapt ( Object o ) { if ( o instanceof PyObject ) { return ( PyObject ) o ; } return Py . java2py ( o ) ; } | Convert java object to its corresponding PyObject representation . |
14,555 | public static void reset ( final Collection < MasterTriggerRestoreHook < ? > > hooks , final Logger log ) throws FlinkException { for ( MasterTriggerRestoreHook < ? > hook : hooks ) { final String id = hook . getIdentifier ( ) ; try { hook . reset ( ) ; } catch ( Throwable t ) { ExceptionUtils . rethrowIfFatalErrorOrOOM ( t ) ; throw new FlinkException ( "Error while resetting checkpoint master hook '" + id + '\'' , t ) ; } } } | Resets the master hooks . |
14,556 | public static void close ( final Collection < MasterTriggerRestoreHook < ? > > hooks , final Logger log ) throws FlinkException { for ( MasterTriggerRestoreHook < ? > hook : hooks ) { try { hook . close ( ) ; } catch ( Throwable t ) { log . warn ( "Failed to cleanly close a checkpoint master hook (" + hook . getIdentifier ( ) + ")" , t ) ; } } } | Closes the master hooks . |
14,557 | public static List < MasterState > triggerMasterHooks ( Collection < MasterTriggerRestoreHook < ? > > hooks , long checkpointId , long timestamp , Executor executor , Time timeout ) throws FlinkException { final ArrayList < MasterState > states = new ArrayList < > ( hooks . size ( ) ) ; for ( MasterTriggerRestoreHook < ? > hook : hooks ) { MasterState state = triggerHook ( hook , checkpointId , timestamp , executor , timeout ) ; if ( state != null ) { states . add ( state ) ; } } states . trimToSize ( ) ; return states ; } | Triggers all given master hooks and returns state objects for each hook that produced a state . |
14,558 | public static void restoreMasterHooks ( final Map < String , MasterTriggerRestoreHook < ? > > masterHooks , final Collection < MasterState > states , final long checkpointId , final boolean allowUnmatchedState , final Logger log ) throws FlinkException { if ( states == null || states . isEmpty ( ) || masterHooks == null || masterHooks . isEmpty ( ) ) { log . info ( "No master state to restore" ) ; return ; } log . info ( "Calling master restore hooks" ) ; final LinkedHashMap < String , MasterTriggerRestoreHook < ? > > allHooks = new LinkedHashMap < > ( masterHooks ) ; final ArrayList < Tuple2 < MasterTriggerRestoreHook < ? > , Object > > hooksAndStates = new ArrayList < > ( ) ; for ( MasterState state : states ) { if ( state != null ) { final String name = state . name ( ) ; final MasterTriggerRestoreHook < ? > hook = allHooks . remove ( name ) ; if ( hook != null ) { log . debug ( "Found state to restore for hook '{}'" , name ) ; Object deserializedState = deserializeState ( state , hook ) ; hooksAndStates . add ( new Tuple2 < > ( hook , deserializedState ) ) ; } else if ( ! allowUnmatchedState ) { throw new IllegalStateException ( "Found state '" + state . name ( ) + "' which is not resumed by any hook." ) ; } else { log . info ( "Dropping unmatched state from '{}'" , name ) ; } } } for ( Tuple2 < MasterTriggerRestoreHook < ? > , Object > hookAndState : hooksAndStates ) { restoreHook ( hookAndState . f1 , hookAndState . f0 , checkpointId ) ; } for ( MasterTriggerRestoreHook < ? > hook : allHooks . values ( ) ) { restoreHook ( null , hook , checkpointId ) ; } } | Calls the restore method given checkpoint master hooks and passes the given master state to them where state with a matching name is found . |
14,559 | public static < T > MasterTriggerRestoreHook < T > wrapHook ( MasterTriggerRestoreHook < T > hook , ClassLoader userClassLoader ) { return new WrappedMasterHook < > ( hook , userClassLoader ) ; } | Wraps a hook such that the user - code classloader is applied when the hook is invoked . |
14,560 | @ SuppressWarnings ( "unchecked" ) public static < X > X deserializeFunction ( RuntimeContext context , byte [ ] serFun ) throws FlinkException { if ( ! jythonInitialized ) { String path = context . getDistributedCache ( ) . getFile ( PythonConstants . FLINK_PYTHON_DC_ID ) . getAbsolutePath ( ) ; String scriptName = PythonStreamExecutionEnvironment . PythonJobParameters . getScriptName ( context . getExecutionConfig ( ) . getGlobalJobParameters ( ) ) ; try { initPythonInterpreter ( new String [ ] { Paths . get ( path , scriptName ) . toString ( ) } , path , scriptName ) ; } catch ( Exception e ) { try { LOG . error ( "Initialization of jython failed." , e ) ; throw new FlinkRuntimeException ( "Initialization of jython failed." , e ) ; } catch ( Exception ie ) { LOG . error ( "Initialization of jython failed. Could not print original stacktrace." , ie ) ; throw new FlinkRuntimeException ( "Initialization of jython failed. Could not print original stacktrace." ) ; } } } try { return ( X ) SerializationUtils . deserializeObject ( serFun ) ; } catch ( IOException | ClassNotFoundException ex ) { throw new FlinkException ( "Deserialization of user-function failed." , ex ) ; } } | Deserialize the given python function . If the functions class definition cannot be found we assume that this is the first invocation of this method for a given job and load the python script containing the class definition via jython . |
14,561 | public static void initAndExecPythonScript ( PythonEnvironmentFactory factory , java . nio . file . Path scriptDirectory , String scriptName , String [ ] args ) { String [ ] fullArgs = new String [ args . length + 1 ] ; fullArgs [ 0 ] = scriptDirectory . resolve ( scriptName ) . toString ( ) ; System . arraycopy ( args , 0 , fullArgs , 1 , args . length ) ; PythonInterpreter pythonInterpreter = initPythonInterpreter ( fullArgs , scriptDirectory . toUri ( ) . getPath ( ) , scriptName ) ; pythonInterpreter . set ( "__flink_env_factory__" , factory ) ; pythonInterpreter . exec ( scriptName + ".main(__flink_env_factory__)" ) ; } | Initializes the Jython interpreter and executes a python script . |
14,562 | private static void setRequiredProperties ( Properties zkProps ) { if ( zkProps . getProperty ( "clientPort" ) == null ) { zkProps . setProperty ( "clientPort" , String . valueOf ( DEFAULT_ZOOKEEPER_CLIENT_PORT ) ) ; LOG . warn ( "No 'clientPort' configured. Set to '{}'." , DEFAULT_ZOOKEEPER_CLIENT_PORT ) ; } if ( zkProps . getProperty ( "initLimit" ) == null ) { zkProps . setProperty ( "initLimit" , String . valueOf ( DEFAULT_ZOOKEEPER_INIT_LIMIT ) ) ; LOG . warn ( "No 'initLimit' configured. Set to '{}'." , DEFAULT_ZOOKEEPER_INIT_LIMIT ) ; } if ( zkProps . getProperty ( "syncLimit" ) == null ) { zkProps . setProperty ( "syncLimit" , String . valueOf ( DEFAULT_ZOOKEEPER_SYNC_LIMIT ) ) ; LOG . warn ( "No 'syncLimit' configured. Set to '{}'." , DEFAULT_ZOOKEEPER_SYNC_LIMIT ) ; } if ( zkProps . getProperty ( "dataDir" ) == null ) { String dataDir = String . format ( "%s/%s/zookeeper" , System . getProperty ( "java.io.tmpdir" ) , UUID . randomUUID ( ) . toString ( ) ) ; zkProps . setProperty ( "dataDir" , dataDir ) ; LOG . warn ( "No 'dataDir' configured. Set to '{}'." , dataDir ) ; } int peerPort = DEFAULT_ZOOKEEPER_PEER_PORT ; int leaderPort = DEFAULT_ZOOKEEPER_LEADER_PORT ; for ( Map . Entry < Object , Object > entry : zkProps . entrySet ( ) ) { String key = ( String ) entry . getKey ( ) ; if ( entry . getKey ( ) . toString ( ) . startsWith ( "server." ) ) { String value = ( String ) entry . getValue ( ) ; String [ ] parts = value . split ( ":" ) ; if ( parts . length == 1 ) { String address = String . format ( "%s:%d:%d" , parts [ 0 ] , peerPort , leaderPort ) ; zkProps . setProperty ( key , address ) ; LOG . info ( "Set peer and leader port of '{}': '{}' => '{}'." , key , value , address ) ; } else if ( parts . length == 2 ) { String address = String . format ( "%s:%d:%d" , parts [ 0 ] , Integer . valueOf ( parts [ 1 ] ) , leaderPort ) ; zkProps . setProperty ( key , address ) ; LOG . info ( "Set peer port of '{}': '{}' => '{}'." 
, key , value , address ) ; } } } } | Sets required properties to reasonable defaults and logs it . |
14,563 | private static void writeMyIdToDataDir ( Properties zkProps , int id ) throws IOException { if ( zkProps . getProperty ( "dataDir" ) == null ) { throw new IllegalConfigurationException ( "No dataDir configured." ) ; } File dataDir = new File ( zkProps . getProperty ( "dataDir" ) ) ; if ( ! dataDir . isDirectory ( ) && ! dataDir . mkdirs ( ) ) { throw new IOException ( "Cannot create dataDir '" + dataDir + "'." ) ; } dataDir . deleteOnExit ( ) ; LOG . info ( "Writing {} to myid file in 'dataDir'." , id ) ; try ( FileWriter writer = new FileWriter ( new File ( dataDir , "myid" ) ) ) { writer . write ( String . valueOf ( id ) ) ; } } | Write myid file to the dataDir in the given ZooKeeper configuration . |
14,564 | public static String generateRuntimeName ( Class < ? > clazz , String [ ] fields ) { String className = clazz . getSimpleName ( ) ; if ( null == fields ) { return className + "(*)" ; } else { return className + "(" + String . join ( ", " , fields ) + ")" ; } } | Returns the table connector name used for logging and web UI . |
14,565 | public MesosConfiguration withFrameworkInfo ( Protos . FrameworkInfo . Builder frameworkInfo ) { return new MesosConfiguration ( masterUrl , frameworkInfo , credential ) ; } | Revise the configuration with updated framework info . |
14,566 | public Set < String > roles ( ) { return frameworkInfo . hasRole ( ) && ! "*" . equals ( frameworkInfo . getRole ( ) ) ? Collections . singleton ( frameworkInfo . getRole ( ) ) : Collections . emptySet ( ) ; } | Gets the roles associated with the framework . |
14,567 | public SchedulerDriver createDriver ( Scheduler scheduler , boolean implicitAcknowledgements ) { MesosSchedulerDriver schedulerDriver ; if ( this . credential ( ) . isDefined ( ) ) { schedulerDriver = new MesosSchedulerDriver ( scheduler , frameworkInfo . build ( ) , this . masterUrl ( ) , implicitAcknowledgements , this . credential ( ) . get ( ) . build ( ) ) ; } else { schedulerDriver = new MesosSchedulerDriver ( scheduler , frameworkInfo . build ( ) , this . masterUrl ( ) , implicitAcknowledgements ) ; } return schedulerDriver ; } | Create the Mesos scheduler driver based on this configuration . |
14,568 | public static void logMesosConfig ( Logger log , MesosConfiguration config ) { Map < String , String > env = System . getenv ( ) ; Protos . FrameworkInfo . Builder info = config . frameworkInfo ( ) ; log . info ( "--------------------------------------------------------------------------------" ) ; log . info ( " Mesos Info:" ) ; log . info ( " Master URL: {}" , config . masterUrl ( ) ) ; log . info ( " Framework Info:" ) ; log . info ( " ID: {}" , info . hasId ( ) ? info . getId ( ) . getValue ( ) : "(none)" ) ; log . info ( " Name: {}" , info . hasName ( ) ? info . getName ( ) : "(none)" ) ; log . info ( " Failover Timeout (secs): {}" , info . getFailoverTimeout ( ) ) ; log . info ( " Role: {}" , info . hasRole ( ) ? info . getRole ( ) : "(none)" ) ; log . info ( " Capabilities: {}" , info . getCapabilitiesList ( ) . size ( ) > 0 ? info . getCapabilitiesList ( ) : "(none)" ) ; log . info ( " Principal: {}" , info . hasPrincipal ( ) ? info . getPrincipal ( ) : "(none)" ) ; log . info ( " Host: {}" , info . hasHostname ( ) ? info . getHostname ( ) : "(none)" ) ; if ( env . containsKey ( "LIBPROCESS_IP" ) ) { log . info ( " LIBPROCESS_IP: {}" , env . get ( "LIBPROCESS_IP" ) ) ; } if ( env . containsKey ( "LIBPROCESS_PORT" ) ) { log . info ( " LIBPROCESS_PORT: {}" , env . get ( "LIBPROCESS_PORT" ) ) ; } log . info ( " Web UI: {}" , info . hasWebuiUrl ( ) ? info . getWebuiUrl ( ) : "(none)" ) ; log . info ( "--------------------------------------------------------------------------------" ) ; } | A utility method to log relevant Mesos connection info . |
14,569 | public static Thread addShutdownHook ( final AutoCloseable service , final String serviceName , final Logger logger ) { checkNotNull ( service ) ; checkNotNull ( logger ) ; final Thread shutdownHook = new Thread ( ( ) -> { try { service . close ( ) ; } catch ( Throwable t ) { logger . error ( "Error during shutdown of {} via JVM shutdown hook." , serviceName , t ) ; } } , serviceName + " shutdown hook" ) ; return addShutdownHookThread ( shutdownHook , serviceName , logger ) ? shutdownHook : null ; } | Adds a shutdown hook to the JVM and returns the Thread which has been registered . |
14,570 | public static boolean addShutdownHookThread ( final Thread shutdownHook , final String serviceName , final Logger logger ) { checkNotNull ( shutdownHook ) ; checkNotNull ( logger ) ; try { Runtime . getRuntime ( ) . addShutdownHook ( shutdownHook ) ; return true ; } catch ( IllegalStateException e ) { } catch ( Throwable t ) { logger . error ( "Cannot register shutdown hook that cleanly terminates {}." , serviceName , t ) ; } return false ; } | Adds a shutdown hook to the JVM . |
14,571 | public static void removeShutdownHook ( final Thread shutdownHook , final String serviceName , final Logger logger ) { if ( shutdownHook == null || shutdownHook == Thread . currentThread ( ) ) { return ; } checkNotNull ( logger ) ; try { Runtime . getRuntime ( ) . removeShutdownHook ( shutdownHook ) ; } catch ( IllegalStateException e ) { logger . debug ( "Unable to remove shutdown hook for {}, shutdown already in progress" , serviceName , e ) ; } catch ( Throwable t ) { logger . warn ( "Exception while un-registering {}'s shutdown hook." , serviceName , t ) ; } } | Removes a shutdown hook from the JVM . |
14,572 | public void start ( final String initialOwnerAddress , final RpcService initialRpcService , final HighAvailabilityServices initialHighAvailabilityServices , final JobLeaderListener initialJobLeaderListener ) { if ( JobLeaderService . State . CREATED != state ) { throw new IllegalStateException ( "The service has already been started." ) ; } else { LOG . info ( "Start job leader service." ) ; this . ownerAddress = Preconditions . checkNotNull ( initialOwnerAddress ) ; this . rpcService = Preconditions . checkNotNull ( initialRpcService ) ; this . highAvailabilityServices = Preconditions . checkNotNull ( initialHighAvailabilityServices ) ; this . jobLeaderListener = Preconditions . checkNotNull ( initialJobLeaderListener ) ; state = JobLeaderService . State . STARTED ; } } | Start the job leader service with the given services . |
14,573 | public void stop ( ) throws Exception { LOG . info ( "Stop job leader service." ) ; if ( JobLeaderService . State . STARTED == state ) { for ( Tuple2 < LeaderRetrievalService , JobLeaderService . JobManagerLeaderListener > leaderRetrievalServiceEntry : jobLeaderServices . values ( ) ) { LeaderRetrievalService leaderRetrievalService = leaderRetrievalServiceEntry . f0 ; JobLeaderService . JobManagerLeaderListener jobManagerLeaderListener = leaderRetrievalServiceEntry . f1 ; jobManagerLeaderListener . stop ( ) ; leaderRetrievalService . stop ( ) ; } jobLeaderServices . clear ( ) ; } state = JobLeaderService . State . STOPPED ; } | Stop the job leader services . This implies stopping all leader retrieval services for the different jobs and their leader retrieval listeners . |
14,574 | public void removeJob ( JobID jobId ) throws Exception { Preconditions . checkState ( JobLeaderService . State . STARTED == state , "The service is currently not running." ) ; Tuple2 < LeaderRetrievalService , JobLeaderService . JobManagerLeaderListener > entry = jobLeaderServices . remove ( jobId ) ; if ( entry != null ) { LOG . info ( "Remove job {} from job leader monitoring." , jobId ) ; LeaderRetrievalService leaderRetrievalService = entry . f0 ; JobLeaderService . JobManagerLeaderListener jobManagerLeaderListener = entry . f1 ; leaderRetrievalService . stop ( ) ; jobManagerLeaderListener . stop ( ) ; } } | Remove the given job from being monitored by the job leader service . |
14,575 | public void addJob ( final JobID jobId , final String defaultTargetAddress ) throws Exception { Preconditions . checkState ( JobLeaderService . State . STARTED == state , "The service is currently not running." ) ; LOG . info ( "Add job {} for job leader monitoring." , jobId ) ; final LeaderRetrievalService leaderRetrievalService = highAvailabilityServices . getJobManagerLeaderRetriever ( jobId , defaultTargetAddress ) ; JobLeaderService . JobManagerLeaderListener jobManagerLeaderListener = new JobManagerLeaderListener ( jobId ) ; final Tuple2 < LeaderRetrievalService , JobManagerLeaderListener > oldEntry = jobLeaderServices . put ( jobId , Tuple2 . of ( leaderRetrievalService , jobManagerLeaderListener ) ) ; if ( oldEntry != null ) { oldEntry . f0 . stop ( ) ; oldEntry . f1 . stop ( ) ; } leaderRetrievalService . start ( jobManagerLeaderListener ) ; } | Add the given job to be monitored . This means that the service tries to detect leaders for this job and then tries to establish a connection to it . |
14,576 | public void reconnect ( final JobID jobId ) { Preconditions . checkNotNull ( jobId , "JobID must not be null." ) ; final Tuple2 < LeaderRetrievalService , JobManagerLeaderListener > jobLeaderService = jobLeaderServices . get ( jobId ) ; if ( jobLeaderService != null ) { jobLeaderService . f1 . reconnect ( ) ; } else { LOG . info ( "Cannot reconnect to job {} because it is not registered." , jobId ) ; } } | Triggers reconnection to the last known leader of the given job . |
14,577 | public boolean containsJob ( JobID jobId ) { Preconditions . checkState ( JobLeaderService . State . STARTED == state , "The service is currently not running." ) ; return jobLeaderServices . containsKey ( jobId ) ; } | Check whether the service monitors the given job . |
14,578 | private void saveHandleInState ( final long checkpointId , final long timestamp ) throws Exception { if ( out != null ) { int subtaskIdx = getRuntimeContext ( ) . getIndexOfThisSubtask ( ) ; StreamStateHandle handle = out . closeAndGetHandle ( ) ; PendingCheckpoint pendingCheckpoint = new PendingCheckpoint ( checkpointId , subtaskIdx , timestamp , handle ) ; if ( pendingCheckpoints . contains ( pendingCheckpoint ) ) { handle . discardState ( ) ; } else { pendingCheckpoints . add ( pendingCheckpoint ) ; } out = null ; } } | Called when a checkpoint barrier arrives . It closes any open streams to the backend and marks them as pending for committing to the external third - party storage system . |
14,579 | public Statistics columnStats ( String columnName , ColumnStats columnStats ) { Map < String , String > map = normalizeColumnStats ( columnStats ) ; this . columnStats . put ( columnName , map ) ; return this ; } | Sets statistics for a column . Overwrites all existing statistics for this column . |
14,580 | public Statistics columnDistinctCount ( String columnName , Long ndv ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( DISTINCT_COUNT , String . valueOf ( ndv ) ) ; return this ; } | Sets the number of distinct values statistic for the given column . |
14,581 | public Statistics columnNullCount ( String columnName , Long nullCount ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( NULL_COUNT , String . valueOf ( nullCount ) ) ; return this ; } | Sets the number of null values statistic for the given column . |
14,582 | public Statistics columnAvgLength ( String columnName , Double avgLen ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( AVG_LENGTH , String . valueOf ( avgLen ) ) ; return this ; } | Sets the average length statistic for the given column . |
14,583 | public Statistics columnMaxLength ( String columnName , Integer maxLen ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( MAX_LENGTH , String . valueOf ( maxLen ) ) ; return this ; } | Sets the maximum length statistic for the given column . |
14,584 | public Statistics columnMaxValue ( String columnName , Number max ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( MAX_VALUE , String . valueOf ( max ) ) ; return this ; } | Sets the maximum value statistic for the given column . |
14,585 | public Statistics columnMinValue ( String columnName , Number min ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( MIN_VALUE , String . valueOf ( min ) ) ; return this ; } | Sets the minimum value statistic for the given column . |
14,586 | public int size ( ) { if ( allElementsInCache ) { return orderedCache . size ( ) ; } else { int count = 0 ; try ( final RocksBytesIterator iterator = orderedBytesIterator ( ) ) { while ( iterator . hasNext ( ) ) { iterator . next ( ) ; ++ count ; } } return count ; } } | This implementation comes at a relatively high cost per invocation . It should not be called repeatedly when it is clear that the value did not change . Currently this is only truly used to realize certain higher - level tests . |
14,587 | private void generateAllFailoverRegion ( List < ExecutionJobVertex > newJobVerticesTopological ) { final IdentityHashMap < ExecutionVertex , ArrayList < ExecutionVertex > > vertexToRegion = new IdentityHashMap < > ( ) ; final IdentityHashMap < ArrayList < ExecutionVertex > , Object > distinctRegions = new IdentityHashMap < > ( ) ; for ( ExecutionJobVertex ejv : newJobVerticesTopological ) { if ( ejv . getCoLocationGroup ( ) != null ) { makeAllOneRegion ( newJobVerticesTopological ) ; return ; } final List < IntermediateResult > inputs = ejv . getInputs ( ) ; final int numInputs = inputs . size ( ) ; boolean hasPipelinedInputs = false ; for ( IntermediateResult input : inputs ) { if ( input . getResultType ( ) . isPipelined ( ) ) { hasPipelinedInputs = true ; break ; } } if ( hasPipelinedInputs ) { for ( ExecutionVertex ev : ejv . getTaskVertices ( ) ) { ArrayList < ExecutionVertex > thisRegion = null ; for ( int inputNum = 0 ; inputNum < numInputs ; inputNum ++ ) { if ( inputs . get ( inputNum ) . getResultType ( ) . isPipelined ( ) ) { for ( ExecutionEdge edge : ev . getInputEdges ( inputNum ) ) { final ExecutionVertex predecessor = edge . getSource ( ) . getProducer ( ) ; final ArrayList < ExecutionVertex > predecessorRegion = vertexToRegion . get ( predecessor ) ; if ( thisRegion != null ) { if ( predecessorRegion != thisRegion ) { predecessorRegion . addAll ( thisRegion ) ; distinctRegions . remove ( thisRegion ) ; thisRegion = predecessorRegion ; for ( ExecutionVertex inPredRegion : predecessorRegion ) { vertexToRegion . put ( inPredRegion , thisRegion ) ; } } } else if ( predecessor != null ) { thisRegion = predecessorRegion ; thisRegion . add ( ev ) ; vertexToRegion . put ( ev , thisRegion ) ; } else { throw new FlinkRuntimeException ( "bug in the logic to construct the pipelined failover regions" ) ; } } } } } } else { for ( ExecutionVertex ev : ejv . getTaskVertices ( ) ) { ArrayList < ExecutionVertex > region = new ArrayList < > ( 1 ) ; region . 
add ( ev ) ; vertexToRegion . put ( ev , region ) ; distinctRegions . put ( region , null ) ; } } } LOG . info ( "Creating {} individual failover regions for job {} ({})" , distinctRegions . size ( ) , executionGraph . getJobName ( ) , executionGraph . getJobID ( ) ) ; for ( List < ExecutionVertex > region : distinctRegions . keySet ( ) ) { final FailoverRegion failoverRegion = createFailoverRegion ( executionGraph , region ) ; for ( ExecutionVertex ev : region ) { this . vertexToRegion . put ( ev , failoverRegion ) ; } } } | Generate all the FailoverRegion from the new added job vertexes |
14,588 | public void reset ( ) { this . cursor = fixedSize ; for ( int i = 0 ; i < nullBitsSizeInBytes ; i += 8 ) { segment . putLong ( i , 0L ) ; } this . segment . putInt ( 0 , numElements ) ; } | First reset . |
14,589 | public static InetAddress findConnectingAddress ( InetSocketAddress targetAddress , long maxWaitMillis , long startLoggingAfter ) throws IOException { if ( targetAddress == null ) { throw new NullPointerException ( "targetAddress must not be null" ) ; } if ( maxWaitMillis <= 0 ) { throw new IllegalArgumentException ( "Max wait time must be positive" ) ; } final long startTimeNanos = System . nanoTime ( ) ; long currentSleepTime = MIN_SLEEP_TIME ; long elapsedTimeMillis = 0 ; final List < AddressDetectionState > strategies = Collections . unmodifiableList ( Arrays . asList ( AddressDetectionState . LOCAL_HOST , AddressDetectionState . ADDRESS , AddressDetectionState . FAST_CONNECT , AddressDetectionState . SLOW_CONNECT ) ) ; while ( elapsedTimeMillis < maxWaitMillis ) { boolean logging = elapsedTimeMillis >= startLoggingAfter ; if ( logging ) { LOG . info ( "Trying to connect to " + targetAddress ) ; } for ( AddressDetectionState strategy : strategies ) { InetAddress address = findAddressUsingStrategy ( strategy , targetAddress , logging ) ; if ( address != null ) { return address ; } } elapsedTimeMillis = ( System . nanoTime ( ) - startTimeNanos ) / 1_000_000 ; long toWait = Math . min ( maxWaitMillis - elapsedTimeMillis , currentSleepTime ) ; if ( toWait > 0 ) { if ( logging ) { LOG . info ( "Could not connect. Waiting for {} msecs before next attempt" , toWait ) ; } else { LOG . debug ( "Could not connect. Waiting for {} msecs before next attempt" , toWait ) ; } try { Thread . sleep ( toWait ) ; } catch ( InterruptedException e ) { throw new IOException ( "Connection attempts have been interrupted." ) ; } } currentSleepTime = Math . min ( 2 * currentSleepTime , MAX_SLEEP_TIME ) ; } LOG . warn ( "Could not connect to {}. Selecting a local address using heuristics." , targetAddress ) ; InetAddress heuristic = findAddressUsingStrategy ( AddressDetectionState . HEURISTIC , targetAddress , true ) ; if ( heuristic != null ) { return heuristic ; } else { LOG . 
warn ( "Could not find any IPv4 address that is not loopback or link-local. Using localhost address." ) ; return InetAddress . getLocalHost ( ) ; } } | Finds the local network address from which this machine can connect to the target address . This method tries to establish a proper network connection to the given target so it only succeeds if the target socket address actually accepts connections . The method tries various strategies multiple times and uses an exponential backoff timer between tries . |
14,590 | private static InetAddress findAddressUsingStrategy ( AddressDetectionState strategy , InetSocketAddress targetAddress , boolean logging ) throws IOException { if ( strategy == AddressDetectionState . LOCAL_HOST ) { InetAddress localhostName ; try { localhostName = InetAddress . getLocalHost ( ) ; } catch ( UnknownHostException uhe ) { LOG . warn ( "Could not resolve local hostname to an IP address: {}" , uhe . getMessage ( ) ) ; return null ; } if ( tryToConnect ( localhostName , targetAddress , strategy . getTimeout ( ) , logging ) ) { LOG . debug ( "Using InetAddress.getLocalHost() immediately for the connecting address" ) ; return localhostName ; } else { return null ; } } final InetAddress address = targetAddress . getAddress ( ) ; if ( address == null ) { return null ; } final byte [ ] targetAddressBytes = address . getAddress ( ) ; Enumeration < NetworkInterface > e = NetworkInterface . getNetworkInterfaces ( ) ; while ( e . hasMoreElements ( ) ) { NetworkInterface netInterface = e . nextElement ( ) ; Enumeration < InetAddress > ee = netInterface . getInetAddresses ( ) ; while ( ee . hasMoreElements ( ) ) { InetAddress interfaceAddress = ee . nextElement ( ) ; switch ( strategy ) { case ADDRESS : if ( hasCommonPrefix ( targetAddressBytes , interfaceAddress . getAddress ( ) ) ) { LOG . debug ( "Target address {} and local address {} share prefix - trying to connect." , targetAddress , interfaceAddress ) ; if ( tryToConnect ( interfaceAddress , targetAddress , strategy . getTimeout ( ) , logging ) ) { return tryLocalHostBeforeReturning ( interfaceAddress , targetAddress , logging ) ; } } break ; case FAST_CONNECT : case SLOW_CONNECT : LOG . debug ( "Trying to connect to {} from local address {} with timeout {}" , targetAddress , interfaceAddress , strategy . getTimeout ( ) ) ; if ( tryToConnect ( interfaceAddress , targetAddress , strategy . 
getTimeout ( ) , logging ) ) { return tryLocalHostBeforeReturning ( interfaceAddress , targetAddress , logging ) ; } break ; case HEURISTIC : if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Choosing InetAddress.getLocalHost() address as a heuristic." ) ; } return InetAddress . getLocalHost ( ) ; default : throw new RuntimeException ( "Unsupported strategy: " + strategy ) ; } } } return null ; } | Try to find a local address which allows as to connect to the targetAddress using the given strategy . |
14,591 | public void add ( BufferOrEvent boe ) throws IOException { try { ByteBuffer contents ; if ( boe . isBuffer ( ) ) { Buffer buf = boe . getBuffer ( ) ; contents = buf . getNioBufferReadable ( ) ; } else { contents = EventSerializer . toSerializedEvent ( boe . getEvent ( ) ) ; } headBuffer . clear ( ) ; headBuffer . putInt ( boe . getChannelIndex ( ) ) ; headBuffer . putInt ( contents . remaining ( ) ) ; headBuffer . put ( ( byte ) ( boe . isBuffer ( ) ? 0 : 1 ) ) ; headBuffer . flip ( ) ; bytesWritten += ( headBuffer . remaining ( ) + contents . remaining ( ) ) ; FileUtils . writeCompletely ( currentChannel , headBuffer ) ; FileUtils . writeCompletely ( currentChannel , contents ) ; } finally { if ( boe . isBuffer ( ) ) { boe . getBuffer ( ) . recycleBuffer ( ) ; } } } | Adds a buffer or event to the sequence of spilled buffers and events . |
14,592 | public static BaseRowKeySelector getBaseRowSelector ( int [ ] keyFields , BaseRowTypeInfo rowType ) { if ( keyFields . length > 0 ) { InternalType [ ] inputFieldTypes = rowType . getInternalTypes ( ) ; String [ ] inputFieldNames = rowType . getFieldNames ( ) ; InternalType [ ] keyFieldTypes = new InternalType [ keyFields . length ] ; String [ ] keyFieldNames = new String [ keyFields . length ] ; for ( int i = 0 ; i < keyFields . length ; ++ i ) { keyFieldTypes [ i ] = inputFieldTypes [ keyFields [ i ] ] ; keyFieldNames [ i ] = inputFieldNames [ keyFields [ i ] ] ; } RowType returnType = new RowType ( keyFieldTypes , keyFieldNames ) ; RowType inputType = new RowType ( inputFieldTypes , rowType . getFieldNames ( ) ) ; GeneratedProjection generatedProjection = ProjectionCodeGenerator . generateProjection ( CodeGeneratorContext . apply ( new TableConfig ( ) ) , "KeyProjection" , inputType , returnType , keyFields ) ; BaseRowTypeInfo keyRowType = returnType . toTypeInfo ( ) ; TypeCheckUtils . validateEqualsHashCode ( "grouping" , keyRowType ) ; return new BinaryRowKeySelector ( keyRowType , generatedProjection ) ; } else { return NullBinaryRowKeySelector . INSTANCE ; } } | Create a BaseRowKeySelector to extract keys from DataStream which type is BaseRowTypeInfo . |
14,593 | public void close ( ) { synchronized ( this ) { if ( this . closed ) { return ; } this . closed = true ; } this . numRecordsInBuffer = 0 ; this . numRecordsReturned = 0 ; for ( int i = this . fullSegments . size ( ) - 1 ; i >= 0 ; i -- ) { this . emptySegments . add ( this . fullSegments . remove ( i ) ) ; } this . memoryManager . release ( this . emptySegments ) ; this . emptySegments . clear ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Block Resettable Iterator closed." ) ; } } | This method closes the iterator and releases all resources . This method works both as a regular shutdown and as a canceling method . The method may be called multiple times and will not produce an error . |
14,594 | private Map < Path , FileStatus > listEligibleFiles ( FileSystem fileSystem , Path path ) throws IOException { final FileStatus [ ] statuses ; try { statuses = fileSystem . listStatus ( path ) ; } catch ( IOException e ) { return Collections . emptyMap ( ) ; } if ( statuses == null ) { LOG . warn ( "Path does not exist: {}" , path ) ; return Collections . emptyMap ( ) ; } else { Map < Path , FileStatus > files = new HashMap < > ( ) ; for ( FileStatus status : statuses ) { if ( ! status . isDir ( ) ) { Path filePath = status . getPath ( ) ; long modificationTime = status . getModificationTime ( ) ; if ( ! shouldIgnore ( filePath , modificationTime ) ) { files . put ( filePath , status ) ; } } else if ( format . getNestedFileEnumeration ( ) && format . acceptFile ( status ) ) { files . putAll ( listEligibleFiles ( fileSystem , status . getPath ( ) ) ) ; } } return files ; } } | Returns the paths of the files not yet processed . |
14,595 | private TimeWindow mergeWindow ( TimeWindow curWindow , TimeWindow other , Collection < TimeWindow > mergedWindow ) { if ( curWindow . intersects ( other ) ) { mergedWindow . add ( other ) ; return curWindow . cover ( other ) ; } else { return curWindow ; } } | Merge curWindow and other return a new window which covers curWindow and other if they are overlapped . Otherwise returns the curWindow itself . |
14,596 | protected Configuration applyCommandLineOptionsToConfiguration ( CommandLine commandLine ) throws FlinkException { final Configuration resultingConfiguration = new Configuration ( configuration ) ; if ( commandLine . hasOption ( addressOption . getOpt ( ) ) ) { String addressWithPort = commandLine . getOptionValue ( addressOption . getOpt ( ) ) ; InetSocketAddress jobManagerAddress = ClientUtils . parseHostPortAddress ( addressWithPort ) ; setJobManagerAddressInConfig ( resultingConfiguration , jobManagerAddress ) ; } if ( commandLine . hasOption ( zookeeperNamespaceOption . getOpt ( ) ) ) { String zkNamespace = commandLine . getOptionValue ( zookeeperNamespaceOption . getOpt ( ) ) ; resultingConfiguration . setString ( HighAvailabilityOptions . HA_CLUSTER_ID , zkNamespace ) ; } return resultingConfiguration ; } | Override configuration settings by specified command line options . |
14,597 | private String getInternal ( String key ) { Preconditions . checkArgument ( configuredOptions . containsKey ( key ) , "The configuration " + key + " has not been configured." ) ; return configuredOptions . get ( key ) ; } | Returns the value in string format with the given key . |
14,598 | protected boolean increaseBackoff ( ) { if ( currentBackoff < 0 ) { return false ; } if ( currentBackoff == 0 ) { currentBackoff = initialBackoff ; return true ; } else if ( currentBackoff < maxBackoff ) { currentBackoff = Math . min ( currentBackoff * 2 , maxBackoff ) ; return true ; } return false ; } | Increases the current backoff and returns whether the operation was successful . |
/**
 * Parses the value of a field from the byte array, taking care to properly
 * reset the state of this parser before parsing.
 *
 * <p>The start position within the byte array and the array's valid length
 * are given; the field's content is delimited by {@code delim}.
 *
 * @param bytes    the byte array containing the field
 * @param startPos the position where parsing starts
 * @param limit    the exclusive end of the valid region of the array
 * @param delim    the field delimiter
 * @param reuse    an object that may be reused to hold the parsed value
 * @return the position after the parsed field (semantics of parseField)
 */
public int resetErrorStateAndParse(byte[] bytes, int startPos, int limit, byte[] delim, T reuse) {
    // clear any error state left over from a previous parse attempt
    resetParserState();
    return parseField(bytes, startPos, limit, delim, reuse);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.