idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
14,500 | public void open ( Context < K , W > ctx ) throws Exception { this . ctx = ctx ; this . windowAssigner . open ( ctx ) ; } | Initialization method for the function . It is called before the actual working methods . |
14,501 | public SingleOutputStreamOperator < T > sum ( int positionToSum ) { return aggregate ( new SumAggregator < > ( positionToSum , input . getType ( ) , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that sums every window of the data stream at the given position . |
14,502 | public SingleOutputStreamOperator < T > sum ( String field ) { return aggregate ( new SumAggregator < > ( field , input . getType ( ) , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that sums every window of the pojo data stream at the given field for every window . |
14,503 | public SingleOutputStreamOperator < T > min ( int positionToMin ) { return aggregate ( new ComparableAggregator < > ( positionToMin , input . getType ( ) , AggregationFunction . AggregationType . MIN , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that gives the minimum value of every window of the data stream at the given position . |
14,504 | public SingleOutputStreamOperator < T > min ( String field ) { return aggregate ( new ComparableAggregator < > ( field , input . getType ( ) , AggregationFunction . AggregationType . MIN , false , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that gives the minimum value of the pojo data stream at the given field expression for every window . |
14,505 | public SingleOutputStreamOperator < T > max ( int positionToMax ) { return aggregate ( new ComparableAggregator < > ( positionToMax , input . getType ( ) , AggregationFunction . AggregationType . MAX , input . getExecutionConfig ( ) ) ) ; } | Applies an aggregation that gives the maximum value of every window of the data stream at the given position . |
14,506 | public SortedGrouping < T > withPartitioner ( Partitioner < ? > partitioner ) { Preconditions . checkNotNull ( partitioner ) ; getKeys ( ) . validateCustomPartitioner ( partitioner , null ) ; this . customPartitioner = partitioner ; return this ; } | Uses a custom partitioner for the grouping . |
14,507 | public static byte [ ] encodeUTF8 ( String str ) { byte [ ] bytes = allocateReuseBytes ( str . length ( ) * MAX_BYTES_PER_CHAR ) ; int len = encodeUTF8 ( str , bytes ) ; return Arrays . copyOf ( bytes , len ) ; } | This method must have the same result with JDK s String . getBytes . |
14,508 | public MethodlessRouter < T > addRoute ( String pathPattern , T target ) { PathPattern p = new PathPattern ( pathPattern ) ; if ( routes . containsKey ( p ) ) { return this ; } routes . put ( p , target ) ; return this ; } | This method does nothing if the path pattern has already been added . A path pattern can only point to one target . |
14,509 | public boolean anyMatched ( String [ ] requestPathTokens ) { Map < String , String > pathParams = new HashMap < > ( ) ; for ( PathPattern pattern : routes . keySet ( ) ) { if ( pattern . match ( requestPathTokens , pathParams ) ) { return true ; } pathParams . clear ( ) ; } return false ; } | Checks if there s any matching route . |
14,510 | private void initialize ( String scheme , String authority , String path ) { try { this . uri = new URI ( scheme , authority , normalizePath ( path ) , null , null ) . normalize ( ) ; } catch ( URISyntaxException e ) { throw new IllegalArgumentException ( e ) ; } } | Initializes a path object given the scheme authority and path string . |
14,511 | public boolean isAbsolute ( ) { final int start = hasWindowsDrive ( uri . getPath ( ) , true ) ? 3 : 0 ; return uri . getPath ( ) . startsWith ( SEPARATOR , start ) ; } | Checks if the directory of this path is absolute . |
14,512 | public int depth ( ) { String path = uri . getPath ( ) ; int depth = 0 ; int slash = path . length ( ) == 1 && path . charAt ( 0 ) == '/' ? - 1 : 0 ; while ( slash != - 1 ) { depth ++ ; slash = path . indexOf ( SEPARATOR , slash + 1 ) ; } return depth ; } | Returns the number of elements in this path . |
14,513 | public Avro recordClass ( Class < ? extends SpecificRecord > recordClass ) { Preconditions . checkNotNull ( recordClass ) ; this . recordClass = recordClass ; return this ; } | Sets the class of the Avro specific record . |
14,514 | public static void readFully ( final InputStream in , final byte [ ] buf , int off , final int len ) throws IOException { int toRead = len ; while ( toRead > 0 ) { final int ret = in . read ( buf , off , toRead ) ; if ( ret < 0 ) { throw new IOException ( "Premeture EOF from inputStream" ) ; } toRead -= ret ; off += re... | Reads len bytes in a loop . |
14,515 | private static String getAlgorithmsListing ( ) { StrBuilder strBuilder = new StrBuilder ( ) ; strBuilder . appendNewLine ( ) . appendln ( "Select an algorithm to view usage: flink run examples/flink-gelly-examples_<version>.jar --algorithm <algorithm>" ) . appendNewLine ( ) . appendln ( "Available algorithms:" ) ; for ... | List available algorithms . This is displayed to the user when no valid algorithm is given in the program parameterization . |
14,516 | private static String getAlgorithmUsage ( String algorithmName ) { StrBuilder strBuilder = new StrBuilder ( ) ; Driver algorithm = driverFactory . get ( algorithmName ) ; strBuilder . appendNewLine ( ) . appendNewLine ( ) . appendln ( algorithm . getLongDescription ( ) ) . appendNewLine ( ) . append ( "usage: flink run... | Display the usage for the given algorithm . This includes options for all compatible inputs the selected algorithm and outputs implemented by the selected algorithm . |
14,517 | private void execute ( ) throws Exception { if ( result == null ) { env . execute ( executionName ) ; } else { output . write ( executionName . toString ( ) , System . out , result ) ; } System . out . println ( ) ; algorithm . printAnalytics ( System . out ) ; if ( jobDetailsPath . getValue ( ) != null ) { writeJobDet... | Execute the Flink job . |
14,518 | protected void startThreads ( ) { if ( this . readThread != null ) { this . readThread . start ( ) ; } if ( this . sortThread != null ) { this . sortThread . start ( ) ; } if ( this . spillThread != null ) { this . spillThread . start ( ) ; } } | Starts all the threads that are used by this sort - merger . |
14,519 | protected final void setResultIteratorException ( IOException ioex ) { synchronized ( this . iteratorLock ) { if ( this . iteratorException == null ) { this . iteratorException = ioex ; this . iteratorLock . notifyAll ( ) ; } } } | Reports an exception to all threads that are waiting for the result iterator . |
14,520 | protected static < T > CircularElement < T > endMarker ( ) { @ SuppressWarnings ( "unchecked" ) CircularElement < T > c = ( CircularElement < T > ) EOF_MARKER ; return c ; } | Gets the element that is passed as marker for the end of data . |
14,521 | protected static < T > CircularElement < T > spillingMarker ( ) { @ SuppressWarnings ( "unchecked" ) CircularElement < T > c = ( CircularElement < T > ) SPILLING_MARKER ; return c ; } | Gets the element that is passed as marker for signal beginning of spilling . |
14,522 | public void createResource ( ) throws Exception { cluster = builder . getCluster ( ) ; session = cluster . connect ( ) ; session . execute ( String . format ( "CREATE KEYSPACE IF NOT EXISTS %s with replication={'class':'SimpleStrategy', 'replication_factor':1};" , keySpace ) ) ; session . execute ( String . format ( "C... | Generates the necessary tables to store information . |
14,523 | public void jarDir ( File dirOrFile2Jar , File destJar ) throws IOException { if ( dirOrFile2Jar == null || destJar == null ) { throw new IllegalArgumentException ( ) ; } mDestJarName = destJar . getCanonicalPath ( ) ; FileOutputStream fout = new FileOutputStream ( destJar ) ; JarOutputStream jout = new JarOutputStream... | Jars a given directory or single file into a JarOutputStream . |
14,524 | public void unjarDir ( File jarFile , File destDir ) throws IOException { BufferedOutputStream dest = null ; FileInputStream fis = new FileInputStream ( jarFile ) ; unjar ( fis , destDir ) ; } | Unjars a given jar file into a given directory . |
14,525 | public void unjar ( InputStream in , File destDir ) throws IOException { BufferedOutputStream dest = null ; JarInputStream jis = new JarInputStream ( in ) ; JarEntry entry ; while ( ( entry = jis . getNextJarEntry ( ) ) != null ) { if ( entry . isDirectory ( ) ) { File dir = new File ( destDir , entry . getName ( ) ) ;... | Given an InputStream on a jar file unjars the contents into the given directory . |
14,526 | private void jarDir ( File dirOrFile2jar , JarOutputStream jos , String path ) throws IOException { if ( mVerbose ) { System . out . println ( "checking " + dirOrFile2jar ) ; } if ( dirOrFile2jar . isDirectory ( ) ) { String [ ] dirList = dirOrFile2jar . list ( ) ; String subPath = ( path == null ) ? "" : ( path + dirO... | Recursively jars up the given path under the given directory . |
14,527 | private boolean ensureBatch ( ) throws IOException { if ( nextRow >= rowsInBatch ) { nextRow = 0 ; boolean moreRows = orcRowsReader . nextBatch ( rowBatch ) ; if ( moreRows ) { rowsInBatch = fillRows ( rows , schema , rowBatch , selectedFields ) ; } return moreRows ; } return true ; } | Checks if there is at least one row left in the batch to return . If no more row are available it reads another batch of rows . |
14,528 | public JobSubmissionResult run ( JobWithJars jobWithJars , int parallelism , SavepointRestoreSettings savepointSettings ) throws CompilerException , ProgramInvocationException { ClassLoader classLoader = jobWithJars . getUserCodeClassLoader ( ) ; if ( classLoader == null ) { throw new IllegalArgumentException ( "The gi... | Runs a program on the Flink cluster to which this client is connected . The call blocks until the execution is complete and returns afterwards . |
14,529 | public Map < String , OptionalFailure < Object > > getAccumulators ( JobID jobID ) throws Exception { return getAccumulators ( jobID , ClassLoader . getSystemClassLoader ( ) ) ; } | Requests and returns the accumulators for the given job identifier . Accumulators can be requested while a job is running or after it has finished . The default class loader is used to deserialize the incoming accumulator results . |
14,530 | private static OptimizedPlan getOptimizedPlan ( Optimizer compiler , JobWithJars prog , int parallelism ) throws CompilerException , ProgramInvocationException { return getOptimizedPlan ( compiler , prog . getPlan ( ) , parallelism ) ; } | Creates the optimized plan for a given program using this client s compiler . |
14,531 | public static String generateRuntimeName ( Class < ? > clazz , String [ ] fields ) { return TableConnectorUtils . generateRuntimeName ( clazz , fields ) ; } | Returns the table connector name used for log and web UI . |
14,532 | public ChannelFuture requestSubpartition ( final ResultPartitionID partitionId , final int subpartitionIndex , final RemoteInputChannel inputChannel , int delayMs ) throws IOException { checkNotClosed ( ) ; LOG . debug ( "Requesting subpartition {} of partition {} with {} ms delay." , subpartitionIndex , partitionId , ... | Requests a remote intermediate result partition queue . |
14,533 | public void selectFields ( String [ ] fieldNames ) { checkNotNull ( fieldNames , "fieldNames" ) ; this . fieldNames = fieldNames ; RowTypeInfo rowTypeInfo = ( RowTypeInfo ) ParquetSchemaConverter . fromParquetType ( expectedFileSchema ) ; TypeInformation [ ] selectFieldTypes = new TypeInformation [ fieldNames . length ... | Configures the fields to be read and returned by the ParquetInputFormat . Selected fields must be present in the configured schema . |
14,534 | private MessageType getReadSchema ( MessageType fileSchema , Path filePath ) { RowTypeInfo fileTypeInfo = ( RowTypeInfo ) ParquetSchemaConverter . fromParquetType ( fileSchema ) ; List < Type > types = new ArrayList < > ( ) ; for ( int i = 0 ; i < fieldNames . length ; ++ i ) { String readFieldName = fieldNames [ i ] ;... | Generates and returns the read schema based on the projected fields for a given file . |
14,535 | private long calExpirationTime ( long operatorTime , long relativeSize ) { if ( operatorTime < Long . MAX_VALUE ) { return operatorTime - relativeSize - allowedLateness - 1 ; } else { return Long . MAX_VALUE ; } } | Calculate the expiration time with the given operator time and relative window size . |
14,536 | private void registerCleanUpTimer ( Context ctx , long rowTime , boolean leftRow ) throws IOException { if ( leftRow ) { long cleanUpTime = rowTime + leftRelativeSize + minCleanUpInterval + allowedLateness + 1 ; registerTimer ( ctx , cleanUpTime ) ; rightTimerState . update ( cleanUpTime ) ; } else { long cleanUpTime =... | Register a timer for cleaning up rows in a specified time . |
14,537 | private void removeExpiredRows ( Collector < BaseRow > collector , long expirationTime , MapState < Long , List < Tuple2 < BaseRow , Boolean > > > rowCache , ValueState < Long > timerState , OnTimerContext ctx , boolean removeLeft ) throws Exception { Iterator < Map . Entry < Long , List < Tuple2 < BaseRow , Boolean > ... | Remove the expired rows . Register a new timer if the cache still holds valid rows after the cleaning up . |
14,538 | public < T > DynamicResult < T > createResult ( Environment env , TableSchema schema , ExecutionConfig config ) { final RowTypeInfo outputType = new RowTypeInfo ( schema . getFieldTypes ( ) , schema . getFieldNames ( ) ) ; if ( env . getExecution ( ) . isStreamingExecution ( ) ) { final InetAddress gatewayAddress = get... | Creates a result . Might start threads or opens sockets so every created result must be closed . |
14,539 | public void lazyDestroy ( ) { synchronized ( availableMemorySegments ) { if ( ! isDestroyed ) { MemorySegment segment ; while ( ( segment = availableMemorySegments . poll ( ) ) != null ) { returnMemorySegment ( segment ) ; } BufferListener listener ; while ( ( listener = registeredListeners . poll ( ) ) != null ) { lis... | Destroy is called after the produce or consume phase of a task finishes . |
14,540 | public static < KT , KB , VVT , VVB , EV > BipartiteGraph < KT , KB , VVT , VVB , EV > fromDataSet ( DataSet < Vertex < KT , VVT > > topVertices , DataSet < Vertex < KB , VVB > > bottomVertices , DataSet < BipartiteEdge < KT , KB , EV > > edges , ExecutionEnvironment context ) { return new BipartiteGraph < > ( topVerti... | Create bipartite graph from datasets . |
14,541 | public Graph < KT , VVT , Tuple2 < EV , EV > > projectionTopSimple ( ) { DataSet < Edge < KT , Tuple2 < EV , EV > > > newEdges = edges . join ( edges ) . where ( 1 ) . equalTo ( 1 ) . with ( new ProjectionTopSimple < > ( ) ) . name ( "Simple top projection" ) ; return Graph . fromDataSet ( topVertices , newEdges , cont... | Convert a bipartite graph into an undirected graph that contains only top vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains a bottom vertex they are both connected to . |
14,542 | public Graph < KB , VVB , Tuple2 < EV , EV > > projectionBottomSimple ( ) { DataSet < Edge < KB , Tuple2 < EV , EV > > > newEdges = edges . join ( edges ) . where ( 0 ) . equalTo ( 0 ) . with ( new ProjectionBottomSimple < > ( ) ) . name ( "Simple bottom projection" ) ; return Graph . fromDataSet ( bottomVertices , new... | Convert a bipartite graph into an undirected graph that contains only bottom vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains a top vertex they are both connected to . |
14,543 | public Graph < KT , VVT , Projection < KB , VVB , VVT , EV > > projectionTopFull ( ) { DataSet < Tuple5 < KT , KB , EV , VVT , VVB > > edgesWithVertices = joinEdgeWithVertices ( ) ; DataSet < Edge < KT , Projection < KB , VVB , VVT , EV > > > newEdges = edgesWithVertices . join ( edgesWithVertices ) . where ( 1 ) . equ... | Convert a bipartite graph into a graph that contains only top vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains at least one bottom vertex they both connect to . |
14,544 | public Graph < KB , VVB , Projection < KT , VVT , VVB , EV > > projectionBottomFull ( ) { DataSet < Tuple5 < KT , KB , EV , VVT , VVB > > edgesWithVertices = joinEdgeWithVertices ( ) ; DataSet < Edge < KB , Projection < KT , VVT , VVB , EV > > > newEdges = edgesWithVertices . join ( edgesWithVertices ) . where ( 0 ) . ... | Convert a bipartite graph into a graph that contains only bottom vertices . An edge between two vertices in the new graph will exist only if the original bipartite graph contains at least one top vertex they both connect to . |
14,545 | public JobExecutionResult execute ( String jobName ) throws Exception { StreamGraph streamGraph = getStreamGraph ( ) ; streamGraph . setJobName ( jobName ) ; JobGraph jobGraph = streamGraph . getJobGraph ( ) ; jobGraph . setAllowQueuedScheduling ( true ) ; Configuration configuration = new Configuration ( ) ; configura... | Executes the JobGraph on a mini cluster of ClusterUtil with a user specified name . |
14,546 | public static KvStateService fromConfiguration ( TaskManagerServicesConfiguration taskManagerServicesConfiguration ) { KvStateRegistry kvStateRegistry = new KvStateRegistry ( ) ; QueryableStateConfiguration qsConfig = taskManagerServicesConfiguration . getQueryableStateConfig ( ) ; KvStateClientProxy kvClientProxy = nu... | Creates and returns the KvState service . |
14,547 | public CompensatedSum add ( CompensatedSum other ) { double correctedSum = other . value ( ) + ( delta + other . delta ( ) ) ; double updatedValue = value + correctedSum ; double updatedDelta = correctedSum - ( updatedValue - value ) ; return new CompensatedSum ( updatedValue , updatedDelta ) ; } | Increments the Kahan sum by adding two sums and updating the correction term for reducing numeric errors . |
14,548 | public SortPartitionOperator < T > sortPartition ( int field , Order order ) { if ( useKeySelector ) { throw new InvalidProgramException ( "Expression keys cannot be appended after a KeySelector" ) ; } ensureSortableKey ( field ) ; keys . add ( new Keys . ExpressionKeys < > ( field , getType ( ) ) ) ; orders . add ( or... | Appends an additional sort order with the specified field in the specified order to the local partition sorting of the DataSet . |
14,549 | public Option < Long > getNumberOfAllocatedBytes ( ) throws NoSuchFieldException , IllegalAccessException { if ( directArenas != null ) { long numChunks = 0 ; for ( Object arena : directArenas ) { numChunks += getNumberOfAllocatedChunks ( arena , "qInit" ) ; numChunks += getNumberOfAllocatedChunks ( arena , "q000" ) ; ... | Returns the number of currently allocated bytes . |
14,550 | private long getNumberOfAllocatedChunks ( Object arena , String chunkListFieldName ) throws NoSuchFieldException , IllegalAccessException { Field chunkListField = arena . getClass ( ) . getSuperclass ( ) . getDeclaredField ( chunkListFieldName ) ; chunkListField . setAccessible ( true ) ; Object chunkList = chunkListFi... | Returns the number of allocated bytes of the given arena and chunk list . |
14,551 | private void validateKeyTypes ( int [ ] keyFieldIndices ) { final TypeInformation < ? > [ ] types = getFieldTypes ( ) ; for ( int keyFieldIndex : keyFieldIndices ) { final TypeInformation < ? > type = types [ keyFieldIndex ] ; if ( ! TypeCheckUtils . isSimpleStringRepresentation ( type ) ) { throw new ValidationExcepti... | Validate the types that are used for conversion to string . |
14,552 | protected void addPathRecursively ( final File sourcePath , final Path targetPath , final ContainerSpecification env ) throws IOException { final java . nio . file . Path sourceRoot = sourcePath . toPath ( ) . getParent ( ) ; Files . walkFileTree ( sourcePath . toPath ( ) , new SimpleFileVisitor < java . nio . file . P... | Add a path recursively to the container specification . |
14,553 | public void subscribeToEvent ( ResultPartitionID partitionId , EventListener < TaskEvent > eventListener , Class < ? extends TaskEvent > eventType ) { checkNotNull ( partitionId ) ; checkNotNull ( eventListener ) ; checkNotNull ( eventType ) ; TaskEventHandler taskEventHandler ; synchronized ( registeredHandlers ) { ta... | Subscribes a listener to this dispatcher for events on a partition . |
14,554 | public static PyObject adapt ( Object o ) { if ( o instanceof PyObject ) { return ( PyObject ) o ; } return Py . java2py ( o ) ; } | Convert java object to its corresponding PyObject representation . |
14,555 | public static void reset ( final Collection < MasterTriggerRestoreHook < ? > > hooks , final Logger log ) throws FlinkException { for ( MasterTriggerRestoreHook < ? > hook : hooks ) { final String id = hook . getIdentifier ( ) ; try { hook . reset ( ) ; } catch ( Throwable t ) { ExceptionUtils . rethrowIfFatalErrorOrOO... | Resets the master hooks . |
14,556 | public static void close ( final Collection < MasterTriggerRestoreHook < ? > > hooks , final Logger log ) throws FlinkException { for ( MasterTriggerRestoreHook < ? > hook : hooks ) { try { hook . close ( ) ; } catch ( Throwable t ) { log . warn ( "Failed to cleanly close a checkpoint master hook (" + hook . getIdentif... | Closes the master hooks . |
14,557 | public static List < MasterState > triggerMasterHooks ( Collection < MasterTriggerRestoreHook < ? > > hooks , long checkpointId , long timestamp , Executor executor , Time timeout ) throws FlinkException { final ArrayList < MasterState > states = new ArrayList < > ( hooks . size ( ) ) ; for ( MasterTriggerRestoreHook <... | Triggers all given master hooks and returns state objects for each hook that produced a state . |
14,558 | public static void restoreMasterHooks ( final Map < String , MasterTriggerRestoreHook < ? > > masterHooks , final Collection < MasterState > states , final long checkpointId , final boolean allowUnmatchedState , final Logger log ) throws FlinkException { if ( states == null || states . isEmpty ( ) || masterHooks == nul... | Calls the restore method given checkpoint master hooks and passes the given master state to them where state with a matching name is found . |
14,559 | public static < T > MasterTriggerRestoreHook < T > wrapHook ( MasterTriggerRestoreHook < T > hook , ClassLoader userClassLoader ) { return new WrappedMasterHook < > ( hook , userClassLoader ) ; } | Wraps a hook such that the user - code classloader is applied when the hook is invoked . |
14,560 | @ SuppressWarnings ( "unchecked" ) public static < X > X deserializeFunction ( RuntimeContext context , byte [ ] serFun ) throws FlinkException { if ( ! jythonInitialized ) { String path = context . getDistributedCache ( ) . getFile ( PythonConstants . FLINK_PYTHON_DC_ID ) . getAbsolutePath ( ) ; String scriptName = Py... | Deserialize the given python function . If the functions class definition cannot be found we assume that this is the first invocation of this method for a given job and load the python script containing the class definition via jython . |
14,561 | public static void initAndExecPythonScript ( PythonEnvironmentFactory factory , java . nio . file . Path scriptDirectory , String scriptName , String [ ] args ) { String [ ] fullArgs = new String [ args . length + 1 ] ; fullArgs [ 0 ] = scriptDirectory . resolve ( scriptName ) . toString ( ) ; System . arraycopy ( args... | Initializes the Jython interpreter and executes a python script . |
14,562 | private static void setRequiredProperties ( Properties zkProps ) { if ( zkProps . getProperty ( "clientPort" ) == null ) { zkProps . setProperty ( "clientPort" , String . valueOf ( DEFAULT_ZOOKEEPER_CLIENT_PORT ) ) ; LOG . warn ( "No 'clientPort' configured. Set to '{}'." , DEFAULT_ZOOKEEPER_CLIENT_PORT ) ; } if ( zkPr... | Sets required properties to reasonable defaults and logs it . |
14,563 | private static void writeMyIdToDataDir ( Properties zkProps , int id ) throws IOException { if ( zkProps . getProperty ( "dataDir" ) == null ) { throw new IllegalConfigurationException ( "No dataDir configured." ) ; } File dataDir = new File ( zkProps . getProperty ( "dataDir" ) ) ; if ( ! dataDir . isDirectory ( ) && ... | Write myid file to the dataDir in the given ZooKeeper configuration . |
14,564 | public static String generateRuntimeName ( Class < ? > clazz , String [ ] fields ) { String className = clazz . getSimpleName ( ) ; if ( null == fields ) { return className + "(*)" ; } else { return className + "(" + String . join ( ", " , fields ) + ")" ; } } | Returns the table connector name used for logging and web UI . |
14,565 | public MesosConfiguration withFrameworkInfo ( Protos . FrameworkInfo . Builder frameworkInfo ) { return new MesosConfiguration ( masterUrl , frameworkInfo , credential ) ; } | Revise the configuration with updated framework info . |
14,566 | public Set < String > roles ( ) { return frameworkInfo . hasRole ( ) && ! "*" . equals ( frameworkInfo . getRole ( ) ) ? Collections . singleton ( frameworkInfo . getRole ( ) ) : Collections . emptySet ( ) ; } | Gets the roles associated with the framework . |
14,567 | public SchedulerDriver createDriver ( Scheduler scheduler , boolean implicitAcknowledgements ) { MesosSchedulerDriver schedulerDriver ; if ( this . credential ( ) . isDefined ( ) ) { schedulerDriver = new MesosSchedulerDriver ( scheduler , frameworkInfo . build ( ) , this . masterUrl ( ) , implicitAcknowledgements , th... | Create the Mesos scheduler driver based on this configuration . |
14,568 | public static void logMesosConfig ( Logger log , MesosConfiguration config ) { Map < String , String > env = System . getenv ( ) ; Protos . FrameworkInfo . Builder info = config . frameworkInfo ( ) ; log . info ( "--------------------------------------------------------------------------------" ) ; log . info ( " Mesos... | A utility method to log relevant Mesos connection info . |
14,569 | public static Thread addShutdownHook ( final AutoCloseable service , final String serviceName , final Logger logger ) { checkNotNull ( service ) ; checkNotNull ( logger ) ; final Thread shutdownHook = new Thread ( ( ) -> { try { service . close ( ) ; } catch ( Throwable t ) { logger . error ( "Error during shutdown of ... | Adds a shutdown hook to the JVM and returns the Thread which has been registered . |
14,570 | public static boolean addShutdownHookThread ( final Thread shutdownHook , final String serviceName , final Logger logger ) { checkNotNull ( shutdownHook ) ; checkNotNull ( logger ) ; try { Runtime . getRuntime ( ) . addShutdownHook ( shutdownHook ) ; return true ; } catch ( IllegalStateException e ) { } catch ( Throwab... | Adds a shutdown hook to the JVM . |
14,571 | public static void removeShutdownHook ( final Thread shutdownHook , final String serviceName , final Logger logger ) { if ( shutdownHook == null || shutdownHook == Thread . currentThread ( ) ) { return ; } checkNotNull ( logger ) ; try { Runtime . getRuntime ( ) . removeShutdownHook ( shutdownHook ) ; } catch ( Illegal... | Removes a shutdown hook from the JVM . |
14,572 | public void start ( final String initialOwnerAddress , final RpcService initialRpcService , final HighAvailabilityServices initialHighAvailabilityServices , final JobLeaderListener initialJobLeaderListener ) { if ( JobLeaderService . State . CREATED != state ) { throw new IllegalStateException ( "The service has alread... | Start the job leader service with the given services . |
14,573 | public void stop ( ) throws Exception { LOG . info ( "Stop job leader service." ) ; if ( JobLeaderService . State . STARTED == state ) { for ( Tuple2 < LeaderRetrievalService , JobLeaderService . JobManagerLeaderListener > leaderRetrievalServiceEntry : jobLeaderServices . values ( ) ) { LeaderRetrievalService leaderRet... | Stop the job leader services . This implies stopping all leader retrieval services for the different jobs and their leader retrieval listeners . |
14,574 | public void removeJob ( JobID jobId ) throws Exception { Preconditions . checkState ( JobLeaderService . State . STARTED == state , "The service is currently not running." ) ; Tuple2 < LeaderRetrievalService , JobLeaderService . JobManagerLeaderListener > entry = jobLeaderServices . remove ( jobId ) ; if ( entry != nul... | Remove the given job from being monitored by the job leader service . |
14,575 | public void addJob ( final JobID jobId , final String defaultTargetAddress ) throws Exception { Preconditions . checkState ( JobLeaderService . State . STARTED == state , "The service is currently not running." ) ; LOG . info ( "Add job {} for job leader monitoring." , jobId ) ; final LeaderRetrievalService leaderRetri... | Add the given job to be monitored . This means that the service tries to detect leaders for this job and then tries to establish a connection to it . |
14,576 | public void reconnect ( final JobID jobId ) { Preconditions . checkNotNull ( jobId , "JobID must not be null." ) ; final Tuple2 < LeaderRetrievalService , JobManagerLeaderListener > jobLeaderService = jobLeaderServices . get ( jobId ) ; if ( jobLeaderService != null ) { jobLeaderService . f1 . reconnect ( ) ; } else { ... | Triggers reconnection to the last known leader of the given job . |
14,577 | public boolean containsJob ( JobID jobId ) { Preconditions . checkState ( JobLeaderService . State . STARTED == state , "The service is currently not running." ) ; return jobLeaderServices . containsKey ( jobId ) ; } | Check whether the service monitors the given job . |
14,578 | private void saveHandleInState ( final long checkpointId , final long timestamp ) throws Exception { if ( out != null ) { int subtaskIdx = getRuntimeContext ( ) . getIndexOfThisSubtask ( ) ; StreamStateHandle handle = out . closeAndGetHandle ( ) ; PendingCheckpoint pendingCheckpoint = new PendingCheckpoint ( checkpoint... | Called when a checkpoint barrier arrives . It closes any open streams to the backend and marks them as pending for committing to the external third - party storage system . |
14,579 | public Statistics columnStats ( String columnName , ColumnStats columnStats ) { Map < String , String > map = normalizeColumnStats ( columnStats ) ; this . columnStats . put ( columnName , map ) ; return this ; } | Sets statistics for a column . Overwrites all existing statistics for this column . |
14,580 | public Statistics columnDistinctCount ( String columnName , Long ndv ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( DISTINCT_COUNT , String . valueOf ( ndv ) ) ; return this ; } | Sets the number of distinct values statistic for the given column . |
14,581 | public Statistics columnNullCount ( String columnName , Long nullCount ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( NULL_COUNT , String . valueOf ( nullCount ) ) ; return this ; } | Sets the number of null values statistic for the given column . |
14,582 | public Statistics columnAvgLength ( String columnName , Double avgLen ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( AVG_LENGTH , String . valueOf ( avgLen ) ) ; return this ; } | Sets the average length statistic for the given column . |
14,583 | public Statistics columnMaxLength ( String columnName , Integer maxLen ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( MAX_LENGTH , String . valueOf ( maxLen ) ) ; return this ; } | Sets the maximum length statistic for the given column . |
14,584 | public Statistics columnMaxValue ( String columnName , Number max ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( MAX_VALUE , String . valueOf ( max ) ) ; return this ; } | Sets the maximum value statistic for the given column . |
14,585 | public Statistics columnMinValue ( String columnName , Number min ) { this . columnStats . computeIfAbsent ( columnName , column -> new HashMap < > ( ) ) . put ( MIN_VALUE , String . valueOf ( min ) ) ; return this ; } | Sets the minimum value statistic for the given column . |
14,586 | public int size ( ) { if ( allElementsInCache ) { return orderedCache . size ( ) ; } else { int count = 0 ; try ( final RocksBytesIterator iterator = orderedBytesIterator ( ) ) { while ( iterator . hasNext ( ) ) { iterator . next ( ) ; ++ count ; } } return count ; } } | This implementation comes at a relatively high cost per invocation . It should not be called repeatedly when it is clear that the value did not change . Currently this is only truly used to realize certain higher - level tests . |
14,587 | private void generateAllFailoverRegion ( List < ExecutionJobVertex > newJobVerticesTopological ) { final IdentityHashMap < ExecutionVertex , ArrayList < ExecutionVertex > > vertexToRegion = new IdentityHashMap < > ( ) ; final IdentityHashMap < ArrayList < ExecutionVertex > , Object > distinctRegions = new IdentityHashM... | Generates all the FailoverRegions from the newly added job vertices
14,588 | public void reset ( ) { this . cursor = fixedSize ; for ( int i = 0 ; i < nullBitsSizeInBytes ; i += 8 ) { segment . putLong ( i , 0L ) ; } this . segment . putInt ( 0 , numElements ) ; } | First reset . |
14,589 | public static InetAddress findConnectingAddress ( InetSocketAddress targetAddress , long maxWaitMillis , long startLoggingAfter ) throws IOException { if ( targetAddress == null ) { throw new NullPointerException ( "targetAddress must not be null" ) ; } if ( maxWaitMillis <= 0 ) { throw new IllegalArgumentException ( "... | Finds the local network address from which this machine can connect to the target address . This method tries to establish a proper network connection to the given target so it only succeeds if the target socket address actually accepts connections . The method tries various strategies multiple times and uses an expone... |
14,590 | private static InetAddress findAddressUsingStrategy ( AddressDetectionState strategy , InetSocketAddress targetAddress , boolean logging ) throws IOException { if ( strategy == AddressDetectionState . LOCAL_HOST ) { InetAddress localhostName ; try { localhostName = InetAddress . getLocalHost ( ) ; } catch ( UnknownHost... | Try to find a local address which allows as to connect to the targetAddress using the given strategy . |
14,591 | public void add ( BufferOrEvent boe ) throws IOException { try { ByteBuffer contents ; if ( boe . isBuffer ( ) ) { Buffer buf = boe . getBuffer ( ) ; contents = buf . getNioBufferReadable ( ) ; } else { contents = EventSerializer . toSerializedEvent ( boe . getEvent ( ) ) ; } headBuffer . clear ( ) ; headBuffer . putIn... | Adds a buffer or event to the sequence of spilled buffers and events . |
14,592 | public static BaseRowKeySelector getBaseRowSelector ( int [ ] keyFields , BaseRowTypeInfo rowType ) { if ( keyFields . length > 0 ) { InternalType [ ] inputFieldTypes = rowType . getInternalTypes ( ) ; String [ ] inputFieldNames = rowType . getFieldNames ( ) ; InternalType [ ] keyFieldTypes = new InternalType [ keyFiel... | Create a BaseRowKeySelector to extract keys from DataStream which type is BaseRowTypeInfo . |
14,593 | public void close ( ) { synchronized ( this ) { if ( this . closed ) { return ; } this . closed = true ; } this . numRecordsInBuffer = 0 ; this . numRecordsReturned = 0 ; for ( int i = this . fullSegments . size ( ) - 1 ; i >= 0 ; i -- ) { this . emptySegments . add ( this . fullSegments . remove ( i ) ) ; } this . mem... | This method closes the iterator and releases all resources . This method works both as a regular shutdown and as a canceling method . The method may be called multiple times and will not produce an error . |
14,594 | private Map < Path , FileStatus > listEligibleFiles ( FileSystem fileSystem , Path path ) throws IOException { final FileStatus [ ] statuses ; try { statuses = fileSystem . listStatus ( path ) ; } catch ( IOException e ) { return Collections . emptyMap ( ) ; } if ( statuses == null ) { LOG . warn ( "Path does not exist... | Returns the paths of the files not yet processed . |
14,595 | private TimeWindow mergeWindow ( TimeWindow curWindow , TimeWindow other , Collection < TimeWindow > mergedWindow ) { if ( curWindow . intersects ( other ) ) { mergedWindow . add ( other ) ; return curWindow . cover ( other ) ; } else { return curWindow ; } } | Merges curWindow and other and returns a new window that covers both if they overlap . Otherwise returns curWindow itself .
14,596 | protected Configuration applyCommandLineOptionsToConfiguration ( CommandLine commandLine ) throws FlinkException { final Configuration resultingConfiguration = new Configuration ( configuration ) ; if ( commandLine . hasOption ( addressOption . getOpt ( ) ) ) { String addressWithPort = commandLine . getOptionValue ( ad... | Override configuration settings by specified command line options . |
14,597 | private String getInternal ( String key ) { Preconditions . checkArgument ( configuredOptions . containsKey ( key ) , "The configuration " + key + " has not been configured." ) ; return configuredOptions . get ( key ) ; } | Returns the value in string format with the given key . |
14,598 | protected boolean increaseBackoff ( ) { if ( currentBackoff < 0 ) { return false ; } if ( currentBackoff == 0 ) { currentBackoff = initialBackoff ; return true ; } else if ( currentBackoff < maxBackoff ) { currentBackoff = Math . min ( currentBackoff * 2 , maxBackoff ) ; return true ; } return false ; } | Increases the current backoff and returns whether the operation was successful . |
14,599 | public int resetErrorStateAndParse ( byte [ ] bytes , int startPos , int limit , byte [ ] delim , T reuse ) { resetParserState ( ) ; return parseField ( bytes , startPos , limit , delim , reuse ) ; } | Parses the value of a field from the byte array , taking care to properly reset the state of this parser . The start position within the byte array and the array's valid length are given . The content of the value is delimited by a field delimiter .
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.