idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
15,200
// Returns the first day (as a Julian day number) of the first ISO-8601 week of the given year:
// the Monday of the week containing Jan 4, possibly falling in the previous calendar year.
// NOTE(review): the offset arithmetic ((11 - dow) % 7 - 3) presumably maps Jan 1's day-of-week
// to the nearest Monday in [Dec 29, Jan 4] — confirm against ymdToJulian's weekday convention.
private static long firstMondayOfFirstWeek ( int year ) { final long janFirst = ymdToJulian ( year , 1 , 1 ) ; final long janFirstDow = floorMod ( janFirst + 1 , 7 ) ; return janFirst + ( 11 - janFirstDow ) % 7 - 3 ; }
Returns the first day of the first week of a year. Per ISO-8601 it is the Monday of the week that contains Jan 4, or equivalently, it is a Monday between Dec 29 and Jan 4. Sometimes it is in the year before the given year.
15,201
// Adds m months to a millisecond-epoch timestamp. Splits the timestamp into a day part and an
// intra-day millisecond remainder (floorMod keeps the remainder non-negative for pre-epoch
// timestamps), shifts the day part via the day-based addMonths overload, then re-attaches the
// remainder so the time-of-day is preserved.
public static long addMonths ( long timestamp , int m ) { final long millis = DateTimeUtils . floorMod ( timestamp , DateTimeUtils . MILLIS_PER_DAY ) ; timestamp -= millis ; final long x = addMonths ( ( int ) ( timestamp / DateTimeUtils . MILLIS_PER_DAY ) , m ) ; return x * DateTimeUtils . MILLIS_PER_DAY + millis ; }
Adds a given number of months to a timestamp represented as the number of milliseconds since the epoch .
15,202
/**
 * Adds a given number of months to a date represented as the number of days since the epoch.
 *
 * <p>If the resulting day-of-month does not exist in the target month, the result rolls
 * forward to the first day of the following month (preserving the original semantics).
 *
 * @param date days since the epoch
 * @param m number of months to add; may be negative
 * @return the shifted date as days since the epoch
 */
public static int addMonths ( int date , int m ) {
    int y0 = ( int ) DateTimeUtils . unixDateExtract ( TimeUnitRange . YEAR , date ) ;
    int m0 = ( int ) DateTimeUtils . unixDateExtract ( TimeUnitRange . MONTH , date ) ;
    int d0 = ( int ) DateTimeUtils . unixDateExtract ( TimeUnitRange . DAY , date ) ;
    // Normalize the month with floor semantics so the result is always in [1, 12].
    // The previous truncating division (m / 12) left m0 out of range when crossing a
    // year boundary: December + 1 month gave m0 == 13, January - 1 month gave m0 == 0,
    // both of which were then fed to lastDay(..) with an invalid month.
    m0 += m ;
    y0 += Math . floorDiv ( m0 - 1 , 12 ) ;
    m0 = Math . floorMod ( m0 - 1 , 12 ) + 1 ;
    int last = lastDay ( y0 , m0 ) ;
    if ( d0 > last ) {
        // Day does not exist in the target month: roll to the 1st of the next month.
        d0 = 1 ;
        if ( ++ m0 > 12 ) { m0 = 1 ; ++ y0 ; }
    }
    return DateTimeUtils . ymdToUnixDate ( y0 , m0 , d0 ) ;
}
Adds a given number of months to a date represented as the number of days since the epoch .
15,203
// Finds the whole number of months between two epoch-day dates. Starts from a lower bound
// (difference in days / 31, since no month is longer than 31 days) and probes upward with
// addMonths until adding one more month would overshoot date0. Negative spans delegate to the
// symmetric call with swapped arguments.
public static int subtractMonths ( int date0 , int date1 ) { if ( date0 < date1 ) { return - subtractMonths ( date1 , date0 ) ; } int m = ( date0 - date1 ) / 31 ; for ( ; ; ) { int date2 = addMonths ( date1 , m ) ; if ( date2 >= date0 ) { return m ; } int date3 = addMonths ( date1 , m + 1 ) ; if ( date3 > date0 ) { return m ; } ++ m ; } }
Finds the number of months between two dates each represented as the number of days since the epoch .
15,204
// Creates a copy of the given SingleInputSemanticProperties in which every *source* (input-side)
// field index is shifted by the given offset; target indices of forwarded fields are unchanged.
// Read fields, when present, are shifted the same way. FieldSet is immutable, hence the
// reassignment inside the loop.
public static SingleInputSemanticProperties addSourceFieldOffset ( SingleInputSemanticProperties props , int numInputFields , int offset ) { SingleInputSemanticProperties offsetProps = new SingleInputSemanticProperties ( ) ; if ( props . getReadFields ( 0 ) != null ) { FieldSet offsetReadFields = new FieldSet ( ) ; for ( int r : props . getReadFields ( 0 ) ) { offsetReadFields = offsetReadFields . addField ( r + offset ) ; } offsetProps . addReadFields ( offsetReadFields ) ; } for ( int s = 0 ; s < numInputFields ; s ++ ) { FieldSet targetFields = props . getForwardingTargetFields ( 0 , s ) ; for ( int t : targetFields ) { offsetProps . addForwardedField ( s + offset , t ) ; } } return offsetProps ; }
Creates SemanticProperties by adding an offset to each input field index of the given SemanticProperties .
15,205
// Dual-input variant of addSourceFieldOffset: shifts the source field indices of both inputs
// by their respective offsets (offset1 for input 0, offset2 for input 1). Read fields and
// forwarded-field sources are shifted; forwarding targets are left as-is.
public static DualInputSemanticProperties addSourceFieldOffsets ( DualInputSemanticProperties props , int numInputFields1 , int numInputFields2 , int offset1 , int offset2 ) { DualInputSemanticProperties offsetProps = new DualInputSemanticProperties ( ) ; if ( props . getReadFields ( 0 ) != null ) { FieldSet offsetReadFields = new FieldSet ( ) ; for ( int r : props . getReadFields ( 0 ) ) { offsetReadFields = offsetReadFields . addField ( r + offset1 ) ; } offsetProps . addReadFields ( 0 , offsetReadFields ) ; } if ( props . getReadFields ( 1 ) != null ) { FieldSet offsetReadFields = new FieldSet ( ) ; for ( int r : props . getReadFields ( 1 ) ) { offsetReadFields = offsetReadFields . addField ( r + offset2 ) ; } offsetProps . addReadFields ( 1 , offsetReadFields ) ; } for ( int s = 0 ; s < numInputFields1 ; s ++ ) { FieldSet targetFields = props . getForwardingTargetFields ( 0 , s ) ; for ( int t : targetFields ) { offsetProps . addForwardedField ( 0 , s + offset1 , t ) ; } } for ( int s = 0 ; s < numInputFields2 ; s ++ ) { FieldSet targetFields = props . getForwardingTargetFields ( 1 , s ) ; for ( int t : targetFields ) { offsetProps . addForwardedField ( 1 , s + offset2 , t ) ; } } return offsetProps ; }
Creates SemanticProperties by adding offsets to each input field index of the given SemanticProperties .
15,206
/**
 * Parses the given string into a {@link MemorySize}, applying the default unit
 * when the text does not carry a unit suffix of its own.
 *
 * @param text the string to parse
 * @param defaultUnit the unit to assume when none is present in the text
 * @return the parsed memory size
 * @throws IllegalArgumentException if the text cannot be parsed
 */
public static MemorySize parse(String text, MemoryUnit defaultUnit) throws IllegalArgumentException {
    // Append the default unit's primary suffix only when the text has no unit of its own.
    final String normalized = hasUnit(text) ? text : text + defaultUnit.getUnits()[0];
    return parse(normalized);
}
Parses the given string with a default unit .
15,207
// Builds the initial hash table for the build side of the hash join: estimates the partition
// fan-out from available memory, creates the partitions and bucket index, then drains the input
// iterator, hashing each record (comparator hash re-mixed via hash(.., 0)) and inserting it.
// The loop also bails out cooperatively when this.running is cleared (cancellation). Finally,
// each partition's build phase is finalized, flushing spilled data via the write-behind buffers.
protected void buildInitialTable ( final MutableObjectIterator < BT > input ) throws IOException { final int partitionFanOut = getPartitioningFanOutNoEstimates ( this . availableMemory . size ( ) ) ; if ( partitionFanOut > MAX_NUM_PARTITIONS ) { throw new RuntimeException ( "Hash join partitions estimate exeeds maximum number of partitions." ) ; } createPartitions ( partitionFanOut , 0 ) ; final int numBuckets = getInitialTableSize ( this . availableMemory . size ( ) , this . segmentSize , partitionFanOut , this . avgRecordLen ) ; initTable ( numBuckets , ( byte ) partitionFanOut ) ; final TypeComparator < BT > buildTypeComparator = this . buildSideComparator ; BT record = this . buildSideSerializer . createInstance ( ) ; while ( this . running && ( ( record = input . next ( record ) ) != null ) ) { final int hashCode = hash ( buildTypeComparator . hash ( record ) , 0 ) ; insertIntoTable ( record , hashCode ) ; } if ( ! this . running ) { return ; } for ( int i = 0 ; i < this . partitionsBeingBuilt . size ( ) ; i ++ ) { HashPartition < BT , PT > p = this . partitionsBeingBuilt . get ( i ) ; p . finalizeBuildPhase ( this . ioManager , this . currentEnumerator , this . writeBehindBuffers ) ; } }
Creates the initial hash table . This method sets up partitions hash index and inserts the data from the given iterator .
15,208
// Builds a Bloom filter in-place over a bucket's memory: reads the stored hash codes from the
// bucket body (count taken from the bucket header), repoints the Bloom filter's bit set at the
// bucket memory past the header, and adds every hash. Overflow segments belonging to the bucket
// are folded in by buildBloomFilterForExtraOverflowSegments. No-op for empty buckets.
final void buildBloomFilterForBucket ( int bucketInSegmentPos , MemorySegment bucket , HashPartition < BT , PT > p ) { final int count = bucket . getShort ( bucketInSegmentPos + HEADER_COUNT_OFFSET ) ; if ( count <= 0 ) { return ; } int [ ] hashCodes = new int [ count ] ; for ( int i = 0 ; i < count ; i ++ ) { hashCodes [ i ] = bucket . getInt ( bucketInSegmentPos + BUCKET_HEADER_LENGTH + i * HASH_CODE_LEN ) ; } this . bloomFilter . setBitsLocation ( bucket , bucketInSegmentPos + BUCKET_HEADER_LENGTH ) ; for ( int hashCode : hashCodes ) { this . bloomFilter . addHash ( hashCode ) ; } buildBloomFilterForExtraOverflowSegments ( bucketInSegmentPos , bucket , p ) ; }
Set all the bucket memory except bucket header as the bit set of bloom filter and use hash code of build records to build bloom filter .
15,209
/**
 * Determines the number of buffers to be used for asynchronous write-behind.
 * Computed as the base-4 logarithm of the buffer count, shifted down by 1.5 and
 * truncated (i.e. roughly round(log4(n)) - 2), capped at an upper limit of six.
 *
 * @param numBuffers the total number of available buffers
 * @return the number of write-behind buffers to use
 */
public static int getNumWriteBehindBuffers(int numBuffers) {
    final double logBaseFour = Math.log(numBuffers) / Math.log(4);
    final int desired = (int) (logBaseFour - 1.5);
    return Math.min(desired, 6);
}
Determines the number of buffers to be used for asynchronous write-behind. It is currently computed as the logarithm of the number of buffers to the base 4, rounded to the nearest integer, minus 2. The upper limit for the number of write-behind buffers is, however, set to six.
15,210
/**
 * Returns the plan without the required jars, for the case where the files are
 * already provided by the cluster.
 *
 * @return the job plan with an empty jar list
 * @throws ProgramInvocationException if the program runs in interactive mode and
 *         therefore has no extractable static plan
 */
public JobWithJars getPlanWithoutJars() throws ProgramInvocationException {
    if (!isUsingProgramEntryPoint()) {
        // Interactive-mode programs build their plan lazily; there is nothing to hand out here.
        throw new ProgramInvocationException(
            "Cannot create a " + JobWithJars.class.getSimpleName()
                + " for a program that is using the interactive mode.",
            getPlan().getJobId());
    }
    return new JobWithJars(getPlan(), Collections.<URL>emptyList(), classpaths, userCodeClassLoader);
}
Returns the plan without the required jars when the files are already provided by the cluster .
15,211
/**
 * Returns all provided libraries needed to run the program: the program jar itself
 * (when present) followed by every extracted temporary library, each converted to a URL.
 *
 * @return the list of library URLs
 */
public List<URL> getAllLibraries() {
    final List<URL> libraries = new ArrayList<URL>(this.extractedTempLibraries.size() + 1);
    if (jarFile != null) {
        libraries.add(jarFile);
    }
    for (File tempLibrary : this.extractedTempLibraries) {
        try {
            libraries.add(tempLibrary.getAbsoluteFile().toURI().toURL());
        } catch (MalformedURLException e) {
            // Paths of existing local files always form valid URLs.
            throw new RuntimeException("URL is invalid. This should not happen.", e);
        }
    }
    return libraries;
}
Returns all provided libraries needed to run the program .
15,212
// Lazily creates and caches the plan generated from the program. Side effect: on first call it
// sets the current thread's context class loader to the user-code class loader so that the
// program's plan assembly resolves user classes. The class loader is intentionally not restored
// here — NOTE(review): callers appear to rely on it staying set; confirm before changing.
private Plan getPlan ( ) throws ProgramInvocationException { if ( this . plan == null ) { Thread . currentThread ( ) . setContextClassLoader ( this . userCodeClassLoader ) ; this . plan = createPlanFromProgram ( this . program , this . args ) ; } return this . plan ; }
Returns the plan as generated from the Pact Assembler .
15,213
/**
 * Invokes the program's plan assembler with the given options and returns the
 * assembled plan. Any failure thrown by the user program is wrapped in a
 * {@link ProgramInvocationException} carrying the original cause.
 *
 * @param program the user program whose assembler is invoked
 * @param options the options handed to the assembler
 * @return the assembled plan
 * @throws ProgramInvocationException if the program throws while assembling
 */
private static Plan createPlanFromProgram(Program program, String[] options) throws ProgramInvocationException {
    try {
        return program.getPlan(options);
    } catch (Throwable t) {
        final String reason = "Error while calling the program: " + t.getMessage();
        throw new ProgramInvocationException(reason, t);
    }
}
Takes the jar described by the given file and invokes its pact assembler class to assemble a plan . The assembler class name is either passed through a parameter or it is read from the manifest of the jar . The assembler is handed the given options for its assembly .
15,214
// Extracts and sanity-checks all TaskManager service parameters from the configuration:
// temp/local-state directories (local state falls back to the temp dirs when unset), local
// recovery mode, network and queryable-state configs, memory pre-allocation, the timer-service
// shutdown timeout (reuses the Akka ask timeout), and retrying-registration settings. All of it
// is bundled into a single immutable TaskManagerServicesConfiguration.
public static TaskManagerServicesConfiguration fromConfiguration ( Configuration configuration , long maxJvmHeapMemory , InetAddress remoteAddress , boolean localCommunication ) { final String [ ] tmpDirs = ConfigurationUtils . parseTempDirectories ( configuration ) ; String [ ] localStateRootDir = ConfigurationUtils . parseLocalStateDirectories ( configuration ) ; if ( localStateRootDir . length == 0 ) { localStateRootDir = tmpDirs ; } boolean localRecoveryMode = configuration . getBoolean ( CheckpointingOptions . LOCAL_RECOVERY ) ; final NetworkEnvironmentConfiguration networkConfig = NetworkEnvironmentConfiguration . fromConfiguration ( configuration , maxJvmHeapMemory , localCommunication , remoteAddress ) ; final QueryableStateConfiguration queryableStateConfig = QueryableStateConfiguration . fromConfiguration ( configuration ) ; boolean preAllocateMemory = configuration . getBoolean ( TaskManagerOptions . MANAGED_MEMORY_PRE_ALLOCATE ) ; long timerServiceShutdownTimeout = AkkaUtils . getTimeout ( configuration ) . toMillis ( ) ; final RetryingRegistrationConfiguration retryingRegistrationConfiguration = RetryingRegistrationConfiguration . fromConfiguration ( configuration ) ; return new TaskManagerServicesConfiguration ( remoteAddress , tmpDirs , localStateRootDir , localRecoveryMode , networkConfig , queryableStateConfig , ConfigurationParserUtils . getSlot ( configuration ) , ConfigurationParserUtils . getManagedMemorySize ( configuration ) , ConfigurationParserUtils . getMemoryType ( configuration ) , preAllocateMemory , ConfigurationParserUtils . getManagedMemoryFraction ( configuration ) , timerServiceShutdownTimeout , retryingRegistrationConfiguration , ConfigurationUtils . getSystemResourceMetricsProbingInterval ( configuration ) ) ; }
Utility method to extract TaskManager config parameters from the configuration and to sanity check them .
15,215
// Streams two grouped iterators to an external (Python) process and collects results.
// Protocol loop: reads a signal int; buffer requests for group 0/1 send the next batch from the
// matching iterator and acknowledge with a write notification; SIGNAL_FINISHED ends the exchange;
// SIGNAL_ERROR joins the process's stdout/stderr printer threads and fails; any other signal is
// the size of a result buffer, which is collected and confirmed. A socket timeout is treated as
// the external process having stopped responding.
// NOTE(review): the two error paths concatenate `msg` directly while the final catch uses
// `msg.get()` — if msg is a supplier, the direct concatenations print the supplier object rather
// than its value; confirm against the field's type (compare streamBufferWithoutGroups).
public final void streamBufferWithGroups ( Iterator < IN1 > iterator1 , Iterator < IN2 > iterator2 , Collector < OUT > c ) { SingleElementPushBackIterator < IN1 > i1 = new SingleElementPushBackIterator < > ( iterator1 ) ; SingleElementPushBackIterator < IN2 > i2 = new SingleElementPushBackIterator < > ( iterator2 ) ; try { int size ; if ( i1 . hasNext ( ) || i2 . hasNext ( ) ) { while ( true ) { int sig = in . readInt ( ) ; switch ( sig ) { case SIGNAL_BUFFER_REQUEST_G0 : if ( i1 . hasNext ( ) ) { size = sender . sendBuffer1 ( i1 ) ; sendWriteNotification ( size , i1 . hasNext ( ) ) ; } break ; case SIGNAL_BUFFER_REQUEST_G1 : if ( i2 . hasNext ( ) ) { size = sender . sendBuffer2 ( i2 ) ; sendWriteNotification ( size , i2 . hasNext ( ) ) ; } break ; case SIGNAL_FINISHED : return ; case SIGNAL_ERROR : try { outPrinter . join ( ) ; } catch ( InterruptedException e ) { outPrinter . interrupt ( ) ; } try { errorPrinter . join ( ) ; } catch ( InterruptedException e ) { errorPrinter . interrupt ( ) ; } throw new RuntimeException ( "External process for task " + function . getRuntimeContext ( ) . getTaskName ( ) + " terminated prematurely due to an error." + msg ) ; default : receiver . collectBuffer ( c , sig ) ; sendReadConfirmation ( ) ; break ; } } } } catch ( SocketTimeoutException ignored ) { throw new RuntimeException ( "External process for task " + function . getRuntimeContext ( ) . getTaskName ( ) + " stopped responding." + msg ) ; } catch ( Exception e ) { throw new RuntimeException ( "Critical failure for task " + function . getRuntimeContext ( ) . getTaskName ( ) + ". " + msg . get ( ) , e ) ; } }
Sends all values contained in both iterators to the external process and collects all results .
15,216
// Creates a validated algebraic set operation. Validation deliberately runs before
// construction: failIfStreaming rejects unsupported streaming variants, then
// validateSetOperation checks the operand schemas. Both throw on invalid input.
public TableOperation create ( SetTableOperationType type , TableOperation left , TableOperation right , boolean all ) { failIfStreaming ( type , all ) ; validateSetOperation ( type , left , right ) ; return new SetTableOperation ( left , right , type , all ) ; }
Creates a valid algebraic operation .
15,217
// Translates this union node into its dataflow operator: a binary Union over the two
// already-translated inputs, labeled with the recorded call-location name.
protected Union < T > translateToDataFlow ( Operator < T > input1 , Operator < T > input2 ) { return new Union < T > ( input1 , input2 , unionLocationName ) ; }
Returns the BinaryNodeTranslation of the Union .
15,218
/**
 * Gets the field at the specified position, failing fast when it is null.
 * Used for comparing key fields, where null keys are not permitted.
 *
 * @param pos the position of the field
 * @return the non-null field value
 * @throws NullFieldException if the field at the given position is null
 */
public <T> T getFieldNotNull(int pos) {
    final T value = getField(pos);
    if (value == null) {
        throw new NullFieldException(pos);
    }
    return value;
}
Gets the field at the specified position; throws a NullFieldException if the field is null. Used for comparing key fields.
15,219
// Instantiates a tuple of the given arity: the shared Tuple0 singleton for arity 0, a fresh
// raw TupleN instance for arities 1..25. Arities outside [0, MAX_ARITY] are rejected.
// GENERATED FROM org.apache.flink.api.java.tuple.TupleGenerator — keep the switch form so the
// code stays in sync with the generator; do not hand-restructure.
public static Tuple newInstance ( int arity ) { switch ( arity ) { case 0 : return Tuple0 . INSTANCE ; case 1 : return new Tuple1 ( ) ; case 2 : return new Tuple2 ( ) ; case 3 : return new Tuple3 ( ) ; case 4 : return new Tuple4 ( ) ; case 5 : return new Tuple5 ( ) ; case 6 : return new Tuple6 ( ) ; case 7 : return new Tuple7 ( ) ; case 8 : return new Tuple8 ( ) ; case 9 : return new Tuple9 ( ) ; case 10 : return new Tuple10 ( ) ; case 11 : return new Tuple11 ( ) ; case 12 : return new Tuple12 ( ) ; case 13 : return new Tuple13 ( ) ; case 14 : return new Tuple14 ( ) ; case 15 : return new Tuple15 ( ) ; case 16 : return new Tuple16 ( ) ; case 17 : return new Tuple17 ( ) ; case 18 : return new Tuple18 ( ) ; case 19 : return new Tuple19 ( ) ; case 20 : return new Tuple20 ( ) ; case 21 : return new Tuple21 ( ) ; case 22 : return new Tuple22 ( ) ; case 23 : return new Tuple23 ( ) ; case 24 : return new Tuple24 ( ) ; case 25 : return new Tuple25 ( ) ; default : throw new IllegalArgumentException ( "The tuple arity must be in [0, " + MAX_ARITY + "]." ) ; } }
GENERATED FROM org . apache . flink . api . java . tuple . TupleGenerator .
15,220
// Single-iterator variant of the external-process streaming loop: answers buffer requests from
// the iterator (throwing if the process asks for data when none remains), stops on
// SIGNAL_FINISHED, surfaces SIGNAL_ERROR after joining the output printer threads, and treats
// any other signal as a result-buffer size to collect and confirm. Socket timeouts are reported
// as the external process having stopped responding.
public final void streamBufferWithoutGroups ( Iterator < IN > iterator , Collector < OUT > c ) { SingleElementPushBackIterator < IN > i = new SingleElementPushBackIterator < > ( iterator ) ; try { int size ; if ( i . hasNext ( ) ) { while ( true ) { int sig = in . readInt ( ) ; switch ( sig ) { case SIGNAL_BUFFER_REQUEST : if ( i . hasNext ( ) ) { size = sender . sendBuffer ( i ) ; sendWriteNotification ( size , i . hasNext ( ) ) ; } else { throw new RuntimeException ( "External process requested data even though none is available." ) ; } break ; case SIGNAL_FINISHED : return ; case SIGNAL_ERROR : try { outPrinter . join ( ) ; } catch ( InterruptedException e ) { outPrinter . interrupt ( ) ; } try { errorPrinter . join ( ) ; } catch ( InterruptedException e ) { errorPrinter . interrupt ( ) ; } throw new RuntimeException ( "External process for task " + function . getRuntimeContext ( ) . getTaskName ( ) + " terminated prematurely due to an error." + msg ) ; default : receiver . collectBuffer ( c , sig ) ; sendReadConfirmation ( ) ; break ; } } } } catch ( SocketTimeoutException ignored ) { throw new RuntimeException ( "External process for task " + function . getRuntimeContext ( ) . getTaskName ( ) + " stopped responding." + msg . get ( ) ) ; } catch ( Exception e ) { throw new RuntimeException ( "Critical failure for task " + function . getRuntimeContext ( ) . getTaskName ( ) + ". " + msg . get ( ) , e ) ; } }
Sends all values contained in the iterator to the external process and collects all results .
15,221
// Factory hook for the underlying KafkaProducer; protected so tests can override it to
// inject a mock producer. Production code uses this default pass-through construction.
protected < K , V > KafkaProducer < K , V > getKafkaProducer ( Properties props ) { return new KafkaProducer < > ( props ) ; }
Used for testing only .
15,222
// Forwards an incoming record to Kafka: serializes key and value, resolves the target topic
// (falling back to the default topic when the schema yields none), lazily caches the topic's
// partition array, and builds the ProducerRecord either unpartitioned or via the configured
// custom partitioner. When flush-on-checkpoint is enabled the pending-record counter is bumped
// under its lock before the asynchronous send; the callback is expected to decrement it.
// checkErroneous() first rethrows any error recorded by a previous async send.
public void invoke ( IN next , Context context ) throws Exception { checkErroneous ( ) ; byte [ ] serializedKey = schema . serializeKey ( next ) ; byte [ ] serializedValue = schema . serializeValue ( next ) ; String targetTopic = schema . getTargetTopic ( next ) ; if ( targetTopic == null ) { targetTopic = defaultTopicId ; } int [ ] partitions = this . topicPartitionsMap . get ( targetTopic ) ; if ( null == partitions ) { partitions = getPartitionsByTopic ( targetTopic , producer ) ; this . topicPartitionsMap . put ( targetTopic , partitions ) ; } ProducerRecord < byte [ ] , byte [ ] > record ; if ( flinkKafkaPartitioner == null ) { record = new ProducerRecord < > ( targetTopic , serializedKey , serializedValue ) ; } else { record = new ProducerRecord < > ( targetTopic , flinkKafkaPartitioner . partition ( next , serializedKey , serializedValue , targetTopic , partitions ) , serializedKey , serializedValue ) ; } if ( flushOnCheckpoint ) { synchronized ( pendingRecordsLock ) { pendingRecords ++ ; } } producer . send ( record , callback ) ; }
Called when new data arrives to the sink and forwards it to Kafka .
15,223
// Materializes the broadcast variable for the given name, scoped to the task and its iteration
// superstep. Uses putIfAbsent so parallel subtasks share one materialization; the winner (or the
// pre-existing entry) is materialized and returned. If the shared materialization has expired,
// the loop attempts to CAS-replace it with the fresh instance and materialize that; on any
// failure it retries from the top. The empty catch blocks are deliberate: a failed replace or a
// second expiry simply falls through to another iteration of the retry loop.
public < T > BroadcastVariableMaterialization < T , ? > materializeBroadcastVariable ( String name , int superstep , BatchTask < ? , ? > holder , MutableReader < ? > reader , TypeSerializerFactory < T > serializerFactory ) throws IOException { final BroadcastVariableKey key = new BroadcastVariableKey ( holder . getEnvironment ( ) . getJobVertexId ( ) , name , superstep ) ; while ( true ) { final BroadcastVariableMaterialization < T , Object > newMat = new BroadcastVariableMaterialization < T , Object > ( key ) ; final BroadcastVariableMaterialization < ? , ? > previous = variables . putIfAbsent ( key , newMat ) ; @ SuppressWarnings ( "unchecked" ) final BroadcastVariableMaterialization < T , ? > materialization = ( previous == null ) ? newMat : ( BroadcastVariableMaterialization < T , ? > ) previous ; try { materialization . materializeVariable ( reader , serializerFactory , holder ) ; return materialization ; } catch ( MaterializationExpiredException e ) { boolean replaceSuccessful = false ; try { replaceSuccessful = variables . replace ( key , materialization , newMat ) ; } catch ( Throwable t ) { } if ( replaceSuccessful ) { try { newMat . materializeVariable ( reader , serializerFactory , holder ) ; return newMat ; } catch ( MaterializationExpiredException ee ) { } } } } }
Materializes the broadcast variable for the given name, scoped to the given task and its iteration superstep. An existing materialization created by another parallel subtask may be returned, if it hasn't expired yet.
15,224
/**
 * Sets the order of the elements within a group for the given input.
 *
 * @param inputNum the number of the input (0 for the first input, 1 for the second)
 * @param order the order to set for the groups of that input
 * @throws IndexOutOfBoundsException if the input number is neither 0 nor 1
 */
public void setGroupOrder(int inputNum, Ordering order) {
    if (inputNum == 0) {
        this.groupOrder1 = order;
    } else if (inputNum == 1) {
        this.groupOrder2 = order;
    } else {
        // Include the offending value instead of throwing a bare, message-less exception.
        throw new IndexOutOfBoundsException(
            "Invalid input number: " + inputNum + " (valid input numbers are 0 and 1).");
    }
}
Sets the order of the elements within a group for the given input .
15,225
/**
 * Extends this ordering by appending an additional order requirement.
 * If the index has been previously appended, the Ordering is returned unmodified.
 *
 * @param index the key index to order by; must be non-null and non-negative
 * @param type the type of the key at that index
 * @param order the direction to order in; must not be null or {@code Order.NONE}
 * @return this Ordering, for call chaining
 * @throws NullPointerException if index or order is null
 * @throws IllegalArgumentException if the index is negative or the order is NONE
 */
public Ordering appendOrdering(Integer index, Class<? extends Comparable<?>> type, Order order) {
    // Explicit null check: the original unboxed a null index in "index < 0" and threw a
    // message-less NullPointerException from deep inside the comparison.
    if (index == null) {
        throw new NullPointerException("The key index must not be null.");
    }
    if (index < 0) {
        throw new IllegalArgumentException("The key index must not be negative.");
    }
    if (order == null) {
        throw new NullPointerException("The order must not be null.");
    }
    if (order == Order.NONE) {
        throw new IllegalArgumentException("An ordering must not be created with a NONE order.");
    }
    // Duplicate indexes are ignored; the first appended order for an index wins.
    if (!this.indexes.contains(index)) {
        this.indexes = this.indexes.addField(index);
        this.types.add(type);
        this.orders.add(order);
    }
    return this;
}
Extends this ordering by appending an additional order requirement . If the index has been previously appended then the unmodified Ordering is returned .
15,226
// Creates the AmazonDynamoDBStreamsAdapterClient used as the internal client for DynamoDB
// streams: applies user-supplied AWS client config properties, resolves credentials, stamps a
// Flink-version user agent, then points the client at an explicit endpoint when AWS_ENDPOINT is
// configured, otherwise at the configured AWS region. Returned via the AmazonKinesis interface.
protected AmazonKinesis createKinesisClient ( Properties configProps ) { ClientConfiguration awsClientConfig = new ClientConfigurationFactory ( ) . getConfig ( ) ; setAwsClientConfigProperties ( awsClientConfig , configProps ) ; AWSCredentialsProvider credentials = getCredentialsProvider ( configProps ) ; awsClientConfig . setUserAgentPrefix ( String . format ( USER_AGENT_FORMAT , EnvironmentInformation . getVersion ( ) , EnvironmentInformation . getRevisionInformation ( ) . commitId ) ) ; AmazonDynamoDBStreamsAdapterClient adapterClient = new AmazonDynamoDBStreamsAdapterClient ( credentials , awsClientConfig ) ; if ( configProps . containsKey ( AWS_ENDPOINT ) ) { adapterClient . setEndpoint ( configProps . getProperty ( AWS_ENDPOINT ) ) ; } else { adapterClient . setRegion ( Region . getRegion ( Regions . fromName ( configProps . getProperty ( AWS_REGION ) ) ) ) ; } return adapterClient ; }
Creates an AmazonDynamoDBStreamsAdapterClient . Uses it as the internal client interacting with the DynamoDB streams .
15,227
/**
 * Concatenates the given component names separated by the delimiter character.
 * The character filter is applied to every component name before joining.
 *
 * @param filter the filter applied to each component
 * @param delimiter the character placed between components
 * @param components the component names to join; may be empty
 * @return the joined, filtered string; empty when no components are given
 */
public static String concat(CharacterFilter filter, Character delimiter, String... components) {
    // Guard the empty case: the previous version read components[0] unconditionally and
    // threw an ArrayIndexOutOfBoundsException for a zero-length varargs call.
    if (components == null || components.length == 0) {
        return "";
    }
    StringBuilder sb = new StringBuilder();
    sb.append(filter.filterCharacters(components[0]));
    for (int i = 1; i < components.length; i++) {
        sb.append(delimiter);
        sb.append(filter.filterCharacters(components[i]));
    }
    return sb.toString();
}
Concatenates the given component names separated by the delimiter character . Additionally the character filter is applied to all component names .
15,228
/**
 * Generates the REST API documentation: one HTML file per supported API version,
 * written into the output directory given as the first program argument. V0 is
 * skipped because it predates the documented API surface.
 *
 * @param args args[0] is the output directory
 * @throws IOException if writing a documentation file fails
 */
public static void main(String[] args) throws IOException {
    final String outputDirectory = args[0];
    for (final RestAPIVersion version : RestAPIVersion.values()) {
        if (version != RestAPIVersion.V0) {
            createHtmlFile(
                new DocumentingDispatcherRestEndpoint(),
                version,
                Paths.get(outputDirectory, "rest_" + version.getURLVersionPrefix() + "_dispatcher.html"));
        }
    }
}
Generates the REST API documentation .
15,229
// Converts an Avro specific-record class into a nested Row type information with deterministic
// field order, compatible with the Table & SQL API. The schema is derived from the class via
// SpecificData and handed to the schema-based convertToTypeInfo overload; the cast is safe
// because record schemas always map to Row type information.
@ SuppressWarnings ( "unchecked" ) public static < T extends SpecificRecord > TypeInformation < Row > convertToTypeInfo ( Class < T > avroClass ) { Preconditions . checkNotNull ( avroClass , "Avro specific record class must not be null." ) ; final Schema schema = SpecificData . get ( ) . getSchema ( avroClass ) ; return ( TypeInformation < Row > ) convertToTypeInfo ( schema ) ; }
Converts an Avro class into a nested row structure with deterministic field order and data types that are compatible with Flink's Table & SQL API.
15,230
/**
 * Converts an Avro schema string into a nested row structure with deterministic
 * field order and data types that are compatible with Flink's Table & SQL API.
 *
 * @param avroSchemaString the Avro schema definition as a JSON string; must not be null
 * @return the converted type information
 * @throws IllegalArgumentException if the schema string cannot be parsed
 */
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> convertToTypeInfo(String avroSchemaString) {
    Preconditions.checkNotNull(avroSchemaString, "Avro schema must not be null.");
    final Schema parsedSchema;
    try {
        parsedSchema = new Schema.Parser().parse(avroSchemaString);
    } catch (SchemaParseException e) {
        throw new IllegalArgumentException("Could not parse Avro schema string.", e);
    }
    return (TypeInformation<T>) convertToTypeInfo(parsedSchema);
}
Converts an Avro schema string into a nested row structure with deterministic field order and data types that are compatible with Flink's Table & SQL API.
15,231
// Registers the given serializer class as the Kryo serializer for the given type.
// Pure delegation to the execution config, which holds the registration state.
public void registerTypeWithKryoSerializer ( Class < ? > type , Class < ? extends Serializer < ? > > serializerClass ) { config . registerTypeWithKryoSerializer ( type , serializerClass ) ; }
Registers the given Serializer via its class as a serializer for the given type at the KryoSerializer .
15,232
/**
 * Creates a DataSet from the given non-empty collection. The element type of the
 * data set is inferred from the first element of the collection; all elements must
 * be of that type (checked via {@code CollectionInputFormat.checkCollection}).
 *
 * @param data the collection backing the data set; must be non-null and non-empty
 * @return a DataSource over the collection
 * @throws IllegalArgumentException if the collection is null or empty
 */
public <X> DataSource<X> fromCollection(Collection<X> data) {
    if (data == null) {
        throw new IllegalArgumentException("The data must not be null.");
    }
    // isEmpty() instead of size() == 0: clearer, and O(1) for every collection type.
    if (data.isEmpty()) {
        throw new IllegalArgumentException("The size of the collection must not be empty.");
    }
    X firstValue = data.iterator().next();
    TypeInformation<X> type = TypeExtractor.getForObject(firstValue);
    CollectionInputFormat.checkCollection(data, type.getTypeClass());
    return new DataSource<>(
        this,
        new CollectionInputFormat<>(data, type.createSerializer(config)),
        type,
        Utils.getCallLocationName());
}
Creates a DataSet from the given non - empty collection . The type of the data set is that of the elements in the collection .
15,233
// Creates a DataSet from the given non-empty collection with an explicitly supplied element
// type. Delegates to the private overload, recording the caller's location for error messages.
// The resulting source is non-parallel (parallelism of one).
public < X > DataSource < X > fromCollection ( Collection < X > data , TypeInformation < X > type ) { return fromCollection ( data , type , Utils . getCallLocationName ( ) ) ; }
Creates a DataSet from the given non - empty collection . Note that this operation will result in a non - parallel data source i . e . a data source with a parallelism of one .
15,234
// Private helper shared by the public fromParallelCollection overloads: wraps the splittable
// iterator in a ParallelIteratorInputFormat and builds the DataSource, carrying the caller's
// location name through for error reporting.
private < X > DataSource < X > fromParallelCollection ( SplittableIterator < X > iterator , TypeInformation < X > type , String callLocationName ) { return new DataSource < > ( this , new ParallelIteratorInputFormat < > ( iterator ) , type , callLocationName ) ; }
private helper for passing different call location names
15,235
/**
 * Registers all files from this execution environment's cache registry
 * with the given plan's cache registry.
 *
 * @param p the plan to receive the cached file registrations
 * @throws IOException if registering a cached file fails
 */
protected void registerCachedFilesWithPlan(Plan p) throws IOException {
    for (Tuple2<String, DistributedCacheEntry> cachedFile : cacheFile) {
        p.registerCachedFile(cachedFile.f0, cachedFile.f1);
    }
}
Registers all files that were registered at this execution environment's cache registry with the given plan's cache registry.
15,236
// Feeds one event (with its timestamp) through the NFA and emits the matched event sequences.
// The shared-buffer accessor is acquired in try-with-resources so partial-match state is
// released/flushed even when processing throws.
private void processEvent ( NFAState nfaState , IN event , long timestamp ) throws Exception { try ( SharedBufferAccessor < IN > sharedBufferAccessor = partialMatches . getAccessor ( ) ) { Collection < Map < String , List < IN > > > patterns = nfa . process ( sharedBufferAccessor , nfaState , event , timestamp , afterMatchSkipStrategy , cepTimerService ) ; processMatchedSequences ( patterns , timestamp ) ; } }
Process the given event by giving it to the NFA and outputting the produced set of matched event sequences .
15,237
// Shared helper for orderedWait/unorderedWait: derives the output type from the AsyncFunction's
// generic signature and applies an AsyncWaitOperator with the given timeout, queue capacity and
// output mode. NOTE(review): the positional arguments to getUnaryOperatorReturnType (0, 1,
// new int[]{1, 0}) encode where OUT sits in AsyncFunction's type hierarchy — they must match the
// AsyncFunction interface exactly; do not alter without consulting TypeExtractor's docs.
private static < IN , OUT > SingleOutputStreamOperator < OUT > addOperator ( DataStream < IN > in , AsyncFunction < IN , OUT > func , long timeout , int bufSize , OutputMode mode ) { TypeInformation < OUT > outTypeInfo = TypeExtractor . getUnaryOperatorReturnType ( func , AsyncFunction . class , 0 , 1 , new int [ ] { 1 , 0 } , in . getType ( ) , Utils . getCallLocationName ( ) , true ) ; AsyncWaitOperator < IN , OUT > operator = new AsyncWaitOperator < > ( in . getExecutionEnvironment ( ) . clean ( func ) , timeout , bufSize , mode ) ; return in . transform ( "async wait operator" , outTypeInfo , operator ) ; }
Add an AsyncWaitOperator .
15,238
// Adds an AsyncWaitOperator in UNORDERED mode: results are emitted as soon as they complete,
// so output records may be reordered relative to the input. The timeout is converted to
// milliseconds before delegating to the shared addOperator helper.
public static < IN , OUT > SingleOutputStreamOperator < OUT > unorderedWait ( DataStream < IN > in , AsyncFunction < IN , OUT > func , long timeout , TimeUnit timeUnit , int capacity ) { return addOperator ( in , func , timeUnit . toMillis ( timeout ) , capacity , OutputMode . UNORDERED ) ; }
Add an AsyncWaitOperator . The order of output stream records may be reordered .
15,239
// Adds an AsyncWaitOperator in ORDERED mode: output records are emitted in the same order as
// the corresponding input records, buffering completed results as needed. The timeout is
// converted to milliseconds before delegating to the shared addOperator helper.
public static < IN , OUT > SingleOutputStreamOperator < OUT > orderedWait ( DataStream < IN > in , AsyncFunction < IN , OUT > func , long timeout , TimeUnit timeUnit , int capacity ) { return addOperator ( in , func , timeUnit . toMillis ( timeout ) , capacity , OutputMode . ORDERED ) ; }
Add an AsyncWaitOperator . The order to process input records is guaranteed to be the same as input ones .
15,240
/**
 * Gets the global job parameter value associated with the given key as a string.
 *
 * @param key the key of the parameter
 * @param defaultValue the value returned when no global parameters are set or the key is absent
 * @return the parameter value, or the default
 */
public String getJobParameter(String key, String defaultValue) {
    final GlobalJobParameters conf = context.getExecutionConfig().getGlobalJobParameters();
    if (conf == null) {
        return defaultValue;
    }
    // Materialize the map once: the original called toMap() twice (containsKey + get),
    // which may build a fresh map on each call and is needlessly non-atomic.
    final java.util.Map<String, String> parameters = conf.toMap();
    return parameters.containsKey(key) ? parameters.get(key) : defaultValue;
}
Gets the global job parameter value associated with the given key as a string .
15,241
// Constructs a Deadline the given duration after now, based on System.nanoTime().
// Math.addExact makes a duration large enough to overflow the nanosecond clock fail loudly
// with an ArithmeticException instead of silently wrapping into the past.
public static Deadline fromNow ( Duration duration ) { return new Deadline ( Math . addExact ( System . nanoTime ( ) , duration . toNanos ( ) ) ) ; }
Constructs a Deadline that is a given duration after now .
15,242
// Starts the slot manager with the given leader id and resource manager actions: stores the
// collaborators, marks the manager as started, and schedules the two periodic checks
// (task-manager timeouts and slot-request timeouts). The checks are scheduled on the scheduled
// executor but hop onto the main-thread executor before running, keeping all state access
// single-threaded.
public void start ( ResourceManagerId newResourceManagerId , Executor newMainThreadExecutor , ResourceActions newResourceActions ) { LOG . info ( "Starting the SlotManager." ) ; this . resourceManagerId = Preconditions . checkNotNull ( newResourceManagerId ) ; mainThreadExecutor = Preconditions . checkNotNull ( newMainThreadExecutor ) ; resourceActions = Preconditions . checkNotNull ( newResourceActions ) ; started = true ; taskManagerTimeoutCheck = scheduledExecutor . scheduleWithFixedDelay ( ( ) -> mainThreadExecutor . execute ( ( ) -> checkTaskManagerTimeouts ( ) ) , 0L , taskManagerTimeout . toMilliseconds ( ) , TimeUnit . MILLISECONDS ) ; slotRequestTimeoutCheck = scheduledExecutor . scheduleWithFixedDelay ( ( ) -> mainThreadExecutor . execute ( ( ) -> checkSlotRequestTimeouts ( ) ) , 0L , slotRequestTimeout . toMilliseconds ( ) , TimeUnit . MILLISECONDS ) ; }
Starts the slot manager with the given leader id and resource manager actions .
15,243
// Suspends the slot manager and clears its internal state: cancels both periodic timeout checks
// (without interrupting a run in progress), cancels and drops all pending slot requests,
// unregisters every known task manager (over a copied key list, since unregistration mutates
// the registration map), and finally resets the leader id, actions and started flag.
public void suspend ( ) { LOG . info ( "Suspending the SlotManager." ) ; if ( taskManagerTimeoutCheck != null ) { taskManagerTimeoutCheck . cancel ( false ) ; taskManagerTimeoutCheck = null ; } if ( slotRequestTimeoutCheck != null ) { slotRequestTimeoutCheck . cancel ( false ) ; slotRequestTimeoutCheck = null ; } for ( PendingSlotRequest pendingSlotRequest : pendingSlotRequests . values ( ) ) { cancelPendingSlotRequest ( pendingSlotRequest ) ; } pendingSlotRequests . clear ( ) ; ArrayList < InstanceID > registeredTaskManagers = new ArrayList < > ( taskManagerRegistrations . keySet ( ) ) ; for ( InstanceID registeredTaskManager : registeredTaskManagers ) { unregisterTaskManager ( registeredTaskManager ) ; } resourceManagerId = null ; resourceActions = null ; started = false ; }
Suspends the component . This clears the internal state of the slot manager .
15,244
// Registers a slot request with the given resource profile. Duplicate allocation ids are
// ignored (returns false). Otherwise the request is recorded as pending before attempting the
// internal allocation; on failure the pending entry is rolled back and the error is rethrown as
// a SlotManagerException. Returns true when the request was newly registered.
public boolean registerSlotRequest ( SlotRequest slotRequest ) throws SlotManagerException { checkInit ( ) ; if ( checkDuplicateRequest ( slotRequest . getAllocationId ( ) ) ) { LOG . debug ( "Ignoring a duplicate slot request with allocation id {}." , slotRequest . getAllocationId ( ) ) ; return false ; } else { PendingSlotRequest pendingSlotRequest = new PendingSlotRequest ( slotRequest ) ; pendingSlotRequests . put ( slotRequest . getAllocationId ( ) , pendingSlotRequest ) ; try { internalRequestSlot ( pendingSlotRequest ) ; } catch ( ResourceManagerException e ) { pendingSlotRequests . remove ( slotRequest . getAllocationId ( ) ) ; throw new SlotManagerException ( "Could not fulfill slot request " + slotRequest . getAllocationId ( ) + '.' , e ) ; } return true ; } }
Requests a slot with the respective resource profile .
15,245
/**
 * Cancels and removes a pending slot request with the given allocation id.
 * If there is no such pending request, nothing is done.
 *
 * @param allocationId the allocation id identifying the request
 * @return true if a pending request was cancelled; false if none existed
 */
public boolean unregisterSlotRequest(AllocationID allocationId) {
    checkInit();
    final PendingSlotRequest removedRequest = pendingSlotRequests.remove(allocationId);
    if (removedRequest == null) {
        LOG.debug("No pending slot request with allocation id {} found. Ignoring unregistration request.", allocationId);
        return false;
    }
    LOG.debug("Cancel slot request {}.", allocationId);
    cancelPendingSlotRequest(removedRequest);
    return true;
}
Cancels and removes a pending slot request with the given allocation id . If there is no such pending request then nothing is done .
15,246
/**
 * Registers a new task manager at the slot manager. This makes the task manager's
 * slots known and thus available for allocation. If the task manager is already
 * registered, the initial slot report is treated as a plain status update.
 *
 * @param taskExecutorConnection connection to the task executor
 * @param initialSlotReport the slots reported by the task executor on registration
 */
public void registerTaskManager(final TaskExecutorConnection taskExecutorConnection, SlotReport initialSlotReport) {
    checkInit();
    LOG.debug("Registering TaskManager {} under {} at the SlotManager.", taskExecutorConnection.getResourceID(), taskExecutorConnection.getInstanceID());
    if (taskManagerRegistrations.containsKey(taskExecutorConnection.getInstanceID())) {
        // Already known: just apply the report as an update.
        reportSlotStatus(taskExecutorConnection.getInstanceID(), initialSlotReport);
        return;
    }
    // First registration: remember the reported slot ids ...
    final ArrayList<SlotID> reportedSlots = new ArrayList<>();
    for (SlotStatus slotStatus : initialSlotReport) {
        reportedSlots.add(slotStatus.getSlotID());
    }
    final TaskManagerRegistration registration = new TaskManagerRegistration(taskExecutorConnection, reportedSlots);
    taskManagerRegistrations.put(taskExecutorConnection.getInstanceID(), registration);
    // ... then register each individual slot.
    for (SlotStatus slotStatus : initialSlotReport) {
        registerSlot(
            slotStatus.getSlotID(),
            slotStatus.getAllocationID(),
            slotStatus.getJobID(),
            slotStatus.getResourceProfile(),
            taskExecutorConnection);
    }
}
Registers a new task manager at the slot manager . This will make the task managers slots known and thus available for allocation .
15,247
/**
 * Unregisters the task manager identified by the given instance id and removes
 * its associated slots from the slot manager.
 *
 * @param instanceId identifying the task manager to unregister
 * @return true if a task manager was unregistered; false if it was unknown
 */
public boolean unregisterTaskManager(InstanceID instanceId) {
    checkInit();
    LOG.debug("Unregister TaskManager {} from the SlotManager.", instanceId);
    final TaskManagerRegistration registration = taskManagerRegistrations.remove(instanceId);
    if (registration == null) {
        LOG.debug("There is no task manager registered with instance ID {}. Ignoring this message.", instanceId);
        return false;
    }
    internalUnregisterTaskManager(registration);
    return true;
}
Unregisters the task manager identified by the given instance id and its associated slots from the slot manager .
15,248
/**
 * Reports the current slot allocations for a task manager identified by the given
 * instance id. Each slot status in the report is applied as a slot update.
 *
 * @param instanceId identifying the task manager
 * @param slotReport the current slot statuses of the task manager
 * @return true if the report was applied; false if the task manager is unknown
 */
public boolean reportSlotStatus(InstanceID instanceId, SlotReport slotReport) {
    checkInit();
    LOG.debug("Received slot report from instance {}: {}.", instanceId, slotReport);
    final TaskManagerRegistration registration = taskManagerRegistrations.get(instanceId);
    if (registration == null) {
        LOG.debug("Received slot report for unknown task manager with instance id {}. Ignoring this report.", instanceId);
        return false;
    }
    for (SlotStatus slotStatus : slotReport) {
        updateSlot(slotStatus.getSlotID(), slotStatus.getAllocationID(), slotStatus.getJobID());
    }
    return true;
}
Reports the current slot allocations for a task manager identified by the given instance id .
15,249
/**
 * Frees the given slot from the given allocation. If the slot is still allocated
 * under the given allocation id, it is transitioned back to the free state (by
 * passing null allocation and job ids to {@code updateSlotState}) and becomes
 * subject to new slot requests.
 *
 * @param slotId identifying the slot to free
 * @param allocationId the allocation the caller believes occupies the slot
 */
public void freeSlot(SlotID slotId, AllocationID allocationId) {
    checkInit();
    TaskManagerSlot slot = slots.get(slotId);
    if (null != slot) {
        if (slot.getState() == TaskManagerSlot.State.ALLOCATED) {
            if (Objects.equals(allocationId, slot.getAllocationId())) {
                TaskManagerRegistration taskManagerRegistration = taskManagerRegistrations.get(slot.getInstanceId());
                if (taskManagerRegistration == null) {
                    throw new IllegalStateException("Trying to free a slot from a TaskManager " +
                        slot.getInstanceId() + " which has not been registered.");
                }
                // Null allocation/job ids mark the slot as free again.
                updateSlotState(slot, taskManagerRegistration, null, null);
            } else {
                LOG.debug("Received request to free slot {} with expected allocation id {}, " +
                    "but actual allocation id {} differs. Ignoring the request.", slotId, allocationId, slot.getAllocationId());
            }
        } else {
            LOG.debug("Slot {} has not been allocated.", allocationId);
        }
    } else {
        LOG.debug("Trying to free a slot {} which has not been registered. Ignoring this message.", slotId);
    }
}
Free the given slot from the given allocation . If the slot is still allocated by the given allocation id then the slot will be marked as free and will be subject to new slot requests .
15,250
/**
 * Finds a matching slot request for a given resource profile. A request matches if
 * it is not yet assigned and the profile can satisfy its resource requirements.
 *
 * @param slotResourceProfile the resources offered by the slot
 * @return the first matching unassigned pending request, or null if none matches
 */
protected PendingSlotRequest findMatchingRequest(ResourceProfile slotResourceProfile) {
    // findFirst preserves the encounter order of the pending request map.
    return pendingSlotRequests.values().stream()
        .filter(request -> !request.isAssigned())
        .filter(request -> slotResourceProfile.isMatching(request.getResourceProfile()))
        .findFirst()
        .orElse(null);
}
Finds a matching slot request for a given resource profile . If there is no such request the method returns null .
15,251
/**
 * Finds a matching slot for a given resource profile. A matching slot has at least
 * as many resources available as the given resource profile. If a match is found it
 * is REMOVED from the free-slot map before being returned, so the caller takes
 * ownership of it.
 *
 * @param requestResourceProfile the required resources
 * @return a free slot matching the profile (removed from freeSlots), or null
 */
protected TaskManagerSlot findMatchingSlot(ResourceProfile requestResourceProfile) {
    Iterator<Map.Entry<SlotID, TaskManagerSlot>> iterator = freeSlots.entrySet().iterator();
    while (iterator.hasNext()) {
        TaskManagerSlot taskManagerSlot = iterator.next().getValue();
        // Invariant: everything in freeSlots must be in state FREE.
        Preconditions.checkState(
            taskManagerSlot.getState() == TaskManagerSlot.State.FREE,
            "TaskManagerSlot %s is not in state FREE but %s.",
            taskManagerSlot.getSlotId(), taskManagerSlot.getState());
        if (taskManagerSlot.getResourceProfile().isMatching(requestResourceProfile)) {
            // Remove through the iterator to avoid ConcurrentModificationException.
            iterator.remove();
            return taskManagerSlot;
        }
    }
    return null;
}
Finds a matching slot for a given resource profile . A matching slot has at least as many resources available as the given resource profile . If there is no such slot available then the method returns null .
15,252
/**
 * Registers a slot for the given task manager at the slot manager. The slot is
 * identified by the given slot id. The given resource profile defines the available
 * resources for the slot. The task manager connection can be used to communicate
 * with the task manager.
 *
 * @param slotId identifying the slot on the task manager
 * @param allocationId of the allocation currently deployed in the slot, or null if free
 * @param jobId of the currently deployed allocation, if any
 * @param resourceProfile of the slot
 * @param taskManagerConnection to communicate with the remote task manager
 */
private void registerSlot(
        SlotID slotId,
        AllocationID allocationId,
        JobID jobId,
        ResourceProfile resourceProfile,
        TaskExecutorConnection taskManagerConnection) {
    // A re-registered slot id replaces any stale entry.
    if (slots.containsKey(slotId)) {
        removeSlot(slotId);
    }
    final TaskManagerSlot slot = createAndRegisterTaskManagerSlot(slotId, resourceProfile, taskManagerConnection);
    final PendingTaskManagerSlot pendingTaskManagerSlot;
    if (allocationId == null) {
        // Only a free slot can stand in for a pending (requested but not yet started) TM slot.
        pendingTaskManagerSlot = findExactlyMatchingPendingTaskManagerSlot(resourceProfile);
    } else {
        pendingTaskManagerSlot = null;
    }
    if (pendingTaskManagerSlot == null) {
        updateSlot(slotId, allocationId, jobId);
    } else {
        // The new slot fulfills a pending TM slot; transfer its assigned request, if any.
        pendingSlots.remove(pendingTaskManagerSlot.getTaskManagerSlotId());
        final PendingSlotRequest assignedPendingSlotRequest = pendingTaskManagerSlot.getAssignedPendingSlotRequest();
        if (assignedPendingSlotRequest == null) {
            handleFreeSlot(slot);
        } else {
            assignedPendingSlotRequest.unassignPendingTaskManagerSlot();
            allocateSlot(slot, assignedPendingSlotRequest);
        }
    }
}
Registers a slot for the given task manager at the slot manager . The slot is identified by the given slot id . The given resource profile defines the available resources for the slot . The task manager connection can be used to communicate with the task manager .
15,253
/**
 * Updates a slot with the given allocation id.
 *
 * @param slotId identifying the slot to update
 * @param allocationId of the current allocation, or null if the slot is free
 * @param jobId of the current allocation, or null
 * @return true if the slot was updated; false if the slot id is unknown
 */
private boolean updateSlot(SlotID slotId, AllocationID allocationId, JobID jobId) {
    final TaskManagerSlot slot = slots.get(slotId);
    if (slot == null) {
        LOG.debug("Trying to update unknown slot with slot id {}.", slotId);
        return false;
    }
    final TaskManagerRegistration registration = taskManagerRegistrations.get(slot.getInstanceId());
    if (registration == null) {
        // A registered slot must always belong to a registered task manager.
        throw new IllegalStateException("Trying to update a slot from a TaskManager " +
            slot.getInstanceId() + " which has not been registered.");
    }
    updateSlotState(slot, registration, allocationId, jobId);
    return true;
}
Updates a slot with the given allocation id .
15,254
/**
 * Tries to allocate a slot for the given slot request. If no free slot is available,
 * a free pending task manager slot is used, or new resources are requested from the
 * resource manager.
 *
 * @param pendingSlotRequest the request to serve
 * @throws ResourceManagerException if requesting new resources fails
 */
private void internalRequestSlot(PendingSlotRequest pendingSlotRequest) throws ResourceManagerException {
    final ResourceProfile resourceProfile = pendingSlotRequest.getResourceProfile();
    final TaskManagerSlot matchingSlot = findMatchingSlot(resourceProfile);
    if (matchingSlot != null) {
        allocateSlot(matchingSlot, pendingSlotRequest);
        return;
    }
    // No free slot: try a pending TM slot, requesting new resources if necessary.
    Optional<PendingTaskManagerSlot> pendingSlot = findFreeMatchingPendingTaskManagerSlot(resourceProfile);
    if (!pendingSlot.isPresent()) {
        pendingSlot = allocateResource(resourceProfile);
    }
    pendingSlot.ifPresent(slot -> assignPendingTaskManagerSlot(pendingSlotRequest, slot));
}
Tries to allocate a slot for the given slot request . If there is no slot available the resource manager is informed to allocate more resources and a timeout for the request is registered .
15,255
/**
 * Allocates the given slot for the given slot request. This entails sending a slot
 * request RPC to the owning task manager and handling success/failure of that
 * request asynchronously on the main thread executor.
 *
 * @param taskManagerSlot a FREE slot to allocate
 * @param pendingSlotRequest the request to serve with this slot
 */
private void allocateSlot(TaskManagerSlot taskManagerSlot, PendingSlotRequest pendingSlotRequest) {
    Preconditions.checkState(taskManagerSlot.getState() == TaskManagerSlot.State.FREE);
    TaskExecutorConnection taskExecutorConnection = taskManagerSlot.getTaskManagerConnection();
    TaskExecutorGateway gateway = taskExecutorConnection.getTaskExecutorGateway();
    final CompletableFuture<Acknowledge> completableFuture = new CompletableFuture<>();
    final AllocationID allocationId = pendingSlotRequest.getAllocationId();
    final SlotID slotId = taskManagerSlot.getSlotId();
    final InstanceID instanceID = taskManagerSlot.getInstanceId();
    // Mark the slot as pending for this request before sending the RPC.
    taskManagerSlot.assignPendingSlotRequest(pendingSlotRequest);
    pendingSlotRequest.setRequestFuture(completableFuture);
    // The request is now served by a concrete slot; release any reserved pending TM slot.
    returnPendingTaskManagerSlotIfAssigned(pendingSlotRequest);
    TaskManagerRegistration taskManagerRegistration = taskManagerRegistrations.get(instanceID);
    if (taskManagerRegistration == null) {
        throw new IllegalStateException("Could not find a registered task manager for instance id " + instanceID + '.');
    }
    taskManagerRegistration.markUsed();
    CompletableFuture<Acknowledge> requestFuture = gateway.requestSlot(
        slotId,
        pendingSlotRequest.getJobId(),
        allocationId,
        pendingSlotRequest.getTargetAddress(),
        resourceManagerId,
        taskManagerRequestTimeout);
    // Forward the RPC outcome into the request future.
    requestFuture.whenComplete(
        (Acknowledge acknowledge, Throwable throwable) -> {
            if (acknowledge != null) {
                completableFuture.complete(acknowledge);
            } else {
                completableFuture.completeExceptionally(throwable);
            }
        });
    // React to the outcome on the main thread executor to keep state mutations single-threaded.
    completableFuture.whenCompleteAsync(
        (Acknowledge acknowledge, Throwable throwable) -> {
            try {
                if (acknowledge != null) {
                    // Acknowledged: record the allocation in the slot's state.
                    updateSlot(slotId, allocationId, pendingSlotRequest.getJobId());
                } else {
                    if (throwable instanceof SlotOccupiedException) {
                        SlotOccupiedException exception = (SlotOccupiedException) throwable;
                        // Slot already taken by another allocation: record the actual owner.
                        updateSlot(slotId, exception.getAllocationId(), exception.getJobId());
                    } else {
                        removeSlotRequestFromSlot(slotId, allocationId);
                    }
                    if (!(throwable instanceof CancellationException)) {
                        // Genuine failure: retry the request elsewhere.
                        handleFailedSlotRequest(slotId, allocationId, throwable);
                    } else {
                        LOG.debug("Slot allocation request {} has been cancelled.", allocationId, throwable);
                    }
                }
            } catch (Exception e) {
                LOG.error("Error while completing the slot allocation.", e);
            }
        },
        mainThreadExecutor);
}
Allocates the given slot for the given slot request . This entails sending a registration message to the task manager and treating failures .
15,256
/**
 * Handles a free slot. First tries to find a pending slot request which the slot can
 * fulfill; if there is none, the slot is added to the set of free slots.
 *
 * @param freeSlot a slot in state FREE
 */
private void handleFreeSlot(TaskManagerSlot freeSlot) {
    Preconditions.checkState(freeSlot.getState() == TaskManagerSlot.State.FREE);
    final PendingSlotRequest matchingRequest = findMatchingRequest(freeSlot.getResourceProfile());
    if (matchingRequest == null) {
        // Nobody is waiting for this slot; park it in the free pool.
        freeSlots.put(freeSlot.getSlotId(), freeSlot);
    } else {
        allocateSlot(freeSlot, matchingRequest);
    }
}
Handles a free slot . It first tries to find a pending slot request which can be fulfilled . If there is no such request then it will add the slot to the set of free slots .
15,257
/**
 * Removes the given slot from the slot manager. Any pending request assigned to the
 * slot is rejected, and a fulfilled allocation is reported as failed.
 *
 * @param slotId identifying the slot to remove
 */
private void removeSlot(SlotID slotId) {
    final TaskManagerSlot slot = slots.remove(slotId);
    if (slot == null) {
        LOG.debug("There was no slot registered with slot id {}.", slotId);
        return;
    }
    freeSlots.remove(slotId);
    if (slot.getState() == TaskManagerSlot.State.PENDING) {
        // An in-flight request for this slot can no longer succeed.
        rejectPendingSlotRequest(
            slot.getAssignedSlotRequest(),
            new Exception("The assigned slot " + slot.getSlotId() + " was removed."));
    }
    final AllocationID oldAllocationId = slot.getAllocationId();
    if (oldAllocationId != null) {
        fulfilledSlotRequests.remove(oldAllocationId);
        resourceActions.notifyAllocationFailure(
            slot.getJobId(),
            oldAllocationId,
            new FlinkException("The assigned slot " + slot.getSlotId() + " was removed."));
    }
}
Removes the given slot from the slot manager .
15,258
/**
 * Removes a pending slot request identified by the given allocation id from a slot
 * identified by the given slot id. Only acts if the slot is in state PENDING and is
 * assigned to exactly that request; otherwise the call is ignored with a debug log.
 *
 * @param slotId identifying the slot
 * @param allocationId identifying the pending request to detach from the slot
 */
private void removeSlotRequestFromSlot(SlotID slotId, AllocationID allocationId) {
    TaskManagerSlot taskManagerSlot = slots.get(slotId);
    if (null != taskManagerSlot) {
        if (taskManagerSlot.getState() == TaskManagerSlot.State.PENDING &&
                Objects.equals(allocationId, taskManagerSlot.getAssignedSlotRequest().getAllocationId())) {
            TaskManagerRegistration taskManagerRegistration = taskManagerRegistrations.get(taskManagerSlot.getInstanceId());
            if (taskManagerRegistration == null) {
                // Fixed the previously garbled exception message.
                throw new IllegalStateException("Trying to remove a slot request from slot " + slotId +
                    " for which no TaskManager with instance id " + taskManagerSlot.getInstanceId() +
                    " is registered.");
            }
            // Detach the request and transition the slot back to the free state.
            taskManagerSlot.clearPendingSlotRequest();
            updateSlotState(taskManagerSlot, taskManagerRegistration, null, null);
        } else {
            LOG.debug("Ignore slot request removal for slot {}.", slotId);
        }
    } else {
        LOG.debug("There was no slot with {} registered. Probably this slot has been already freed.", slotId);
    }
}
Removes a pending slot request identified by the given allocation id from a slot identified by the given slot id .
15,259
/**
 * Handles a failed slot request. The slot manager tries to find a new slot fulfilling
 * the resource requirements of the failed request; if that also fails, the job is
 * notified about the allocation failure.
 *
 * @param slotId of the slot on which the request failed
 * @param allocationId identifying the failed request
 * @param cause of the failure
 */
private void handleFailedSlotRequest(SlotID slotId, AllocationID allocationId, Throwable cause) {
    PendingSlotRequest pendingSlotRequest = pendingSlotRequests.get(allocationId);
    LOG.debug("Slot request with allocation id {} failed for slot {}.", allocationId, slotId, cause);
    if (null != pendingSlotRequest) {
        // Clear the stale future before retrying the request on another slot.
        pendingSlotRequest.setRequestFuture(null);
        try {
            internalRequestSlot(pendingSlotRequest);
        } catch (ResourceManagerException e) {
            pendingSlotRequests.remove(allocationId);
            resourceActions.notifyAllocationFailure(pendingSlotRequest.getJobId(), allocationId, e);
        }
    } else {
        // Fixed grammar of the log message ("was not pending" -> "was no pending").
        LOG.debug("There was no pending slot request with allocation id {}. Probably the request has been fulfilled or cancelled.", allocationId);
    }
}
Handles a failed slot request . The slot manager tries to find a new slot fulfilling the resource requirements for the failed slot request .
15,260
/**
 * Cancels the given slot request. Returns any reserved pending task manager slot and
 * cancels an in-flight request future, if present.
 *
 * @param pendingSlotRequest the request to cancel
 */
private void cancelPendingSlotRequest(PendingSlotRequest pendingSlotRequest) {
    final CompletableFuture<Acknowledge> requestFuture = pendingSlotRequest.getRequestFuture();
    returnPendingTaskManagerSlotIfAssigned(pendingSlotRequest);
    if (requestFuture != null) {
        // Do not interrupt; the RPC layer handles cancellation cooperatively.
        requestFuture.cancel(false);
    }
}
Cancels the given slot request .
15,261
/**
 * Returns a version-specific Kafka consumer with the start position configured
 * according to the configured startup mode.
 *
 * @param topic to consume from
 * @param properties for the Kafka consumer
 * @param deserializationSchema to turn Kafka records into rows
 * @return the configured consumer
 */
protected FlinkKafkaConsumerBase<Row> getKafkaConsumer(String topic, Properties properties, DeserializationSchema<Row> deserializationSchema) {
    final FlinkKafkaConsumerBase<Row> consumer = createKafkaConsumer(topic, properties, deserializationSchema);
    // Translate the configured startup mode into the consumer's start position.
    switch (startupMode) {
        case EARLIEST:
            consumer.setStartFromEarliest();
            break;
        case LATEST:
            consumer.setStartFromLatest();
            break;
        case GROUP_OFFSETS:
            consumer.setStartFromGroupOffsets();
            break;
        case SPECIFIC_OFFSETS:
            consumer.setStartFromSpecificOffsets(specificStartupOffsets);
            break;
    }
    return consumer;
}
Returns a version - specific Kafka consumer with the start position configured .
15,262
/**
 * This method is called to trigger a checkpoint asynchronously by the checkpoint
 * coordinator. This base implementation does not support the operation.
 *
 * @throws UnsupportedOperationException always, naming the concrete class
 */
public boolean triggerCheckpoint(CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) throws Exception {
    final String message = String.format("triggerCheckpoint not supported by %s", this.getClass().getName());
    throw new UnsupportedOperationException(message);
}
This method is called to trigger a checkpoint asynchronously by the checkpoint coordinator .
15,263
/**
 * This method is called when a checkpoint is triggered as a result of receiving
 * checkpoint barriers on all input streams. This base implementation does not
 * support the operation.
 *
 * @throws UnsupportedOperationException always, naming the concrete class
 */
public void triggerCheckpointOnBarrier(CheckpointMetaData checkpointMetaData, CheckpointOptions checkpointOptions, CheckpointMetrics checkpointMetrics) throws Exception {
    final String message = String.format("triggerCheckpointOnBarrier not supported by %s", this.getClass().getName());
    throw new UnsupportedOperationException(message);
}
This method is called when a checkpoint is triggered as a result of receiving checkpoint barriers on all input streams .
15,264
/**
 * Gets all accumulators produced by the job, as a map from accumulator name to
 * accumulator value.
 *
 * @return map of accumulator name to unwrapped value
 */
public Map<String, Object> getAllAccumulatorResults() {
    return accumulatorResults
        .entrySet()
        .stream()
        .collect(Collectors.toMap(
            entry -> entry.getKey(),
            entry -> entry.getValue().getUnchecked()));
}
Gets all accumulators produced by the job . The map contains the accumulators as mappings from the accumulator name to the accumulator value .
15,265
/**
 * Gets the accumulator with the given name as an integer.
 *
 * @param accumulatorName name of the accumulator
 * @return the integer value, or null if the accumulator is unknown or has no value
 * @throws ClassCastException if the accumulator value is not an Integer
 */
public Integer getIntCounterResult(String accumulatorName) {
    // Guard against unknown accumulator names: previously this threw a
    // NullPointerException when calling getUnchecked() on a missing map entry.
    if (!this.accumulatorResults.containsKey(accumulatorName)) {
        return null;
    }
    Object result = this.accumulatorResults.get(accumulatorName).getUnchecked();
    if (result == null) {
        return null;
    }
    if (!(result instanceof Integer)) {
        throw new ClassCastException("Requested result of the accumulator '" + accumulatorName +
            "' should be Integer but has type " + result.getClass());
    }
    return (Integer) result;
}
Gets the accumulator with the given name as an integer .
15,266
/**
 * Allocates a new buffer and writes the frame header (frame length, magic number,
 * message id) for the frame decoder.
 *
 * @param allocator to allocate the (direct) buffer from
 * @param id of the message type
 * @param messageHeaderLength length of the message-specific header
 * @param contentLength length of the content, or -1 if unknown up front
 * @param allocateForContent whether the buffer should also hold the content
 * @return buffer with the frame header already written
 */
private static ByteBuf allocateBuffer(
        ByteBufAllocator allocator,
        byte id,
        int messageHeaderLength,
        int contentLength,
        boolean allocateForContent) {
    checkArgument(contentLength <= Integer.MAX_VALUE - FRAME_HEADER_LENGTH);
    final ByteBuf buffer;
    if (!allocateForContent) {
        buffer = allocator.directBuffer(FRAME_HEADER_LENGTH + messageHeaderLength);
    } else if (contentLength != -1) {
        buffer = allocator.directBuffer(FRAME_HEADER_LENGTH + messageHeaderLength + contentLength);
    } else {
        // Content length unknown: let the allocator choose a default capacity.
        buffer = allocator.directBuffer();
    }
    // NOTE(review): when contentLength is -1 the frame length written here is one less
    // than the header size; presumably it is patched once the real length is known — TODO confirm.
    buffer.writeInt(FRAME_HEADER_LENGTH + messageHeaderLength + contentLength);
    buffer.writeInt(MAGIC_NUMBER);
    buffer.writeByte(id);
    return buffer;
}
Allocates a new buffer and adds some header information for the frame decoder .
15,267
/**
 * Sets the transaction timeout. Setting only the transaction timeout has no effect
 * in itself; related options must be configured as well.
 *
 * @param transactionTimeout the timeout, must be non-negative
 * @return this sink function, for fluent chaining
 */
protected TwoPhaseCommitSinkFunction<IN, TXN, CONTEXT> setTransactionTimeout(long transactionTimeout) {
    checkArgument(transactionTimeout >= 0, "transactionTimeout must not be negative");
    this.transactionTimeout = transactionTimeout;
    return this;
}
Sets the transaction timeout . Setting only the transaction timeout has no effect in itself .
15,268
/**
 * Finalizes and returns the MurmurHash output, applying the standard avalanche
 * (fmix) steps to the accumulated hash state.
 *
 * @return the finalized hash value
 */
public int hash() {
    int h = hash ^ (4 * count); // incorporate the total length (4 bytes per int)
    h ^= h >>> 16;
    h *= 0x85ebca6b;
    h ^= h >>> 13;
    h *= 0xc2b2ae35;
    h ^= h >>> 16;
    hash = h;
    return h;
}
Finalize and return the MurmurHash output .
15,269
/**
 * Gets the port keys representing the TaskManager's configured endpoints. This
 * includes mandatory TM endpoints such as data and rpc, as well as optionally
 * configured endpoints (e.g. for a metrics reporter).
 *
 * @param config to read the optional port assignments from
 * @return unmodifiable, insertion-ordered set of port keys
 */
static Set<String> extractPortKeys(Configuration config) {
    final LinkedHashSet<String> tmPortKeys = new LinkedHashSet<>(TM_PORT_KEYS);
    final String portKeys = config.getString(PORT_ASSIGNMENTS);
    if (portKeys != null) {
        Arrays.stream(portKeys.split(","))
            .map(String::trim)
            // Fixed: the log statement had a "{}" placeholder but no argument.
            .peek(key -> LOG.debug("Adding port key {} to mesos request", key))
            .forEach(tmPortKeys::add);
    }
    return Collections.unmodifiableSet(tmPortKeys);
}
Gets the port keys representing the TaskManager's configured endpoints. This includes mandatory TM endpoints such as data and rpc, as well as optionally configured endpoints for services such as the Prometheus reporter.
15,270
/**
 * Configures an artifact server to serve the artifacts associated with a container
 * specification.
 *
 * @param server the artifact server to configure
 * @param container the container specification whose artifacts should be served
 * @throws IOException if registering an artifact path fails
 */
static void configureArtifactServer(MesosArtifactServer server, ContainerSpecification container) throws IOException {
    // Serve every artifact under its destination path.
    for (ContainerSpecification.Artifact containerArtifact : container.getArtifacts()) {
        server.addPath(containerArtifact.source, containerArtifact.dest);
    }
}
Configures an artifact server to serve the artifacts associated with a container specification .
15,271
/**
 * Adds a range of offsets to the graph configuration.
 *
 * @param offset first offset of the range; must be at least {@code MINIMUM_OFFSET}
 * @param length number of offsets in the range; must fit within the vertex count
 * @return this graph, for fluent chaining
 */
public CirculantGraph addRange(long offset, long length) {
    Preconditions.checkArgument(offset >= MINIMUM_OFFSET,
        "Range offset must be at least " + MINIMUM_OFFSET);
    Preconditions.checkArgument(length <= vertexCount - offset,
        "Range length must not be greater than the vertex count minus the range offset.");
    offsetRanges.add(new OffsetRange(offset, length));
    return this;
}
Adds a range of offsets to the graph. Configuring at least one range of offsets is required.
15,272
/**
 * Returns a facade of the client that uses the specified namespace, ensuring that
 * all nodes in the path exist before the facade is handed out.
 *
 * @param client the Curator client; must not be null
 * @param path the path to ensure and to append to the client's namespace
 * @return a namespaced facade of the client
 * @throws Exception if ensuring the path fails
 */
public static CuratorFramework useNamespaceAndEnsurePath(final CuratorFramework client, final String path) throws Exception {
    Preconditions.checkNotNull(client, "client must not be null");
    Preconditions.checkNotNull(path, "path must not be null");
    // Make sure the complete path exists before handing out the namespaced facade.
    client.newNamespaceAwareEnsurePath(path).ensure(client.getZookeeperClient());
    final String namespacedPath = generateZookeeperPath(client.getNamespace(), path);
    return client.usingNamespace(namespacedPath);
}
Returns a facade of the client that uses the specified namespace and ensures that all nodes in the path exist .
15,273
/**
 * Extracts the full template type information from the given type's template
 * parameter at the given position.
 *
 * @param type a parameterized type
 * @param templatePosition index of the type argument to extract
 * @return the full type information of the type argument
 * @throws IllegalArgumentException if the type is not parameterized
 */
public static FullTypeInfo getFullTemplateType(Type type, int templatePosition) {
    if (type instanceof ParameterizedType) {
        return getFullTemplateType(((ParameterizedType) type).getActualTypeArguments()[templatePosition]);
    } else {
        // Fixed: previously thrown without any message, which made failures hard to diagnose.
        throw new IllegalArgumentException("Expected a parameterized type, but got: " + type);
    }
}
Extract the full template type information from the given type s template parameter at the given position .
15,274
/**
 * Extracts the full type information from the given type, recursing into type
 * arguments for parameterized types.
 *
 * @param type the type to inspect
 * @return the full type information; template infos are null for raw classes
 */
public static FullTypeInfo getFullTemplateType(Type type) {
    if (!(type instanceof ParameterizedType)) {
        // Plain class: no template arguments to recurse into.
        return new FullTypeInfo((Class<?>) type, null);
    }
    final ParameterizedType parameterizedType = (ParameterizedType) type;
    final Type[] typeArguments = parameterizedType.getActualTypeArguments();
    final FullTypeInfo[] templateTypeInfos = new FullTypeInfo[typeArguments.length];
    for (int i = 0; i < typeArguments.length; i++) {
        templateTypeInfos[i] = getFullTemplateType(typeArguments[i]);
    }
    return new FullTypeInfo((Class<?>) parameterizedType.getRawType(), templateTypeInfos);
}
Extract the full type information from the given type .
15,275
/**
 * Translates a SQL LIKE pattern to a Java regex pattern. '_' becomes '.', '%'
 * becomes a DOTALL "match anything" group, and the escape character allows
 * matching the wildcards literally.
 *
 * @param sqlPattern the LIKE pattern
 * @param escapeChar the escape character of the pattern
 * @return the equivalent Java regex pattern
 */
static String sqlToRegexLike(String sqlPattern, char escapeChar) {
    int i;
    final int len = sqlPattern.length();
    final StringBuilder javaPattern = new StringBuilder(len + len);
    for (i = 0; i < len; i++) {
        char c = sqlPattern.charAt(i);
        // Characters special in Java regex are escaped before any interpretation.
        if (JAVA_REGEX_SPECIALS.indexOf(c) >= 0) {
            javaPattern.append('\\');
        }
        if (c == escapeChar) {
            // A trailing escape character is invalid.
            if (i == (sqlPattern.length() - 1)) {
                throw invalidEscapeSequence(sqlPattern, i);
            }
            char nextChar = sqlPattern.charAt(i + 1);
            if ((nextChar == '_') || (nextChar == '%') || (nextChar == escapeChar)) {
                // Escaped wildcard or escape char is taken literally.
                javaPattern.append(nextChar);
                i++;
            } else {
                throw invalidEscapeSequence(sqlPattern, i);
            }
        } else if (c == '_') {
            javaPattern.append('.');
        } else if (c == '%') {
            // '%' matches any sequence, including newlines (DOTALL group).
            javaPattern.append("(?s:.*)");
        } else {
            javaPattern.append(c);
        }
    }
    return javaPattern.toString();
}
Translates a SQL LIKE pattern to Java regex pattern .
15,276
/**
 * Translates a SQL SIMILAR pattern to a Java regex pattern, with an optional
 * escape string. The escape string, if given, must be a single character; a null
 * escape string means "no escape character".
 *
 * @param sqlPattern the SIMILAR pattern
 * @param escapeStr one-character escape string, or null
 * @return the equivalent Java regex pattern
 */
static String sqlToRegexSimilar(String sqlPattern, CharSequence escapeStr) {
    if (escapeStr == null) {
        // No escape character configured.
        return sqlToRegexSimilar(sqlPattern, (char) 0);
    }
    if (escapeStr.length() != 1) {
        throw invalidEscapeCharacter(escapeStr.toString());
    }
    return sqlToRegexSimilar(sqlPattern, escapeStr.charAt(0));
}
Translates a SQL SIMILAR pattern to Java regex pattern with optional escape string .
15,277
/**
 * Translates a SQL SIMILAR pattern to a Java regex pattern. Handles escape
 * sequences, '_' and '%' wildcards, character enumerations ([...]), and
 * characters that are special in Java regex but literal in SIMILAR.
 *
 * @param sqlPattern the SIMILAR pattern
 * @param escapeChar the escape character ((char) 0 for none)
 * @return the equivalent Java regex pattern
 */
static String sqlToRegexSimilar(String sqlPattern, char escapeChar) {
    similarEscapeRuleChecking(sqlPattern, escapeChar);
    boolean insideCharacterEnumeration = false;
    final StringBuilder javaPattern = new StringBuilder(sqlPattern.length() * 2);
    final int len = sqlPattern.length();
    for (int i = 0; i < len; i++) {
        char c = sqlPattern.charAt(i);
        if (c == escapeChar) {
            // A trailing escape character is invalid.
            if (i == (len - 1)) {
                throw invalidEscapeSequence(sqlPattern, i);
            }
            char nextChar = sqlPattern.charAt(i + 1);
            if (SQL_SIMILAR_SPECIALS.indexOf(nextChar) >= 0) {
                // Escaped SIMILAR special: emit it literally (regex-escaped if needed).
                if (JAVA_REGEX_SPECIALS.indexOf(nextChar) >= 0) {
                    javaPattern.append('\\');
                }
                javaPattern.append(nextChar);
            } else if (nextChar == escapeChar) {
                javaPattern.append(nextChar);
            } else {
                throw invalidEscapeSequence(sqlPattern, i);
            }
            i++; // the escaped character has been consumed as well
        } else {
            switch (c) {
                case '_':
                    javaPattern.append('.');
                    break;
                case '%':
                    // '%' matches any sequence, including newlines (DOTALL group).
                    javaPattern.append("(?s:.*)");
                    break;
                case '[':
                    javaPattern.append('[');
                    insideCharacterEnumeration = true;
                    // Delegates the enumeration body and returns the new scan position.
                    i = sqlSimilarRewriteCharEnumeration(sqlPattern, javaPattern, i, escapeChar);
                    break;
                case ']':
                    if (!insideCharacterEnumeration) {
                        throw invalidRegularExpression(sqlPattern, i);
                    }
                    insideCharacterEnumeration = false;
                    javaPattern.append(']');
                    break;
                case '\\':
                    javaPattern.append("\\\\");
                    break;
                case '$':
                    // '$' is an anchor in Java regex but a literal in SQL SIMILAR.
                    javaPattern.append("\\$");
                    break;
                default:
                    javaPattern.append(c);
            }
        }
    }
    // An unclosed character enumeration is invalid.
    if (insideCharacterEnumeration) {
        throw invalidRegularExpression(sqlPattern, len);
    }
    return javaPattern.toString();
}
Translates SQL SIMILAR pattern to Java regex pattern .
15,278
/**
 * Transforms this configuration group into an HTML formatted table.
 *
 * @param options the options to render, one table row each
 * @return the HTML table as a string
 */
private static String toHtmlTable(final List<OptionWithMetaInfo> options) {
    StringBuilder htmlTable = new StringBuilder();
    htmlTable.append("<table class=\"table table-bordered\">\n");
    htmlTable.append(" <thead>\n");
    htmlTable.append(" <tr>\n");
    htmlTable.append(" <th class=\"text-left\" style=\"width: 20%\">Key</th>\n");
    htmlTable.append(" <th class=\"text-left\" style=\"width: 15%\">Default</th>\n");
    htmlTable.append(" <th class=\"text-left\" style=\"width: 65%\">Description</th>\n");
    htmlTable.append(" </tr>\n");
    htmlTable.append(" </thead>\n");
    htmlTable.append(" <tbody>\n");
    // One row per option.
    for (OptionWithMetaInfo option : options) {
        htmlTable.append(toHtmlString(option));
    }
    htmlTable.append(" </tbody>\n");
    htmlTable.append("</table>\n");
    return htmlTable.toString();
}
Transforms this configuration group into an HTML-formatted table. Options are sorted alphabetically by key.
15,279
/**
 * Transforms a single option into an HTML table row with key, default value, and
 * description cells. Key and default value are HTML-escaped.
 *
 * @param optionWithMetaInfo the option to render
 * @return the HTML table row as a string
 */
private static String toHtmlString(final OptionWithMetaInfo optionWithMetaInfo) {
    ConfigOption<?> option = optionWithMetaInfo.option;
    String defaultValue = stringifyDefault(optionWithMetaInfo);
    return "" +
        " <tr>\n" +
        " <td><h5>" + escapeCharacters(option.key()) + "</h5></td>\n" +
        " <td style=\"word-wrap: break-word;\">" + escapeCharacters(addWordBreakOpportunities(defaultValue)) + "</td>\n" +
        " <td>" + formatter.format(option.description()) + "</td>\n" +
        " </tr>\n";
}
Transforms option to table row .
15,280
/**
 * Disposes the backend. Should only be called by one thread and only after all
 * accesses to the DB happened, because it closes the RocksDB instance and all
 * associated native handles. The call is idempotent.
 */
public void dispose() {
    if (this.disposed) {
        return;
    }
    super.dispose();
    // Block further native accesses before tearing down native resources.
    rocksDBResourceGuard.close();
    if (db != null) {
        IOUtils.closeQuietly(writeBatchWrapper);
        if (nativeMetricMonitor != null) {
            nativeMetricMonitor.close();
        }
        // Column family options must be closed after their handles; collect them first.
        List<ColumnFamilyOptions> columnFamilyOptions = new ArrayList<>(kvStateInformation.values().size());
        RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(columnFamilyOptions, defaultColumnFamily);
        IOUtils.closeQuietly(defaultColumnFamily);
        for (RocksDbKvStateInfo kvStateInfo : kvStateInformation.values()) {
            RocksDBOperationUtils.addColumnFamilyOptionsToCloseLater(columnFamilyOptions, kvStateInfo.columnFamilyHandle);
            IOUtils.closeQuietly(kvStateInfo.columnFamilyHandle);
        }
        // Close order matters: handles first, then the DB, then the options objects.
        IOUtils.closeQuietly(db);
        columnFamilyOptions.forEach(IOUtils::closeQuietly);
        IOUtils.closeQuietly(dbOptions);
        IOUtils.closeQuietly(writeOptions);
        ttlCompactFiltersManager.disposeAndClearRegisteredCompactionFactories();
        kvStateInformation.clear();
        cleanInstanceBasePath();
    }
    this.disposed = true;
}
Should only be called by one thread and only after all accesses to the DB happened .
15,281
/**
 * Checks whether the substring starting at the specified index starts with the
 * given prefix string.
 *
 * @param prefix the prefix to look for
 * @param startIndex position in this string at which to start the comparison
 * @return true if the prefix matches at the given position; false otherwise
 */
public boolean startsWith(CharSequence prefix, int startIndex) {
    final char[] chars = this.value;
    final int prefixLen = prefix.length();
    // Reject positions where the prefix cannot fully fit.
    if (startIndex < 0 || startIndex > this.len - prefixLen) {
        return false;
    }
    for (int i = 0; i < prefixLen; i++) {
        if (chars[startIndex + i] != prefix.charAt(i)) {
            return false;
        }
    }
    return true;
}
Checks whether the substring starting at the specified index starts with the given prefix string .
15,282
/**
 * Grows the internal character buffer to at least the given size, retaining the
 * existing content. No-op if the buffer is already large enough.
 *
 * @param size required minimum capacity
 */
private void grow(int size) {
    if (this.value.length >= size) {
        return;
    }
    // Grow by at least 1.5x to amortize repeated growth.
    final char[] newBuffer = new char[Math.max(this.value.length * 3 / 2, size)];
    System.arraycopy(this.value, 0, newBuffer, 0, this.len);
    this.value = newBuffer;
}
Grows the internal buffer to at least the given size, retaining the existing content.
15,283
/**
 * Reads table statistics from the descriptor's properties.
 *
 * @return the table statistics if a row count is present, otherwise empty
 */
public Optional<TableStats> getTableStats() {
    DescriptorProperties normalizedProps = new DescriptorProperties();
    // NOTE(review): this copies the freshly created (empty) normalizedProps into itself,
    // so the row-count lookup below can never find anything. This looks like a bug —
    // presumably the descriptor's own properties should be put here instead. TODO confirm.
    normalizedProps.putProperties(normalizedProps);
    Optional<Long> rowCount = normalizedProps.getOptionalLong(STATISTICS_ROW_COUNT);
    if (rowCount.isPresent()) {
        Map<String, ColumnStats> columnStats = readColumnStats(normalizedProps, STATISTICS_COLUMNS);
        return Optional.of(new TableStats(rowCount.get(), columnStats));
    } else {
        return Optional.empty();
    }
}
Reads table statistics from the descriptors properties .
15,284
/**
 * Closes this OutputView, closing the underlying writer, and returns the number of
 * bytes written into the last memory segment.
 *
 * @return the position in the last segment, or -1 if the writer was already closed
 * @throws IOException if writing the last block or closing the writer fails
 */
public int close() throws IOException {
    if (writer.isClosed()) {
        return -1;
    }
    final int positionInLastSegment = getCurrentPositionInSegment();
    // Flush the current (partial) segment before shutting down the writer.
    writer.writeBlock(getCurrentSegment());
    clear();
    writer.getReturnQueue().clear();
    this.writer.close();
    return positionInLastSegment;
}
Closes this OutputView, closing the underlying writer, and returns the number of bytes in the last memory segment.
15,285
/**
 * Adds a data set as a broadcast set to the compute function.
 *
 * @param name under which the broadcast set is registered
 * @param data the data set to broadcast
 */
public void addBroadcastSet(String name, DataSet<?> data) {
    // Broadcast sets are stored as (name, data set) pairs.
    this.bcVars.add(new Tuple2<>(name, data));
}
Adds a data set as a broadcast set to the compute function .
15,286
/**
 * Translates the given program to an OptimizedPlan in which all nodes have their
 * local strategy assigned and all channels have a shipping strategy assigned.
 *
 * @param program the program to compile
 * @return the optimized plan
 * @throws CompilerException if the compilation fails
 */
public OptimizedPlan compile(Plan program) throws CompilerException {
    // Determine the post pass for the program, then run the actual compilation with it.
    return compile(program, getPostPassFromPlan(program));
}
Translates the given program to an OptimizedPlan where all nodes have their local strategy assigned and all channels have a shipping strategy assigned .
15,287
/**
 * Registers a buffer pool with this result partition. The pool must guarantee at
 * least one buffer per subpartition, and may only be registered once.
 *
 * @param bufferPool the pool to register; must not be null
 */
public void registerBufferPool(BufferPool bufferPool) {
    checkArgument(
        bufferPool.getNumberOfRequiredMemorySegments() >= getNumberOfSubpartitions(),
        "Bug in result partition setup logic: Buffer pool has not enough guaranteed buffers for this result partition.");
    checkState(
        this.bufferPool == null,
        "Bug in result partition setup logic: Already registered buffer pool.");
    this.bufferPool = checkNotNull(bufferPool);
}
Registers a buffer pool with this result partition .
15,288
/**
 * Finishes the result partition by finishing all subpartitions. Only when all of
 * them complete successfully is the partition marked finished and are pipelined
 * consumers notified.
 *
 * @throws IOException if finishing a subpartition fails
 */
public void finish() throws IOException {
    checkInProduceState();
    for (ResultSubpartition subpartition : subpartitions) {
        subpartition.finish();
    }
    // Reached only if no subpartition threw — equivalent to the previous
    // success-flag/finally construction.
    isFinished = true;
    notifyPipelinedConsumers();
}
Finishes the result partition .
15,289
/**
 * Releases the result partition and all of its subpartitions. Only the first caller
 * performs the release; subsequent calls are no-ops.
 *
 * @param cause of the release, or null if released regularly
 */
public void release(Throwable cause) {
    if (!isReleased.compareAndSet(false, true)) {
        return; // already released by another caller
    }
    LOG.debug("{}: Releasing {}.", owningTaskName, this);
    if (cause != null) {
        this.cause = cause;
    }
    // Release each subpartition independently; one failure must not block the rest.
    for (ResultSubpartition subpartition : subpartitions) {
        try {
            subpartition.release();
        } catch (Throwable t) {
            LOG.error("Error during release of result subpartition: " + t.getMessage(), t);
        }
    }
}
Releases the result partition .
15,290
/**
 * Returns a read view for the requested subpartition. The partition must still be
 * pinned (positive reference count) and not released.
 *
 * @param index of the subpartition
 * @param availabilityListener notified when data becomes available
 * @return the created read view
 * @throws IOException if creating the read view fails
 */
public ResultSubpartitionView createSubpartitionView(int index, BufferAvailabilityListener availabilityListener) throws IOException {
    final int refCnt = pendingReferences.get();
    checkState(refCnt != -1, "Partition released.");
    checkState(refCnt > 0, "Partition not pinned.");
    checkElementIndex(index, subpartitions.length, "Subpartition not found.");
    final ResultSubpartitionView view = subpartitions[index].createReadView(availabilityListener);
    LOG.debug("Created {}", view);
    return view;
}
Returns the requested subpartition .
15,291
/**
 * Asks the subpartitions, in order, to release buffers until at least
 * {@code toRelease} buffers have been freed or all subpartitions have been asked.
 *
 * @param toRelease number of buffers to release; must be positive
 * @throws IOException if releasing memory in a subpartition fails
 */
public void releaseMemory(int toRelease) throws IOException {
    checkArgument(toRelease > 0);
    int remaining = toRelease;
    // Stop as soon as the requested amount has been released.
    for (int i = 0; i < subpartitions.length && remaining > 0; i++) {
        remaining -= subpartitions[i].releaseMemory();
    }
}
Releases buffers held by this result partition .
15,292
/**
 * Pins the result partition by atomically increasing the pending reference count
 * by the number of subpartitions (one reference per expected consumer).
 *
 * @throws IllegalStateException if the partition has already been released
 *         (indicated by a negative reference count)
 */
void pin() {
    for (;;) {
        final int refCnt = pendingReferences.get();
        if (refCnt < 0) {
            throw new IllegalStateException("Released.");
        }
        if (pendingReferences.compareAndSet(refCnt, refCnt + subpartitions.length)) {
            return;
        }
        // CAS lost a race with a concurrent update; re-read and retry.
    }
}
Pins the result partition .
15,293
/**
 * Called when a consumer releases one subpartition. Decrements the pending
 * reference count and, once it reaches zero, informs the partition manager that
 * this partition has been fully consumed.
 *
 * @param subpartitionIndex index of the released subpartition (logging only)
 * @throws IllegalStateException if more releases arrive than references exist
 */
void onConsumedSubpartition(int subpartitionIndex) {
    // Release notifications after the partition itself was released are ignored.
    if (isReleased.get()) {
        return;
    }
    final int refCnt = pendingReferences.decrementAndGet();
    if (refCnt < 0) {
        throw new IllegalStateException("All references released.");
    } else if (refCnt == 0) {
        partitionManager.onConsumedPartition(this);
    }
    LOG.debug("{}: Received release notification for subpartition {} (reference count now at: {}).",
            this, subpartitionIndex, pendingReferences);
}
Notification when a subpartition is released .
15,294
/**
 * Notifies consumers of this pipelined result partition that data is available.
 *
 * <p>At most one notification is sent, and only when notifications are enabled
 * and the partition type is pipelined.
 */
private void notifyPipelinedConsumers() {
    // Guard clause: bail out unless all preconditions for notifying hold.
    if (!sendScheduleOrUpdateConsumersMessage
            || hasNotifiedPipelinedConsumers
            || !partitionType.isPipelined()) {
        return;
    }
    partitionConsumableNotifier.notifyPartitionConsumable(jobId, partitionId, taskActions);
    hasNotifiedPipelinedConsumers = true;
}
Notifies pipelined consumers of this result partition once .
15,295
/**
 * Sets the RMat parameters A, B, and C used to recursively subdivide the
 * adjacency matrix. Each must be non-negative and their sum must not exceed one.
 *
 * @param a probability for the upper-left quadrant
 * @param b probability for the upper-right quadrant
 * @param c probability for the lower-left quadrant
 * @return this, for call chaining
 */
public RMatGraph<T> setConstants(float a, float b, float c) {
    final boolean validParameters = a >= 0.0f && b >= 0.0f && c >= 0.0f && a + b + c <= 1.0f;
    Preconditions.checkArgument(validParameters,
            "RMat parameters A, B, and C must be non-negative and sum to less than or equal to one");
    this.a = a;
    this.b = b;
    this.c = c;
    return this;
}
Sets the parameters A, B, and C used for recursively subdividing the adjacency matrix.
15,296
/**
 * Enables and configures noise. When enabled, the parameters A, B, and C are
 * randomly perturbed (then renormalized) by a fraction of the noise factor
 * during the computation of each bit of each edge.
 *
 * @param noiseEnabled whether noise is applied during edge generation
 * @param noise noise factor in the range [0.0, 2.0]
 * @return this, for call chaining
 */
public RMatGraph<T> setNoise(boolean noiseEnabled, float noise) {
    final boolean validNoise = noise >= 0.0f && noise <= 2.0f;
    Preconditions.checkArgument(validNoise,
            "RMat parameter noise must be non-negative and less than or equal to 2.0");
    this.noiseEnabled = noiseEnabled;
    this.noise = noise;
    return this;
}
Enable and configure noise . Each edge is generated independently but when noise is enabled the parameters A B and C are randomly increased or decreased then normalized by a fraction of the noise factor during the computation of each bit .
15,297
/**
 * Closes the iteration by declaring {@code feedbackStream} as the stream that is
 * fed back to the iteration head.
 *
 * <p>The feedback stream must transitively originate from this iteration;
 * otherwise the iteration cannot be closed with it.
 *
 * @param feedbackStream the stream fed back to the start of the iteration
 * @return the feedback stream, for call chaining
 * @throws UnsupportedOperationException if the feedback stream does not
 *         originate from this iteration
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public DataStream<T> closeWith(DataStream<T> feedbackStream) {
    final StreamTransformation<?> feedback = feedbackStream.getTransformation();
    if (!feedback.getTransitivePredecessors().contains(this.transformation)) {
        throw new UnsupportedOperationException(
                "Cannot close an iteration with a feedback DataStream that does not originate from said iteration.");
    }
    ((FeedbackTransformation) getTransformation()).addFeedbackEdge(feedback);
    return feedbackStream;
}
Closes the iteration . This method defines the end of the iterative program part that will be fed back to the start of the iteration .
15,298
/**
 * Opens the interactive CLI shell: prints the welcome message, then reads,
 * parses, and executes commands line by line until the shell is stopped or the
 * input ends.
 */
public void open() {
    isRunning = true;
    terminal.writer().append(CliStrings.MESSAGE_WELCOME);
    while (isRunning) {
        terminal.writer().append("\n");
        terminal.flush();
        final String line;
        try {
            line = lineReader.readLine(prompt, null, (MaskingCallback) null, null);
        } catch (UserInterruptException e) {
            // Ctrl-C: discard the current input and show a fresh prompt.
            continue;
        } catch (EndOfFileException | IOError e) {
            // Ctrl-D or a closed terminal: leave the shell.
            break;
        } catch (Throwable t) {
            throw new SqlClientException("Could not read from command line.", t);
        }
        if (line == null) {
            continue;
        }
        // Execute the command if the line parsed into one.
        parseCommand(line).ifPresent(this::callCommand);
    }
}
Opens the interactive CLI shell .
15,299
/**
 * Creates a slot profile for the given resource profile that carries no
 * locality preferences at all.
 *
 * @param resourceProfile the resource requirements of the slot
 * @return a slot profile with empty preferred-location lists
 */
public static SlotProfile noLocality(ResourceProfile resourceProfile) {
    return new SlotProfile(
            resourceProfile,
            Collections.emptyList(),
            Collections.emptyList());
}
Returns a slot profile for the given resource profile without any locality requirements .