idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
15,400 | private static Configuration loadYAMLResource ( File file ) { final Configuration config = new Configuration ( ) ; try ( BufferedReader reader = new BufferedReader ( new InputStreamReader ( new FileInputStream ( file ) ) ) ) { String line ; int lineNo = 0 ; while ( ( line = reader . readLine ( ) ) != null ) { lineNo ++ ; String [ ] comments = line . split ( "#" , 2 ) ; String conf = comments [ 0 ] . trim ( ) ; if ( conf . length ( ) > 0 ) { String [ ] kv = conf . split ( ": " , 2 ) ; if ( kv . length == 1 ) { LOG . warn ( "Error while trying to split key and value in configuration file " + file + ":" + lineNo + ": \"" + line + "\"" ) ; continue ; } String key = kv [ 0 ] . trim ( ) ; String value = kv [ 1 ] . trim ( ) ; if ( key . length ( ) == 0 || value . length ( ) == 0 ) { LOG . warn ( "Error after splitting key and value in configuration file " + file + ":" + lineNo + ": \"" + line + "\"" ) ; continue ; } LOG . info ( "Loading configuration property: {}, {}" , key , isSensitive ( key ) ? HIDDEN_CONTENT : value ) ; config . setString ( key , value ) ; } } } catch ( IOException e ) { throw new RuntimeException ( "Error parsing YAML configuration." , e ) ; } return config ; } | Loads a YAML - file of key - value pairs . |
15,401 | public static boolean isSensitive ( String key ) { Preconditions . checkNotNull ( key , "key is null" ) ; final String keyInLower = key . toLowerCase ( ) ; for ( String hideKey : SENSITIVE_KEYS ) { if ( keyInLower . length ( ) >= hideKey . length ( ) && keyInLower . contains ( hideKey ) ) { return true ; } } return false ; } | Check whether the key is a hidden key . |
15,402 | public boolean close ( ) { lock . lock ( ) ; try { if ( open ) { if ( elements . isEmpty ( ) ) { open = false ; nonEmpty . signalAll ( ) ; return true ; } else { return false ; } } else { return true ; } } finally { lock . unlock ( ) ; } } | Tries to close the queue . Closing the queue only succeeds when no elements are in the queue when this method is called . Checking whether the queue is empty and marking the queue as closed is one atomic operation . |
15,403 | public boolean addIfOpen ( E element ) { requireNonNull ( element ) ; lock . lock ( ) ; try { if ( open ) { elements . addLast ( element ) ; if ( elements . size ( ) == 1 ) { nonEmpty . signalAll ( ) ; } } return open ; } finally { lock . unlock ( ) ; } } | Tries to add an element to the queue if the queue is still open . Checking whether the queue is open and adding the element is one atomic operation . |
15,404 | public void add ( E element ) throws IllegalStateException { requireNonNull ( element ) ; lock . lock ( ) ; try { if ( open ) { elements . addLast ( element ) ; if ( elements . size ( ) == 1 ) { nonEmpty . signalAll ( ) ; } } else { throw new IllegalStateException ( "queue is closed" ) ; } } finally { lock . unlock ( ) ; } } | Adds the element to the queue or fails with an exception if the queue is closed . Checking whether the queue is open and adding the element is one atomic operation . |
15,405 | public E peek ( ) { lock . lock ( ) ; try { if ( open ) { if ( elements . size ( ) > 0 ) { return elements . getFirst ( ) ; } else { return null ; } } else { throw new IllegalStateException ( "queue is closed" ) ; } } finally { lock . unlock ( ) ; } } | Returns the queue s next element without removing it if the queue is non - empty . Otherwise returns null . |
15,406 | public E poll ( ) { lock . lock ( ) ; try { if ( open ) { if ( elements . size ( ) > 0 ) { return elements . removeFirst ( ) ; } else { return null ; } } else { throw new IllegalStateException ( "queue is closed" ) ; } } finally { lock . unlock ( ) ; } } | Returns the queue s next element and removes it the queue is non - empty . Otherwise this method returns null . |
15,407 | public List < E > pollBatch ( ) { lock . lock ( ) ; try { if ( open ) { if ( elements . size ( ) > 0 ) { ArrayList < E > result = new ArrayList < > ( elements ) ; elements . clear ( ) ; return result ; } else { return null ; } } else { throw new IllegalStateException ( "queue is closed" ) ; } } finally { lock . unlock ( ) ; } } | Returns all of the queue s current elements in a list if the queue is non - empty . Otherwise this method returns null . |
15,408 | public E getElementBlocking ( ) throws InterruptedException { lock . lock ( ) ; try { while ( open && elements . isEmpty ( ) ) { nonEmpty . await ( ) ; } if ( open ) { return elements . removeFirst ( ) ; } else { throw new IllegalStateException ( "queue is closed" ) ; } } finally { lock . unlock ( ) ; } } | Returns the next element in the queue . If the queue is empty this method waits until at least one element is added . |
15,409 | public E getElementBlocking ( long timeoutMillis ) throws InterruptedException { if ( timeoutMillis == 0L ) { return getElementBlocking ( ) ; } else if ( timeoutMillis < 0L ) { throw new IllegalArgumentException ( "invalid timeout" ) ; } final long deadline = System . nanoTime ( ) + timeoutMillis * 1_000_000L ; lock . lock ( ) ; try { while ( open && elements . isEmpty ( ) && timeoutMillis > 0 ) { nonEmpty . await ( timeoutMillis , TimeUnit . MILLISECONDS ) ; timeoutMillis = ( deadline - System . nanoTime ( ) ) / 1_000_000L ; } if ( ! open ) { throw new IllegalStateException ( "queue is closed" ) ; } else if ( elements . isEmpty ( ) ) { return null ; } else { return elements . removeFirst ( ) ; } } finally { lock . unlock ( ) ; } } | Returns the next element in the queue . If the queue is empty this method waits at most a certain time until an element becomes available . If no element is available after that time the method returns null . |
15,410 | public List < E > getBatchBlocking ( ) throws InterruptedException { lock . lock ( ) ; try { while ( open && elements . isEmpty ( ) ) { nonEmpty . await ( ) ; } if ( open ) { ArrayList < E > result = new ArrayList < > ( elements ) ; elements . clear ( ) ; return result ; } else { throw new IllegalStateException ( "queue is closed" ) ; } } finally { lock . unlock ( ) ; } } | Gets all the elements found in the list or blocks until at least one element was added . If the queue is empty when this method is called it blocks until at least one element is added . |
15,411 | public void onCompleteHandler ( StreamElementQueueEntry < ? > streamElementQueueEntry ) throws InterruptedException { lock . lockInterruptibly ( ) ; try { if ( firstSet . remove ( streamElementQueueEntry ) ) { completedQueue . offer ( streamElementQueueEntry ) ; while ( firstSet . isEmpty ( ) && firstSet != lastSet ) { firstSet = uncompletedQueue . poll ( ) ; Iterator < StreamElementQueueEntry < ? > > it = firstSet . iterator ( ) ; while ( it . hasNext ( ) ) { StreamElementQueueEntry < ? > bufferEntry = it . next ( ) ; if ( bufferEntry . isDone ( ) ) { completedQueue . offer ( bufferEntry ) ; it . remove ( ) ; } } } LOG . debug ( "Signal unordered stream element queue has completed entries." ) ; hasCompletedEntries . signalAll ( ) ; } } finally { lock . unlock ( ) ; } } | Callback for onComplete events for the given stream element queue entry . Whenever a queue entry is completed it is checked whether this entry belongs to the first set . If this is the case then the element is added to the completed entries queue from where it can be consumed . If the first set becomes empty then the next set is polled from the uncompleted entries queue . Completed entries from this new set are then added to the completed entries queue . |
15,412 | private < T > void addEntry ( StreamElementQueueEntry < T > streamElementQueueEntry ) { assert ( lock . isHeldByCurrentThread ( ) ) ; if ( streamElementQueueEntry . isWatermark ( ) ) { lastSet = new HashSet < > ( capacity ) ; if ( firstSet . isEmpty ( ) ) { firstSet . add ( streamElementQueueEntry ) ; } else { Set < StreamElementQueueEntry < ? > > watermarkSet = new HashSet < > ( 1 ) ; watermarkSet . add ( streamElementQueueEntry ) ; uncompletedQueue . offer ( watermarkSet ) ; } uncompletedQueue . offer ( lastSet ) ; } else { lastSet . add ( streamElementQueueEntry ) ; } streamElementQueueEntry . onComplete ( ( StreamElementQueueEntry < T > value ) -> { try { onCompleteHandler ( value ) ; } catch ( InterruptedException e ) { LOG . debug ( "AsyncBufferEntry could not be properly completed because the " + "executor thread has been interrupted." , e ) ; } catch ( Throwable t ) { operatorActions . failOperator ( new Exception ( "Could not complete the " + "stream element queue entry: " + value + '.' , t ) ) ; } } , executor ) ; numberEntries ++ ; } | Add the given stream element queue entry to the current last set if it is not a watermark . If it is a watermark then stop adding to the current last set insert the watermark into its own set and add a new last set . |
15,413 | private StreamGraph generateInternal ( List < StreamTransformation < ? > > transformations ) { for ( StreamTransformation < ? > transformation : transformations ) { transform ( transformation ) ; } return streamGraph ; } | This starts the actual transformation beginning from the sinks . |
15,414 | private String determineSlotSharingGroup ( String specifiedGroup , Collection < Integer > inputIds ) { if ( specifiedGroup != null ) { return specifiedGroup ; } else { String inputGroup = null ; for ( int id : inputIds ) { String inputGroupCandidate = streamGraph . getSlotSharingGroup ( id ) ; if ( inputGroup == null ) { inputGroup = inputGroupCandidate ; } else if ( ! inputGroup . equals ( inputGroupCandidate ) ) { return "default" ; } } return inputGroup == null ? "default" : inputGroup ; } } | Determines the slot sharing group for an operation based on the slot sharing group set by the user and the slot sharing groups of the inputs . |
15,415 | public static void installAsShutdownHook ( Logger logger , long delayMillis ) { checkArgument ( delayMillis >= 0 , "delay must be >= 0" ) ; Thread shutdownHook = new JvmShutdownSafeguard ( delayMillis ) ; ShutdownHookUtil . addShutdownHookThread ( shutdownHook , JvmShutdownSafeguard . class . getSimpleName ( ) , logger ) ; } | Installs the safeguard shutdown hook . The maximum time that the JVM is allowed to spend on shutdown before being killed is the given number of milliseconds . |
15,416 | public void start ( JobLeaderIdActions initialJobLeaderIdActions ) throws Exception { if ( isStarted ( ) ) { clear ( ) ; } this . jobLeaderIdActions = Preconditions . checkNotNull ( initialJobLeaderIdActions ) ; } | Start the service with the given job leader actions . |
15,417 | public void clear ( ) throws Exception { Exception exception = null ; for ( JobLeaderIdListener listener : jobLeaderIdListeners . values ( ) ) { try { listener . stop ( ) ; } catch ( Exception e ) { exception = ExceptionUtils . firstOrSuppressed ( e , exception ) ; } } if ( exception != null ) { ExceptionUtils . rethrowException ( exception , "Could not properly stop the " + JobLeaderIdService . class . getSimpleName ( ) + '.' ) ; } jobLeaderIdListeners . clear ( ) ; } | Stop and clear the currently registered job leader id listeners . |
15,418 | public void addJob ( JobID jobId ) throws Exception { Preconditions . checkNotNull ( jobLeaderIdActions ) ; LOG . debug ( "Add job {} to job leader id monitoring." , jobId ) ; if ( ! jobLeaderIdListeners . containsKey ( jobId ) ) { LeaderRetrievalService leaderRetrievalService = highAvailabilityServices . getJobManagerLeaderRetriever ( jobId ) ; JobLeaderIdListener jobIdListener = new JobLeaderIdListener ( jobId , jobLeaderIdActions , leaderRetrievalService ) ; jobLeaderIdListeners . put ( jobId , jobIdListener ) ; } } | Add a job to be monitored to retrieve the job leader id . |
15,419 | public void removeJob ( JobID jobId ) throws Exception { LOG . debug ( "Remove job {} from job leader id monitoring." , jobId ) ; JobLeaderIdListener listener = jobLeaderIdListeners . remove ( jobId ) ; if ( listener != null ) { listener . stop ( ) ; } } | Remove the given job from being monitored by the service . |
15,420 | public static Configuration generateTaskManagerConfiguration ( Configuration baseConfig , String jobManagerHostname , int jobManagerPort , int numSlots , FiniteDuration registrationTimeout ) { Configuration cfg = cloneConfiguration ( baseConfig ) ; if ( jobManagerHostname != null && ! jobManagerHostname . isEmpty ( ) ) { cfg . setString ( JobManagerOptions . ADDRESS , jobManagerHostname ) ; } if ( jobManagerPort > 0 ) { cfg . setInteger ( JobManagerOptions . PORT , jobManagerPort ) ; } cfg . setString ( TaskManagerOptions . REGISTRATION_TIMEOUT , registrationTimeout . toString ( ) ) ; if ( numSlots != - 1 ) { cfg . setInteger ( TaskManagerOptions . NUM_TASK_SLOTS , numSlots ) ; } return cfg ; } | Generate a task manager configuration . |
15,421 | public static void writeConfiguration ( Configuration cfg , File file ) throws IOException { try ( FileWriter fwrt = new FileWriter ( file ) ; PrintWriter out = new PrintWriter ( fwrt ) ) { for ( String key : cfg . keySet ( ) ) { String value = cfg . getString ( key , null ) ; out . print ( key ) ; out . print ( ": " ) ; out . println ( value ) ; } } } | Writes a Flink YAML config file from a Flink Configuration object . |
15,422 | public static void substituteDeprecatedConfigKey ( Configuration config , String deprecated , String designated ) { if ( ! config . containsKey ( designated ) ) { final String valueForDeprecated = config . getString ( deprecated , null ) ; if ( valueForDeprecated != null ) { config . setString ( designated , valueForDeprecated ) ; } } } | Sets the value of a new config key to the value of a deprecated config key . |
15,423 | public static void substituteDeprecatedConfigPrefix ( Configuration config , String deprecatedPrefix , String designatedPrefix ) { final int prefixLen = deprecatedPrefix . length ( ) ; Configuration replacement = new Configuration ( ) ; for ( String key : config . keySet ( ) ) { if ( key . startsWith ( deprecatedPrefix ) ) { String newKey = designatedPrefix + key . substring ( prefixLen ) ; if ( ! config . containsKey ( newKey ) ) { replacement . setString ( newKey , config . getString ( key , null ) ) ; } } } config . addAll ( replacement ) ; } | Sets the value of a new config key to the value of a deprecated config key . Taking into account the changed prefix . |
15,424 | public static String getTaskManagerShellCommand ( Configuration flinkConfig , ContaineredTaskManagerParameters tmParams , String configDirectory , String logDirectory , boolean hasLogback , boolean hasLog4j , boolean hasKrb5 , Class < ? > mainClass ) { final Map < String , String > startCommandValues = new HashMap < > ( ) ; startCommandValues . put ( "java" , "$JAVA_HOME/bin/java" ) ; ArrayList < String > params = new ArrayList < > ( ) ; params . add ( String . format ( "-Xms%dm" , tmParams . taskManagerHeapSizeMB ( ) ) ) ; params . add ( String . format ( "-Xmx%dm" , tmParams . taskManagerHeapSizeMB ( ) ) ) ; if ( tmParams . taskManagerDirectMemoryLimitMB ( ) >= 0 ) { params . add ( String . format ( "-XX:MaxDirectMemorySize=%dm" , tmParams . taskManagerDirectMemoryLimitMB ( ) ) ) ; } startCommandValues . put ( "jvmmem" , StringUtils . join ( params , ' ' ) ) ; String javaOpts = flinkConfig . getString ( CoreOptions . FLINK_JVM_OPTIONS ) ; if ( flinkConfig . getString ( CoreOptions . FLINK_TM_JVM_OPTIONS ) . length ( ) > 0 ) { javaOpts += " " + flinkConfig . getString ( CoreOptions . FLINK_TM_JVM_OPTIONS ) ; } if ( hasKrb5 ) { javaOpts += " -Djava.security.krb5.conf=krb5.conf" ; } startCommandValues . put ( "jvmopts" , javaOpts ) ; String logging = "" ; if ( hasLogback || hasLog4j ) { logging = "-Dlog.file=" + logDirectory + "/taskmanager.log" ; if ( hasLogback ) { logging += " -Dlogback.configurationFile=file:" + configDirectory + "/logback.xml" ; } if ( hasLog4j ) { logging += " -Dlog4j.configuration=file:" + configDirectory + "/log4j.properties" ; } } startCommandValues . put ( "logging" , logging ) ; startCommandValues . put ( "class" , mainClass . getName ( ) ) ; startCommandValues . put ( "redirects" , "1> " + logDirectory + "/taskmanager.out " + "2> " + logDirectory + "/taskmanager.err" ) ; startCommandValues . put ( "args" , "--configDir " + configDirectory ) ; final String commandTemplate = flinkConfig . getString ( ConfigConstants . YARN_CONTAINER_START_COMMAND_TEMPLATE , ConfigConstants . DEFAULT_YARN_CONTAINER_START_COMMAND_TEMPLATE ) ; String startCommand = getStartCommand ( commandTemplate , startCommandValues ) ; LOG . debug ( "TaskManager start command: " + startCommand ) ; return startCommand ; } | Generates the shell command to start a task manager . |
15,425 | public static String getStartCommand ( String template , Map < String , String > startCommandValues ) { for ( Map . Entry < String , String > variable : startCommandValues . entrySet ( ) ) { template = template . replace ( "%" + variable . getKey ( ) + "%" , variable . getValue ( ) ) ; } return template ; } | Replaces placeholders in the template start command with values from startCommandValues . |
15,426 | public static Configuration cloneConfiguration ( Configuration configuration ) { final Configuration clonedConfiguration = new Configuration ( configuration ) ; if ( clonedConfiguration . getBoolean ( USE_LOCAL_DEFAULT_TMP_DIRS ) ) { clonedConfiguration . removeConfig ( CoreOptions . TMP_DIRS ) ; clonedConfiguration . removeConfig ( USE_LOCAL_DEFAULT_TMP_DIRS ) ; } return clonedConfiguration ; } | Clones the given configuration and resets instance specific config options . |
15,427 | public boolean readBufferFromFileChannel ( Buffer buffer ) throws IOException { checkArgument ( fileChannel . size ( ) - fileChannel . position ( ) > 0 ) ; header . clear ( ) ; fileChannel . read ( header ) ; header . flip ( ) ; final boolean isBuffer = header . getInt ( ) == 1 ; final int size = header . getInt ( ) ; if ( size > buffer . getMaxCapacity ( ) ) { throw new IllegalStateException ( "Buffer is too small for data: " + buffer . getMaxCapacity ( ) + " bytes available, but " + size + " needed. This is most likely due to an serialized event, which is larger than the buffer size." ) ; } checkArgument ( buffer . getSize ( ) == 0 , "Buffer not empty" ) ; fileChannel . read ( buffer . getNioBuffer ( 0 , size ) ) ; buffer . setSize ( size ) ; if ( ! isBuffer ) { buffer . tagAsEvent ( ) ; } return fileChannel . size ( ) - fileChannel . position ( ) == 0 ; } | Reads data from the object s file channel into the given buffer . |
15,428 | public void collect ( final KEY key , final VALUE val ) throws IOException { this . outTuple . f0 = key ; this . outTuple . f1 = val ; this . flinkCollector . collect ( outTuple ) ; } | Use the wrapped Flink collector to collect a key - value pair for Flink . |
15,429 | public boolean isMatchingTopic ( String topic ) { if ( isFixedTopics ( ) ) { return getFixedTopics ( ) . contains ( topic ) ; } else { return topicPattern . matcher ( topic ) . matches ( ) ; } } | Check if the input topic matches the topics described by this KafkaTopicDescriptor . |
15,430 | public static OffsetCommitMode fromConfiguration ( boolean enableAutoCommit , boolean enableCommitOnCheckpoint , boolean enableCheckpointing ) { if ( enableCheckpointing ) { return ( enableCommitOnCheckpoint ) ? OffsetCommitMode . ON_CHECKPOINTS : OffsetCommitMode . DISABLED ; } else { return ( enableAutoCommit ) ? OffsetCommitMode . KAFKA_PERIODIC : OffsetCommitMode . DISABLED ; } } | Determine the offset commit mode using several configuration values . |
15,431 | void assignExclusiveSegments ( List < MemorySegment > segments ) { checkState ( this . initialCredit == 0 , "Bug in input channel setup logic: exclusive buffers have " + "already been set for this input channel." ) ; checkNotNull ( segments ) ; checkArgument ( segments . size ( ) > 0 , "The number of exclusive buffers per channel should be larger than 0." ) ; this . initialCredit = segments . size ( ) ; this . numRequiredBuffers = segments . size ( ) ; synchronized ( bufferQueue ) { for ( MemorySegment segment : segments ) { bufferQueue . addExclusiveBuffer ( new NetworkBuffer ( segment , this ) , numRequiredBuffers ) ; } } } | Assigns exclusive buffers to this input channel and this method should be called only once after this input channel is created . |
15,432 | public void requestSubpartition ( int subpartitionIndex ) throws IOException , InterruptedException { if ( partitionRequestClient == null ) { partitionRequestClient = connectionManager . createPartitionRequestClient ( connectionId ) ; partitionRequestClient . requestSubpartition ( partitionId , subpartitionIndex , this , 0 ) ; } } | Requests a remote subpartition . |
15,433 | void retriggerSubpartitionRequest ( int subpartitionIndex ) throws IOException , InterruptedException { checkState ( partitionRequestClient != null , "Missing initial subpartition request." ) ; if ( increaseBackoff ( ) ) { partitionRequestClient . requestSubpartition ( partitionId , subpartitionIndex , this , getCurrentBackoff ( ) ) ; } else { failPartitionRequest ( ) ; } } | Retriggers a remote subpartition request . |
15,434 | public void recycle ( MemorySegment segment ) { int numAddedBuffers ; synchronized ( bufferQueue ) { if ( isReleased . get ( ) ) { try { inputGate . returnExclusiveSegments ( Collections . singletonList ( segment ) ) ; return ; } catch ( Throwable t ) { ExceptionUtils . rethrow ( t ) ; } } numAddedBuffers = bufferQueue . addExclusiveBuffer ( new NetworkBuffer ( segment , this ) , numRequiredBuffers ) ; } if ( numAddedBuffers > 0 && unannouncedCredit . getAndAdd ( numAddedBuffers ) == 0 ) { notifyCreditAvailable ( ) ; } } | Exclusive buffer is recycled to this input channel directly and it may trigger return extra floating buffer and notify increased credit to the producer . |
15,435 | void onSenderBacklog ( int backlog ) throws IOException { int numRequestedBuffers = 0 ; synchronized ( bufferQueue ) { if ( isReleased . get ( ) ) { return ; } numRequiredBuffers = backlog + initialCredit ; while ( bufferQueue . getAvailableBufferSize ( ) < numRequiredBuffers && ! isWaitingForFloatingBuffers ) { Buffer buffer = inputGate . getBufferPool ( ) . requestBuffer ( ) ; if ( buffer != null ) { bufferQueue . addFloatingBuffer ( buffer ) ; numRequestedBuffers ++ ; } else if ( inputGate . getBufferProvider ( ) . addBufferListener ( this ) ) { isWaitingForFloatingBuffers = true ; break ; } } } if ( numRequestedBuffers > 0 && unannouncedCredit . getAndAdd ( numRequestedBuffers ) == 0 ) { notifyCreditAvailable ( ) ; } } | Receives the backlog from the producer s buffer response . If the number of available buffers is less than backlog + initialCredit it will request floating buffers from the buffer pool and then notify unannounced credits to the producer . |
15,436 | public void registerOngoingOperation ( final K operationKey , final CompletableFuture < R > operationResultFuture ) { final ResultAccessTracker < R > inProgress = ResultAccessTracker . inProgress ( ) ; registeredOperationTriggers . put ( operationKey , inProgress ) ; operationResultFuture . whenComplete ( ( result , error ) -> { if ( error == null ) { completedOperations . put ( operationKey , inProgress . finishOperation ( Either . Right ( result ) ) ) ; } else { completedOperations . put ( operationKey , inProgress . finishOperation ( Either . Left ( error ) ) ) ; } registeredOperationTriggers . remove ( operationKey ) ; } ) ; } | Registers an ongoing operation with the cache . |
15,437 | public InetSocketAddress getServerAddress ( ) { synchronized ( lock ) { Preconditions . checkState ( state != State . CREATED , "The RestServerEndpoint has not been started yet." ) ; Channel server = this . serverChannel ; if ( server != null ) { try { return ( ( InetSocketAddress ) server . localAddress ( ) ) ; } catch ( Exception e ) { log . error ( "Cannot access local server address" , e ) ; } } return null ; } } | Returns the address on which this endpoint is accepting requests . |
15,438 | protected CompletableFuture < Void > shutDownInternal ( ) { synchronized ( lock ) { CompletableFuture < ? > channelFuture = new CompletableFuture < > ( ) ; if ( serverChannel != null ) { serverChannel . close ( ) . addListener ( finished -> { if ( finished . isSuccess ( ) ) { channelFuture . complete ( null ) ; } else { channelFuture . completeExceptionally ( finished . cause ( ) ) ; } } ) ; serverChannel = null ; } final CompletableFuture < Void > channelTerminationFuture = new CompletableFuture < > ( ) ; channelFuture . thenRun ( ( ) -> { CompletableFuture < ? > groupFuture = new CompletableFuture < > ( ) ; CompletableFuture < ? > childGroupFuture = new CompletableFuture < > ( ) ; final Time gracePeriod = Time . seconds ( 10L ) ; if ( bootstrap != null ) { final ServerBootstrapConfig config = bootstrap . config ( ) ; final EventLoopGroup group = config . group ( ) ; if ( group != null ) { group . shutdownGracefully ( 0L , gracePeriod . toMilliseconds ( ) , TimeUnit . MILLISECONDS ) . addListener ( finished -> { if ( finished . isSuccess ( ) ) { groupFuture . complete ( null ) ; } else { groupFuture . completeExceptionally ( finished . cause ( ) ) ; } } ) ; } else { groupFuture . complete ( null ) ; } final EventLoopGroup childGroup = config . childGroup ( ) ; if ( childGroup != null ) { childGroup . shutdownGracefully ( 0L , gracePeriod . toMilliseconds ( ) , TimeUnit . MILLISECONDS ) . addListener ( finished -> { if ( finished . isSuccess ( ) ) { childGroupFuture . complete ( null ) ; } else { childGroupFuture . completeExceptionally ( finished . cause ( ) ) ; } } ) ; } else { childGroupFuture . complete ( null ) ; } bootstrap = null ; } else { groupFuture . complete ( null ) ; childGroupFuture . complete ( null ) ; } CompletableFuture < Void > combinedFuture = FutureUtils . completeAll ( Arrays . asList ( groupFuture , childGroupFuture ) ) ; combinedFuture . whenComplete ( ( Void ignored , Throwable throwable ) -> { if ( throwable != null ) { channelTerminationFuture . completeExceptionally ( throwable ) ; } else { channelTerminationFuture . complete ( null ) ; } } ) ; } ) ; return channelTerminationFuture ; } } | Stops this REST server endpoint . |
15,439 | static void createUploadDir ( final Path uploadDir , final Logger log , final boolean initialCreation ) throws IOException { if ( ! Files . exists ( uploadDir ) ) { if ( initialCreation ) { log . info ( "Upload directory {} does not exist. " + uploadDir ) ; } else { log . warn ( "Upload directory {} has been deleted externally. " + "Previously uploaded files are no longer available." , uploadDir ) ; } checkAndCreateUploadDir ( uploadDir , log ) ; } } | Creates the upload dir if needed . |
15,440 | public MatchIterator get ( long key , int hashCode ) { int bucket = hashCode & numBucketsMask ; int bucketOffset = bucket << 4 ; MemorySegment segment = buckets [ bucketOffset >>> segmentSizeBits ] ; int segOffset = bucketOffset & segmentSizeMask ; while ( true ) { long address = segment . getLong ( segOffset + 8 ) ; if ( address != INVALID_ADDRESS ) { if ( segment . getLong ( segOffset ) == key ) { return valueIter ( address ) ; } else { bucket = ( bucket + 1 ) & numBucketsMask ; if ( segOffset + 16 < segmentSize ) { segOffset += 16 ; } else { bucketOffset = bucket << 4 ; segOffset = bucketOffset & segmentSizeMask ; segment = buckets [ bucketOffset >>> segmentSizeBits ] ; } } } else { return valueIter ( INVALID_ADDRESS ) ; } } } | Returns an iterator for all the values for the given key or null if no value found . |
15,441 | private void updateIndex ( long key , int hashCode , long address , int size , MemorySegment dataSegment , int currentPositionInSegment ) throws IOException { assert ( numKeys <= numBuckets / 2 ) ; int bucketId = hashCode & numBucketsMask ; int bucketOffset = bucketId * SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES ; MemorySegment segment = buckets [ bucketOffset >>> segmentSizeBits ] ; int segOffset = bucketOffset & segmentSizeMask ; long currAddress ; while ( true ) { currAddress = segment . getLong ( segOffset + 8 ) ; if ( segment . getLong ( segOffset ) != key && currAddress != INVALID_ADDRESS ) { bucketId = ( bucketId + 1 ) & numBucketsMask ; if ( segOffset + SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES < segmentSize ) { segOffset += SPARSE_BUCKET_ELEMENT_SIZE_IN_BYTES ; } else { bucketOffset = bucketId * 16 ; segment = buckets [ bucketOffset >>> segmentSizeBits ] ; segOffset = bucketOffset & segmentSizeMask ; } } else { break ; } } if ( currAddress == INVALID_ADDRESS ) { segment . putLong ( segOffset , key ) ; segment . putLong ( segOffset + 8 , address ) ; numKeys += 1 ; if ( dataSegment != null ) { dataSegment . putLong ( currentPositionInSegment , toAddrAndLen ( INVALID_ADDRESS , size ) ) ; } if ( numKeys * 2 > numBuckets ) { resize ( ) ; } } else { dataSegment . putLong ( currentPositionInSegment , toAddrAndLen ( currAddress , size ) ) ; segment . putLong ( segOffset + 8 , address ) ; } } | Update the address in array for given key . |
15,442 | public void registerListener ( JobID jobId , KvStateRegistryListener listener ) { final KvStateRegistryListener previousValue = listeners . putIfAbsent ( jobId , listener ) ; if ( previousValue != null ) { throw new IllegalStateException ( "Listener already registered under " + jobId + '.' ) ; } } | Registers a listener with the registry . |
15,443 | public KvStateID registerKvState ( JobID jobId , JobVertexID jobVertexId , KeyGroupRange keyGroupRange , String registrationName , InternalKvState < ? , ? , ? > kvState ) { KvStateID kvStateId = new KvStateID ( ) ; if ( registeredKvStates . putIfAbsent ( kvStateId , new KvStateEntry < > ( kvState ) ) == null ) { final KvStateRegistryListener listener = getKvStateRegistryListener ( jobId ) ; if ( listener != null ) { listener . notifyKvStateRegistered ( jobId , jobVertexId , keyGroupRange , registrationName , kvStateId ) ; } return kvStateId ; } else { throw new IllegalStateException ( "State \"" + registrationName + " \"(id=" + kvStateId + ") appears registered although it should not." ) ; } } | Registers the KvState instance and returns the assigned ID . |
15,444 | public void unregisterKvState ( JobID jobId , JobVertexID jobVertexId , KeyGroupRange keyGroupRange , String registrationName , KvStateID kvStateId ) { KvStateEntry < ? , ? , ? > entry = registeredKvStates . remove ( kvStateId ) ; if ( entry != null ) { entry . clear ( ) ; final KvStateRegistryListener listener = getKvStateRegistryListener ( jobId ) ; if ( listener != null ) { listener . notifyKvStateUnregistered ( jobId , jobVertexId , keyGroupRange , registrationName ) ; } } } | Unregisters the KvState instance identified by the given KvStateID . |
15,445 | public BinaryMergeIterator < Entry > getMergingIterator ( List < ChannelWithMeta > channelIDs , List < FileIOChannel > openChannels ) throws IOException { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Performing merge of " + channelIDs . size ( ) + " sorted streams." ) ; } final List < MutableObjectIterator < Entry > > iterators = new ArrayList < > ( channelIDs . size ( ) + 1 ) ; for ( ChannelWithMeta channel : channelIDs ) { AbstractChannelReaderInputView view = FileChannelUtil . createInputView ( ioManager , channel , openChannels , compressionEnable , compressionCodecFactory , compressionBlockSize , pageSize ) ; iterators . add ( channelReaderInputViewIterator ( view ) ) ; } return new BinaryMergeIterator < > ( iterators , mergeReusedEntries ( channelIDs . size ( ) ) , mergeComparator ( ) ) ; } | Returns an iterator that iterates over the merged result from all given channels . |
15,446 | public List < ChannelWithMeta > mergeChannelList ( List < ChannelWithMeta > channelIDs ) throws IOException { final double scale = Math . ceil ( Math . log ( channelIDs . size ( ) ) / Math . log ( maxFanIn ) ) - 1 ; final int numStart = channelIDs . size ( ) ; final int numEnd = ( int ) Math . pow ( maxFanIn , scale ) ; final int numMerges = ( int ) Math . ceil ( ( numStart - numEnd ) / ( double ) ( maxFanIn - 1 ) ) ; final int numNotMerged = numEnd - numMerges ; final int numToMerge = numStart - numNotMerged ; final List < ChannelWithMeta > mergedChannelIDs = new ArrayList < > ( numEnd ) ; mergedChannelIDs . addAll ( channelIDs . subList ( 0 , numNotMerged ) ) ; final int channelsToMergePerStep = ( int ) Math . ceil ( numToMerge / ( double ) numMerges ) ; final List < ChannelWithMeta > channelsToMergeThisStep = new ArrayList < > ( channelsToMergePerStep ) ; int channelNum = numNotMerged ; while ( ! closed && channelNum < channelIDs . size ( ) ) { channelsToMergeThisStep . clear ( ) ; for ( int i = 0 ; i < channelsToMergePerStep && channelNum < channelIDs . size ( ) ; i ++ , channelNum ++ ) { channelsToMergeThisStep . add ( channelIDs . get ( channelNum ) ) ; } mergedChannelIDs . add ( mergeChannels ( channelsToMergeThisStep ) ) ; } return mergedChannelIDs ; } | Merges the given sorted runs to a smaller number of sorted runs . |
15,447 | private ChannelWithMeta mergeChannels ( List < ChannelWithMeta > channelIDs ) throws IOException { List < FileIOChannel > openChannels = new ArrayList < > ( channelIDs . size ( ) ) ; final BinaryMergeIterator < Entry > mergeIterator = getMergingIterator ( channelIDs , openChannels ) ; final FileIOChannel . ID mergedChannelID = ioManager . createChannel ( ) ; channelManager . addChannel ( mergedChannelID ) ; AbstractChannelWriterOutputView output = null ; int numBytesInLastBlock ; int numBlocksWritten ; try { output = FileChannelUtil . createOutputView ( ioManager , mergedChannelID , compressionEnable , compressionCodecFactory , compressionBlockSize , pageSize ) ; writeMergingOutput ( mergeIterator , output ) ; numBytesInLastBlock = output . close ( ) ; numBlocksWritten = output . getBlockCount ( ) ; } catch ( IOException e ) { if ( output != null ) { output . close ( ) ; output . getChannel ( ) . deleteChannel ( ) ; } throw e ; } for ( FileIOChannel channel : openChannels ) { channelManager . removeChannel ( channel . getChannelID ( ) ) ; try { channel . closeAndDelete ( ) ; } catch ( Throwable ignored ) { } } return new ChannelWithMeta ( mergedChannelID , numBlocksWritten , numBytesInLastBlock ) ; } | Merges the sorted runs described by the given Channel IDs into a single sorted run . |
15,448 | public boolean isMetBy ( LocalProperties other ) { if ( this . ordering != null ) { return other . getOrdering ( ) != null && this . ordering . isMetBy ( other . getOrdering ( ) ) ; } else if ( this . groupedFields != null ) { if ( other . getGroupedFields ( ) != null && other . getGroupedFields ( ) . isValidUnorderedPrefix ( this . groupedFields ) ) { return true ; } else { return other . areFieldsUnique ( this . groupedFields ) ; } } else { return true ; } } | Checks if this set of properties as interesting properties is met by the given properties . |
15,449 | public void parameterizeChannel ( Channel channel ) { LocalProperties current = channel . getLocalProperties ( ) ; if ( isMetBy ( current ) ) { channel . setLocalStrategy ( LocalStrategy . NONE ) ; } else if ( this . ordering != null ) { channel . setLocalStrategy ( LocalStrategy . SORT , this . ordering . getInvolvedIndexes ( ) , this . ordering . getFieldSortDirections ( ) ) ; } else if ( this . groupedFields != null ) { boolean [ ] dirs = new boolean [ this . groupedFields . size ( ) ] ; Arrays . fill ( dirs , true ) ; channel . setLocalStrategy ( LocalStrategy . SORT , Utils . createOrderedFromSet ( this . groupedFields ) , dirs ) ; } else { channel . setLocalStrategy ( LocalStrategy . NONE ) ; } } | Parametrizes the local strategy fields of a channel such that the channel produces the desired local properties . |
15,450 | public boolean get ( ) { switch ( state ) { case UNSET : return valueIfUnset ; case FALSE : return false ; case TRUE : return true ; case CONFLICTING : return valueIfConflicting ; default : throw new RuntimeException ( "Unknown state" ) ; } } | Get the boolean state . |
15,451 | public boolean conflictsWith ( OptionalBoolean other ) { return state == State . CONFLICTING || other . state == State . CONFLICTING || ( state == State . TRUE && other . state == State . FALSE ) || ( state == State . FALSE && other . state == State . TRUE ) ; } | The conflicting states are true with false and false with true . |
15,452 | public void mergeWith ( OptionalBoolean other ) { if ( state == other . state ) { } else if ( state == State . UNSET ) { state = other . state ; } else if ( other . state == State . UNSET ) { } else { state = State . CONFLICTING ; } } | State transitions . - if the states are the same then no change - if either state is unset then change to the other state - if the states are conflicting then set to the conflicting state |
15,453 | @ SuppressWarnings ( "deprecation" ) public RestartStrategies . RestartStrategyConfiguration getRestartStrategy ( ) { if ( restartStrategyConfiguration instanceof RestartStrategies . FallbackRestartStrategyConfiguration ) { if ( getNumberOfExecutionRetries ( ) > 0 && getExecutionRetryDelay ( ) >= 0 ) { return RestartStrategies . fixedDelayRestart ( getNumberOfExecutionRetries ( ) , getExecutionRetryDelay ( ) ) ; } else if ( getNumberOfExecutionRetries ( ) == 0 ) { return RestartStrategies . noRestart ( ) ; } else { return restartStrategyConfiguration ; } } else { return restartStrategyConfiguration ; } } | Returns the restart strategy which has been set for the current job . |
15,454 | @ SuppressWarnings ( "rawtypes" ) public void registerTypeWithKryoSerializer ( Class < ? > type , Class < ? extends Serializer > serializerClass ) { if ( type == null || serializerClass == null ) { throw new NullPointerException ( "Cannot register null class or serializer." ) ; } @ SuppressWarnings ( "unchecked" ) Class < ? extends Serializer < ? > > castedSerializerClass = ( Class < ? extends Serializer < ? > > ) serializerClass ; registeredTypesWithKryoSerializerClasses . put ( type , castedSerializerClass ) ; } | Registers the given Serializer via its class as a serializer for the given type at the KryoSerializer |
15,455 | public LinkedHashSet < Class < ? > > getRegisteredKryoTypes ( ) { if ( isForceKryoEnabled ( ) ) { LinkedHashSet < Class < ? > > result = new LinkedHashSet < > ( ) ; result . addAll ( registeredKryoTypes ) ; for ( Class < ? > t : registeredPojoTypes ) { if ( ! result . contains ( t ) ) { result . add ( t ) ; } } return result ; } else { return registeredKryoTypes ; } } | Returns the registered Kryo types . |
15,456 | public CompletableFuture < Acknowledge > start ( final JobMasterId newJobMasterId ) throws Exception { start ( ) ; return callAsyncWithoutFencing ( ( ) -> startJobExecution ( newJobMasterId ) , RpcUtils . INF_TIMEOUT ) ; } | Start the rpc service and begin to run the job . |
15,457 | public CompletableFuture < Void > onStop ( ) { log . info ( "Stopping the JobMaster for job {}({})." , jobGraph . getName ( ) , jobGraph . getJobID ( ) ) ; final Set < ResourceID > taskManagerResourceIds = new HashSet < > ( registeredTaskManagers . keySet ( ) ) ; final FlinkException cause = new FlinkException ( "Stopping JobMaster for job " + jobGraph . getName ( ) + '(' + jobGraph . getJobID ( ) + ")." ) ; for ( ResourceID taskManagerResourceId : taskManagerResourceIds ) { disconnectTaskManager ( taskManagerResourceId , cause ) ; } taskManagerHeartbeatManager . stop ( ) ; resourceManagerHeartbeatManager . stop ( ) ; suspendExecution ( new FlinkException ( "JobManager is shutting down." ) ) ; slotPool . close ( ) ; return CompletableFuture . completedFuture ( null ) ; } | Suspend the job and shutdown all other services including rpc . |
15,458 | public CompletableFuture < Acknowledge > updateTaskExecutionState ( final TaskExecutionState taskExecutionState ) { checkNotNull ( taskExecutionState , "taskExecutionState" ) ; if ( executionGraph . updateState ( taskExecutionState ) ) { return CompletableFuture . completedFuture ( Acknowledge . get ( ) ) ; } else { return FutureUtils . completedExceptionally ( new ExecutionGraphException ( "The execution attempt " + taskExecutionState . getID ( ) + " was not found." ) ) ; } } | Updates the task execution state for a given task . |
15,459 | public Option defaultValue ( String defaultValue ) throws RequiredParametersException { if ( this . choices . isEmpty ( ) ) { return this . setDefaultValue ( defaultValue ) ; } else { if ( this . choices . contains ( defaultValue ) ) { return this . setDefaultValue ( defaultValue ) ; } else { throw new RequiredParametersException ( "Default value " + defaultValue + " is not in the list of valid values for option " + this . longName ) ; } } } | Define a default value for the option . |
15,460 | public Option choices ( String ... choices ) throws RequiredParametersException { if ( this . defaultValue != null ) { if ( Arrays . asList ( choices ) . contains ( defaultValue ) ) { Collections . addAll ( this . choices , choices ) ; } else { throw new RequiredParametersException ( "Valid values for option " + this . longName + " do not contain defined default value " + defaultValue ) ; } } else { Collections . addAll ( this . choices , choices ) ; } return this ; } | Restrict the list of possible values of the parameter . |
15,461 | public static int getInt ( Properties config , String key , int defaultValue ) { String val = config . getProperty ( key ) ; if ( val == null ) { return defaultValue ; } else { try { return Integer . parseInt ( val ) ; } catch ( NumberFormatException nfe ) { throw new IllegalArgumentException ( "Value for configuration key='" + key + "' is not set correctly. " + "Entered value='" + val + "'. Default value='" + defaultValue + "'" ) ; } } } | Get integer from properties . This method throws an exception if the integer is not valid . |
15,462 | public static long getLong ( Properties config , String key , long defaultValue ) { String val = config . getProperty ( key ) ; if ( val == null ) { return defaultValue ; } else { try { return Long . parseLong ( val ) ; } catch ( NumberFormatException nfe ) { throw new IllegalArgumentException ( "Value for configuration key='" + key + "' is not set correctly. " + "Entered value='" + val + "'. Default value='" + defaultValue + "'" ) ; } } } | Get long from properties . This method throws an exception if the long is not valid . |
15,463 | public static long getLong ( Properties config , String key , long defaultValue , Logger logger ) { try { return getLong ( config , key , defaultValue ) ; } catch ( IllegalArgumentException iae ) { logger . warn ( iae . getMessage ( ) ) ; return defaultValue ; } } | Get long from properties . This method only logs if the long is not valid . |
15,464 | CheckpointStatsHistory createSnapshot ( ) { if ( readOnly ) { throw new UnsupportedOperationException ( "Can't create a snapshot of a read-only history." ) ; } List < AbstractCheckpointStats > checkpointsHistory ; Map < Long , AbstractCheckpointStats > checkpointsById ; checkpointsById = new HashMap < > ( checkpointsArray . length ) ; if ( maxSize == 0 ) { checkpointsHistory = Collections . emptyList ( ) ; } else { AbstractCheckpointStats [ ] newCheckpointsArray = new AbstractCheckpointStats [ checkpointsArray . length ] ; System . arraycopy ( checkpointsArray , nextPos , newCheckpointsArray , 0 , checkpointsArray . length - nextPos ) ; System . arraycopy ( checkpointsArray , 0 , newCheckpointsArray , checkpointsArray . length - nextPos , nextPos ) ; checkpointsHistory = Arrays . asList ( newCheckpointsArray ) ; Collections . reverse ( checkpointsHistory ) ; for ( AbstractCheckpointStats checkpoint : checkpointsHistory ) { checkpointsById . put ( checkpoint . getCheckpointId ( ) , checkpoint ) ; } } if ( latestCompletedCheckpoint != null ) { checkpointsById . put ( latestCompletedCheckpoint . getCheckpointId ( ) , latestCompletedCheckpoint ) ; } if ( latestFailedCheckpoint != null ) { checkpointsById . put ( latestFailedCheckpoint . getCheckpointId ( ) , latestFailedCheckpoint ) ; } if ( latestSavepoint != null ) { checkpointsById . put ( latestSavepoint . getCheckpointId ( ) , latestSavepoint ) ; } return new CheckpointStatsHistory ( true , maxSize , null , checkpointsHistory , checkpointsById , latestCompletedCheckpoint , latestFailedCheckpoint , latestSavepoint ) ; } | Creates a snapshot of the current state . |
15,465 | void addInProgressCheckpoint ( PendingCheckpointStats pending ) { if ( readOnly ) { throw new UnsupportedOperationException ( "Can't create a snapshot of a read-only history." ) ; } if ( maxSize == 0 ) { return ; } checkNotNull ( pending , "Pending checkpoint" ) ; if ( checkpointsArray . length < maxSize ) { checkpointsArray = Arrays . copyOf ( checkpointsArray , checkpointsArray . length + 1 ) ; } if ( nextPos == checkpointsArray . length ) { nextPos = 0 ; } checkpointsArray [ nextPos ++ ] = pending ; } | Adds an in progress checkpoint to the checkpoint history . |
15,466 | boolean replacePendingCheckpointById ( AbstractCheckpointStats completedOrFailed ) { checkArgument ( ! completedOrFailed . getStatus ( ) . isInProgress ( ) , "Not allowed to replace with in progress checkpoints." ) ; if ( readOnly ) { throw new UnsupportedOperationException ( "Can't create a snapshot of a read-only history." ) ; } if ( completedOrFailed . getStatus ( ) . isCompleted ( ) ) { CompletedCheckpointStats completed = ( CompletedCheckpointStats ) completedOrFailed ; if ( completed . getProperties ( ) . isSavepoint ( ) && ( latestSavepoint == null || completed . getCheckpointId ( ) > latestSavepoint . getCheckpointId ( ) ) ) { latestSavepoint = completed ; } else if ( latestCompletedCheckpoint == null || completed . getCheckpointId ( ) > latestCompletedCheckpoint . getCheckpointId ( ) ) { latestCompletedCheckpoint = completed ; } } else if ( completedOrFailed . getStatus ( ) . isFailed ( ) ) { FailedCheckpointStats failed = ( FailedCheckpointStats ) completedOrFailed ; if ( latestFailedCheckpoint == null || failed . getCheckpointId ( ) > latestFailedCheckpoint . getCheckpointId ( ) ) { latestFailedCheckpoint = failed ; } } if ( maxSize == 0 ) { return false ; } long checkpointId = completedOrFailed . getCheckpointId ( ) ; int startPos = nextPos == checkpointsArray . length ? checkpointsArray . length - 1 : nextPos - 1 ; for ( int i = startPos ; i >= 0 ; i -- ) { if ( checkpointsArray [ i ] . getCheckpointId ( ) == checkpointId ) { checkpointsArray [ i ] = completedOrFailed ; return true ; } } for ( int i = checkpointsArray . length - 1 ; i > startPos ; i -- ) { if ( checkpointsArray [ i ] . getCheckpointId ( ) == checkpointId ) { checkpointsArray [ i ] = completedOrFailed ; return true ; } } return false ; } | Searches for the in progress checkpoint with the given ID and replaces it with the given completed or failed checkpoint . |
15,467 | public static MessageType toParquetType ( TypeInformation < ? > typeInformation , boolean legacyMode ) { return ( MessageType ) convertField ( null , typeInformation , Type . Repetition . OPTIONAL , legacyMode ) ; } | Converts Flink Internal Type to Parquet schema . |
15,468 | public static int hashUnsafeBytesByWords ( Object base , long offset , int lengthInBytes ) { return hashUnsafeBytesByWords ( base , offset , lengthInBytes , DEFAULT_SEED ) ; } | Hash unsafe bytes length must be aligned to 4 bytes . |
15,469 | public static int hashUnsafeBytes ( Object base , long offset , int lengthInBytes ) { return hashUnsafeBytes ( base , offset , lengthInBytes , DEFAULT_SEED ) ; } | Hash unsafe bytes . |
15,470 | public static int hashBytesByWords ( MemorySegment segment , int offset , int lengthInBytes ) { return hashBytesByWords ( segment , offset , lengthInBytes , DEFAULT_SEED ) ; } | Hash bytes in MemorySegment length must be aligned to 4 bytes . |
15,471 | public static int hashBytes ( MemorySegment segment , int offset , int lengthInBytes ) { return hashBytes ( segment , offset , lengthInBytes , DEFAULT_SEED ) ; } | Hash bytes in MemorySegment . |
15,472 | public SerializedValue < JobInformation > getSerializedJobInformation ( ) { if ( serializedJobInformation instanceof NonOffloaded ) { NonOffloaded < JobInformation > jobInformation = ( NonOffloaded < JobInformation > ) serializedJobInformation ; return jobInformation . serializedValue ; } else { throw new IllegalStateException ( "Trying to work with offloaded serialized job information." ) ; } } | Return the sub task s serialized job information . |
15,473 | public SerializedValue < TaskInformation > getSerializedTaskInformation ( ) { if ( serializedTaskInformation instanceof NonOffloaded ) { NonOffloaded < TaskInformation > taskInformation = ( NonOffloaded < TaskInformation > ) serializedTaskInformation ; return taskInformation . serializedValue ; } else { throw new IllegalStateException ( "Trying to work with offloaded serialized job information." ) ; } } | Return the sub task s serialized task information . |
15,474 | public void accept ( Visitor < PlanNode > visitor ) { for ( SinkPlanNode node : this . dataSinks ) { node . accept ( visitor ) ; } } | Applies the given visitor top down to all nodes starting at the sinks . |
15,475 | boolean insertToBucket ( int hashCode , int pointer , boolean spillingAllowed , boolean sizeAddAndCheckResize ) throws IOException { final int posHashCode = findBucket ( hashCode ) ; final int bucketArrayPos = posHashCode >> table . bucketsPerSegmentBits ; final int bucketInSegmentPos = ( posHashCode & table . bucketsPerSegmentMask ) << BUCKET_SIZE_BITS ; final MemorySegment bucket = this . buckets [ bucketArrayPos ] ; return insertToBucket ( bucket , bucketInSegmentPos , hashCode , pointer , spillingAllowed , sizeAddAndCheckResize ) ; } | Insert into bucket by hashCode and pointer . |
15,476 | boolean appendRecordAndInsert ( BinaryRow record , int hashCode ) throws IOException { final int posHashCode = findBucket ( hashCode ) ; final int bucketArrayPos = posHashCode >> table . bucketsPerSegmentBits ; final int bucketInSegmentPos = ( posHashCode & table . bucketsPerSegmentMask ) << BUCKET_SIZE_BITS ; final MemorySegment bucket = this . buckets [ bucketArrayPos ] ; if ( ! table . tryDistinctBuildRow || ! partition . isInMemory ( ) || ! findFirstSameBuildRow ( bucket , hashCode , bucketInSegmentPos , record ) ) { int pointer = partition . insertIntoBuildBuffer ( record ) ; if ( pointer != - 1 ) { insertToBucket ( bucket , bucketInSegmentPos , hashCode , pointer , true , true ) ; return true ; } else { return false ; } } else { return true ; } } | Append record and insert to bucket . |
15,477 | private boolean findFirstSameBuildRow ( MemorySegment bucket , int searchHashCode , int bucketInSegmentOffset , BinaryRow buildRowToInsert ) { int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH ; int countInBucket = bucket . getShort ( bucketInSegmentOffset + HEADER_COUNT_OFFSET ) ; int numInBucket = 0 ; RandomAccessInputView view = partition . getBuildStateInputView ( ) ; while ( countInBucket != 0 ) { while ( numInBucket < countInBucket ) { final int thisCode = bucket . getInt ( posInSegment ) ; posInSegment += HASH_CODE_LEN ; if ( thisCode == searchHashCode ) { final int pointer = bucket . getInt ( bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + ( numInBucket * POINTER_LEN ) ) ; numInBucket ++ ; try { view . setReadPosition ( pointer ) ; BinaryRow row = table . binaryBuildSideSerializer . mapFromPages ( table . reuseBuildRow , view ) ; if ( buildRowToInsert . equals ( row ) ) { return true ; } } catch ( IOException e ) { throw new RuntimeException ( "Error deserializing key or value from the hashtable: " + e . getMessage ( ) , e ) ; } } else { numInBucket ++ ; } } final int forwardPointer = bucket . getInt ( bucketInSegmentOffset + HEADER_FORWARD_OFFSET ) ; if ( forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET ) { return false ; } final int overflowSegIndex = forwardPointer >>> table . segmentSizeBits ; bucket = overflowSegments [ overflowSegIndex ] ; bucketInSegmentOffset = forwardPointer & table . segmentSizeMask ; countInBucket = bucket . getShort ( bucketInSegmentOffset + HEADER_COUNT_OFFSET ) ; posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH ; numInBucket = 0 ; } return false ; } | For distinct build . |
15,478 | void startLookup ( int hashCode ) { final int posHashCode = findBucket ( hashCode ) ; final int bucketArrayPos = posHashCode >> table . bucketsPerSegmentBits ; final int bucketInSegmentOffset = ( posHashCode & table . bucketsPerSegmentMask ) << BUCKET_SIZE_BITS ; final MemorySegment bucket = this . buckets [ bucketArrayPos ] ; table . bucketIterator . set ( bucket , overflowSegments , partition , hashCode , bucketInSegmentOffset ) ; } | Probe start lookup joined build rows . |
15,479 | private static boolean repStep ( BufferedReader in , boolean readConsoleInput ) throws IOException , InterruptedException { long startTime = System . currentTimeMillis ( ) ; while ( ( System . currentTimeMillis ( ) - startTime ) < CLIENT_POLLING_INTERVAL_MS && ( ! readConsoleInput || ! in . ready ( ) ) ) { Thread . sleep ( 200L ) ; } if ( readConsoleInput && in . ready ( ) ) { String command = in . readLine ( ) ; switch ( command ) { case "quit" : case "stop" : return false ; case "help" : System . err . println ( YARN_SESSION_HELP ) ; break ; default : System . err . println ( "Unknown command '" + command + "'. Showing help:" ) ; System . err . println ( YARN_SESSION_HELP ) ; break ; } } return true ; } | Read - Evaluate - Print step for the REPL . |
15,480 | public static List < InputChannelDeploymentDescriptor > fromEdges ( List < ExecutionEdge > edges , boolean allowLazyDeployment ) { return edges . stream ( ) . map ( edge -> fromEdgeAndValidate ( allowLazyDeployment , edge ) ) . collect ( Collectors . toList ( ) ) ; } | Creates an input channel deployment descriptor for each partition . |
15,481 | long refreshAndGetTotal ( ) { long total = 0 ; for ( InputChannel channel : inputGate . getInputChannels ( ) . values ( ) ) { if ( channel instanceof RemoteInputChannel ) { RemoteInputChannel rc = ( RemoteInputChannel ) channel ; total += rc . unsynchronizedGetNumberOfQueuedBuffers ( ) ; } } return total ; } | Iterates over all input channels and collects the total number of queued buffers in a best - effort way . |
15,482 | int refreshAndGetMin ( ) { int min = Integer . MAX_VALUE ; Collection < InputChannel > channels = inputGate . getInputChannels ( ) . values ( ) ; for ( InputChannel channel : channels ) { if ( channel instanceof RemoteInputChannel ) { RemoteInputChannel rc = ( RemoteInputChannel ) channel ; int size = rc . unsynchronizedGetNumberOfQueuedBuffers ( ) ; min = Math . min ( min , size ) ; } } if ( min == Integer . MAX_VALUE ) { return 0 ; } return min ; } | Iterates over all input channels and collects the minimum number of queued buffers in a channel in a best - effort way . |
15,483 | int refreshAndGetMax ( ) { int max = 0 ; for ( InputChannel channel : inputGate . getInputChannels ( ) . values ( ) ) { if ( channel instanceof RemoteInputChannel ) { RemoteInputChannel rc = ( RemoteInputChannel ) channel ; int size = rc . unsynchronizedGetNumberOfQueuedBuffers ( ) ; max = Math . max ( max , size ) ; } } return max ; } | Iterates over all input channels and collects the maximum number of queued buffers in a channel in a best - effort way . |
15,484 | float refreshAndGetAvg ( ) { long total = 0 ; int count = 0 ; for ( InputChannel channel : inputGate . getInputChannels ( ) . values ( ) ) { if ( channel instanceof RemoteInputChannel ) { RemoteInputChannel rc = ( RemoteInputChannel ) channel ; int size = rc . unsynchronizedGetNumberOfQueuedBuffers ( ) ; total += size ; ++ count ; } } return count == 0 ? 0 : total / ( float ) count ; } | Iterates over all input channels and collects the average number of queued buffers in a channel in a best - effort way . |
15,485 | static JarFileWithEntryClass findOnlyEntryClass ( Iterable < File > jarFiles ) throws IOException { List < JarFileWithEntryClass > jarsWithEntryClasses = new ArrayList < > ( ) ; for ( File jarFile : jarFiles ) { findEntryClass ( jarFile ) . ifPresent ( entryClass -> jarsWithEntryClasses . add ( new JarFileWithEntryClass ( jarFile , entryClass ) ) ) ; } int size = jarsWithEntryClasses . size ( ) ; if ( size == 0 ) { throw new NoSuchElementException ( "No JAR with manifest attribute for entry class" ) ; } if ( size == 1 ) { return jarsWithEntryClasses . get ( 0 ) ; } throw new IllegalArgumentException ( "Multiple JARs with manifest attribute for entry class: " + jarsWithEntryClasses ) ; } | Returns a JAR file with its entry class as specified in the manifest . |
15,486 | static Optional < String > findEntryClass ( File jarFile ) throws IOException { return findFirstManifestAttribute ( jarFile , PackagedProgram . MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS , PackagedProgram . MANIFEST_ATTRIBUTE_MAIN_CLASS ) ; } | Returns the entry class as specified in the manifest of the provided JAR file . |
15,487 | private static Optional < String > findFirstManifestAttribute ( File jarFile , String ... attributes ) throws IOException { if ( attributes . length == 0 ) { return Optional . empty ( ) ; } try ( JarFile f = new JarFile ( jarFile ) ) { return findFirstManifestAttribute ( f , attributes ) ; } } | Returns the value of the first manifest attribute found in the provided JAR file . |
15,488 | public final TypeSerializer < T > restoreSerializer ( ) { if ( serializer == null ) { throw new IllegalStateException ( "Trying to restore the prior serializer via TypeSerializerConfigSnapshot, " + "but the prior serializer has not been set." ) ; } else if ( serializer instanceof UnloadableDummyTypeSerializer ) { Throwable originalError = ( ( UnloadableDummyTypeSerializer < ? > ) serializer ) . getOriginalError ( ) ; throw new IllegalStateException ( "Could not Java-deserialize TypeSerializer while restoring checkpoint metadata for serializer " + "snapshot '" + getClass ( ) . getName ( ) + "'. " + "Please update to the TypeSerializerSnapshot interface that removes Java Serialization to avoid " + "this problem in the future." , originalError ) ; } else { return this . serializer ; } } | Creates a serializer using this configuration that is capable of reading data written by the serializer described by this configuration . |
15,489 | private static < V , R extends Serializable > Accumulator < V , R > mergeSingle ( Accumulator < ? , ? > target , Accumulator < ? , ? > toMerge ) { @ SuppressWarnings ( "unchecked" ) Accumulator < V , R > typedTarget = ( Accumulator < V , R > ) target ; @ SuppressWarnings ( "unchecked" ) Accumulator < V , R > typedToMerge = ( Accumulator < V , R > ) toMerge ; typedTarget . merge ( typedToMerge ) ; return typedTarget ; } | Workaround method for type safety . |
15,490 | public static Map < String , OptionalFailure < Object > > toResultMap ( Map < String , Accumulator < ? , ? > > accumulators ) { Map < String , OptionalFailure < Object > > resultMap = new HashMap < > ( ) ; for ( Map . Entry < String , Accumulator < ? , ? > > entry : accumulators . entrySet ( ) ) { resultMap . put ( entry . getKey ( ) , wrapUnchecked ( entry . getKey ( ) , ( ) -> entry . getValue ( ) . getLocalValue ( ) ) ) ; } return resultMap ; } | Transform the Map with accumulators into a Map containing only the results . |
15,491 | public static Map < String , OptionalFailure < Object > > deserializeAccumulators ( Map < String , SerializedValue < OptionalFailure < Object > > > serializedAccumulators , ClassLoader loader ) throws IOException , ClassNotFoundException { if ( serializedAccumulators == null || serializedAccumulators . isEmpty ( ) ) { return Collections . emptyMap ( ) ; } Map < String , OptionalFailure < Object > > accumulators = new HashMap < > ( serializedAccumulators . size ( ) ) ; for ( Map . Entry < String , SerializedValue < OptionalFailure < Object > > > entry : serializedAccumulators . entrySet ( ) ) { OptionalFailure < Object > value = null ; if ( entry . getValue ( ) != null ) { value = entry . getValue ( ) . deserializeValue ( loader ) ; } accumulators . put ( entry . getKey ( ) , value ) ; } return accumulators ; } | Takes the serialized accumulator results and tries to deserialize them using the provided class loader . |
15,492 | public void prepareAndCommitOffsets ( Map < KafkaTopicPartition , Long > internalOffsets ) throws Exception { for ( Map . Entry < KafkaTopicPartition , Long > entry : internalOffsets . entrySet ( ) ) { KafkaTopicPartition tp = entry . getKey ( ) ; Long lastProcessedOffset = entry . getValue ( ) ; if ( lastProcessedOffset != null && lastProcessedOffset >= 0 ) { setOffsetInZooKeeper ( curatorClient , groupId , tp . getTopic ( ) , tp . getPartition ( ) , lastProcessedOffset + 1 ) ; } } } | Commits offsets for Kafka partitions to ZooKeeper . The given offsets to this method should be the offsets of the last processed records ; this method will take care of incrementing the offsets by 1 before committing them so that the committed offsets to Zookeeper represent the next record to process . |
15,493 | public void publish ( TaskEvent event ) { synchronized ( listeners ) { for ( EventListener < TaskEvent > listener : listeners . get ( event . getClass ( ) ) ) { listener . onEvent ( event ) ; } } } | Publishes the task event to all subscribed event listeners . |
15,494 | public static void register ( final Logger LOG ) { synchronized ( SignalHandler . class ) { if ( registered ) { return ; } registered = true ; final String [ ] SIGNALS = OperatingSystem . isWindows ( ) ? new String [ ] { "TERM" , "INT" } : new String [ ] { "TERM" , "HUP" , "INT" } ; StringBuilder bld = new StringBuilder ( ) ; bld . append ( "Registered UNIX signal handlers for [" ) ; String separator = "" ; for ( String signalName : SIGNALS ) { try { new Handler ( signalName , LOG ) ; bld . append ( separator ) ; bld . append ( signalName ) ; separator = ", " ; } catch ( Exception e ) { LOG . info ( "Error while registering signal handler" , e ) ; } } bld . append ( "]" ) ; LOG . info ( bld . toString ( ) ) ; } } | Register some signal handlers . |
15,495 | private static < X extends CopyableValue < X > > CopyableValueSerializer < X > createCopyableValueSerializer ( Class < X > clazz ) { return new CopyableValueSerializer < X > ( clazz ) ; } | utility method to summon the necessary bound |
15,496 | private static void registerFactory ( Type t , Class < ? extends TypeInfoFactory > factory ) { Preconditions . checkNotNull ( t , "Type parameter must not be null." ) ; Preconditions . checkNotNull ( factory , "Factory parameter must not be null." ) ; if ( ! TypeInfoFactory . class . isAssignableFrom ( factory ) ) { throw new IllegalArgumentException ( "Class is not a TypeInfoFactory." ) ; } if ( registeredTypeInfoFactories . containsKey ( t ) ) { throw new InvalidTypesException ( "A TypeInfoFactory for type '" + t + "' is already registered." ) ; } registeredTypeInfoFactories . put ( t , factory ) ; } | Registers a type information factory globally for a certain type . Every following type extraction operation will use the provided factory for this type . The factory will have highest precedence for this type . In a hierarchy of types the registered factory has higher precedence than annotations at the same level but lower precedence than factories defined down the hierarchy . |
15,497 | @ SuppressWarnings ( "unchecked" ) public static < IN , OUT > TypeInformation < OUT > getUnaryOperatorReturnType ( Function function , Class < ? > baseClass , int inputTypeArgumentIndex , int outputTypeArgumentIndex , int [ ] lambdaOutputTypeArgumentIndices , TypeInformation < IN > inType , String functionName , boolean allowMissing ) { Preconditions . checkArgument ( inType == null || inputTypeArgumentIndex >= 0 , "Input type argument index was not provided" ) ; Preconditions . checkArgument ( outputTypeArgumentIndex >= 0 , "Output type argument index was not provided" ) ; Preconditions . checkArgument ( lambdaOutputTypeArgumentIndices != null , "Indices for output type arguments within lambda not provided" ) ; if ( function instanceof ResultTypeQueryable ) { return ( ( ResultTypeQueryable < OUT > ) function ) . getProducedType ( ) ; } try { final LambdaExecutable exec ; try { exec = checkAndExtractLambda ( function ) ; } catch ( TypeExtractionException e ) { throw new InvalidTypesException ( "Internal error occurred." , e ) ; } if ( exec != null ) { final int paramLen = exec . getParameterTypes ( ) . length ; final Method sam = TypeExtractionUtils . getSingleAbstractMethod ( baseClass ) ; final int baseParametersLen = sam . getParameterTypes ( ) . length ; final Type output ; if ( lambdaOutputTypeArgumentIndices . length > 0 ) { output = TypeExtractionUtils . extractTypeFromLambda ( baseClass , exec , lambdaOutputTypeArgumentIndices , paramLen , baseParametersLen ) ; } else { output = exec . getReturnType ( ) ; TypeExtractionUtils . validateLambdaType ( baseClass , output ) ; } return new TypeExtractor ( ) . privateCreateTypeInfo ( output , inType , null ) ; } else { if ( inType != null ) { validateInputType ( baseClass , function . getClass ( ) , inputTypeArgumentIndex , inType ) ; } return new TypeExtractor ( ) . privateCreateTypeInfo ( baseClass , function . getClass ( ) , outputTypeArgumentIndex , inType , null ) ; } } catch ( InvalidTypesException e ) { if ( allowMissing ) { return ( TypeInformation < OUT > ) new MissingTypeInfo ( functionName != null ? functionName : function . toString ( ) , e ) ; } else { throw e ; } } } | Returns the unary operator s return type . |
15,498 | @ SuppressWarnings ( "unchecked" ) public static < IN1 , IN2 , OUT > TypeInformation < OUT > getBinaryOperatorReturnType ( Function function , Class < ? > baseClass , int input1TypeArgumentIndex , int input2TypeArgumentIndex , int outputTypeArgumentIndex , int [ ] lambdaOutputTypeArgumentIndices , TypeInformation < IN1 > in1Type , TypeInformation < IN2 > in2Type , String functionName , boolean allowMissing ) { Preconditions . checkArgument ( in1Type == null || input1TypeArgumentIndex >= 0 , "Input 1 type argument index was not provided" ) ; Preconditions . checkArgument ( in2Type == null || input2TypeArgumentIndex >= 0 , "Input 2 type argument index was not provided" ) ; Preconditions . checkArgument ( outputTypeArgumentIndex >= 0 , "Output type argument index was not provided" ) ; Preconditions . checkArgument ( lambdaOutputTypeArgumentIndices != null , "Indices for output type arguments within lambda not provided" ) ; if ( function instanceof ResultTypeQueryable ) { return ( ( ResultTypeQueryable < OUT > ) function ) . getProducedType ( ) ; } try { final LambdaExecutable exec ; try { exec = checkAndExtractLambda ( function ) ; } catch ( TypeExtractionException e ) { throw new InvalidTypesException ( "Internal error occurred." , e ) ; } if ( exec != null ) { final Method sam = TypeExtractionUtils . getSingleAbstractMethod ( baseClass ) ; final int baseParametersLen = sam . getParameterTypes ( ) . length ; final int paramLen = exec . getParameterTypes ( ) . length ; final Type output ; if ( lambdaOutputTypeArgumentIndices . length > 0 ) { output = TypeExtractionUtils . extractTypeFromLambda ( baseClass , exec , lambdaOutputTypeArgumentIndices , paramLen , baseParametersLen ) ; } else { output = exec . getReturnType ( ) ; TypeExtractionUtils . validateLambdaType ( baseClass , output ) ; } return new TypeExtractor ( ) . privateCreateTypeInfo ( output , in1Type , in2Type ) ; } else { if ( in1Type != null ) { validateInputType ( baseClass , function . getClass ( ) , input1TypeArgumentIndex , in1Type ) ; } if ( in2Type != null ) { validateInputType ( baseClass , function . getClass ( ) , input2TypeArgumentIndex , in2Type ) ; } return new TypeExtractor ( ) . privateCreateTypeInfo ( baseClass , function . getClass ( ) , outputTypeArgumentIndex , in1Type , in2Type ) ; } } catch ( InvalidTypesException e ) { if ( allowMissing ) { return ( TypeInformation < OUT > ) new MissingTypeInfo ( functionName != null ? functionName : function . toString ( ) , e ) ; } else { throw e ; } } } | Returns the binary operator s return type . |
15,499 | @ SuppressWarnings ( "unchecked" ) private < IN1 , IN2 , OUT > TypeInformation < OUT > createTypeInfoFromFactory ( Type t , ArrayList < Type > typeHierarchy , TypeInformation < IN1 > in1Type , TypeInformation < IN2 > in2Type ) { final ArrayList < Type > factoryHierarchy = new ArrayList < > ( typeHierarchy ) ; final TypeInfoFactory < ? super OUT > factory = getClosestFactory ( factoryHierarchy , t ) ; if ( factory == null ) { return null ; } final Type factoryDefiningType = factoryHierarchy . get ( factoryHierarchy . size ( ) - 1 ) ; final Map < String , TypeInformation < ? > > genericParams ; if ( factoryDefiningType instanceof ParameterizedType ) { genericParams = new HashMap < > ( ) ; final ParameterizedType paramDefiningType = ( ParameterizedType ) factoryDefiningType ; final Type [ ] args = typeToClass ( paramDefiningType ) . getTypeParameters ( ) ; final TypeInformation < ? > [ ] subtypeInfo = createSubTypesInfo ( t , paramDefiningType , factoryHierarchy , in1Type , in2Type , true ) ; assert subtypeInfo != null ; for ( int i = 0 ; i < subtypeInfo . length ; i ++ ) { genericParams . put ( args [ i ] . toString ( ) , subtypeInfo [ i ] ) ; } } else { genericParams = Collections . emptyMap ( ) ; } final TypeInformation < OUT > createdTypeInfo = ( TypeInformation < OUT > ) factory . createTypeInfo ( t , genericParams ) ; if ( createdTypeInfo == null ) { throw new InvalidTypesException ( "TypeInfoFactory returned invalid TypeInformation 'null'" ) ; } return createdTypeInfo ; } | Creates type information using a factory if for this type or super types . Returns null otherwise . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.