idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
14,400
/**
 * Registers a collection of {@link FieldSet}s whose fields are unique.
 *
 * <p>The backing set is created lazily on first use.
 *
 * @param uniqueFieldSets the field sets to register as unique
 */
public void addUniqueFields(Set<FieldSet> uniqueFieldSets) {
    Set<FieldSet> target = this.uniqueFields;
    if (target == null) {
        target = new HashSet<FieldSet>();
        this.uniqueFields = target;
    }
    target.addAll(uniqueFieldSets);
}
Adds multiple FieldSets to be unique
14,401
/**
 * Normalizes key-value properties read from YAML into the normalized format of
 * the Table API.
 *
 * @param yamlMap raw key-value pairs as parsed from YAML
 * @return descriptor properties containing the normalized entries
 */
public static DescriptorProperties normalizeYaml(Map<String, Object> yamlMap) {
    // Flatten every (possibly nested) YAML value into string properties.
    final Map<String, String> flattened = new HashMap<>();
    for (Map.Entry<String, Object> entry : yamlMap.entrySet()) {
        normalizeYamlObject(flattened, entry.getKey(), entry.getValue());
    }
    final DescriptorProperties result = new DescriptorProperties(true);
    result.putProperties(flattened);
    return result;
}
Normalizes key - value properties from Yaml in the normalized format of the Table API .
14,402
/**
 * Re-schedules a subpartition request after the current backoff delay.
 *
 * <p>Must only be called while no subpartition view exists yet; failures
 * thrown inside the delayed task are forwarded via {@code setError}.
 *
 * @param timer             timer used to delay the retried request
 * @param subpartitionIndex index of the subpartition to request
 */
void retriggerSubpartitionRequest(Timer timer, final int subpartitionIndex) {
    synchronized (requestLock) {
        // A view must not exist yet, otherwise the partition was already requested.
        checkState(subpartitionView == null, "already requested partition");
        timer.schedule(new TimerTask() {
            public void run() {
                try {
                    requestSubpartition(subpartitionIndex);
                } catch (Throwable t) {
                    // Surface asynchronous failures instead of losing them in the timer thread.
                    setError(t);
                }
            }
        }, getCurrentBackoff());
    }
}
Retriggers a subpartition request .
14,403
/**
 * Creates the dotted-quad string representation (e.g. {@code "127.0.0.1"}) of a
 * 32-bit integer holding an IPv4 address.
 *
 * @param address the IPv4 address packed into an int (big-endian byte order)
 * @return the dotted-quad representation of the address
 */
public static String formatAddress(int address) {
    // Longest possible result is 15 chars ("255.255.255.255").
    final StringBuilder sb = new StringBuilder(15);
    for (int shift = 24; shift >= 0; shift -= 8) {
        sb.append((address >>> shift) & 0xff);
        if (shift > 0) {
            sb.append('.');
        }
    }
    return sb.toString();
}
Util method to create a string representation of a 32 bit integer representing an IPv4 address .
14,404
/**
 * Creates a python data stream from the given iterator.
 *
 * <p>The iterator is wrapped in a {@code PythonIteratorFunction} source whose
 * elements are typed as {@code Object} and adapted via {@code AdapterMap}.
 *
 * @param iter iterator supplying the stream elements
 * @return the resulting python data stream
 * @throws Exception if the source cannot be added to the environment
 */
public PythonDataStream from_collection(Iterator<Object> iter) throws Exception {
    return new PythonDataStream<>(env.addSource(new PythonIteratorFunction(iter), TypeExtractor.getForClass(Object.class)).map(new AdapterMap<>()));
}
Creates a python data stream from the given iterator .
14,405
/**
 * Moves the reading position to the given block and seeks to and reads the
 * given record within that block.
 *
 * <p>The special position {@code (-1, -1)} seeks directly to the end of the
 * split (all records consumed). Otherwise row groups before the target block
 * are skipped without decoding, then records are read one by one up to and
 * including {@code recordInBlock}.
 *
 * @param block         index of the target row group, or -1 for end-of-split
 * @param recordInBlock record index within the target block, or -1 for end-of-split
 * @throws IOException if skipping or reading a row group fails
 */
public void seek(long block, long recordInBlock) throws IOException {
    List<BlockMetaData> blockMetaData = reader.getRowGroups();
    if (block == -1L && recordInBlock == -1L) {
        // Seek to the end of the split: mark everything as read.
        currentBlock = blockMetaData.size() - 1;
        numReadRecords = numTotalRecords;
        numRecordsUpToCurrentBlock = numTotalRecords;
        return;
    }
    // Reset bookkeeping to the start of the first block.
    currentBlock = 0;
    numRecordsUpToPreviousBlock = 0;
    numRecordsUpToCurrentBlock = blockMetaData.get(0).getRowCount();
    numReadRecords = 0;
    // Skip whole row groups until the target block is the current one.
    while (currentBlock < block) {
        currentBlock++;
        reader.skipNextRowGroup();
        numRecordsUpToPreviousBlock = numRecordsUpToCurrentBlock;
        numRecordsUpToCurrentBlock += blockMetaData.get(currentBlock).getRowCount();
        numReadRecords = numRecordsUpToPreviousBlock;
    }
    PageReadStore pages = reader.readNextRowGroup();
    recordReader = createRecordReader(pages);
    // Read records up to AND including the requested record index.
    for (int i = 0; i <= recordInBlock; i++) {
        readNextRecord();
    }
}
Moves the reading position to the given block and seeks to and reads the given record .
14,406
/**
 * Returns the current read position in the split, i.e. the current block and
 * the number of records that were returned from that block.
 *
 * <p>A record that was read ahead but not yet handed to the caller is not
 * counted as returned. The position {@code (-1, -1)} means the end of the
 * split; a position at a block boundary is reported as the start of the next
 * block.
 *
 * @return tuple of (block index, records returned from that block)
 */
public Tuple2<Long, Long> getCurrentReadPosition() {
    long numRecordsReturned = numReadRecords;
    if (!readRecordReturned && numReadRecords > 0) {
        // The last record was fetched but not yet returned to the caller.
        numRecordsReturned -= 1;
    }
    if (numRecordsReturned == numTotalRecords) {
        // Everything consumed: signal end-of-split.
        return Tuple2.of(-1L, -1L);
    }
    if (numRecordsReturned == numRecordsUpToCurrentBlock) {
        // Exactly at a block boundary: report the start of the next block.
        return Tuple2.of(currentBlock + 1L, 0L);
    }
    long numRecordsOfBlockReturned = numRecordsReturned - numRecordsUpToPreviousBlock;
    return Tuple2.of((long) currentBlock, numRecordsOfBlockReturned);
}
Returns the current read position in the split i . e . the current block and the number of records that were returned from that block .
14,407
/**
 * Checks if the record reader returned all records. This method must be called
 * before a record can be returned.
 *
 * <p>Note: when no read-ahead record is pending, this call attempts to fetch
 * the next record as a side effect (via {@code readNextRecord}).
 *
 * @return true if no more records are available, false otherwise
 * @throws IOException if reading the next record fails
 */
public boolean reachEnd() throws IOException {
    // A record was read ahead and is still waiting to be returned.
    if (readRecord != null && !readRecordReturned) {
        return false;
    }
    if (numReadRecords >= numTotalRecords) {
        return true;
    }
    // Try to read ahead the next record; end is reached when that fails.
    return !readNextRecord();
}
Checks if the record reader returned all records . This method must be called before a record can be returned .
14,408
/**
 * Reads the next record into {@code readRecord}.
 *
 * <p>Advances to the next row group when the current one is exhausted. Corrupt
 * records are either skipped (with a warning) or rethrown, depending on
 * {@code skipCorruptedRecord}. A {@code null} read indicates the filtered
 * reader reached the end of the current block.
 *
 * @return true if a record was read, false if the end of the split was reached
 * @throws IOException if reading from the underlying file fails
 */
private boolean readNextRecord() throws IOException {
    boolean recordFound = false;
    while (!recordFound) {
        if (numReadRecords >= numTotalRecords) {
            // All records of the split have been consumed.
            return false;
        }
        try {
            if (numReadRecords == numRecordsUpToCurrentBlock) {
                // Current row group exhausted: advance to the next one.
                PageReadStore pages = reader.readNextRowGroup();
                recordReader = createRecordReader(pages);
                numRecordsUpToPreviousBlock = numRecordsUpToCurrentBlock;
                numRecordsUpToCurrentBlock += pages.getRowCount();
                currentBlock++;
            }
            numReadRecords++;
            try {
                readRecord = recordReader.read();
                readRecordReturned = false;
            } catch (RecordMaterializationException e) {
                // Corrupt record: either skip it or fail, per configuration.
                String errorMessage = String.format("skipping a corrupt record in block number [%d] record number [%s] of file %s", currentBlock, numReadRecords - numRecordsUpToPreviousBlock, reader.getFile());
                if (!skipCorruptedRecord) {
                    LOG.error(errorMessage);
                    throw e;
                } else {
                    LOG.warn(errorMessage);
                }
                continue;
            }
            if (readRecord == null) {
                // The filtered reader signals end-of-block with null; fast-forward the counter.
                readRecordReturned = true;
                numReadRecords = numRecordsUpToCurrentBlock;
                LOG.debug("filtered record reader reached end of block");
                continue;
            }
            recordFound = true;
            LOG.debug("read value: {}", readRecord);
        } catch (RecordMaterializationException e) {
            LOG.error(String.format("Can not read value at %d in block %d in file %s", numReadRecords - numRecordsUpToPreviousBlock, currentBlock, reader.getFile()), e);
            if (!skipCorruptedRecord) {
                throw e;
            }
            return false;
        }
    }
    return true;
}
Reads the next record .
14,409
/**
 * Shuts the scheduler down. After shut down no more tasks can be added to the
 * scheduler.
 *
 * <p>Cancels and releases all slots of every known instance and clears all
 * instance and task bookkeeping, all under the global lock.
 */
public void shutdown() {
    synchronized (globalLock) {
        for (Instance instance : allInstances) {
            instance.removeSlotListener();
            instance.cancelAndReleaseAllSlots();
        }
        // Drop all bookkeeping so nothing can be scheduled afterwards.
        allInstances.clear();
        allInstancesByHost.clear();
        instancesWithAvailableResources.clear();
        taskQueue.clear();
    }
}
Shuts the scheduler down . After shut down no more tasks can be added to the scheduler .
14,410
/**
 * Tries to allocate a new slot for a vertex that is part of a slot sharing
 * group. If one of the instances has a slot available, the method allocates a
 * shared slot on it, adds that shared slot to the sharing group, and allocates
 * a simple sub-slot from it.
 *
 * @param vertex             the vertex to allocate a slot for
 * @param requestedLocations preferred task manager locations
 * @param groupAssignment    the slot sharing group assignment
 * @param constraint         optional co-location constraint, may be null
 * @param localOnly          whether only local instances may be used
 * @return the allocated simple slot, or null if no instance was available
 */
protected SimpleSlot getNewSlotForSharingGroup(ExecutionVertex vertex, Iterable<TaskManagerLocation> requestedLocations, SlotSharingGroupAssignment groupAssignment, CoLocationConstraint constraint, boolean localOnly) {
    // Retry until a slot is allocated or no instance is available at all.
    while (true) {
        Pair<Instance, Locality> instanceLocalityPair = findInstance(requestedLocations, localOnly);
        if (instanceLocalityPair == null) {
            // No instance available (respecting the locality constraint).
            return null;
        }
        final Instance instanceToUse = instanceLocalityPair.getLeft();
        final Locality locality = instanceLocalityPair.getRight();
        try {
            JobVertexID groupID = vertex.getJobvertexId();
            SharedSlot sharedSlot = instanceToUse.allocateSharedSlot(groupAssignment);
            // Put the instance back into the available pool if it still has resources.
            if (instanceToUse.hasResourcesAvailable()) {
                this.instancesWithAvailableResources.put(instanceToUse.getTaskManagerID(), instanceToUse);
            }
            if (sharedSlot != null) {
                // Register the shared slot with the group, keyed by constraint or vertex id.
                SimpleSlot slot = constraint == null ? groupAssignment.addSharedSlotAndAllocateSubSlot(sharedSlot, locality, groupID) : groupAssignment.addSharedSlotAndAllocateSubSlot(sharedSlot, locality, constraint);
                if (slot != null) {
                    return slot;
                } else {
                    // Sub-slot allocation failed: release the shared slot and retry.
                    sharedSlot.releaseSlot(new FlinkException("Could not allocate sub-slot."));
                }
            }
        } catch (InstanceDiedException e) {
            // The chosen instance died in the meantime; drop it and retry.
            removeInstance(instanceToUse);
        }
    }
}
Tries to allocate a new slot for a vertex that is part of a slot sharing group . If one of the instances has a slot available the method will allocate it as a shared slot add that shared slot to the sharing group and allocate a simple slot from that shared slot .
14,411
/**
 * Formats the description into a String using format-specific tags.
 *
 * @param description the description whose blocks are formatted
 * @return the fully formatted string
 */
public String format(Description description) {
    // Let every block element render itself into this formatter.
    for (final BlockElement element : description.getBlocks()) {
        element.format(this);
    }
    return finalizeFormatting();
}
Formats the description into a String using format specific tags .
14,412
/**
 * Adds semantic information about forwarded fields of the first input of the
 * user-defined function. Forwarded fields are never modified by the function
 * and are forwarded unchanged to the same or another position in the output.
 *
 * @param forwardedFieldsFirst field forwarding expressions for the first input
 * @return this operator, for fluent chaining
 * @throws SemanticProperties.InvalidSemanticAnnotationException if forwarded
 *         field information for the first input was already added via a
 *         function annotation
 */
@SuppressWarnings("unchecked")
public O withForwardedFieldsFirst(String... forwardedFieldsFirst) {
    // Lazily derive semantic properties from UDF annotations if not yet present.
    if (this.udfSemantics == null || this.analyzedUdfSemantics) {
        setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass()));
    }
    // Re-check: annotation extraction may have yielded nothing (or only analyzed
    // semantics), in which case fresh properties are created from the strings.
    if (this.udfSemantics == null || this.analyzedUdfSemantics) {
        setSemanticProperties(new DualInputSemanticProperties());
        SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
    } else {
        // Annotation-provided semantics must not be overwritten.
        if (this.udfWithForwardedFieldsFirstAnnotation(getFunction().getClass())) {
            throw new SemanticProperties.InvalidSemanticAnnotationException("Forwarded field information " + "has already been added by a function annotation for the first input of this operator. " + "Cannot overwrite function annotations.");
        } else {
            SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, forwardedFieldsFirst, null, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
        }
    }
    O returnType = (O) this;
    return returnType;
}
Adds semantic information about forwarded fields of the first input of the user - defined function . The forwarded fields information declares fields which are never modified by the function and which are forwarded at the same position to the output or unchanged copied to another position in the output .
14,413
/**
 * Adds semantic information about forwarded fields of the second input of the
 * user-defined function. Forwarded fields are never modified by the function
 * and are forwarded unchanged to the same or another position in the output.
 *
 * @param forwardedFieldsSecond field forwarding expressions for the second input
 * @return this operator, for fluent chaining
 * @throws SemanticProperties.InvalidSemanticAnnotationException if forwarded
 *         field information for the second input was already added via a
 *         function annotation
 */
@SuppressWarnings("unchecked")
public O withForwardedFieldsSecond(String... forwardedFieldsSecond) {
    // Lazily derive semantic properties from UDF annotations if not yet present.
    if (this.udfSemantics == null || this.analyzedUdfSemantics) {
        setSemanticProperties(extractSemanticAnnotationsFromUdf(getFunction().getClass()));
    }
    // Re-check: annotation extraction may have yielded nothing (or only analyzed
    // semantics), in which case fresh properties are created from the strings.
    if (this.udfSemantics == null || this.analyzedUdfSemantics) {
        setSemanticProperties(new DualInputSemanticProperties());
        SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, null, forwardedFieldsSecond, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
    } else {
        // Annotation-provided semantics must not be overwritten.
        if (udfWithForwardedFieldsSecondAnnotation(getFunction().getClass())) {
            throw new SemanticProperties.InvalidSemanticAnnotationException("Forwarded field information " + "has already been added by a function annotation for the second input of this operator. " + "Cannot overwrite function annotations.");
        } else {
            SemanticPropUtil.getSemanticPropsDualFromString(this.udfSemantics, null, forwardedFieldsSecond, null, null, null, null, getInput1Type(), getInput2Type(), getResultType());
        }
    }
    O returnType = (O) this;
    return returnType;
}
Adds semantic information about forwarded fields of the second input of the user - defined function . The forwarded fields information declares fields which are never modified by the function and which are forwarded at the same position to the output or unchanged copied to another position in the output .
14,414
/**
 * Returns the server channel handlers, in pipeline order: encoder, decoder,
 * request handler, and the partition request queue.
 *
 * @return the handlers to install in a server channel pipeline
 */
public ChannelHandler[] getServerChannelHandlers() {
    // The queue serves partition data and is shared with the request handler.
    final PartitionRequestQueue requestQueue = new PartitionRequestQueue();
    final PartitionRequestServerHandler requestHandler = new PartitionRequestServerHandler(partitionProvider, taskEventPublisher, requestQueue, creditBasedEnabled);
    return new ChannelHandler[] {
        messageEncoder,
        new NettyMessage.NettyMessageDecoder(!creditBasedEnabled),
        requestHandler,
        requestQueue
    };
}
Returns the server channel handlers .
14,415
/**
 * Returns the client channel handlers, in pipeline order: encoder, decoder,
 * and the (credit-based or plain) network client handler.
 *
 * @return the handlers to install in a client channel pipeline
 */
public ChannelHandler[] getClientChannelHandlers() {
    final NetworkClientHandler clientHandler;
    if (creditBasedEnabled) {
        clientHandler = new CreditBasedPartitionRequestClientHandler();
    } else {
        clientHandler = new PartitionRequestClientHandler();
    }
    return new ChannelHandler[] {
        messageEncoder,
        new NettyMessage.NettyMessageDecoder(!creditBasedEnabled),
        clientHandler
    };
}
Returns the client channel handlers .
14,416
/**
 * Creates a new DynamoDB streams shard consumer.
 *
 * @param subscribedShardStateIndex index of the shard state this consumer belongs to
 * @param handle                    handle of the stream shard to consume
 * @param lastSeqNum                sequence number to resume consumption from
 * @param shardMetricsReporter      reporter for per-shard metrics
 * @return the newly created shard consumer, backed by a DynamoDB streams proxy
 */
protected ShardConsumer createShardConsumer(Integer subscribedShardStateIndex, StreamShardHandle handle, SequenceNumber lastSeqNum, ShardMetricsReporter shardMetricsReporter) {
    return new ShardConsumer(this, subscribedShardStateIndex, handle, lastSeqNum, DynamoDBStreamsProxy.create(getConsumerConfiguration()), shardMetricsReporter);
}
Create a new DynamoDB streams shard consumer .
14,417
/**
 * Opens the CLI client for executing SQL statements.
 *
 * <p>Runs either the interactive shell or, if an update statement was passed
 * on the command line, submits that single statement. The client is closed in
 * all cases.
 *
 * @param context  the session context for the CLI client
 * @param executor the executor used to run statements
 * @throws SqlClientException if a given update statement could not be submitted
 */
private void openCli(SessionContext context, Executor executor) {
    CliClient cli = null;
    try {
        cli = new CliClient(context, executor);
        if (options.getUpdateStatement() == null) {
            // Interactive mode.
            cli.open();
        } else {
            // Non-interactive mode: submit the single update statement.
            final boolean success = cli.submitUpdate(options.getUpdateStatement());
            if (!success) {
                throw new SqlClientException("Could not submit given SQL update statement to cluster.");
            }
        }
    } finally {
        // Always release terminal/client resources.
        if (cli != null) {
            cli.close();
        }
    }
}
Opens the CLI client for executing SQL statements .
14,418
/**
 * Creates an Amazon Kinesis client.
 *
 * <p>Sets a Flink-specific user agent prefix, then configures either an
 * explicit endpoint (if {@code AWS_ENDPOINT} is set) or a region. Note that
 * endpoint and region are mutually exclusive on the builder.
 *
 * @param configProps     properties holding endpoint/region configuration
 * @param awsClientConfig client configuration; its user agent prefix is mutated here
 * @return the configured Amazon Kinesis client
 */
public static AmazonKinesis createKinesisClient(Properties configProps, ClientConfiguration awsClientConfig) {
    // Identify Flink (version + commit) in the AWS user agent.
    awsClientConfig.setUserAgentPrefix(String.format(USER_AGENT_FORMAT, EnvironmentInformation.getVersion(), EnvironmentInformation.getRevisionInformation().commitId));
    AmazonKinesisClientBuilder builder = AmazonKinesisClientBuilder.standard().withCredentials(AWSUtil.getCredentialsProvider(configProps)).withClientConfiguration(awsClientConfig);
    if (configProps.containsKey(AWSConfigConstants.AWS_ENDPOINT)) {
        // Explicit endpoint wins over region; signing region left null here.
        builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(configProps.getProperty(AWSConfigConstants.AWS_ENDPOINT), null));
    } else {
        builder.withRegion(Regions.fromName(configProps.getProperty(AWSConfigConstants.AWS_REGION)));
    }
    return builder.build();
}
Creates an Amazon Kinesis Client .
14,419
/**
 * Returns an AWS credentials provider for the configured credential provider
 * type under the given config prefix. If the provider is ASSUME_ROLE, the
 * credentials used for assuming the role are determined recursively.
 *
 * @param configProps  configuration properties
 * @param configPrefix prefix under which the provider type and its settings live
 * @return the credentials provider matching the configuration
 */
private static AWSCredentialsProvider getCredentialsProvider(final Properties configProps, final String configPrefix) {
    CredentialProvider credentialProviderType;
    if (!configProps.containsKey(configPrefix)) {
        // No explicit type configured: infer BASIC if key id + secret are present,
        // otherwise fall back to the default provider chain (AUTO).
        if (configProps.containsKey(AWSConfigConstants.accessKeyId(configPrefix)) && configProps.containsKey(AWSConfigConstants.secretKey(configPrefix))) {
            credentialProviderType = CredentialProvider.BASIC;
        } else {
            credentialProviderType = CredentialProvider.AUTO;
        }
    } else {
        credentialProviderType = CredentialProvider.valueOf(configProps.getProperty(configPrefix));
    }
    switch (credentialProviderType) {
        case ENV_VAR:
            return new EnvironmentVariableCredentialsProvider();
        case SYS_PROP:
            return new SystemPropertiesCredentialsProvider();
        case PROFILE:
            String profileName = configProps.getProperty(AWSConfigConstants.profileName(configPrefix), null);
            String profileConfigPath = configProps.getProperty(AWSConfigConstants.profilePath(configPrefix), null);
            return (profileConfigPath == null) ? new ProfileCredentialsProvider(profileName) : new ProfileCredentialsProvider(profileConfigPath, profileName);
        case BASIC:
            // Static credentials built directly from the configured key pair.
            return new AWSCredentialsProvider() {
                public AWSCredentials getCredentials() {
                    return new BasicAWSCredentials(configProps.getProperty(AWSConfigConstants.accessKeyId(configPrefix)), configProps.getProperty(AWSConfigConstants.secretKey(configPrefix)));
                }
                public void refresh() {
                }
            };
        case ASSUME_ROLE:
            // The credentials for assuming the role are resolved recursively
            // under the role-credentials-provider sub-prefix.
            final AWSSecurityTokenService baseCredentials = AWSSecurityTokenServiceClientBuilder.standard().withCredentials(getCredentialsProvider(configProps, AWSConfigConstants.roleCredentialsProvider(configPrefix))).withRegion(configProps.getProperty(AWSConfigConstants.AWS_REGION)).build();
            return new STSAssumeRoleSessionCredentialsProvider.Builder(configProps.getProperty(AWSConfigConstants.roleArn(configPrefix)), configProps.getProperty(AWSConfigConstants.roleSessionName(configPrefix))).withExternalId(configProps.getProperty(AWSConfigConstants.externalId(configPrefix))).withStsClient(baseCredentials).build();
        default:
        case AUTO:
            return new DefaultAWSCredentialsProviderChain();
    }
}
Returns an AWS credentials provider for the configured credential provider type . If the provider is ASSUME_ROLE then the credentials for assuming this role are determined recursively .
14,420
/**
 * Checks whether or not a region ID is valid.
 *
 * <p>The input is lower-cased with a locale-independent mapping so the check
 * does not break under locales with special case rules (e.g. the Turkish
 * dotless-i, where {@code "I".toLowerCase()} does not yield {@code "i"}).
 *
 * @param region the region ID to check, e.g. {@code "us-east-1"}; must not be null
 * @return true if the region ID is known to the AWS SDK, false otherwise
 */
public static boolean isValidRegion(String region) {
    try {
        // Regions.fromName expects lower-case names; Locale.ROOT keeps the
        // conversion independent of the JVM's default locale.
        Regions.fromName(region.toLowerCase(java.util.Locale.ROOT));
    } catch (IllegalArgumentException e) {
        // Unknown region name.
        return false;
    }
    return true;
}
Checks whether or not a region ID is valid .
14,421
/**
 * Returns the least power of two greater than or equal to the specified value.
 *
 * <p>A value that already is a power of two is returned unchanged; zero maps
 * to one.
 *
 * @param x the input value
 * @return the smallest power of two that is {@code >= x}
 */
public static long nextPowerOfTwo(long x) {
    if (x == 0L) {
        return 1L;
    }
    // Smear the highest set bit of (x - 1) into all lower positions, then add 1.
    long v = x - 1L;
    for (int shift = 1; shift <= 32; shift <<= 1) {
        v |= v >> shift;
    }
    return v + 1L;
}
Return the least power of two greater than or equal to the specified value .
14,422
/**
 * Returns the maximum number of entries that can be filled before rehashing.
 *
 * <p>At least one slot is always kept free, hence the cap at {@code n - 1}.
 *
 * @param n the table capacity
 * @param f the load factor
 * @return the maximum number of entries before a rehash is needed
 */
public static int maxFill(int n, float f) {
    // Round the load-factor threshold up, but never fill the table completely.
    final int threshold = (int) Math.ceil((double) ((float) n * f));
    return Math.min(threshold, n - 1);
}
Returns the maximum number of entries that can be filled before rehashing .
14,423
/**
 * Recovers all completed checkpoints registered in ZooKeeper.
 *
 * <p>Reads and locks all checkpoint handles (retrying on concurrent
 * modification), then repeatedly tries to retrieve the actual checkpoints from
 * storage until either all are fetched or two consecutive attempts yield the
 * same (stable) subset. Fails if none of the registered checkpoints can be
 * read.
 *
 * @throws FlinkException if no checkpoint could be read although some were registered
 * @throws Exception if reading from ZooKeeper fails
 */
public void recover() throws Exception {
    LOG.info("Recovering checkpoints from ZooKeeper.");
    List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> initialCheckpoints;
    // Retry reading the handle list until no concurrent modification occurs.
    while (true) {
        try {
            initialCheckpoints = checkpointsInZooKeeper.getAllAndLock();
            break;
        } catch (ConcurrentModificationException e) {
            LOG.warn("Concurrent modification while reading from ZooKeeper. Retrying.");
        }
    }
    // Sort by path so checkpoints are ordered by checkpoint id.
    Collections.sort(initialCheckpoints, STRING_COMPARATOR);
    int numberOfInitialCheckpoints = initialCheckpoints.size();
    LOG.info("Found {} checkpoints in ZooKeeper.", numberOfInitialCheckpoints);
    List<CompletedCheckpoint> lastTryRetrievedCheckpoints = new ArrayList<>(numberOfInitialCheckpoints);
    List<CompletedCheckpoint> retrievedCheckpoints = new ArrayList<>(numberOfInitialCheckpoints);
    // Retry fetching until everything is retrieved or the result set stabilizes.
    do {
        LOG.info("Trying to fetch {} checkpoints from storage.", numberOfInitialCheckpoints);
        lastTryRetrievedCheckpoints.clear();
        lastTryRetrievedCheckpoints.addAll(retrievedCheckpoints);
        retrievedCheckpoints.clear();
        for (Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> checkpointStateHandle : initialCheckpoints) {
            CompletedCheckpoint completedCheckpoint = null;
            try {
                completedCheckpoint = retrieveCompletedCheckpoint(checkpointStateHandle);
                if (completedCheckpoint != null) {
                    retrievedCheckpoints.add(completedCheckpoint);
                }
            } catch (Exception e) {
                // Best effort per checkpoint; a later retry round may still succeed.
                LOG.warn("Could not retrieve checkpoint, not adding to list of recovered checkpoints.", e);
            }
        }
    } while (retrievedCheckpoints.size() != numberOfInitialCheckpoints && !CompletedCheckpoint.checkpointsMatch(lastTryRetrievedCheckpoints, retrievedCheckpoints));
    // Replace the in-memory view with whatever could be recovered.
    completedCheckpoints.clear();
    completedCheckpoints.addAll(retrievedCheckpoints);
    if (completedCheckpoints.isEmpty() && numberOfInitialCheckpoints > 0) {
        throw new FlinkException("Could not read any of the " + numberOfInitialCheckpoints + " checkpoints from storage.");
    } else if (completedCheckpoints.size() != numberOfInitialCheckpoints) {
        LOG.warn("Could only fetch {} of {} checkpoints from storage.", completedCheckpoints.size(), numberOfInitialCheckpoints);
    }
}
Recovers all completed checkpoints registered in ZooKeeper , retrying until a consistent set of checkpoints can be read from storage .
14,424
/**
 * Synchronously writes the new checkpoint to ZooKeeper and then trims the
 * in-memory list, discarding subsumed checkpoints beyond the retention limit.
 *
 * @param checkpoint the completed checkpoint to add; must not be null
 * @throws Exception if writing to ZooKeeper fails
 */
public void addCheckpoint(final CompletedCheckpoint checkpoint) throws Exception {
    checkNotNull(checkpoint, "Checkpoint");
    final String path = checkpointIdToPath(checkpoint.getCheckpointID());
    // Persist to ZooKeeper first, so the checkpoint is durable before it is
    // visible in the local list.
    checkpointsInZooKeeper.addAndLock(path, checkpoint);
    completedCheckpoints.addLast(checkpoint);
    // Evict the oldest checkpoints beyond the retention limit.
    while (completedCheckpoints.size() > maxNumberOfCheckpointsToRetain) {
        final CompletedCheckpoint completedCheckpoint = completedCheckpoints.removeFirst();
        tryRemoveCompletedCheckpoint(completedCheckpoint, CompletedCheckpoint::discardOnSubsume);
    }
    LOG.debug("Added {} to {}.", checkpoint, path);
}
Synchronously writes the new checkpoints to ZooKeeper and asynchronously removes older ones .
14,425
/**
 * Converts a ZooKeeper path to the checkpoint id it encodes.
 *
 * @param path the path, with or without a leading {@code '/'}
 * @return the parsed checkpoint id, or -1 if the path cannot be parsed
 */
public static long pathToCheckpointId(String path) {
    try {
        // Strip an optional leading slash before parsing the numeric part.
        final String digits = (path.charAt(0) == '/') ? path.substring(1) : path;
        return Long.parseLong(digits);
    } catch (NumberFormatException e) {
        LOG.warn("Could not parse checkpoint id from {}. This indicates that the " + "checkpoint id to path conversion has changed.", path);
        return -1L;
    }
}
Converts a path to the checkpoint id .
14,426
/**
 * Checks if an asynchronous exception was recorded. If so, either rethrows it
 * (when {@code failOnError} is set) or logs it and clears the pending error.
 *
 * <p>For {@code UserRecordFailedException}s, the per-attempt error messages
 * are collected into the message.
 *
 * @throws RuntimeException wrapping the recorded exception when failing on error
 */
private void checkAndPropagateAsyncError() throws Exception {
    if (thrownException != null) {
        // Collect per-attempt error details for UserRecordFailedExceptions.
        // StringBuilder avoids repeated String concatenation in the loop.
        final StringBuilder errorMessages = new StringBuilder();
        if (thrownException instanceof UserRecordFailedException) {
            List<Attempt> attempts = ((UserRecordFailedException) thrownException).getResult().getAttempts();
            for (Attempt attempt : attempts) {
                if (attempt.getErrorMessage() != null) {
                    errorMessages.append(attempt.getErrorMessage()).append("\n");
                }
            }
        }
        if (failOnError) {
            throw new RuntimeException("An exception was thrown while processing a record: " + errorMessages, thrownException);
        } else {
            // Fixed SLF4J argument order: the '{}' placeholder takes the collected
            // messages and the trailing Throwable is logged with its stack trace
            // (previously the exception filled the placeholder and the messages
            // were silently dropped).
            LOG.warn("An exception was thrown while processing a record: {}", errorMessages, thrownException);
            thrownException = null;
        }
    }
}
Check if there are any asynchronous exceptions . If so rethrow the exception .
14,427
/**
 * Gets the configuration describing the queryable state as deactivated.
 *
 * <p>Default port ranges are still parsed so the configuration object is fully
 * populated, but all thread/query counts are zero.
 *
 * @return a queryable state configuration representing the disabled state
 */
public static QueryableStateConfiguration disabled() {
    final Iterator<Integer> proxyPortIterator = NetUtils.getPortRangeFromString(QueryableStateOptions.PROXY_PORT_RANGE.defaultValue());
    final Iterator<Integer> serverPortIterator = NetUtils.getPortRangeFromString(QueryableStateOptions.SERVER_PORT_RANGE.defaultValue());
    return new QueryableStateConfiguration(proxyPortIterator, serverPortIterator, 0, 0, 0, 0);
}
Gets the configuration describing the queryable state as deactivated .
14,428
/**
 * Adds a data set as a broadcast set to the scatter function.
 *
 * @param name the name under which the broadcast set is registered
 * @param data the data set to broadcast
 */
public void addBroadcastSetForScatterFunction(String name, DataSet<?> data) {
    final Tuple2<String, DataSet<?>> broadcastVariable = new Tuple2<>(name, data);
    this.bcVarsScatter.add(broadcastVariable);
}
Adds a data set as a broadcast set to the scatter function .
14,429
/**
 * Adds a data set as a broadcast set to the gather function.
 *
 * @param name the name under which the broadcast set is registered
 * @param data the data set to broadcast
 */
public void addBroadcastSetForGatherFunction(String name, DataSet<?> data) {
    final Tuple2<String, DataSet<?>> broadcastVariable = new Tuple2<>(name, data);
    this.bcVarsGather.add(broadcastVariable);
}
Adds a data set as a broadcast set to the gather function .
14,430
/**
 * Returns all elements from the previous relation starting at the given entry.
 *
 * <p>Performs an iterative depth-first traversal of the shared buffer starting
 * at {@code nodeId}, following only edges whose Dewey number is compatible
 * with the current version. Every complete path is materialized as a map from
 * pattern (page) name to the list of event ids on that path.
 *
 * @param nodeId  the entry to start the extraction from
 * @param version the Dewey number version used to filter compatible edges
 * @return all extracted paths, each as pattern-name -> event ids
 */
public List<Map<String, List<EventId>>> extractPatterns(final NodeId nodeId, final DeweyNumber version) {
    List<Map<String, List<EventId>>> result = new ArrayList<>();
    // Work stack for the iterative DFS.
    Stack<SharedBufferAccessor.ExtractionState> extractionStates = new Stack<>();
    Lockable<SharedBufferNode> entryLock = sharedBuffer.getEntry(nodeId);
    if (entryLock != null) {
        SharedBufferNode entry = entryLock.getElement();
        extractionStates.add(new SharedBufferAccessor.ExtractionState(Tuple2.of(nodeId, entry), version, new Stack<>()));
        while (!extractionStates.isEmpty()) {
            final SharedBufferAccessor.ExtractionState extractionState = extractionStates.pop();
            final Stack<Tuple2<NodeId, SharedBufferNode>> currentPath = extractionState.getPath();
            final Tuple2<NodeId, SharedBufferNode> currentEntry = extractionState.getEntry();
            if (currentEntry == null) {
                // Terminal state reached: materialize the collected path.
                final Map<String, List<EventId>> completePath = new LinkedHashMap<>();
                while (!currentPath.isEmpty()) {
                    final NodeId currentPathEntry = currentPath.pop().f0;
                    String page = currentPathEntry.getPageName();
                    List<EventId> values = completePath.computeIfAbsent(page, k -> new ArrayList<>());
                    values.add(currentPathEntry.getEventId());
                }
                result.add(completePath);
            } else {
                currentPath.push(currentEntry);
                // The first compatible edge reuses the current path; further
                // branches get a copy so paths stay independent.
                boolean firstMatch = true;
                for (SharedBufferEdge edge : currentEntry.f1.getEdges()) {
                    final DeweyNumber currentVersion = extractionState.getVersion();
                    if (currentVersion.isCompatibleWith(edge.getDeweyNumber())) {
                        final NodeId target = edge.getTarget();
                        Stack<Tuple2<NodeId, SharedBufferNode>> newPath;
                        if (firstMatch) {
                            newPath = currentPath;
                            firstMatch = false;
                        } else {
                            newPath = new Stack<>();
                            newPath.addAll(currentPath);
                        }
                        // A null target marks a terminal extraction state.
                        extractionStates.push(new SharedBufferAccessor.ExtractionState(target != null ? Tuple2.of(target, sharedBuffer.getEntry(target).getElement()) : null, edge.getDeweyNumber(), newPath));
                    }
                }
            }
        }
    }
    return result;
}
Returns all elements from the previous relation starting at the given entry .
14,431
/**
 * Extracts the real events from the shared buffer for the pre-extracted event
 * ids of a match.
 *
 * @param match mapping from pattern name to the matched event ids
 * @return mapping from pattern name to the materialized events, in order
 * @throws WrappingRuntimeException if an event cannot be read from the buffer
 */
public Map<String, List<V>> materializeMatch(Map<String, List<EventId>> match) {
    // LinkedHashMap preserves the pattern order of the input match.
    Map<String, List<V>> materialized = new LinkedHashMap<>(match.size());
    for (Map.Entry<String, List<EventId>> patternEntry : match.entrySet()) {
        List<V> events = new ArrayList<>(patternEntry.getValue().size());
        for (EventId eventId : patternEntry.getValue()) {
            try {
                events.add(sharedBuffer.getEvent(eventId).getElement());
            } catch (Exception ex) {
                throw new WrappingRuntimeException(ex);
            }
        }
        materialized.put(patternEntry.getKey(), events);
    }
    return materialized;
}
Extracts the real event from the sharedBuffer with pre - extracted eventId .
14,432
/**
 * Increases the reference counter for the given entry so that it is not
 * accidentally removed.
 *
 * @param node id of the entry whose reference counter should be increased
 */
public void lockNode(final NodeId node) {
    final Lockable<SharedBufferNode> entry = sharedBuffer.getEntry(node);
    if (entry == null) {
        // Unknown node: nothing to lock.
        return;
    }
    entry.lock();
    sharedBuffer.upsertEntry(node, entry);
}
Increases the reference counter for the given entry so that it is not accidentally removed .
14,433
/**
 * Decreases the reference counter for the given entry so that it can be
 * removed once the reference counter reaches 0.
 *
 * @param node id of the entry whose reference counter should be decreased
 * @throws Exception if the node removal fails
 */
public void releaseNode(final NodeId node) throws Exception {
    final Lockable<SharedBufferNode> entry = sharedBuffer.getEntry(node);
    if (entry == null) {
        // Unknown node: nothing to release.
        return;
    }
    if (entry.release()) {
        // Counter dropped to zero: the node can be removed.
        removeNode(node, entry.getElement());
    } else {
        sharedBuffer.upsertEntry(node, entry);
    }
}
Decreases the reference counter for the given entry so that it can be removed once the reference counter reaches 0 .
14,434
/**
 * Increases the reference counter for the given event so that it is not
 * accidentally removed.
 *
 * @param eventId id of the event whose reference counter should be increased
 * @throws IllegalStateException if no event with the given id exists
 */
private void lockEvent(EventId eventId) {
    final Lockable<V> wrapper = sharedBuffer.getEvent(eventId);
    checkState(wrapper != null, "Referring to non existent event with id %s", eventId);
    wrapper.lock();
    sharedBuffer.upsertEvent(eventId, wrapper);
}
Increases the reference counter for the given event so that it is not accidentally removed .
14,435
/**
 * Decreases the reference counter for the given event so that it can be
 * removed once the reference counter reaches 0.
 *
 * @param eventId id of the event whose reference counter should be decreased
 * @throws Exception if the event removal fails
 */
public void releaseEvent(EventId eventId) throws Exception {
    final Lockable<V> wrapper = sharedBuffer.getEvent(eventId);
    if (wrapper == null) {
        // Unknown event: nothing to release.
        return;
    }
    if (wrapper.release()) {
        // Counter dropped to zero: the event can be removed.
        sharedBuffer.removeEvent(eventId);
    } else {
        sharedBuffer.upsertEvent(eventId, wrapper);
    }
}
Decreases the reference counter for the given event so that it can be removed once the reference counter reaches 0 .
14,436
/**
 * Executes the STOP action (stop-with-savepoint).
 *
 * <p>Parses the command line, determines job id and optional savepoint target
 * directory, then stops the job with a savepoint, optionally draining it
 * (advancing to end of event time) first.
 *
 * @param args command line arguments of the stop action
 * @throws Exception if parsing fails or the cluster action fails
 */
protected void stop(String[] args) throws Exception {
    LOG.info("Running 'stop-with-savepoint' command.");
    final Options commandOptions = CliFrontendParser.getStopCommandOptions();
    final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions);
    final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false);
    final StopOptions stopOptions = new StopOptions(commandLine);
    if (stopOptions.isPrintHelp()) {
        CliFrontendParser.printHelpForStop(customCommandLines);
        return;
    }
    final String[] cleanedArgs = stopOptions.getArgs();
    // A target directory is only meaningful if the savepoint flag is set AND a
    // job id was given as positional argument (otherwise the "directory"
    // argument actually holds the job id).
    final String targetDirectory = stopOptions.hasSavepointFlag() && cleanedArgs.length > 0 ? stopOptions.getTargetDirectory() : null;
    final JobID jobId = cleanedArgs.length != 0 ? parseJobId(cleanedArgs[0]) : parseJobId(stopOptions.getTargetDirectory());
    final boolean advanceToEndOfEventTime = stopOptions.shouldAdvanceToEndOfEventTime();
    logAndSysout((advanceToEndOfEventTime ? "Draining job " : "Suspending job ") + "\"" + jobId + "\" with a savepoint.");
    final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine);
    runClusterAction(activeCommandLine, commandLine, clusterClient -> {
        try {
            clusterClient.stopWithSavepoint(jobId, advanceToEndOfEventTime, targetDirectory);
        } catch (Exception e) {
            throw new FlinkException("Could not stop with a savepoint job \"" + jobId + "\".", e);
        }
    });
    logAndSysout((advanceToEndOfEventTime ? "Drained job " : "Suspended job ") + "\"" + jobId + "\" with a savepoint.");
}
Executes the STOP action .
14,437
/**
 * Executes the CANCEL action.
 *
 * <p>Supports plain cancellation as well as the (deprecated) cancel-with-
 * savepoint mode, where a savepoint is taken before the job is cancelled.
 *
 * @param args command line arguments of the cancel action
 * @throws CliArgsException if no job id was provided for plain cancellation
 * @throws Exception if parsing fails or the cluster action fails
 */
protected void cancel(String[] args) throws Exception {
    LOG.info("Running 'cancel' command.");
    final Options commandOptions = CliFrontendParser.getCancelCommandOptions();
    final Options commandLineOptions = CliFrontendParser.mergeOptions(commandOptions, customCommandLineOptions);
    final CommandLine commandLine = CliFrontendParser.parse(commandLineOptions, args, false);
    CancelOptions cancelOptions = new CancelOptions(commandLine);
    if (cancelOptions.isPrintHelp()) {
        CliFrontendParser.printHelpForCancel(customCommandLines);
        return;
    }
    final CustomCommandLine<?> activeCommandLine = getActiveCustomCommandLine(commandLine);
    final String[] cleanedArgs = cancelOptions.getArgs();
    if (cancelOptions.isWithSavepoint()) {
        // Cancel-with-savepoint mode: when no positional job id is given, the
        // savepoint-target option actually carries the job id.
        final JobID jobId;
        final String targetDirectory;
        if (cleanedArgs.length > 0) {
            jobId = parseJobId(cleanedArgs[0]);
            targetDirectory = cancelOptions.getSavepointTargetDirectory();
        } else {
            jobId = parseJobId(cancelOptions.getSavepointTargetDirectory());
            targetDirectory = null;
        }
        if (targetDirectory == null) {
            logAndSysout("Cancelling job " + jobId + " with savepoint to default savepoint directory.");
        } else {
            logAndSysout("Cancelling job " + jobId + " with savepoint to " + targetDirectory + '.');
        }
        runClusterAction(activeCommandLine, commandLine, clusterClient -> {
            final String savepointPath;
            try {
                savepointPath = clusterClient.cancelWithSavepoint(jobId, targetDirectory);
            } catch (Exception e) {
                throw new FlinkException("Could not cancel job " + jobId + '.', e);
            }
            logAndSysout("Cancelled job " + jobId + ". Savepoint stored in " + savepointPath + '.');
        });
    } else {
        // Plain cancellation: a job id is mandatory.
        final JobID jobId;
        if (cleanedArgs.length > 0) {
            jobId = parseJobId(cleanedArgs[0]);
        } else {
            throw new CliArgsException("Missing JobID. Specify a JobID to cancel a job.");
        }
        logAndSysout("Cancelling job " + jobId + '.');
        runClusterAction(activeCommandLine, commandLine, clusterClient -> {
            try {
                clusterClient.cancel(jobId);
            } catch (Exception e) {
                throw new FlinkException("Could not cancel job " + jobId + '.', e);
            }
        });
        logAndSysout("Cancelled job " + jobId + '.');
    }
}
Executes the CANCEL action .
14,438
protected void savepoint ( String [ ] args ) throws Exception { LOG . info ( "Running 'savepoint' command." ) ; final Options commandOptions = CliFrontendParser . getSavepointCommandOptions ( ) ; final Options commandLineOptions = CliFrontendParser . mergeOptions ( commandOptions , customCommandLineOptions ) ; final CommandLine commandLine = CliFrontendParser . parse ( commandLineOptions , args , false ) ; final SavepointOptions savepointOptions = new SavepointOptions ( commandLine ) ; if ( savepointOptions . isPrintHelp ( ) ) { CliFrontendParser . printHelpForSavepoint ( customCommandLines ) ; return ; } final CustomCommandLine < ? > activeCommandLine = getActiveCustomCommandLine ( commandLine ) ; if ( savepointOptions . isDispose ( ) ) { runClusterAction ( activeCommandLine , commandLine , clusterClient -> disposeSavepoint ( clusterClient , savepointOptions . getSavepointPath ( ) ) ) ; } else { String [ ] cleanedArgs = savepointOptions . getArgs ( ) ; final JobID jobId ; if ( cleanedArgs . length >= 1 ) { String jobIdString = cleanedArgs [ 0 ] ; jobId = parseJobId ( jobIdString ) ; } else { throw new CliArgsException ( "Missing JobID. " + "Specify a Job ID to trigger a savepoint." ) ; } final String savepointDirectory ; if ( cleanedArgs . length >= 2 ) { savepointDirectory = cleanedArgs [ 1 ] ; } else { savepointDirectory = null ; } if ( cleanedArgs . length >= 3 ) { logAndSysout ( "Provided more arguments than required. Ignoring not needed arguments." ) ; } runClusterAction ( activeCommandLine , commandLine , clusterClient -> triggerSavepoint ( clusterClient , jobId , savepointDirectory ) ) ; } }
Executes the SAVEPOINT action .
14,439
private String triggerSavepoint ( ClusterClient < ? > clusterClient , JobID jobId , String savepointDirectory ) throws FlinkException { logAndSysout ( "Triggering savepoint for job " + jobId + '.' ) ; CompletableFuture < String > savepointPathFuture = clusterClient . triggerSavepoint ( jobId , savepointDirectory ) ; logAndSysout ( "Waiting for response..." ) ; final String savepointPath ; try { savepointPath = savepointPathFuture . get ( ) ; } catch ( Exception e ) { Throwable cause = ExceptionUtils . stripExecutionException ( e ) ; throw new FlinkException ( "Triggering a savepoint for the job " + jobId + " failed." , cause ) ; } logAndSysout ( "Savepoint completed. Path: " + savepointPath ) ; logAndSysout ( "You can resume your program from this savepoint with the run command." ) ; return savepointPath ; }
Sends a SavepointTriggerMessage to the job manager .
14,440
private void disposeSavepoint ( ClusterClient < ? > clusterClient , String savepointPath ) throws FlinkException { Preconditions . checkNotNull ( savepointPath , "Missing required argument: savepoint path. " + "Usage: bin/flink savepoint -d <savepoint-path>" ) ; logAndSysout ( "Disposing savepoint '" + savepointPath + "'." ) ; final CompletableFuture < Acknowledge > disposeFuture = clusterClient . disposeSavepoint ( savepointPath ) ; logAndSysout ( "Waiting for response..." ) ; try { disposeFuture . get ( clientTimeout . toMillis ( ) , TimeUnit . MILLISECONDS ) ; } catch ( Exception e ) { throw new FlinkException ( "Disposing the savepoint '" + savepointPath + "' failed." , e ) ; } logAndSysout ( "Savepoint '" + savepointPath + "' disposed." ) ; }
Sends a SavepointDisposalRequest to the job manager .
14,441
PackagedProgram buildProgram ( ProgramOptions options ) throws FileNotFoundException , ProgramInvocationException { String [ ] programArgs = options . getProgramArgs ( ) ; String jarFilePath = options . getJarFilePath ( ) ; List < URL > classpaths = options . getClasspaths ( ) ; if ( jarFilePath == null ) { throw new IllegalArgumentException ( "The program JAR file was not specified." ) ; } File jarFile = new File ( jarFilePath ) ; if ( ! jarFile . exists ( ) ) { throw new FileNotFoundException ( "JAR file does not exist: " + jarFile ) ; } else if ( ! jarFile . isFile ( ) ) { throw new FileNotFoundException ( "JAR file is not a file: " + jarFile ) ; } String entryPointClass = options . getEntryPointClassName ( ) ; PackagedProgram program = entryPointClass == null ? new PackagedProgram ( jarFile , classpaths , programArgs ) : new PackagedProgram ( jarFile , classpaths , entryPointClass , programArgs ) ; program . setSavepointRestoreSettings ( options . getSavepointRestoreSettings ( ) ) ; return program ; }
Creates a Packaged program from the given command line options .
14,442
private static int handleParametrizationException ( ProgramParametrizationException e ) { LOG . error ( "Program has not been parametrized properly." , e ) ; System . err . println ( e . getMessage ( ) ) ; return 1 ; }
Displays an optional exception message for incorrect program parametrization .
14,443
private static int handleError ( Throwable t ) { LOG . error ( "Error while running the command." , t ) ; System . err . println ( ) ; System . err . println ( "------------------------------------------------------------" ) ; System . err . println ( " The program finished with the following exception:" ) ; System . err . println ( ) ; if ( t . getCause ( ) instanceof InvalidProgramException ) { System . err . println ( t . getCause ( ) . getMessage ( ) ) ; StackTraceElement [ ] trace = t . getCause ( ) . getStackTrace ( ) ; for ( StackTraceElement ele : trace ) { System . err . println ( "\t" + ele ) ; if ( ele . getMethodName ( ) . equals ( "main" ) ) { break ; } } } else { t . printStackTrace ( ) ; } return 1 ; }
Displays an exception message .
14,444
public static void main ( final String [ ] args ) { EnvironmentInformation . logEnvironmentInfo ( LOG , "Command Line Client" , args ) ; final String configurationDirectory = getConfigurationDirectoryFromEnv ( ) ; final Configuration configuration = GlobalConfiguration . loadConfiguration ( configurationDirectory ) ; final List < CustomCommandLine < ? > > customCommandLines = loadCustomCommandLines ( configuration , configurationDirectory ) ; try { final CliFrontend cli = new CliFrontend ( configuration , customCommandLines ) ; SecurityUtils . install ( new SecurityConfiguration ( cli . configuration ) ) ; int retCode = SecurityUtils . getInstalledContext ( ) . runSecured ( ( ) -> cli . parseParameters ( args ) ) ; System . exit ( retCode ) ; } catch ( Throwable t ) { final Throwable strippedThrowable = ExceptionUtils . stripException ( t , UndeclaredThrowableException . class ) ; LOG . error ( "Fatal error while running command line interface." , strippedThrowable ) ; strippedThrowable . printStackTrace ( ) ; System . exit ( 31 ) ; } }
Submits the job based on the arguments .
14,445
static void setJobManagerAddressInConfig ( Configuration config , InetSocketAddress address ) { config . setString ( JobManagerOptions . ADDRESS , address . getHostString ( ) ) ; config . setInteger ( JobManagerOptions . PORT , address . getPort ( ) ) ; config . setString ( RestOptions . ADDRESS , address . getHostString ( ) ) ; config . setInteger ( RestOptions . PORT , address . getPort ( ) ) ; }
Writes the given job manager address to the associated configuration object .
14,446
public CustomCommandLine < ? > getActiveCustomCommandLine ( CommandLine commandLine ) { for ( CustomCommandLine < ? > cli : customCommandLines ) { if ( cli . isActive ( commandLine ) ) { return cli ; } } throw new IllegalStateException ( "No command-line ran." ) ; }
Gets the custom command - line for the arguments .
14,447
private static CustomCommandLine < ? > loadCustomCommandLine ( String className , Object ... params ) throws IllegalAccessException , InvocationTargetException , InstantiationException , ClassNotFoundException , NoSuchMethodException { Class < ? extends CustomCommandLine > customCliClass = Class . forName ( className ) . asSubclass ( CustomCommandLine . class ) ; Class < ? > [ ] types = new Class < ? > [ params . length ] ; for ( int i = 0 ; i < params . length ; i ++ ) { Preconditions . checkNotNull ( params [ i ] , "Parameters for custom command-lines may not be null." ) ; types [ i ] = params [ i ] . getClass ( ) ; } Constructor < ? extends CustomCommandLine > constructor = customCliClass . getConstructor ( types ) ; return constructor . newInstance ( params ) ; }
Loads a class from the classpath that implements the CustomCommandLine interface .
14,448
public O withForwardedFields ( String ... forwardedFields ) { if ( this . udfSemantics == null ) { setSemanticProperties ( extractSemanticAnnotations ( getFunction ( ) . getClass ( ) ) ) ; } if ( this . udfSemantics == null || this . analyzedUdfSemantics ) { setSemanticProperties ( new SingleInputSemanticProperties ( ) ) ; SemanticPropUtil . getSemanticPropsSingleFromString ( this . udfSemantics , forwardedFields , null , null , this . getInputType ( ) , this . getResultType ( ) ) ; } else { if ( udfWithForwardedFieldsAnnotation ( getFunction ( ) . getClass ( ) ) ) { throw new SemanticProperties . InvalidSemanticAnnotationException ( "Forwarded field information " + "has already been added by a function annotation for this operator. " + "Cannot overwrite function annotations." ) ; } else { SemanticPropUtil . getSemanticPropsSingleFromString ( this . udfSemantics , forwardedFields , null , null , this . getInputType ( ) , this . getResultType ( ) ) ; } } @ SuppressWarnings ( "unchecked" ) O returnType = ( O ) this ; return returnType ; }
Adds semantic information about forwarded fields of the user - defined function . The forwarded fields information declares fields which are never modified by the function and which are forwarded at the same position to the output or unchanged copied to another position in the output .
14,449
private static JobVertexBackPressureInfo . VertexBackPressureLevel getBackPressureLevel ( double backPressureRatio ) { if ( backPressureRatio <= 0.10 ) { return JobVertexBackPressureInfo . VertexBackPressureLevel . OK ; } else if ( backPressureRatio <= 0.5 ) { return JobVertexBackPressureInfo . VertexBackPressureLevel . LOW ; } else { return JobVertexBackPressureInfo . VertexBackPressureLevel . HIGH ; } }
Returns the back pressure level for the given ratio as a VertexBackPressureLevel enum value (OK, LOW, or HIGH).
14,450
public boolean schemaEquals ( Object obj ) { return equals ( obj ) && Arrays . equals ( fieldNames , ( ( RowTypeInfo ) obj ) . fieldNames ) ; }
Tests whether an other object describes the same schema - equivalent row information .
14,451
public void discardState ( ) throws Exception { FileSystem fs = getFileSystem ( ) ; fs . delete ( filePath , false ) ; }
Discard the state by deleting the file that stores the state . If the parent directory of the state is empty after deleting the state file it is also deleted .
14,452
public final void setNewVertexValue ( VV newValue ) { if ( setNewVertexValueCalled ) { throw new IllegalStateException ( "setNewVertexValue should only be called at most once per updateVertex" ) ; } setNewVertexValueCalled = true ; outVertex . f1 = newValue ; out . collect ( Either . Left ( outVertex ) ) ; }
Sets the new value of this vertex .
14,453
public void setNewVertexValue ( VV newValue ) { if ( setNewVertexValueCalled ) { throw new IllegalStateException ( "setNewVertexValue should only be called at most once per updateVertex" ) ; } setNewVertexValueCalled = true ; if ( isOptDegrees ( ) ) { outValWithDegrees . f1 . f0 = newValue ; outWithDegrees . collect ( outValWithDegrees ) ; } else { outVal . setValue ( newValue ) ; out . collect ( outVal ) ; } }
Sets the new value of this vertex . Setting a new value triggers the sending of outgoing messages from this vertex .
14,454
private static Path validatePath ( Path path ) { final URI uri = path . toUri ( ) ; final String scheme = uri . getScheme ( ) ; final String pathPart = uri . getPath ( ) ; if ( scheme == null ) { throw new IllegalArgumentException ( "The scheme (hdfs://, file://, etc) is null. " + "Please specify the file system scheme explicitly in the URI." ) ; } if ( pathPart == null ) { throw new IllegalArgumentException ( "The path to store the checkpoint data in is null. " + "Please specify a directory path for the checkpoint data." ) ; } if ( pathPart . length ( ) == 0 || pathPart . equals ( "/" ) ) { throw new IllegalArgumentException ( "Cannot use the root directory for checkpoints." ) ; } return path ; }
Checks the validity of the path s scheme and path .
14,455
public void transferAllStateDataToDirectory ( IncrementalRemoteKeyedStateHandle restoreStateHandle , Path dest , CloseableRegistry closeableRegistry ) throws Exception { final Map < StateHandleID , StreamStateHandle > sstFiles = restoreStateHandle . getSharedState ( ) ; final Map < StateHandleID , StreamStateHandle > miscFiles = restoreStateHandle . getPrivateState ( ) ; downloadDataForAllStateHandles ( sstFiles , dest , closeableRegistry ) ; downloadDataForAllStateHandles ( miscFiles , dest , closeableRegistry ) ; }
Transfer all state data to the target directory using specified number of threads .
14,456
private void downloadDataForStateHandle ( Path restoreFilePath , StreamStateHandle remoteFileHandle , CloseableRegistry closeableRegistry ) throws IOException { FSDataInputStream inputStream = null ; FSDataOutputStream outputStream = null ; try { FileSystem restoreFileSystem = restoreFilePath . getFileSystem ( ) ; inputStream = remoteFileHandle . openInputStream ( ) ; closeableRegistry . registerCloseable ( inputStream ) ; outputStream = restoreFileSystem . create ( restoreFilePath , FileSystem . WriteMode . OVERWRITE ) ; closeableRegistry . registerCloseable ( outputStream ) ; byte [ ] buffer = new byte [ 8 * 1024 ] ; while ( true ) { int numBytes = inputStream . read ( buffer ) ; if ( numBytes == - 1 ) { break ; } outputStream . write ( buffer , 0 , numBytes ) ; } } finally { if ( closeableRegistry . unregisterCloseable ( inputStream ) ) { inputStream . close ( ) ; } if ( closeableRegistry . unregisterCloseable ( outputStream ) ) { outputStream . close ( ) ; } } }
Copies the file from a single state handle to the given path .
14,457
public static < OUT > void checkCollection ( Collection < OUT > elements , Class < OUT > viewedAs ) { for ( OUT elem : elements ) { if ( elem == null ) { throw new IllegalArgumentException ( "The collection contains a null element" ) ; } if ( ! viewedAs . isAssignableFrom ( elem . getClass ( ) ) ) { throw new IllegalArgumentException ( "The elements in the collection are not all subclasses of " + viewedAs . getCanonicalName ( ) ) ; } } }
Verifies that all elements in the collection are non - null and are of the given class or a subclass thereof .
14,458
public void initializeCache ( Object key ) throws Exception { this . sortedWindows = cachedSortedWindows . get ( key ) ; if ( sortedWindows == null ) { this . sortedWindows = new TreeSet < > ( ) ; Iterator < Map . Entry < W , W > > keyValues = mapping . iterator ( ) ; if ( keyValues != null ) { while ( keyValues . hasNext ( ) ) { Map . Entry < W , W > keyValue = keyValues . next ( ) ; this . sortedWindows . add ( keyValue . getKey ( ) ) ; } } cachedSortedWindows . put ( key , sortedWindows ) ; } }
Set current key context of this window set .
14,459
public final boolean isResolved ( ) { return getPathParameters ( ) . stream ( ) . filter ( MessageParameter :: isMandatory ) . allMatch ( MessageParameter :: isResolved ) && getQueryParameters ( ) . stream ( ) . filter ( MessageParameter :: isMandatory ) . allMatch ( MessageParameter :: isResolved ) ; }
Returns whether all mandatory parameters have been resolved .
14,460
public static Database createHiveDatabase ( String dbName , CatalogDatabase db ) { Map < String , String > props = db . getProperties ( ) ; return new Database ( dbName , db . getDescription ( ) . isPresent ( ) ? db . getDescription ( ) . get ( ) : null , null , props ) ; }
Creates a Hive database from CatalogDatabase .
14,461
private static int indexOfName ( List < UnresolvedReferenceExpression > inputFieldReferences , String targetName ) { int i ; for ( i = 0 ; i < inputFieldReferences . size ( ) ; ++ i ) { if ( inputFieldReferences . get ( i ) . getName ( ) . equals ( targetName ) ) { break ; } } return i == inputFieldReferences . size ( ) ? - 1 : i ; }
Find the index of targetName in the list . Return - 1 if not found .
14,462
private static boolean checkBegin ( BinaryString pattern , MemorySegment [ ] segments , int start , int len ) { int lenSub = pattern . getSizeInBytes ( ) ; return len >= lenSub && SegmentsUtil . equals ( pattern . getSegments ( ) , 0 , segments , start , lenSub ) ; }
Matches the beginning of each string to a pattern .
14,463
private static int indexMiddle ( BinaryString pattern , MemorySegment [ ] segments , int start , int len ) { return SegmentsUtil . find ( segments , start , len , pattern . getSegments ( ) , pattern . getOffset ( ) , pattern . getSizeInBytes ( ) ) ; }
Matches the middle of each string to its pattern .
14,464
public < C extends RpcGateway > C getSelfGateway ( Class < C > selfGatewayType ) { if ( selfGatewayType . isInstance ( rpcServer ) ) { @ SuppressWarnings ( "unchecked" ) C selfGateway = ( ( C ) rpcServer ) ; return selfGateway ; } else { throw new RuntimeException ( "RpcEndpoint does not implement the RpcGateway interface of type " + selfGatewayType + '.' ) ; } }
Returns a self gateway of the specified type which can be used to issue asynchronous calls against the RpcEndpoint .
14,465
public static void closeSafetyNetAndGuardedResourcesForThread ( ) { SafetyNetCloseableRegistry registry = REGISTRIES . get ( ) ; if ( null != registry ) { REGISTRIES . remove ( ) ; IOUtils . closeQuietly ( registry ) ; } }
Closes the safety net for a thread . This closes all remaining unclosed streams that were opened by safety - net - guarded file systems . After this method was called no streams can be opened any more from any FileSystem instance that was obtained while the thread was guarded by the safety net .
14,466
public void addHeuristicNetworkCost ( double cost ) { if ( cost <= 0 ) { throw new IllegalArgumentException ( "Heuristic costs must be positive." ) ; } this . heuristicNetworkCost += cost ; if ( this . heuristicNetworkCost < 0 ) { this . heuristicNetworkCost = Double . MAX_VALUE ; } }
Adds the heuristic costs for network to the current heuristic network costs for this Costs object .
14,467
public void addHeuristicDiskCost ( double cost ) { if ( cost <= 0 ) { throw new IllegalArgumentException ( "Heuristic costs must be positive." ) ; } this . heuristicDiskCost += cost ; if ( this . heuristicDiskCost < 0 ) { this . heuristicDiskCost = Double . MAX_VALUE ; } }
Adds the heuristic costs for disk to the current heuristic disk costs for this Costs object .
14,468
public void addHeuristicCpuCost ( double cost ) { if ( cost <= 0 ) { throw new IllegalArgumentException ( "Heuristic costs must be positive." ) ; } this . heuristicCpuCost += cost ; if ( this . heuristicCpuCost < 0 ) { this . heuristicCpuCost = Double . MAX_VALUE ; } }
Adds the given heuristic CPU cost to the current heuristic CPU cost for this Costs object .
14,469
public void subtractCosts ( Costs other ) { if ( this . networkCost != UNKNOWN && other . networkCost != UNKNOWN ) { this . networkCost -= other . networkCost ; if ( this . networkCost < 0 ) { throw new IllegalArgumentException ( "Cannot subtract more cost then there is." ) ; } } if ( this . diskCost != UNKNOWN && other . diskCost != UNKNOWN ) { this . diskCost -= other . diskCost ; if ( this . diskCost < 0 ) { throw new IllegalArgumentException ( "Cannot subtract more cost then there is." ) ; } } if ( this . cpuCost != UNKNOWN && other . cpuCost != UNKNOWN ) { this . cpuCost -= other . cpuCost ; if ( this . cpuCost < 0 ) { throw new IllegalArgumentException ( "Cannot subtract more cost then there is." ) ; } } this . heuristicNetworkCost -= other . heuristicNetworkCost ; if ( this . heuristicNetworkCost < 0 ) { throw new IllegalArgumentException ( "Cannot subtract more cost then there is." ) ; } this . heuristicDiskCost -= other . heuristicDiskCost ; if ( this . heuristicDiskCost < 0 ) { throw new IllegalArgumentException ( "Cannot subtract more cost then there is." ) ; } this . heuristicCpuCost -= other . heuristicCpuCost ; if ( this . heuristicCpuCost < 0 ) { throw new IllegalArgumentException ( "Cannot subtract more cost then there is." ) ; } }
Subtracts the given costs from these costs . If the given costs are unknown then these costs remain unchanged .
14,470
public JoinOperator < I1 , I2 , OUT > withPartitioner ( Partitioner < ? > partitioner ) { if ( partitioner != null ) { keys1 . validateCustomPartitioner ( partitioner , null ) ; keys2 . validateCustomPartitioner ( partitioner , null ) ; } this . customPartitioner = getInput1 ( ) . clean ( partitioner ) ; return this ; }
Sets a custom partitioner for this join . The partitioner will be called on the join keys to determine the partition a key should be assigned to . The partitioner is evaluated on both join inputs in the same way .
14,471
public BinaryRow append ( LookupInfo info , BinaryRow value ) throws IOException { try { if ( numElements >= growthThreshold ) { growAndRehash ( ) ; lookup ( info . key ) ; } BinaryRow toAppend = hashSetMode ? reusedValue : value ; long pointerToAppended = recordArea . appendRecord ( info . key , toAppend ) ; bucketSegments . get ( info . bucketSegmentIndex ) . putLong ( info . bucketOffset , pointerToAppended ) ; bucketSegments . get ( info . bucketSegmentIndex ) . putInt ( info . bucketOffset + ELEMENT_POINT_LENGTH , info . keyHashCode ) ; numElements ++ ; recordArea . setReadPosition ( pointerToAppended ) ; recordArea . skipKey ( ) ; return recordArea . readValue ( reusedValue ) ; } catch ( EOFException e ) { numSpillFiles ++ ; spillInBytes += recordArea . segments . size ( ) * ( ( long ) segmentSize ) ; throw e ; } }
Append an value into the hash map s record area .
14,472
public void reset ( ) { int numBuckets = bucketSegments . size ( ) * numBucketsPerSegment ; this . log2NumBuckets = MathUtils . log2strict ( numBuckets ) ; this . numBucketsMask = ( 1 << MathUtils . log2strict ( numBuckets ) ) - 1 ; this . numBucketsMask2 = ( 1 << MathUtils . log2strict ( numBuckets >> 1 ) ) - 1 ; this . growthThreshold = ( int ) ( numBuckets * LOAD_FACTOR ) ; recordArea . reset ( ) ; resetBucketSegments ( bucketSegments ) ; numElements = 0 ; destructiveIterator = null ; LOG . info ( "reset BytesHashMap with record memory segments {}, {} in bytes, init allocating {} for bucket area." , freeMemorySegments . size ( ) , freeMemorySegments . size ( ) * segmentSize , bucketSegments . size ( ) ) ; }
reset the map s record and bucket area s memory segments for reusing .
14,473
public static int calculateHeapSize ( int memory , org . apache . flink . configuration . Configuration conf ) { float memoryCutoffRatio = conf . getFloat ( ResourceManagerOptions . CONTAINERIZED_HEAP_CUTOFF_RATIO ) ; int minCutoff = conf . getInteger ( ResourceManagerOptions . CONTAINERIZED_HEAP_CUTOFF_MIN ) ; if ( memoryCutoffRatio > 1 || memoryCutoffRatio < 0 ) { throw new IllegalArgumentException ( "The configuration value '" + ResourceManagerOptions . CONTAINERIZED_HEAP_CUTOFF_RATIO . key ( ) + "' must be between 0 and 1. Value given=" + memoryCutoffRatio ) ; } if ( minCutoff > memory ) { throw new IllegalArgumentException ( "The configuration value '" + ResourceManagerOptions . CONTAINERIZED_HEAP_CUTOFF_MIN . key ( ) + "' is higher (" + minCutoff + ") than the requested amount of memory " + memory ) ; } int heapLimit = ( int ) ( ( float ) memory * memoryCutoffRatio ) ; if ( heapLimit < minCutoff ) { heapLimit = minCutoff ; } return memory - heapLimit ; }
See documentation .
14,474
static Tuple2 < Path , LocalResource > setupLocalResource ( FileSystem fs , String appId , Path localSrcPath , Path homedir , String relativeTargetPath ) throws IOException { File localFile = new File ( localSrcPath . toUri ( ) . getPath ( ) ) ; if ( localFile . isDirectory ( ) ) { throw new IllegalArgumentException ( "File to copy must not be a directory: " + localSrcPath ) ; } String suffix = ".flink/" + appId + ( relativeTargetPath . isEmpty ( ) ? "" : "/" + relativeTargetPath ) + "/" + localSrcPath . getName ( ) ; Path dst = new Path ( homedir , suffix ) ; LOG . debug ( "Copying from {} to {}" , localSrcPath , dst ) ; fs . copyFromLocalFile ( false , true , localSrcPath , dst ) ; fs . setTimes ( dst , localFile . lastModified ( ) , - 1 ) ; LocalResource resource = registerLocalResource ( dst , localFile . length ( ) , localFile . lastModified ( ) ) ; return Tuple2 . of ( dst , resource ) ; }
Copy a local file to a remote file system .
14,475
private static LocalResource registerLocalResource ( Path remoteRsrcPath , long resourceSize , long resourceModificationTime ) { LocalResource localResource = Records . newRecord ( LocalResource . class ) ; localResource . setResource ( ConverterUtils . getYarnUrlFromURI ( remoteRsrcPath . toUri ( ) ) ) ; localResource . setSize ( resourceSize ) ; localResource . setTimestamp ( resourceModificationTime ) ; localResource . setType ( LocalResourceType . FILE ) ; localResource . setVisibility ( LocalResourceVisibility . APPLICATION ) ; return localResource ; }
Creates a YARN resource for the remote object at the given location .
14,476
private static void obtainTokenForHBase ( Credentials credentials , Configuration conf ) throws IOException { if ( UserGroupInformation . isSecurityEnabled ( ) ) { LOG . info ( "Attempting to obtain Kerberos security token for HBase" ) ; try { Class . forName ( "org.apache.hadoop.hbase.HBaseConfiguration" ) . getMethod ( "addHbaseResources" , Configuration . class ) . invoke ( null , conf ) ; LOG . info ( "HBase security setting: {}" , conf . get ( "hbase.security.authentication" ) ) ; if ( ! "kerberos" . equals ( conf . get ( "hbase.security.authentication" ) ) ) { LOG . info ( "HBase has not been configured to use Kerberos." ) ; return ; } LOG . info ( "Obtaining Kerberos security token for HBase" ) ; Token < ? > token = ( Token < ? > ) Class . forName ( "org.apache.hadoop.hbase.security.token.TokenUtil" ) . getMethod ( "obtainToken" , Configuration . class ) . invoke ( null , conf ) ; if ( token == null ) { LOG . error ( "No Kerberos security token for HBase available" ) ; return ; } credentials . addToken ( token . getService ( ) , token ) ; LOG . info ( "Added HBase Kerberos security token to credentials." ) ; } catch ( ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InvocationTargetException e ) { LOG . info ( "HBase is not available (not packaged with this application): {} : \"{}\"." , e . getClass ( ) . getSimpleName ( ) , e . getMessage ( ) ) ; } } }
Obtain Kerberos security token for HBase .
14,477
public static Map < String , String > getEnvironmentVariables ( String envPrefix , org . apache . flink . configuration . Configuration flinkConfiguration ) { Map < String , String > result = new HashMap < > ( ) ; for ( Map . Entry < String , String > entry : flinkConfiguration . toMap ( ) . entrySet ( ) ) { if ( entry . getKey ( ) . startsWith ( envPrefix ) && entry . getKey ( ) . length ( ) > envPrefix . length ( ) ) { String key = entry . getKey ( ) . substring ( envPrefix . length ( ) ) ; result . put ( key , entry . getValue ( ) ) ; } } return result ; }
Method to extract environment variables from the flinkConfiguration based on the given prefix String .
14,478
static void require ( boolean condition , String message , Object ... values ) { if ( ! condition ) { throw new RuntimeException ( String . format ( message , values ) ) ; } }
Validates a condition throwing a RuntimeException if the condition is violated .
14,479
public QueryScopeInfo getQueryServiceMetricInfo ( CharacterFilter filter ) { if ( queryServiceScopeInfo == null ) { queryServiceScopeInfo = createQueryServiceMetricInfo ( filter ) ; } return queryServiceScopeInfo ; }
Returns the metric query service scope for this group .
14,480
protected void addMetric ( String name , Metric metric ) { if ( metric == null ) { LOG . warn ( "Ignoring attempted registration of a metric due to being null for name {}." , name ) ; return ; } synchronized ( this ) { if ( ! closed ) { Metric prior = metrics . put ( name , metric ) ; if ( prior == null ) { if ( groups . containsKey ( name ) ) { LOG . warn ( "Name collision: Adding a metric with the same name as a metric subgroup: '" + name + "'. Metric might not get properly reported. " + Arrays . toString ( scopeComponents ) ) ; } registry . register ( metric , name , this ) ; } else { metrics . put ( name , prior ) ; LOG . warn ( "Name collision: Group already contains a Metric with the name '" + name + "'. Metric will not be reported." + Arrays . toString ( scopeComponents ) ) ; } } } }
Adds the given metric to the group and registers it at the registry if the group is not yet closed and if no metric with the same name has been registered before .
14,481
private static Calendar valueAsCalendar ( Object value ) { Date date = ( Date ) value ; Calendar cal = Calendar . getInstance ( ) ; cal . setTime ( date ) ; return cal ; }
Convert a Date value to a Calendar . Calcite s fromCalendarField functions use the Calendar . get methods so the raw values of the individual fields are preserved when converted to the String formats .
14,482
public static boolean isFunctionOfType ( Expression expr , FunctionDefinition . Type type ) { return expr instanceof CallExpression && ( ( CallExpression ) expr ) . getFunctionDefinition ( ) . getType ( ) == type ; }
Checks if the expression is a function call of given type .
14,483
private static String stripHostname ( final String originalHostname ) { final int index = originalHostname . indexOf ( DOMAIN_SEPARATOR ) ; if ( index == - 1 ) { return originalHostname ; } final Matcher matcher = IPV4_PATTERN . matcher ( originalHostname ) ; if ( matcher . matches ( ) ) { return originalHostname ; } if ( index == 0 ) { throw new IllegalStateException ( "Hostname " + originalHostname + " starts with a " + DOMAIN_SEPARATOR ) ; } return originalHostname . substring ( 0 , index ) ; }
Looks for a domain suffix in a FQDN and strips it if present .
14,484
private void onBarrier ( int channelIndex ) throws IOException { if ( ! blockedChannels [ channelIndex ] ) { blockedChannels [ channelIndex ] = true ; numBarriersReceived ++ ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "{}: Received barrier from channel {}." , inputGate . getOwningTaskName ( ) , channelIndex ) ; } } else { throw new IOException ( "Stream corrupt: Repeated barrier for same checkpoint on input " + channelIndex ) ; } }
Blocks the given channel index from which a barrier has been received .
14,485
private void releaseBlocksAndResetBarriers ( ) throws IOException { LOG . debug ( "{}: End of stream alignment, feeding buffered data back." , inputGate . getOwningTaskName ( ) ) ; for ( int i = 0 ; i < blockedChannels . length ; i ++ ) { blockedChannels [ i ] = false ; } if ( currentBuffered == null ) { currentBuffered = bufferBlocker . rollOverReusingResources ( ) ; if ( currentBuffered != null ) { currentBuffered . open ( ) ; } } else { LOG . debug ( "{}: Checkpoint skipped via buffered data:" + "Pushing back current alignment buffers and feeding back new alignment data first." , inputGate . getOwningTaskName ( ) ) ; BufferOrEventSequence bufferedNow = bufferBlocker . rollOverWithoutReusingResources ( ) ; if ( bufferedNow != null ) { bufferedNow . open ( ) ; queuedBuffered . addFirst ( currentBuffered ) ; numQueuedBytes += currentBuffered . size ( ) ; currentBuffered = bufferedNow ; } } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "{}: Size of buffered data: {} bytes" , inputGate . getOwningTaskName ( ) , currentBuffered == null ? 0L : currentBuffered . size ( ) ) ; } numBarriersReceived = 0 ; if ( startOfAlignmentTimestamp > 0 ) { latestAlignmentDurationNanos = System . nanoTime ( ) - startOfAlignmentTimestamp ; startOfAlignmentTimestamp = 0 ; } }
Releases the blocks on all channels and resets the barrier count . Makes sure the just written data is the next to be consumed .
14,486
/**
 * Initializes the defaults for the output format from the given configuration. Needs to be
 * a static method because the defaults are configured once for local cluster execution.
 *
 * @param configuration the configuration to read the default write and directory modes from
 */
public static void initDefaultsFromConfiguration(Configuration configuration) {
    // Whether existing output may be overwritten by default.
    final boolean overwriteByDefault = configuration.getBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE);
    if (overwriteByDefault) {
        DEFAULT_WRITE_MODE = WriteMode.OVERWRITE;
    } else {
        DEFAULT_WRITE_MODE = WriteMode.NO_OVERWRITE;
    }

    // Whether a directory is always created, even for parallelism 1.
    final boolean alwaysCreateDirectory = configuration.getBoolean(CoreOptions.FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY);
    if (alwaysCreateDirectory) {
        DEFAULT_OUTPUT_DIRECTORY_MODE = OutputDirectoryMode.ALWAYS;
    } else {
        DEFAULT_OUTPUT_DIRECTORY_MODE = OutputDirectoryMode.PARONLY;
    }
}
Initialize defaults for output format . Needs to be a static method because it is configured for local cluster execution .
14,487
/**
 * Initializes the output path on a distributed file system, if one is used. On a local file
 * system this is a no-op (each task initializes its own output).
 *
 * @param parallelism the parallelism with which the output is written
 * @throws IOException if the output path or directory could not be initialized
 */
public void initializeGlobal(int parallelism) throws IOException {
    final Path outputPath = getOutputFilePath();
    final FileSystem fileSystem = outputPath.getFileSystem();

    if (!fileSystem.isDistributedFS()) {
        return;
    }

    final WriteMode writeMode = getWriteMode();
    final OutputDirectoryMode outDirMode = getOutputDirectoryMode();

    // With parallelism 1 and PARONLY mode, the output is a single file rather than a directory.
    final boolean singleFileOutput = parallelism == 1 && outDirMode == OutputDirectoryMode.PARONLY;
    if (singleFileOutput) {
        if (!fileSystem.initOutPathDistFS(outputPath, writeMode, false)) {
            throw new IOException("Output path could not be initialized.");
        }
    } else {
        if (!fileSystem.initOutPathDistFS(outputPath, writeMode, true)) {
            throw new IOException("Output directory could not be created.");
        }
    }
}
Initialization of the distributed file system if it is used .
14,488
/**
 * Checks whether the given throwable's cause chain contains an exception of the given type
 * and returns it.
 *
 * @param throwable the throwable whose cause chain is searched (may be null)
 * @param searchType the exception type to look for (may be null)
 * @param <T> the type of the exception to find
 * @return an Optional with the first matching throwable in the chain, or empty if none matches
 */
public static <T extends Throwable> Optional<T> findThrowable(Throwable throwable, Class<T> searchType) {
    if (throwable == null || searchType == null) {
        return Optional.empty();
    }
    // Walk the cause chain until a match is found or the chain ends.
    for (Throwable current = throwable; current != null; current = current.getCause()) {
        if (searchType.isAssignableFrom(current.getClass())) {
            return Optional.of(searchType.cast(current));
        }
    }
    return Optional.empty();
}
Checks whether a throwable chain contains a specific type of exception and returns it .
14,489
/**
 * Checks whether the given throwable's cause chain contains an exception matching the given
 * predicate and returns it.
 *
 * @param throwable the throwable whose cause chain is searched (may be null)
 * @param predicate the predicate a throwable in the chain must satisfy (may be null)
 * @return an Optional with the first matching throwable in the chain, or empty if none matches
 */
public static Optional<Throwable> findThrowable(Throwable throwable, Predicate<Throwable> predicate) {
    if (throwable == null || predicate == null) {
        return Optional.empty();
    }
    if (predicate.test(throwable)) {
        return Optional.of(throwable);
    }
    // Recurse into the cause; the null check above terminates the recursion.
    return findThrowable(throwable.getCause(), predicate);
}
Checks whether a throwable chain contains an exception matching a predicate and returns it .
14,490
/**
 * Checks whether the given throwable's cause chain contains an exception whose message
 * contains the given text and returns the corresponding throwable.
 *
 * @param throwable the throwable whose cause chain is searched (may be null)
 * @param searchMessage the message fragment to look for (may be null)
 * @return an Optional with the first throwable whose message contains the fragment,
 *         or empty if none matches
 */
public static Optional<Throwable> findThrowableWithMessage(Throwable throwable, String searchMessage) {
    if (throwable == null || searchMessage == null) {
        return Optional.empty();
    }
    for (Throwable cause = throwable; cause != null; cause = cause.getCause()) {
        final String message = cause.getMessage();
        // getMessage() may legitimately be null; skip such links.
        if (message != null && message.contains(searchMessage)) {
            return Optional.of(cause);
        }
    }
    return Optional.empty();
}
Checks whether a throwable chain contains a specific error message and returns the corresponding throwable .
14,491
/**
 * Computes the optimal number of bits for a Bloom filter with the given number of expected
 * entries and desired false positive probability, using m = -n * ln(p) / (ln 2)^2.
 *
 * @param inputEntries expected number of entries inserted into the filter
 * @param fpp desired false positive probability, in (0, 1)
 * @return the optimal bit count, truncated to an int
 */
public static int optimalNumOfBits(long inputEntries, double fpp) {
    final double ln2 = Math.log(2);
    final double bits = -inputEntries * Math.log(fpp) / (ln2 * ln2);
    return (int) bits;
}
Compute the optimal number of bits for the given number of input entries and the expected false positive probability .
14,492
/**
 * Computes the optimal number of hash functions for a Bloom filter with the given number of
 * expected entries and bit size, using k = round(m/n * ln 2), with a minimum of 1.
 *
 * @param expectEntries expected number of entries inserted into the filter
 * @param bitSize total number of bits in the filter
 * @return the optimal hash function count, at least 1
 */
static int optimalNumOfHashFunctions(long expectEntries, long bitSize) {
    final double bitsPerEntry = (double) bitSize / expectEntries;
    final int rounded = (int) Math.round(bitsPerEntry * Math.log(2));
    return Math.max(1, rounded);
}
Compute the optimal number of hash functions for the given number of input entries and bit size , minimizing the false positive probability .
14,493
/**
 * Returns the record writers associated with this output collector.
 *
 * @return an unmodifiable list view over the writer array
 */
@SuppressWarnings("unchecked")
public List<RecordWriter<SerializationDelegate<T>>> getWriters() {
    // Arrays.asList produces a fixed-size view over the array; wrap it so callers
    // cannot replace elements either.
    final List<RecordWriter<SerializationDelegate<T>>> writerView = Arrays.asList(this.writers);
    return Collections.unmodifiableList(writerView);
}
List of writers that are associated with this output collector
14,494
/**
 * Atomically assigns the given payload to this slot, if no payload has been assigned so far.
 *
 * @param payload the payload to assign; must not be null
 * @return true if the payload was assigned, false if the slot is canceled or already
 *         holds a payload
 */
public boolean tryAssignPayload(Payload payload) {
    Preconditions.checkNotNull(payload);
    // Fast path: a canceled slot never accepts a payload.
    if (isCanceled()) {
        return false;
    }
    // CAS from null ensures at most one payload is ever assigned, without locking.
    if (!PAYLOAD_UPDATER.compareAndSet(this, null, payload)) {
        return false;
    }
    // Re-check after the CAS: cancellation may have raced with the assignment above.
    // If it did, roll the assignment back and report failure.
    if (isCanceled()) {
        this.payload = null;
        return false;
    }
    return true;
}
Atomically assigns the given payload to this slot if no payload has been assigned so far .
14,495
/**
 * Creates the scope formats as defined in the given configuration.
 *
 * @param config the configuration to read the metric scope format strings from
 * @return the scope formats for job manager, task manager, task, and operator metrics
 */
public static ScopeFormats fromConfig(Configuration config) {
    return new ScopeFormats(
        config.getString(MetricOptions.SCOPE_NAMING_JM),
        config.getString(MetricOptions.SCOPE_NAMING_JM_JOB),
        config.getString(MetricOptions.SCOPE_NAMING_TM),
        config.getString(MetricOptions.SCOPE_NAMING_TM_JOB),
        config.getString(MetricOptions.SCOPE_NAMING_TASK),
        config.getString(MetricOptions.SCOPE_NAMING_OPERATOR));
}
Creates the scope formats as defined in the given configuration .
14,496
/**
 * Initializes the default stream-opening timeout for the input format from the given
 * configuration. Needs to be a static method because the default is configured once for
 * local cluster execution.
 *
 * @param configuration the configuration to read the timeout from
 */
private static void initDefaultsFromConfiguration(Configuration configuration) {
    final long configuredTimeout = configuration.getLong(ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY, ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);

    if (configuredTimeout < 0) {
        // Negative values are invalid; fall back to the built-in default.
        LOG.error("Invalid timeout value for filesystem stream opening: " + configuredTimeout + ". Using default value of " + ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
        DEFAULT_OPENING_TIMEOUT = ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT;
    } else if (configuredTimeout == 0) {
        // Zero is mapped to 300000 ms (5 minutes) — presumably a cap for "no timeout";
        // NOTE(review): confirm the intended semantics of a configured 0.
        DEFAULT_OPENING_TIMEOUT = 300000;
    } else {
        DEFAULT_OPENING_TIMEOUT = configuredTimeout;
    }
}
Initialize defaults for input format . Needs to be a static method because it is configured for local cluster execution .
14,497
/**
 * Returns the paths of all files to be read by this FileInputFormat.
 *
 * @return the configured paths, or an empty array if none have been set
 */
public Path[] getFilePaths() {
    if (supportsMultiPaths()) {
        // Multi-path mode: the array field is authoritative.
        return this.filePaths != null ? this.filePaths : new Path[0];
    }
    // Single-path mode: wrap the single path in an array, if present.
    return this.filePath != null ? new Path[] { filePath } : new Path[0];
}
Returns the paths of all files to be read by the FileInputFormat .
14,498
/**
 * Enumerates all accepted files under the given directory, recursing into subdirectories
 * when {@code enumerateNestedFiles} is set, and adds them to the given list.
 *
 * @param path the directory to enumerate
 * @param files collector for the accepted file statuses
 * @param logExcludedFiles whether entries rejected by the file filter are logged at debug level
 * @return the total length in bytes of all accepted files
 * @throws IOException if listing the directory fails
 */
private long addFilesInDir(Path path, List<FileStatus> files, boolean logExcludedFiles) throws IOException {
    final FileSystem fs = path.getFileSystem();

    long length = 0;

    for (FileStatus entry : fs.listStatus(path)) {
        if (entry.isDir()) {
            if (acceptFile(entry) && enumerateNestedFiles) {
                length += addFilesInDir(entry.getPath(), files, logExcludedFiles);
            } else if (logExcludedFiles && LOG.isDebugEnabled()) {
                LOG.debug("Directory " + entry.getPath().toString() + " did not pass the file-filter and is excluded.");
            }
        } else {
            if (acceptFile(entry)) {
                files.add(entry);
                length += entry.getLen();
                testForUnsplittable(entry);
            } else if (logExcludedFiles && LOG.isDebugEnabled()) {
                // Fixed copy-paste bug: this branch handles a regular file, not a directory,
                // but the original message said "Directory".
                LOG.debug("File " + entry.getPath().toString() + " did not pass the file-filter and is excluded.");
            }
        }
    }
    return length;
}
Enumerate all files in the directory and recursive if enumerateNestedFiles is true .
14,499
/**
 * Adds the given already-open file channels to the set of tracked open channels, and
 * removes their IDs from the pending channel map.
 *
 * @param toOpen the open channels to register
 * @throws IllegalArgumentException if this registry has already been closed
 */
public synchronized void addOpenChannels(List<FileIOChannel> toOpen) {
    checkArgument(!closed);
    toOpen.forEach(channel -> {
        openChannels.add(channel);
        channels.remove(channel.getChannelID());
    });
}
Adds the given already open file channels and removes their IDs from the pending channel map .