idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
14,800 | protected boolean registerAllRequestsProcessedListener ( NotificationListener listener ) throws IOException { checkNotNull ( listener ) ; synchronized ( listenerLock ) { if ( allRequestsProcessedListener == null ) { if ( requestsNotReturned . get ( ) == 0 ) { return false ; } allRequestsProcessedListener = listener ; return true ; } } throw new IllegalStateException ( "Already subscribed." ) ; } | Registers a listener to be notified when all outstanding requests have been processed . |
14,801 | public DataSet < Vertex < K , VV > > createResult ( ) { if ( this . initialVertices == null ) { throw new IllegalStateException ( "The input data set has not been set." ) ; } TypeInformation < K > keyType = ( ( TupleTypeInfo < ? > ) initialVertices . getType ( ) ) . getTypeAt ( 0 ) ; TypeInformation < Tuple2 < K , Message > > messageTypeInfo = new TupleTypeInfo < > ( keyType , messageType ) ; Graph < K , VV , EV > graph = Graph . fromDataSet ( initialVertices , edgesWithValue , initialVertices . getExecutionEnvironment ( ) ) ; DataSet < LongValue > numberOfVertices = null ; if ( this . configuration != null && this . configuration . isOptNumVertices ( ) ) { try { numberOfVertices = GraphUtils . count ( this . initialVertices ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } } if ( this . configuration != null ) { scatterFunction . setDirection ( this . configuration . getDirection ( ) ) ; } else { scatterFunction . setDirection ( EdgeDirection . OUT ) ; } EdgeDirection messagingDirection = scatterFunction . getDirection ( ) ; if ( this . configuration != null && this . configuration . isOptDegrees ( ) ) { return createResultVerticesWithDegrees ( graph , messagingDirection , messageTypeInfo , numberOfVertices ) ; } else { return createResultSimpleVertex ( messagingDirection , messageTypeInfo , numberOfVertices ) ; } } | Creates the operator that represents this scatter - gather graph computation . |
14,802 | private CoGroupOperator < ? , ? , Tuple2 < K , Message > > buildScatterFunctionVerticesWithDegrees ( DeltaIteration < Vertex < K , Tuple3 < VV , LongValue , LongValue > > , Vertex < K , Tuple3 < VV , LongValue , LongValue > > > iteration , TypeInformation < Tuple2 < K , Message > > messageTypeInfo , int whereArg , int equalToArg , DataSet < LongValue > numberOfVertices ) { CoGroupOperator < ? , ? , Tuple2 < K , Message > > messages ; ScatterUdfWithEdgeValues < K , Tuple3 < VV , LongValue , LongValue > , VV , Message , EV > messenger = new ScatterUdfWithEVsVVWithDegrees < > ( scatterFunction , messageTypeInfo ) ; messages = this . edgesWithValue . coGroup ( iteration . getWorkset ( ) ) . where ( whereArg ) . equalTo ( equalToArg ) . with ( messenger ) ; messages = messages . name ( "Messaging" ) ; if ( this . configuration != null ) { for ( Tuple2 < String , DataSet < ? > > e : this . configuration . getScatterBcastVars ( ) ) { messages = messages . withBroadcastSet ( e . f1 , e . f0 ) ; } if ( this . configuration . isOptNumVertices ( ) ) { messages = messages . withBroadcastSet ( numberOfVertices , "number of vertices" ) ; } } return messages ; } | Method that builds the scatter function using a coGroup operator for a vertex containing degree information . It afterwards configures the function with a custom name and broadcast variables . |
14,803 | private DataSet < Vertex < K , VV > > createResultSimpleVertex ( EdgeDirection messagingDirection , TypeInformation < Tuple2 < K , Message > > messageTypeInfo , DataSet < LongValue > numberOfVertices ) { DataSet < Tuple2 < K , Message > > messages ; TypeInformation < Vertex < K , VV > > vertexTypes = initialVertices . getType ( ) ; final DeltaIteration < Vertex < K , VV > , Vertex < K , VV > > iteration = initialVertices . iterateDelta ( initialVertices , this . maximumNumberOfIterations , 0 ) ; setUpIteration ( iteration ) ; switch ( messagingDirection ) { case IN : messages = buildScatterFunction ( iteration , messageTypeInfo , 1 , 0 , numberOfVertices ) ; break ; case OUT : messages = buildScatterFunction ( iteration , messageTypeInfo , 0 , 0 , numberOfVertices ) ; break ; case ALL : messages = buildScatterFunction ( iteration , messageTypeInfo , 1 , 0 , numberOfVertices ) . union ( buildScatterFunction ( iteration , messageTypeInfo , 0 , 0 , numberOfVertices ) ) ; break ; default : throw new IllegalArgumentException ( "Illegal edge direction" ) ; } GatherUdf < K , VV , Message > updateUdf = new GatherUdfSimpleVV < > ( gatherFunction , vertexTypes ) ; CoGroupOperator < ? , ? , Vertex < K , VV > > updates = messages . coGroup ( iteration . getSolutionSet ( ) ) . where ( 0 ) . equalTo ( 0 ) . with ( updateUdf ) ; if ( this . configuration != null && this . configuration . isOptNumVertices ( ) ) { updates = updates . withBroadcastSet ( numberOfVertices , "number of vertices" ) ; } configureUpdateFunction ( updates ) ; return iteration . closeWith ( updates , updates ) ; } | Creates the operator that represents this scatter - gather graph computation for a simple vertex . |
14,804 | public O setParallelism ( int parallelism ) { Preconditions . checkArgument ( parallelism > 0 || parallelism == ExecutionConfig . PARALLELISM_DEFAULT , "The parallelism must be at least one, or ExecutionConfig.PARALLELISM_DEFAULT (use system default)." ) ; this . parallelism = parallelism ; @ SuppressWarnings ( "unchecked" ) O returnType = ( O ) this ; return returnType ; } | Sets the parallelism for this operator . The parallelism must be 1 or more . |
14,805 | private O setResources ( ResourceSpec minResources , ResourceSpec preferredResources ) { Preconditions . checkNotNull ( minResources , "The min resources must be not null." ) ; Preconditions . checkNotNull ( preferredResources , "The preferred resources must be not null." ) ; Preconditions . checkArgument ( minResources . isValid ( ) && preferredResources . isValid ( ) && minResources . lessThanOrEqual ( preferredResources ) , "The values in resources must be not less than 0 and the preferred resources must be greater than the min resources." ) ; this . minResources = minResources ; this . preferredResources = preferredResources ; @ SuppressWarnings ( "unchecked" ) O returnType = ( O ) this ; return returnType ; } | Sets the minimum and preferred resources for this operator . This overrides the default resources . The lower and upper resource limits will be considered in dynamic resource resize feature for future plan . |
14,806 | private O setResources ( ResourceSpec resources ) { Preconditions . checkNotNull ( resources , "The resources must be not null." ) ; Preconditions . checkArgument ( resources . isValid ( ) , "The values in resources must be not less than 0." ) ; this . minResources = resources ; this . preferredResources = resources ; @ SuppressWarnings ( "unchecked" ) O returnType = ( O ) this ; return returnType ; } | Sets the resources for this operator . This overrides the default minimum and preferred resources . |
14,807 | public GraphAlgorithmWrappingBase < K , VV , EV , R > setParallelism ( int parallelism ) { Preconditions . checkArgument ( parallelism > 0 || parallelism == PARALLELISM_DEFAULT , "The parallelism must be at least one, or ExecutionConfig.PARALLELISM_DEFAULT (use system default)." ) ; this . parallelism = parallelism ; return this ; } | Set the parallelism for this algorithm s operators . This parameter is necessary because processing a small amount of data with high operator parallelism is slow and wasteful with memory and buffers . |
14,808 | public int getNumberOfAvailableSlotsForGroup ( AbstractID groupId ) { synchronized ( lock ) { Map < ResourceID , List < SharedSlot > > available = availableSlotsPerJid . get ( groupId ) ; if ( available != null ) { Set < SharedSlot > set = new HashSet < SharedSlot > ( ) ; for ( List < SharedSlot > list : available . values ( ) ) { for ( SharedSlot slot : list ) { set . add ( slot ) ; } } return set . size ( ) ; } else { return allSlots . size ( ) ; } } } | Gets the number of shared slots into which the given group can place subtasks or nested task groups . |
14,809 | public SimpleSlot getSlotForTask ( CoLocationConstraint constraint , Iterable < TaskManagerLocation > locationPreferences ) { synchronized ( lock ) { if ( constraint . isAssignedAndAlive ( ) ) { final SharedSlot shared = constraint . getSharedSlot ( ) ; SimpleSlot subslot = shared . allocateSubSlot ( null ) ; subslot . setLocality ( Locality . LOCAL ) ; return subslot ; } else if ( constraint . isAssigned ( ) ) { SharedSlot previous = constraint . getSharedSlot ( ) ; if ( previous == null ) { throw new IllegalStateException ( "Bug: Found assigned co-location constraint without a slot." ) ; } TaskManagerLocation location = previous . getTaskManagerLocation ( ) ; Tuple2 < SharedSlot , Locality > p = getSharedSlotForTask ( constraint . getGroupId ( ) , Collections . singleton ( location ) , true ) ; if ( p == null ) { return null ; } else { SharedSlot newSharedSlot = p . f0 ; SharedSlot constraintGroupSlot = newSharedSlot . allocateSharedSlot ( constraint . getGroupId ( ) ) ; if ( constraintGroupSlot != null ) { constraint . setSharedSlot ( constraintGroupSlot ) ; SimpleSlot subSlot = constraintGroupSlot . allocateSubSlot ( null ) ; subSlot . setLocality ( Locality . LOCAL ) ; return subSlot ; } else { return null ; } } } else { Tuple2 < SharedSlot , Locality > p = getSharedSlotForTask ( constraint . getGroupId ( ) , locationPreferences , false ) ; if ( p == null ) { return null ; } else { final SharedSlot availableShared = p . f0 ; final Locality l = p . f1 ; SharedSlot constraintGroupSlot = availableShared . allocateSharedSlot ( constraint . getGroupId ( ) ) ; constraint . setSharedSlot ( constraintGroupSlot ) ; SimpleSlot sub = constraintGroupSlot . allocateSubSlot ( null ) ; sub . setLocality ( l ) ; return sub ; } } } } | Gets a slot for a task that has a co - location constraint . This method tries to grab a slot form the location - constraint s shared slot . 
If that slot has not been initialized then the method tries to grab another slot that is available for the location - constraint - group . |
14,810 | void releaseSimpleSlot ( SimpleSlot simpleSlot ) { synchronized ( lock ) { if ( simpleSlot . markCancelled ( ) ) { if ( simpleSlot . isAlive ( ) ) { throw new IllegalStateException ( "slot is still alive" ) ; } if ( simpleSlot . markReleased ( ) ) { LOG . debug ( "Release simple slot {}." , simpleSlot ) ; AbstractID groupID = simpleSlot . getGroupID ( ) ; SharedSlot parent = simpleSlot . getParent ( ) ; if ( groupID != null && ! allSlots . contains ( parent ) ) { throw new IllegalArgumentException ( "Slot was not associated with this SlotSharingGroup before." ) ; } int parentRemaining = parent . removeDisposedChildSlot ( simpleSlot ) ; if ( parentRemaining > 0 ) { if ( groupID != null ) { Map < ResourceID , List < SharedSlot > > slotsForJid = availableSlotsPerJid . get ( groupID ) ; if ( slotsForJid == null ) { throw new IllegalStateException ( "Trying to return a slot for group " + groupID + " when available slots indicated that all slots were available." ) ; } putIntoMultiMap ( slotsForJid , parent . getTaskManagerID ( ) , parent ) ; } } else { parent . markCancelled ( ) ; internalDisposeEmptySharedSlot ( parent ) ; } } } } } | Releases the simple slot from the assignment group . |
14,811 | public int [ ] toArray ( ) { int [ ] a = new int [ this . collection . size ( ) ] ; int i = 0 ; for ( int col : this . collection ) { a [ i ++ ] = col ; } return a ; } | Transforms the field set into an array of field IDs . Whether the IDs are ordered or unordered depends on the specific subclass of the field set . |
14,812 | public AdamicAdar < K , VV , EV > setMinimumScore ( float score ) { Preconditions . checkArgument ( score >= 0 , "Minimum score must be non-negative" ) ; this . minimumScore = score ; return this ; } | Filter out Adamic - Adar scores less than the given minimum . |
14,813 | public AdamicAdar < K , VV , EV > setMinimumRatio ( float ratio ) { Preconditions . checkArgument ( ratio >= 0 , "Minimum ratio must be non-negative" ) ; this . minimumRatio = ratio ; return this ; } | Filter out Adamic - Adar scores less than the given ratio times the average score . |
14,814 | public Schema schema ( TableSchema schema ) { tableSchema . clear ( ) ; lastField = null ; for ( int i = 0 ; i < schema . getFieldCount ( ) ; i ++ ) { field ( schema . getFieldName ( i ) . get ( ) , schema . getFieldType ( i ) . get ( ) ) ; } return this ; } | Sets the schema with field names and the types . Required . |
14,815 | public Schema field ( String fieldName , TypeInformation < ? > fieldType ) { field ( fieldName , TypeStringUtils . writeTypeInfo ( fieldType ) ) ; return this ; } | Adds a field with the field name and the type information . Required . This method can be called multiple times . The call order of this method defines also the order of the fields in a row . |
14,816 | public Schema field ( String fieldName , String fieldType ) { if ( tableSchema . containsKey ( fieldName ) ) { throw new ValidationException ( "Duplicate field name $fieldName." ) ; } LinkedHashMap < String , String > fieldProperties = new LinkedHashMap < > ( ) ; fieldProperties . put ( SCHEMA_TYPE , fieldType ) ; tableSchema . put ( fieldName , fieldProperties ) ; lastField = fieldName ; return this ; } | Adds a field with the field name and the type string . Required . This method can be called multiple times . The call order of this method defines also the order of the fields in a row . |
14,817 | public Schema from ( String originFieldName ) { if ( lastField == null ) { throw new ValidationException ( "No field previously defined. Use field() before." ) ; } tableSchema . get ( lastField ) . put ( SCHEMA_FROM , originFieldName ) ; lastField = null ; return this ; } | Specifies the origin of the previously defined field . The origin field is defined by a connector or format . |
14,818 | public Schema proctime ( ) { if ( lastField == null ) { throw new ValidationException ( "No field defined previously. Use field() before." ) ; } tableSchema . get ( lastField ) . put ( SCHEMA_PROCTIME , "true" ) ; lastField = null ; return this ; } | Specifies the previously defined field as a processing - time attribute . |
14,819 | public Schema rowtime ( Rowtime rowtime ) { if ( lastField == null ) { throw new ValidationException ( "No field defined previously. Use field() before." ) ; } tableSchema . get ( lastField ) . putAll ( rowtime . toProperties ( ) ) ; lastField = null ; return this ; } | Specifies the previously defined field as an event - time attribute . |
14,820 | public void onComplete ( final Consumer < StreamElementQueueEntry < T > > completeFunction , Executor executor ) { final StreamElementQueueEntry < T > thisReference = this ; getFuture ( ) . whenCompleteAsync ( ( value , throwable ) -> completeFunction . accept ( thisReference ) , executor ) ; } | Register the given complete function to be called once this queue entry has been completed . |
14,821 | public static String formatSystemProperties ( Configuration jvmArgs ) { StringBuilder sb = new StringBuilder ( ) ; for ( Map . Entry < String , String > entry : jvmArgs . toMap ( ) . entrySet ( ) ) { if ( sb . length ( ) > 0 ) { sb . append ( " " ) ; } boolean quoted = entry . getValue ( ) . contains ( " " ) ; if ( quoted ) { sb . append ( "\"" ) ; } sb . append ( "-D" ) . append ( entry . getKey ( ) ) . append ( '=' ) . append ( entry . getValue ( ) ) ; if ( quoted ) { sb . append ( "\"" ) ; } } return sb . toString ( ) ; } | Format the system properties as a shell - compatible command - line argument . |
14,822 | public void registerTimeout ( final K key , final long delay , final TimeUnit unit ) { Preconditions . checkState ( timeoutListener != null , "The " + getClass ( ) . getSimpleName ( ) + " has not been started." ) ; if ( timeouts . containsKey ( key ) ) { unregisterTimeout ( key ) ; } timeouts . put ( key , new Timeout < > ( timeoutListener , key , delay , unit , scheduledExecutorService ) ) ; } | Register a timeout for the given key which shall occur in the given delay . |
14,823 | public void unregisterTimeout ( K key ) { Timeout < K > timeout = timeouts . remove ( key ) ; if ( timeout != null ) { timeout . cancel ( ) ; } } | Unregister the timeout for the given key . |
14,824 | protected void unregisterAllTimeouts ( ) { for ( Timeout < K > timeout : timeouts . values ( ) ) { timeout . cancel ( ) ; } timeouts . clear ( ) ; } | Unregister all timeouts . |
14,825 | public static MemorySegment allocateUnpooledOffHeapMemory ( int size , Object owner ) { ByteBuffer memory = ByteBuffer . allocateDirect ( size ) ; return wrapPooledOffHeapMemory ( memory , owner ) ; } | Allocates some unpooled off - heap memory and creates a new memory segment that represents that memory . |
14,826 | public void writeBlock ( Buffer buffer ) throws IOException { try { addRequest ( new BufferWriteRequest ( this , buffer ) ) ; } catch ( Throwable e ) { buffer . recycleBuffer ( ) ; ExceptionUtils . rethrowIOException ( e ) ; } } | Writes the given block asynchronously . |
14,827 | public void setNumFields ( final int numFields ) { final int oldNumFields = this . numFields ; if ( numFields > oldNumFields ) { makeSpace ( numFields ) ; for ( int i = oldNumFields ; i < numFields ; i ++ ) { this . offsets [ i ] = NULL_INDICATOR_OFFSET ; } markModified ( oldNumFields ) ; } else { markModified ( numFields ) ; } this . numFields = numFields ; } | Sets the number of fields in the record . If the new number of fields is longer than the current number of fields then null fields are appended . If the new number of fields is smaller than the current number of fields then the last fields are truncated . |
14,828 | public void makeSpace ( int numFields ) { final int oldNumFields = this . numFields ; if ( this . offsets == null ) { this . offsets = new int [ numFields ] ; } else if ( this . offsets . length < numFields ) { int [ ] newOffs = new int [ Math . max ( numFields + 1 , oldNumFields << 1 ) ] ; System . arraycopy ( this . offsets , 0 , newOffs , 0 , oldNumFields ) ; this . offsets = newOffs ; } if ( this . lengths == null ) { this . lengths = new int [ numFields ] ; } else if ( this . lengths . length < numFields ) { int [ ] newLens = new int [ Math . max ( numFields + 1 , oldNumFields << 1 ) ] ; System . arraycopy ( this . lengths , 0 , newLens , 0 , oldNumFields ) ; this . lengths = newLens ; } if ( this . readFields == null ) { this . readFields = new Value [ numFields ] ; } else if ( this . readFields . length < numFields ) { Value [ ] newFields = new Value [ Math . max ( numFields + 1 , oldNumFields << 1 ) ] ; System . arraycopy ( this . readFields , 0 , newFields , 0 , oldNumFields ) ; this . readFields = newFields ; } if ( this . writeFields == null ) { this . writeFields = new Value [ numFields ] ; } else if ( this . writeFields . length < numFields ) { Value [ ] newFields = new Value [ Math . max ( numFields + 1 , oldNumFields << 1 ) ] ; System . arraycopy ( this . writeFields , 0 , newFields , 0 , oldNumFields ) ; this . writeFields = newFields ; } } | Reserves space for at least the given number of fields in the internal arrays . |
14,829 | @ SuppressWarnings ( "unchecked" ) public < T extends Value > T getField ( final int fieldNum , final Class < T > type ) { if ( fieldNum < 0 || fieldNum >= this . numFields ) { throw new IndexOutOfBoundsException ( fieldNum + " for range [0.." + ( this . numFields - 1 ) + "]" ) ; } final int offset = this . offsets [ fieldNum ] ; if ( offset == NULL_INDICATOR_OFFSET ) { return null ; } else if ( offset == MODIFIED_INDICATOR_OFFSET ) { return ( T ) this . writeFields [ fieldNum ] ; } final int limit = offset + this . lengths [ fieldNum ] ; final Value oldField = this . readFields [ fieldNum ] ; final T field ; if ( oldField != null && oldField . getClass ( ) == type ) { field = ( T ) oldField ; } else { field = InstantiationUtil . instantiate ( type , Value . class ) ; this . readFields [ fieldNum ] = field ; } deserialize ( field , offset , limit , fieldNum ) ; return field ; } | Gets the field at the given position from the record . This method checks internally if this instance of the record has previously returned a value for this field . If so it reuses the object if not it creates one from the supplied class . |
14,830 | public boolean getFieldInto ( int fieldNum , Value target ) { if ( fieldNum < 0 || fieldNum >= this . numFields ) { throw new IndexOutOfBoundsException ( ) ; } int offset = this . offsets [ fieldNum ] ; if ( offset == NULL_INDICATOR_OFFSET ) { return false ; } else if ( offset == MODIFIED_INDICATOR_OFFSET ) { updateBinaryRepresenation ( ) ; offset = this . offsets [ fieldNum ] ; } final int limit = offset + this . lengths [ fieldNum ] ; deserialize ( target , offset , limit , fieldNum ) ; return true ; } | Gets the field at the given position . If the field at that position is null then this method leaves the target field unchanged and returns false . |
14,831 | public boolean getFieldsInto ( int [ ] positions , Value [ ] targets ) { for ( int i = 0 ; i < positions . length ; i ++ ) { if ( ! getFieldInto ( positions [ i ] , targets [ i ] ) ) { return false ; } } return true ; } | Gets the fields at the given positions into an array . If at any position a field is null then this method returns false . All fields that have been successfully read until the failing read are correctly contained in the record . All other fields are not set . |
14,832 | public final boolean equalsFields ( int [ ] positions , Value [ ] searchValues , Value [ ] deserializationHolders ) { for ( int i = 0 ; i < positions . length ; i ++ ) { final Value v = getField ( positions [ i ] , deserializationHolders [ i ] ) ; if ( v == null || ( ! v . equals ( searchValues [ i ] ) ) ) { return false ; } } return true ; } | Checks the values of this record and a given list of values at specified positions for equality . The values of this record are deserialized and compared against the corresponding search value . The position specify which values are compared . The method returns true if the values on all positions are equal and false otherwise . |
14,833 | public void updateBinaryRepresenation ( ) { final int firstModified = this . firstModifiedPos ; if ( firstModified == Integer . MAX_VALUE ) { return ; } final InternalDeSerializer serializer = this . serializer ; final int [ ] offsets = this . offsets ; final int numFields = this . numFields ; serializer . memory = this . switchBuffer != null ? this . switchBuffer : ( this . binaryLen > 0 ? new byte [ this . binaryLen ] : new byte [ numFields * DEFAULT_FIELD_LEN_ESTIMATE + 1 ] ) ; serializer . position = 0 ; if ( numFields > 0 ) { int offset = 0 ; if ( firstModified > 0 ) { for ( int i = firstModified - 1 ; i >= 0 ; i -- ) { if ( this . offsets [ i ] != NULL_INDICATOR_OFFSET ) { offset = this . offsets [ i ] + this . lengths [ i ] ; break ; } } } try { if ( offset > 0 ) { serializer . write ( this . binaryData , 0 , offset ) ; } for ( int i = firstModified ; i < numFields ; i ++ ) { final int co = offsets [ i ] ; if ( co == NULL_INDICATOR_OFFSET ) { continue ; } offsets [ i ] = offset ; if ( co == MODIFIED_INDICATOR_OFFSET ) { final Value writeField = this . writeFields [ i ] ; if ( writeField == RESERVE_SPACE ) { final int length = this . lengths [ i ] ; if ( serializer . position >= serializer . memory . length - length - 1 ) { serializer . resize ( length ) ; } serializer . position += length ; } else { this . writeFields [ i ] . write ( serializer ) ; } } else { serializer . write ( this . binaryData , co , this . lengths [ i ] ) ; } this . lengths [ i ] = serializer . position - offset ; offset = serializer . position ; } } catch ( Exception e ) { throw new RuntimeException ( "Error in data type serialization: " + e . getMessage ( ) , e ) ; } } serializeHeader ( serializer , offsets , numFields ) ; this . switchBuffer = this . binaryData ; this . binaryData = serializer . memory ; this . binaryLen = serializer . position ; this . firstModifiedPos = Integer . 
MAX_VALUE ; } | Updates the binary representation of the data such that it reflects the state of the currently stored fields . If the binary representation is already up to date nothing happens . Otherwise this function triggers the modified fields to serialize themselves into the records buffer and afterwards updates the offset table . |
14,834 | public static < K , VV , EV , Message > VertexCentricIteration < K , VV , EV , Message > withEdges ( DataSet < Edge < K , EV > > edgesWithValue , ComputeFunction < K , VV , EV , Message > cf , int maximumNumberOfIterations ) { return new VertexCentricIteration < > ( cf , edgesWithValue , null , maximumNumberOfIterations ) ; } | Creates a new vertex - centric iteration operator . |
14,835 | private void setUpIteration ( DeltaIteration < ? , ? > iteration ) { if ( this . configuration != null ) { iteration . name ( this . configuration . getName ( "Vertex-centric iteration (" + computeFunction + ")" ) ) ; iteration . parallelism ( this . configuration . getParallelism ( ) ) ; iteration . setSolutionSetUnManaged ( this . configuration . isSolutionSetUnmanagedMemory ( ) ) ; for ( Map . Entry < String , Aggregator < ? > > entry : this . configuration . getAggregators ( ) . entrySet ( ) ) { iteration . registerAggregator ( entry . getKey ( ) , entry . getValue ( ) ) ; } } else { iteration . name ( "Vertex-centric iteration (" + computeFunction + ")" ) ; } } | Helper method which sets up an iteration with the given vertex value . |
14,836 | private static List < String > getTopics ( List < KafkaTopicPartitionState < TopicAndPartition > > partitions ) { HashSet < String > uniqueTopics = new HashSet < > ( ) ; for ( KafkaTopicPartitionState < TopicAndPartition > fp : partitions ) { uniqueTopics . add ( fp . getTopic ( ) ) ; } return new ArrayList < > ( uniqueTopics ) ; } | Returns a list of unique topics from for the given partitions . |
14,837 | private static Map < Node , List < KafkaTopicPartitionState < TopicAndPartition > > > findLeaderForPartitions ( List < KafkaTopicPartitionState < TopicAndPartition > > partitionsToAssign , Properties kafkaProperties ) throws Exception { if ( partitionsToAssign . isEmpty ( ) ) { throw new IllegalArgumentException ( "Leader request for empty partitions list" ) ; } LOG . info ( "Refreshing leader information for partitions {}" , partitionsToAssign ) ; PartitionInfoFetcher infoFetcher = new PartitionInfoFetcher ( getTopics ( partitionsToAssign ) , kafkaProperties ) ; infoFetcher . start ( ) ; KillerWatchDog watchDog = new KillerWatchDog ( infoFetcher , 60000 ) ; watchDog . start ( ) ; List < KafkaTopicPartitionLeader > topicPartitionWithLeaderList = infoFetcher . getPartitions ( ) ; List < KafkaTopicPartitionState < TopicAndPartition > > unassignedPartitions = new ArrayList < > ( partitionsToAssign ) ; Map < Node , List < KafkaTopicPartitionState < TopicAndPartition > > > leaderToPartitions = new HashMap < > ( ) ; for ( KafkaTopicPartitionLeader partitionLeader : topicPartitionWithLeaderList ) { if ( unassignedPartitions . size ( ) == 0 ) { break ; } Iterator < KafkaTopicPartitionState < TopicAndPartition > > unassignedPartitionsIterator = unassignedPartitions . iterator ( ) ; while ( unassignedPartitionsIterator . hasNext ( ) ) { KafkaTopicPartitionState < TopicAndPartition > unassignedPartition = unassignedPartitionsIterator . next ( ) ; if ( unassignedPartition . getKafkaTopicPartition ( ) . equals ( partitionLeader . getTopicPartition ( ) ) ) { Node leader = partitionLeader . getLeader ( ) ; List < KafkaTopicPartitionState < TopicAndPartition > > partitionsOfLeader = leaderToPartitions . get ( leader ) ; if ( partitionsOfLeader == null ) { partitionsOfLeader = new ArrayList < > ( ) ; leaderToPartitions . put ( leader , partitionsOfLeader ) ; } partitionsOfLeader . add ( unassignedPartition ) ; unassignedPartitionsIterator . 
remove ( ) ; break ; } } } if ( unassignedPartitions . size ( ) > 0 ) { throw new RuntimeException ( "Unable to find a leader for partitions: " + unassignedPartitions ) ; } LOG . debug ( "Partitions with assigned leaders {}" , leaderToPartitions ) ; return leaderToPartitions ; } | Find leaders for the partitions . |
14,838 | public static void validateAwsConfiguration ( Properties config ) { if ( config . containsKey ( AWSConfigConstants . AWS_CREDENTIALS_PROVIDER ) ) { String credentialsProviderType = config . getProperty ( AWSConfigConstants . AWS_CREDENTIALS_PROVIDER ) ; CredentialProvider providerType ; try { providerType = CredentialProvider . valueOf ( credentialsProviderType ) ; } catch ( IllegalArgumentException e ) { StringBuilder sb = new StringBuilder ( ) ; for ( CredentialProvider type : CredentialProvider . values ( ) ) { sb . append ( type . toString ( ) ) . append ( ", " ) ; } throw new IllegalArgumentException ( "Invalid AWS Credential Provider Type set in config. Valid values are: " + sb . toString ( ) ) ; } if ( providerType == CredentialProvider . BASIC ) { if ( ! config . containsKey ( AWSConfigConstants . AWS_ACCESS_KEY_ID ) || ! config . containsKey ( AWSConfigConstants . AWS_SECRET_ACCESS_KEY ) ) { throw new IllegalArgumentException ( "Please set values for AWS Access Key ID ('" + AWSConfigConstants . AWS_ACCESS_KEY_ID + "') " + "and Secret Key ('" + AWSConfigConstants . AWS_SECRET_ACCESS_KEY + "') when using the BASIC AWS credential provider type." ) ; } } } if ( config . containsKey ( AWSConfigConstants . AWS_REGION ) ) { if ( ! AWSUtil . isValidRegion ( config . getProperty ( AWSConfigConstants . AWS_REGION ) ) ) { StringBuilder sb = new StringBuilder ( ) ; for ( Regions region : Regions . values ( ) ) { sb . append ( region . getName ( ) ) . append ( ", " ) ; } throw new IllegalArgumentException ( "Invalid AWS region set in config. Valid values are: " + sb . toString ( ) ) ; } } } | Validate configuration properties related to Amazon AWS service . |
14,839 | public static String getMemoryUsageStatsAsString ( MemoryMXBean memoryMXBean ) { MemoryUsage heap = memoryMXBean . getHeapMemoryUsage ( ) ; MemoryUsage nonHeap = memoryMXBean . getNonHeapMemoryUsage ( ) ; long heapUsed = heap . getUsed ( ) >> 20 ; long heapCommitted = heap . getCommitted ( ) >> 20 ; long heapMax = heap . getMax ( ) >> 20 ; long nonHeapUsed = nonHeap . getUsed ( ) >> 20 ; long nonHeapCommitted = nonHeap . getCommitted ( ) >> 20 ; long nonHeapMax = nonHeap . getMax ( ) >> 20 ; return String . format ( "Memory usage stats: [HEAP: %d/%d/%d MB, " + "NON HEAP: %d/%d/%d MB (used/committed/max)]" , heapUsed , heapCommitted , heapMax , nonHeapUsed , nonHeapCommitted , nonHeapMax ) ; } | Gets the memory footprint of the JVM in a string representation . |
14,840 | public static String getMemoryPoolStatsAsString ( List < MemoryPoolMXBean > poolBeans ) { StringBuilder bld = new StringBuilder ( "Off-heap pool stats: " ) ; int count = 0 ; for ( MemoryPoolMXBean bean : poolBeans ) { if ( bean . getType ( ) == MemoryType . NON_HEAP ) { if ( count > 0 ) { bld . append ( ", " ) ; } count ++ ; MemoryUsage usage = bean . getUsage ( ) ; long used = usage . getUsed ( ) >> 20 ; long committed = usage . getCommitted ( ) >> 20 ; long max = usage . getMax ( ) >> 20 ; bld . append ( '[' ) . append ( bean . getName ( ) ) . append ( ": " ) ; bld . append ( used ) . append ( '/' ) . append ( committed ) . append ( '/' ) . append ( max ) ; bld . append ( " MB (used/committed/max)]" ) ; } } return bld . toString ( ) ; } | Gets the memory pool statistics from the JVM . |
14,841 | public static String getGarbageCollectorStatsAsString ( List < GarbageCollectorMXBean > gcMXBeans ) { StringBuilder bld = new StringBuilder ( "Garbage collector stats: " ) ; for ( GarbageCollectorMXBean bean : gcMXBeans ) { bld . append ( '[' ) . append ( bean . getName ( ) ) . append ( ", GC TIME (ms): " ) . append ( bean . getCollectionTime ( ) ) ; bld . append ( ", GC COUNT: " ) . append ( bean . getCollectionCount ( ) ) . append ( ']' ) ; bld . append ( ", " ) ; } if ( ! gcMXBeans . isEmpty ( ) ) { bld . setLength ( bld . length ( ) - 2 ) ; } return bld . toString ( ) ; } | Gets the garbage collection statistics from the JVM . |
14,842 | public < T extends Comparable < T > > Graph < T , NullValue , NullValue > simplify ( Graph < T , NullValue , NullValue > graph , int parallelism ) throws Exception { switch ( value ) { case DIRECTED : graph = graph . run ( new org . apache . flink . graph . asm . simple . directed . Simplify < T , NullValue , NullValue > ( ) . setParallelism ( parallelism ) ) ; break ; case UNDIRECTED : graph = graph . run ( new org . apache . flink . graph . asm . simple . undirected . Simplify < T , NullValue , NullValue > ( false ) . setParallelism ( parallelism ) ) ; break ; case UNDIRECTED_CLIP_AND_FLIP : graph = graph . run ( new org . apache . flink . graph . asm . simple . undirected . Simplify < T , NullValue , NullValue > ( true ) . setParallelism ( parallelism ) ) ; break ; } return graph ; } | Simplify the given graph based on the configured value . |
14,843 | public Iterator < T > sampleInCoordinator ( Iterator < IntermediateSampleData < T > > input ) { if ( numSamples == 0 ) { return emptyIterable ; } PriorityQueue < IntermediateSampleData < T > > reservoir = new PriorityQueue < IntermediateSampleData < T > > ( numSamples ) ; int index = 0 ; IntermediateSampleData < T > smallest = null ; while ( input . hasNext ( ) ) { IntermediateSampleData < T > element = input . next ( ) ; if ( index < numSamples ) { reservoir . add ( element ) ; smallest = reservoir . peek ( ) ; } else { if ( element . getWeight ( ) > smallest . getWeight ( ) ) { reservoir . remove ( ) ; reservoir . add ( element ) ; smallest = reservoir . peek ( ) ; } } index ++ ; } final Iterator < IntermediateSampleData < T > > itr = reservoir . iterator ( ) ; return new Iterator < T > ( ) { public boolean hasNext ( ) { return itr . hasNext ( ) ; } public T next ( ) { return itr . next ( ) . getElement ( ) ; } public void remove ( ) { itr . remove ( ) ; } } ; } | Sample algorithm for the second phase . This operation should be executed as the UDF of an all reduce operation . |
14,844 | public Iterator < T > sample ( Iterator < T > input ) { return sampleInCoordinator ( sampleInPartition ( input ) ) ; } | Combine the first phase and second phase in sequence implemented for test purpose only . |
14,845 | public static FixedDelayRestartStrategyFactory createFactory ( Configuration configuration ) throws Exception { int maxAttempts = configuration . getInteger ( ConfigConstants . RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS , 1 ) ; String delayString = configuration . getString ( ConfigConstants . RESTART_STRATEGY_FIXED_DELAY_DELAY ) ; long delay ; try { delay = Duration . apply ( delayString ) . toMillis ( ) ; } catch ( NumberFormatException nfe ) { throw new Exception ( "Invalid config value for " + ConfigConstants . RESTART_STRATEGY_FIXED_DELAY_DELAY + ": " + delayString + ". Value must be a valid duration (such as '100 milli' or '10 s')" ) ; } return new FixedDelayRestartStrategyFactory ( maxAttempts , delay ) ; } | Creates a FixedDelayRestartStrategy from the given Configuration . |
14,846 | public static boolean isInSSSP ( final Edge < Long , Double > edgeToBeRemoved , DataSet < Edge < Long , Double > > edgesInSSSP ) throws Exception { return edgesInSSSP . filter ( new FilterFunction < Edge < Long , Double > > ( ) { public boolean filter ( Edge < Long , Double > edge ) throws Exception { return edge . equals ( edgeToBeRemoved ) ; } } ) . count ( ) > 0 ; } | Function that verifies whether the edge to be removed is part of the SSSP or not . If it is the src vertex will be invalidated . |
14,847 | public boolean add ( Task task ) { Preconditions . checkArgument ( task . getJobID ( ) . equals ( jobId ) , "The task's job id does not match the " + "job id for which the slot has been allocated." ) ; Preconditions . checkArgument ( task . getAllocationId ( ) . equals ( allocationId ) , "The task's allocation " + "id does not match the allocation id for which the slot has been allocated." ) ; Preconditions . checkState ( TaskSlotState . ACTIVE == state , "The task slot is not in state active." ) ; Task oldTask = tasks . put ( task . getExecutionId ( ) , task ) ; if ( oldTask != null ) { tasks . put ( task . getExecutionId ( ) , oldTask ) ; return false ; } else { return true ; } } | Add the given task to the task slot . This is only possible if there is not already another task with the same execution attempt id added to the task slot . In this case the method returns true . Otherwise the task slot is left unchanged and false is returned . |
14,848 | public boolean markActive ( ) { if ( TaskSlotState . ALLOCATED == state || TaskSlotState . ACTIVE == state ) { state = TaskSlotState . ACTIVE ; return true ; } else { return false ; } } | Mark this slot as active . A slot can only be marked active if it s in state allocated . |
14,849 | public boolean markFree ( ) { if ( isEmpty ( ) ) { state = TaskSlotState . FREE ; this . jobId = null ; this . allocationId = null ; return true ; } else { return false ; } } | Mark the slot as free . A slot can only be marked as free if it s empty . |
14,850 | public SlotOffer generateSlotOffer ( ) { Preconditions . checkState ( TaskSlotState . ACTIVE == state || TaskSlotState . ALLOCATED == state , "The task slot is not in state active or allocated." ) ; Preconditions . checkState ( allocationId != null , "The task slot are not allocated" ) ; return new SlotOffer ( allocationId , index , resourceProfile ) ; } | Generate the slot offer from this TaskSlot . |
14,851 | public RocksDBStateBackend configure ( Configuration config , ClassLoader classLoader ) { return new RocksDBStateBackend ( this , config , classLoader ) ; } | Creates a copy of this state backend that uses the values defined in the configuration for any fields that were not yet specified in this state backend .
14,852 | public String [ ] getDbStoragePaths ( ) { if ( localRocksDbDirectories == null ) { return null ; } else { String [ ] paths = new String [ localRocksDbDirectories . length ] ; for ( int i = 0 ; i < paths . length ; i ++ ) { paths [ i ] = localRocksDbDirectories [ i ] . toString ( ) ; } return paths ; } } | Gets the configured local DB storage paths or null if none were configured . |
14,853 | public void assignExclusiveSegments ( NetworkBufferPool networkBufferPool , int networkBuffersPerChannel ) throws IOException { checkState ( this . isCreditBased , "Bug in input gate setup logic: exclusive buffers only exist with credit-based flow control." ) ; checkState ( this . networkBufferPool == null , "Bug in input gate setup logic: global buffer pool has" + "already been set for this input gate." ) ; this . networkBufferPool = checkNotNull ( networkBufferPool ) ; this . networkBuffersPerChannel = networkBuffersPerChannel ; synchronized ( requestLock ) { for ( InputChannel inputChannel : inputChannels . values ( ) ) { if ( inputChannel instanceof RemoteInputChannel ) { ( ( RemoteInputChannel ) inputChannel ) . assignExclusiveSegments ( networkBufferPool . requestMemorySegments ( networkBuffersPerChannel ) ) ; } } } } | Assign the exclusive buffers to all remote input channels directly for credit - based mode . |
14,854 | public void retriggerPartitionRequest ( IntermediateResultPartitionID partitionId ) throws IOException , InterruptedException { synchronized ( requestLock ) { if ( ! isReleased ) { final InputChannel ch = inputChannels . get ( partitionId ) ; checkNotNull ( ch , "Unknown input channel with ID " + partitionId ) ; LOG . debug ( "{}: Retriggering partition request {}:{}." , owningTaskName , ch . partitionId , consumedSubpartitionIndex ) ; if ( ch . getClass ( ) == RemoteInputChannel . class ) { final RemoteInputChannel rch = ( RemoteInputChannel ) ch ; rch . retriggerSubpartitionRequest ( consumedSubpartitionIndex ) ; } else if ( ch . getClass ( ) == LocalInputChannel . class ) { final LocalInputChannel ich = ( LocalInputChannel ) ch ; if ( retriggerLocalRequestTimer == null ) { retriggerLocalRequestTimer = new Timer ( true ) ; } ich . retriggerSubpartitionRequest ( retriggerLocalRequestTimer , consumedSubpartitionIndex ) ; } else { throw new IllegalStateException ( "Unexpected type of channel to retrigger partition: " + ch . getClass ( ) ) ; } } } } | Retriggers a partition request . |
14,855 | public static SingleInputGate create ( String owningTaskName , JobID jobId , InputGateDeploymentDescriptor igdd , NetworkEnvironment networkEnvironment , TaskEventPublisher taskEventPublisher , TaskActions taskActions , InputChannelMetrics metrics , Counter numBytesInCounter ) { final IntermediateDataSetID consumedResultId = checkNotNull ( igdd . getConsumedResultId ( ) ) ; final ResultPartitionType consumedPartitionType = checkNotNull ( igdd . getConsumedPartitionType ( ) ) ; final int consumedSubpartitionIndex = igdd . getConsumedSubpartitionIndex ( ) ; checkArgument ( consumedSubpartitionIndex >= 0 ) ; final InputChannelDeploymentDescriptor [ ] icdd = checkNotNull ( igdd . getInputChannelDeploymentDescriptors ( ) ) ; final NetworkEnvironmentConfiguration networkConfig = networkEnvironment . getConfiguration ( ) ; final SingleInputGate inputGate = new SingleInputGate ( owningTaskName , jobId , consumedResultId , consumedPartitionType , consumedSubpartitionIndex , icdd . length , taskActions , numBytesInCounter , networkConfig . isCreditBased ( ) ) ; final InputChannel [ ] inputChannels = new InputChannel [ icdd . length ] ; int numLocalChannels = 0 ; int numRemoteChannels = 0 ; int numUnknownChannels = 0 ; for ( int i = 0 ; i < inputChannels . length ; i ++ ) { final ResultPartitionID partitionId = icdd [ i ] . getConsumedPartitionId ( ) ; final ResultPartitionLocation partitionLocation = icdd [ i ] . getConsumedPartitionLocation ( ) ; if ( partitionLocation . isLocal ( ) ) { inputChannels [ i ] = new LocalInputChannel ( inputGate , i , partitionId , networkEnvironment . getResultPartitionManager ( ) , taskEventPublisher , networkConfig . partitionRequestInitialBackoff ( ) , networkConfig . partitionRequestMaxBackoff ( ) , metrics ) ; numLocalChannels ++ ; } else if ( partitionLocation . isRemote ( ) ) { inputChannels [ i ] = new RemoteInputChannel ( inputGate , i , partitionId , partitionLocation . getConnectionId ( ) , networkEnvironment . 
getConnectionManager ( ) , networkConfig . partitionRequestInitialBackoff ( ) , networkConfig . partitionRequestMaxBackoff ( ) , metrics ) ; numRemoteChannels ++ ; } else if ( partitionLocation . isUnknown ( ) ) { inputChannels [ i ] = new UnknownInputChannel ( inputGate , i , partitionId , networkEnvironment . getResultPartitionManager ( ) , taskEventPublisher , networkEnvironment . getConnectionManager ( ) , networkConfig . partitionRequestInitialBackoff ( ) , networkConfig . partitionRequestMaxBackoff ( ) , metrics ) ; numUnknownChannels ++ ; } else { throw new IllegalStateException ( "Unexpected partition location." ) ; } inputGate . setInputChannel ( partitionId . getPartitionId ( ) , inputChannels [ i ] ) ; } LOG . debug ( "{}: Created {} input channels (local: {}, remote: {}, unknown: {})." , owningTaskName , inputChannels . length , numLocalChannels , numRemoteChannels , numUnknownChannels ) ; return inputGate ; } | Creates an input gate and all of its input channels . |
14,856 | public static int getAvailablePort ( ) { for ( int i = 0 ; i < 50 ; i ++ ) { try ( ServerSocket serverSocket = new ServerSocket ( 0 ) ) { int port = serverSocket . getLocalPort ( ) ; if ( port != 0 ) { return port ; } } catch ( IOException ignored ) { } } throw new RuntimeException ( "Could not find a free permitted port on the machine." ) ; } | Find a non - occupied port . |
14,857 | public static String unresolvedHostToNormalizedString ( String host ) { if ( host == null ) { host = InetAddress . getLoopbackAddress ( ) . getHostAddress ( ) ; } else { host = host . trim ( ) . toLowerCase ( ) ; } if ( IPAddressUtil . isIPv6LiteralAddress ( host ) ) { byte [ ] ipV6Address = IPAddressUtil . textToNumericFormatV6 ( host ) ; host = getIPv6UrlRepresentation ( ipV6Address ) ; } else if ( ! IPAddressUtil . isIPv4LiteralAddress ( host ) ) { try { Preconditions . checkArgument ( ! host . startsWith ( "." ) ) ; Preconditions . checkArgument ( ! host . endsWith ( "." ) ) ; Preconditions . checkArgument ( ! host . contains ( ":" ) ) ; } catch ( Exception e ) { throw new IllegalConfigurationException ( "The configured hostname is not valid" , e ) ; } } return host ; } | Returns an address in a normalized format for Akka . When an IPv6 address is specified it normalizes the IPv6 address to avoid complications with the exact URL match policy of Akka . |
14,858 | public static String ipAddressToUrlString ( InetAddress address ) { if ( address == null ) { throw new NullPointerException ( "address is null" ) ; } else if ( address instanceof Inet4Address ) { return address . getHostAddress ( ) ; } else if ( address instanceof Inet6Address ) { return getIPv6UrlRepresentation ( ( Inet6Address ) address ) ; } else { throw new IllegalArgumentException ( "Unrecognized type of InetAddress: " + address ) ; } } | Encodes an IP address properly as a URL string . This method makes sure that IPv6 addresses have the proper formatting to be included in URLs . |
14,859 | public static String socketAddressToUrlString ( InetSocketAddress address ) { if ( address . isUnresolved ( ) ) { throw new IllegalArgumentException ( "Address cannot be resolved: " + address . getHostString ( ) ) ; } return ipAddressAndPortToUrlString ( address . getAddress ( ) , address . getPort ( ) ) ; } | Encodes an IP address and port to be included in URL . in particular this method makes sure that IPv6 addresses have the proper formatting to be included in URLs . |
14,860 | public static String hostAndPortToUrlString ( String host , int port ) throws UnknownHostException { return ipAddressAndPortToUrlString ( InetAddress . getByName ( host ) , port ) ; } | Normalizes and encodes a hostname and port to be included in URL . In particular this method makes sure that IPv6 address literals have the proper formatting to be included in URLs . |
14,861 | private static String getIPv6UrlRepresentation ( byte [ ] addressBytes ) { int [ ] hextets = new int [ 8 ] ; for ( int i = 0 ; i < hextets . length ; i ++ ) { hextets [ i ] = ( addressBytes [ 2 * i ] & 0xFF ) << 8 | ( addressBytes [ 2 * i + 1 ] & 0xFF ) ; } int bestRunStart = - 1 ; int bestRunLength = - 1 ; int runStart = - 1 ; for ( int i = 0 ; i < hextets . length + 1 ; i ++ ) { if ( i < hextets . length && hextets [ i ] == 0 ) { if ( runStart < 0 ) { runStart = i ; } } else if ( runStart >= 0 ) { int runLength = i - runStart ; if ( runLength > bestRunLength ) { bestRunStart = runStart ; bestRunLength = runLength ; } runStart = - 1 ; } } if ( bestRunLength >= 2 ) { Arrays . fill ( hextets , bestRunStart , bestRunStart + bestRunLength , - 1 ) ; } StringBuilder buf = new StringBuilder ( 40 ) ; buf . append ( '[' ) ; boolean lastWasNumber = false ; for ( int i = 0 ; i < hextets . length ; i ++ ) { boolean thisIsNumber = hextets [ i ] >= 0 ; if ( thisIsNumber ) { if ( lastWasNumber ) { buf . append ( ':' ) ; } buf . append ( Integer . toHexString ( hextets [ i ] ) ) ; } else { if ( i == 0 || lastWasNumber ) { buf . append ( "::" ) ; } } lastWasNumber = thisIsNumber ; } buf . append ( ']' ) ; return buf . toString ( ) ; } | Creates a compressed URL style representation of an Inet6Address . |
14,862 | public static Iterator < Integer > getPortRangeFromString ( String rangeDefinition ) throws NumberFormatException { final String [ ] ranges = rangeDefinition . trim ( ) . split ( "," ) ; UnionIterator < Integer > iterators = new UnionIterator < > ( ) ; for ( String rawRange : ranges ) { Iterator < Integer > rangeIterator ; String range = rawRange . trim ( ) ; int dashIdx = range . indexOf ( '-' ) ; if ( dashIdx == - 1 ) { final int port = Integer . valueOf ( range ) ; if ( port < 0 || port > 65535 ) { throw new IllegalConfigurationException ( "Invalid port configuration. Port must be between 0" + "and 65535, but was " + port + "." ) ; } rangeIterator = Collections . singleton ( Integer . valueOf ( range ) ) . iterator ( ) ; } else { final int start = Integer . valueOf ( range . substring ( 0 , dashIdx ) ) ; if ( start < 0 || start > 65535 ) { throw new IllegalConfigurationException ( "Invalid port configuration. Port must be between 0" + "and 65535, but was " + start + "." ) ; } final int end = Integer . valueOf ( range . substring ( dashIdx + 1 , range . length ( ) ) ) ; if ( end < 0 || end > 65535 ) { throw new IllegalConfigurationException ( "Invalid port configuration. Port must be between 0" + "and 65535, but was " + end + "." ) ; } rangeIterator = new Iterator < Integer > ( ) { int i = start ; public boolean hasNext ( ) { return i <= end ; } public Integer next ( ) { return i ++ ; } public void remove ( ) { throw new UnsupportedOperationException ( "Remove not supported" ) ; } } ; } iterators . add ( rangeIterator ) ; } return iterators ; } | Returns an iterator over available ports defined by the range definition . |
14,863 | public static ServerSocket createSocketFromPorts ( Iterator < Integer > portsIterator , SocketFactory factory ) { while ( portsIterator . hasNext ( ) ) { int port = portsIterator . next ( ) ; LOG . debug ( "Trying to open socket on port {}" , port ) ; try { return factory . createSocket ( port ) ; } catch ( IOException | IllegalArgumentException e ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Unable to allocate socket on port" , e ) ; } else { LOG . info ( "Unable to allocate on port {}, due to error: {}" , port , e . getMessage ( ) ) ; } } } return null ; } | Tries to allocate a socket from the given sets of ports . |
14,864 | public void setRangePartitioned ( Ordering ordering , DataDistribution distribution ) { if ( ordering == null ) { throw new NullPointerException ( ) ; } this . partitioning = PartitioningProperty . RANGE_PARTITIONED ; this . ordering = ordering ; this . partitioningFields = ordering . getInvolvedIndexes ( ) ; this . distribution = distribution ; } | Set the parameters for range partition . |
14,865 | public static < K , VV , EV > Graph < K , VV , EV > fromCollection ( Collection < Vertex < K , VV > > vertices , Collection < Edge < K , EV > > edges , ExecutionEnvironment context ) { return fromDataSet ( context . fromCollection ( vertices ) , context . fromCollection ( edges ) , context ) ; } | Creates a graph from a Collection of vertices and a Collection of edges . |
14,866 | public static < K , EV > Graph < K , NullValue , EV > fromCollection ( Collection < Edge < K , EV > > edges , ExecutionEnvironment context ) { return fromDataSet ( context . fromCollection ( edges ) , context ) ; } | Creates a graph from a Collection of edges . Vertices are created automatically and their values are set to NullValue . |
14,867 | public static < K , VV , EV > Graph < K , VV , EV > fromCollection ( Collection < Edge < K , EV > > edges , final MapFunction < K , VV > vertexValueInitializer , ExecutionEnvironment context ) { return fromDataSet ( context . fromCollection ( edges ) , vertexValueInitializer , context ) ; } | Creates a graph from a Collection of edges . Vertices are created automatically and their values are set by applying the provided map function to the vertex IDs . |
14,868 | public static < K , VV , EV > Graph < K , VV , EV > fromDataSet ( DataSet < Vertex < K , VV > > vertices , DataSet < Edge < K , EV > > edges , ExecutionEnvironment context ) { return new Graph < > ( vertices , edges , context ) ; } | Creates a graph from a DataSet of vertices and a DataSet of edges . |
14,869 | public static < K , EV > Graph < K , NullValue , EV > fromDataSet ( DataSet < Edge < K , EV > > edges , ExecutionEnvironment context ) { DataSet < Vertex < K , NullValue > > vertices = edges . flatMap ( new EmitSrcAndTarget < > ( ) ) . name ( "Source and target IDs" ) . distinct ( ) . name ( "IDs" ) ; return new Graph < > ( vertices , edges , context ) ; } | Creates a graph from a DataSet of edges . Vertices are created automatically and their values are set to NullValue . |
14,870 | public static < K , VV , EV > Graph < K , VV , EV > fromDataSet ( DataSet < Edge < K , EV > > edges , final MapFunction < K , VV > vertexValueInitializer , ExecutionEnvironment context ) { TypeInformation < K > keyType = ( ( TupleTypeInfo < ? > ) edges . getType ( ) ) . getTypeAt ( 0 ) ; TypeInformation < VV > valueType = TypeExtractor . createTypeInfo ( MapFunction . class , vertexValueInitializer . getClass ( ) , 1 , keyType , null ) ; @ SuppressWarnings ( { "unchecked" , "rawtypes" } ) TypeInformation < Vertex < K , VV > > returnType = ( TypeInformation < Vertex < K , VV > > ) new TupleTypeInfo ( Vertex . class , keyType , valueType ) ; DataSet < Vertex < K , VV > > vertices = edges . flatMap ( new EmitSrcAndTargetAsTuple1 < > ( ) ) . name ( "Source and target IDs" ) . distinct ( ) . name ( "IDs" ) . map ( new MapFunction < Tuple1 < K > , Vertex < K , VV > > ( ) { private Vertex < K , VV > output = new Vertex < > ( ) ; public Vertex < K , VV > map ( Tuple1 < K > value ) throws Exception { output . f0 = value . f0 ; output . f1 = vertexValueInitializer . map ( value . f0 ) ; return output ; } } ) . returns ( returnType ) . withForwardedFields ( "f0" ) . name ( "Initialize vertex values" ) ; return new Graph < > ( vertices , edges , context ) ; } | Creates a graph from a DataSet of edges . Vertices are created automatically and their values are set by applying the provided map function to the vertex IDs . |
14,871 | public static < K , VV , EV > Graph < K , VV , EV > fromTupleDataSet ( DataSet < Tuple2 < K , VV > > vertices , DataSet < Tuple3 < K , K , EV > > edges , ExecutionEnvironment context ) { DataSet < Vertex < K , VV > > vertexDataSet = vertices . map ( new Tuple2ToVertexMap < > ( ) ) . name ( "Type conversion" ) ; DataSet < Edge < K , EV > > edgeDataSet = edges . map ( new Tuple3ToEdgeMap < > ( ) ) . name ( "Type conversion" ) ; return fromDataSet ( vertexDataSet , edgeDataSet , context ) ; } | Creates a graph from a DataSet of Tuple2 objects for vertices and Tuple3 objects for edges . |
14,872 | public static < K > Graph < K , NullValue , NullValue > fromTuple2DataSet ( DataSet < Tuple2 < K , K > > edges , ExecutionEnvironment context ) { DataSet < Edge < K , NullValue > > edgeDataSet = edges . map ( new Tuple2ToEdgeMap < > ( ) ) . name ( "To Edge" ) ; return fromDataSet ( edgeDataSet , context ) ; } | Creates a graph from a DataSet of Tuple2 objects for edges . Each Tuple2 will become one Edge where the source ID will be the first field of the Tuple2 and the target ID will be the second field of the Tuple2 . |
14,873 | public static GraphCsvReader fromCsvReader ( String verticesPath , String edgesPath , ExecutionEnvironment context ) { return new GraphCsvReader ( verticesPath , edgesPath , context ) ; } | Creates a Graph from a CSV file of vertices and a CSV file of edges . |
14,874 | public static < K , VV > GraphCsvReader fromCsvReader ( String edgesPath , final MapFunction < K , VV > vertexValueInitializer , ExecutionEnvironment context ) { return new GraphCsvReader ( edgesPath , vertexValueInitializer , context ) ; } | Creates a graph from a CSV file of edges . Vertices will be created automatically and Vertex values can be initialized using a user - defined mapper . |
14,875 | public DataSet < Triplet < K , VV , EV > > getTriplets ( ) { return this . getVertices ( ) . join ( this . getEdges ( ) ) . where ( 0 ) . equalTo ( 0 ) . with ( new ProjectEdgeWithSrcValue < > ( ) ) . name ( "Project edge with source value" ) . join ( this . getVertices ( ) ) . where ( 1 ) . equalTo ( 0 ) . with ( new ProjectEdgeWithVertexValues < > ( ) ) . name ( "Project edge with vertex values" ) ; } | This method allows access to the graph s edge values along with its source and target vertex values . |
14,876 | public < T > Graph < K , VV , EV > joinWithVertices ( DataSet < Tuple2 < K , T > > inputDataSet , final VertexJoinFunction < VV , T > vertexJoinFunction ) { DataSet < Vertex < K , VV > > resultedVertices = this . getVertices ( ) . coGroup ( inputDataSet ) . where ( 0 ) . equalTo ( 0 ) . with ( new ApplyCoGroupToVertexValues < > ( vertexJoinFunction ) ) . name ( "Join with vertices" ) ; return new Graph < > ( resultedVertices , this . edges , this . context ) ; } | Joins the vertex DataSet of this graph with an input Tuple2 DataSet and applies a user - defined transformation on the values of the matched records . The vertex ID and the first field of the Tuple2 DataSet are used as the join keys . |
14,877 | public < T > Graph < K , VV , EV > joinWithEdges ( DataSet < Tuple3 < K , K , T > > inputDataSet , final EdgeJoinFunction < EV , T > edgeJoinFunction ) { DataSet < Edge < K , EV > > resultedEdges = this . getEdges ( ) . coGroup ( inputDataSet ) . where ( 0 , 1 ) . equalTo ( 0 , 1 ) . with ( new ApplyCoGroupToEdgeValues < > ( edgeJoinFunction ) ) . name ( "Join with edges" ) ; return new Graph < > ( this . vertices , resultedEdges , this . context ) ; } | Joins the edge DataSet with an input DataSet on the composite key of both source and target IDs and applies a user - defined transformation on the values of the matched records . The first two fields of the input DataSet are used as join keys . |
14,878 | public < T > Graph < K , VV , EV > joinWithEdgesOnSource ( DataSet < Tuple2 < K , T > > inputDataSet , final EdgeJoinFunction < EV , T > edgeJoinFunction ) { DataSet < Edge < K , EV > > resultedEdges = this . getEdges ( ) . coGroup ( inputDataSet ) . where ( 0 ) . equalTo ( 0 ) . with ( new ApplyCoGroupToEdgeValuesOnEitherSourceOrTarget < > ( edgeJoinFunction ) ) . name ( "Join with edges on source" ) ; return new Graph < > ( this . vertices , resultedEdges , this . context ) ; } | Joins the edge DataSet with an input Tuple2 DataSet and applies a user - defined transformation on the values of the matched records . The source ID of the edges input and the first field of the input DataSet are used as join keys . |
14,879 | public Graph < K , VV , EV > filterOnVertices ( FilterFunction < Vertex < K , VV > > vertexFilter ) { DataSet < Vertex < K , VV > > filteredVertices = this . vertices . filter ( vertexFilter ) ; DataSet < Edge < K , EV > > remainingEdges = this . edges . join ( filteredVertices ) . where ( 0 ) . equalTo ( 0 ) . with ( new ProjectEdge < > ( ) ) . join ( filteredVertices ) . where ( 1 ) . equalTo ( 0 ) . with ( new ProjectEdge < > ( ) ) . name ( "Filter on vertices" ) ; return new Graph < > ( filteredVertices , remainingEdges , this . context ) ; } | Apply a filtering function to the graph and return a sub - graph that satisfies the predicates only for the vertices . |
14,880 | public Graph < K , VV , EV > filterOnEdges ( FilterFunction < Edge < K , EV > > edgeFilter ) { DataSet < Edge < K , EV > > filteredEdges = this . edges . filter ( edgeFilter ) . name ( "Filter on edges" ) ; return new Graph < > ( this . vertices , filteredEdges , this . context ) ; } | Apply a filtering function to the graph and return a sub - graph that satisfies the predicates only for the edges . |
14,881 | public DataSet < Tuple2 < K , LongValue > > outDegrees ( ) { return vertices . coGroup ( edges ) . where ( 0 ) . equalTo ( 0 ) . with ( new CountNeighborsCoGroup < > ( ) ) . name ( "Out-degree" ) ; } | Return the out - degree of all vertices in the graph . |
14,882 | public DataSet < Tuple2 < K , LongValue > > getDegrees ( ) { return outDegrees ( ) . union ( inDegrees ( ) ) . name ( "In- and out-degree" ) . groupBy ( 0 ) . sum ( 1 ) . name ( "Sum" ) ; } | Return the degree of all vertices in the graph . |
14,883 | public Graph < K , VV , EV > getUndirected ( ) { DataSet < Edge < K , EV > > undirectedEdges = edges . flatMap ( new RegularAndReversedEdgesMap < > ( ) ) . name ( "To undirected graph" ) ; return new Graph < > ( vertices , undirectedEdges , this . context ) ; } | This operation adds all inverse - direction edges to the graph . |
14,884 | public < T > DataSet < T > groupReduceOnEdges ( EdgesFunctionWithVertexValue < K , VV , EV , T > edgesFunction , EdgeDirection direction ) throws IllegalArgumentException { switch ( direction ) { case IN : return vertices . coGroup ( edges ) . where ( 0 ) . equalTo ( 1 ) . with ( new ApplyCoGroupFunction < > ( edgesFunction ) ) . name ( "GroupReduce on in-edges" ) ; case OUT : return vertices . coGroup ( edges ) . where ( 0 ) . equalTo ( 0 ) . with ( new ApplyCoGroupFunction < > ( edgesFunction ) ) . name ( "GroupReduce on out-edges" ) ; case ALL : return vertices . coGroup ( edges . flatMap ( new EmitOneEdgePerNode < > ( ) ) . name ( "Emit edge" ) ) . where ( 0 ) . equalTo ( 0 ) . with ( new ApplyCoGroupFunctionOnAllEdges < > ( edgesFunction ) ) . name ( "GroupReduce on in- and out-edges" ) ; default : throw new IllegalArgumentException ( "Illegal edge direction" ) ; } } | Groups by vertex and computes a GroupReduce transformation over the edge values of each vertex . The edgesFunction applied on the edges has access to both the id and the value of the grouping vertex . |
14,885 | public Graph < K , VV , EV > reverse ( ) throws UnsupportedOperationException { DataSet < Edge < K , EV > > reversedEdges = edges . map ( new ReverseEdgesMap < > ( ) ) . name ( "Reverse edges" ) ; return new Graph < > ( vertices , reversedEdges , this . context ) ; } | Reverse the direction of the edges in the graph . |
14,886 | public Graph < K , VV , EV > addVertex ( final Vertex < K , VV > vertex ) { List < Vertex < K , VV > > newVertex = new ArrayList < > ( ) ; newVertex . add ( vertex ) ; return addVertices ( newVertex ) ; } | Adds the input vertex to the graph . If the vertex already exists in the graph it will not be added again . |
14,887 | public Graph < K , VV , EV > addVertices ( List < Vertex < K , VV > > verticesToAdd ) { DataSet < Vertex < K , VV > > newVertices = this . vertices . coGroup ( this . context . fromCollection ( verticesToAdd ) ) . where ( 0 ) . equalTo ( 0 ) . with ( new VerticesUnionCoGroup < > ( ) ) . name ( "Add vertices" ) ; return new Graph < > ( newVertices , this . edges , this . context ) ; } | Adds the list of vertices passed as input to the graph . If the vertices already exist in the graph they will not be added once more . |
14,888 | public Graph < K , VV , EV > addEdge ( Vertex < K , VV > source , Vertex < K , VV > target , EV edgeValue ) { Graph < K , VV , EV > partialGraph = fromCollection ( Arrays . asList ( source , target ) , Collections . singletonList ( new Edge < > ( source . f0 , target . f0 , edgeValue ) ) , this . context ) ; return this . union ( partialGraph ) ; } | Adds the given edge to the graph . If the source and target vertices do not exist in the graph they will also be added . |
14,889 | public Graph < K , VV , EV > addEdges ( List < Edge < K , EV > > newEdges ) { DataSet < Edge < K , EV > > newEdgesDataSet = this . context . fromCollection ( newEdges ) ; DataSet < Edge < K , EV > > validNewEdges = this . getVertices ( ) . join ( newEdgesDataSet ) . where ( 0 ) . equalTo ( 0 ) . with ( new JoinVerticesWithEdgesOnSrc < > ( ) ) . name ( "Join with source" ) . join ( this . getVertices ( ) ) . where ( 1 ) . equalTo ( 0 ) . with ( new JoinWithVerticesOnTrg < > ( ) ) . name ( "Join with target" ) ; return Graph . fromDataSet ( this . vertices , this . edges . union ( validNewEdges ) , this . context ) ; } | Adds the given list edges to the graph . |
14,890 | public Graph < K , VV , EV > removeVertex ( Vertex < K , VV > vertex ) { List < Vertex < K , VV > > vertexToBeRemoved = new ArrayList < > ( ) ; vertexToBeRemoved . add ( vertex ) ; return removeVertices ( vertexToBeRemoved ) ; } | Removes the given vertex and its edges from the graph . |
14,891 | public Graph < K , VV , EV > removeEdge ( Edge < K , EV > edge ) { DataSet < Edge < K , EV > > newEdges = getEdges ( ) . filter ( new EdgeRemovalEdgeFilter < > ( edge ) ) . name ( "Remove edge" ) ; return new Graph < > ( this . vertices , newEdges , this . context ) ; } | Removes all edges that match the given edge from the graph . |
14,892 | public Graph < K , VV , EV > removeEdges ( List < Edge < K , EV > > edgesToBeRemoved ) { DataSet < Edge < K , EV > > newEdges = getEdges ( ) . coGroup ( this . context . fromCollection ( edgesToBeRemoved ) ) . where ( 0 , 1 ) . equalTo ( 0 , 1 ) . with ( new EdgeRemovalCoGroup < > ( ) ) . name ( "Remove edges" ) ; return new Graph < > ( this . vertices , newEdges , context ) ; } | Removes all the edges that match the edges in the given data set from the graph . |
14,893 | public Graph < K , VV , EV > union ( Graph < K , VV , EV > graph ) { DataSet < Vertex < K , VV > > unionedVertices = graph . getVertices ( ) . union ( this . getVertices ( ) ) . name ( "Vertices" ) . distinct ( ) . name ( "Vertices" ) ; DataSet < Edge < K , EV > > unionedEdges = graph . getEdges ( ) . union ( this . getEdges ( ) ) . name ( "Edges" ) ; return new Graph < > ( unionedVertices , unionedEdges , this . context ) ; } | Performs union on the vertices and edges sets of the input graphs removing duplicate vertices but maintaining duplicate edges . |
14,894 | public Graph < K , NullValue , EV > intersect ( Graph < K , VV , EV > graph , boolean distinctEdges ) { DataSet < Edge < K , EV > > intersectEdges ; if ( distinctEdges ) { intersectEdges = getDistinctEdgeIntersection ( graph . getEdges ( ) ) ; } else { intersectEdges = getPairwiseEdgeIntersection ( graph . getEdges ( ) ) ; } return Graph . fromDataSet ( intersectEdges , getContext ( ) ) ; } | Performs intersect on the edge sets of the input graphs . Edges are considered equal if they have the same source identifier target identifier and edge value . |
14,895 | private DataSet < Edge < K , EV > > getDistinctEdgeIntersection ( DataSet < Edge < K , EV > > edges ) { return this . getEdges ( ) . join ( edges ) . where ( 0 , 1 , 2 ) . equalTo ( 0 , 1 , 2 ) . with ( new JoinFunction < Edge < K , EV > , Edge < K , EV > , Edge < K , EV > > ( ) { public Edge < K , EV > join ( Edge < K , EV > first , Edge < K , EV > second ) throws Exception { return first ; } } ) . withForwardedFieldsFirst ( "*" ) . name ( "Intersect edges" ) . distinct ( ) . name ( "Edges" ) ; } | Computes the intersection between the edge set and the given edge set . For all matching pairs only one edge will be in the resulting data set . |
14,896 | private DataSet < Edge < K , EV > > getPairwiseEdgeIntersection ( DataSet < Edge < K , EV > > edges ) { return this . getEdges ( ) . coGroup ( edges ) . where ( 0 , 1 , 2 ) . equalTo ( 0 , 1 , 2 ) . with ( new MatchingEdgeReducer < > ( ) ) . name ( "Intersect edges" ) ; } | Computes the intersection between the edge set and the given edge set . For all matching pairs both edges will be in the resulting data set . |
/**
 * Runs a scatter-gather iteration on the graph with default settings
 * (no {@code ScatterGatherConfiguration} options).
 *
 * @param <M> the message type exchanged between vertices
 * @param scatterFunction produces the messages sent along edges
 * @param gatherFunction combines received messages to update vertex values
 * @param maximumNumberOfIterations the iteration cap
 * @return a new graph whose vertices hold the converged values
 */
public <M> Graph<K, VV, EV> runScatterGatherIteration(ScatterFunction<K, VV, M, EV> scatterFunction,
        org.apache.flink.graph.spargel.GatherFunction<K, VV, M> gatherFunction,
        int maximumNumberOfIterations) {
    // Delegate to the configurable overload; null means "use defaults".
    return this.runScatterGatherIteration(scatterFunction, gatherFunction, maximumNumberOfIterations, null);
}
14,898 | public < M > Graph < K , VV , EV > runScatterGatherIteration ( ScatterFunction < K , VV , M , EV > scatterFunction , org . apache . flink . graph . spargel . GatherFunction < K , VV , M > gatherFunction , int maximumNumberOfIterations , ScatterGatherConfiguration parameters ) { ScatterGatherIteration < K , VV , M , EV > iteration = ScatterGatherIteration . withEdges ( edges , scatterFunction , gatherFunction , maximumNumberOfIterations ) ; iteration . configure ( parameters ) ; DataSet < Vertex < K , VV > > newVertices = this . getVertices ( ) . runOperation ( iteration ) ; return new Graph < > ( newVertices , this . edges , this . context ) ; } | Runs a ScatterGather iteration on the graph with configuration options . |
/**
 * Runs a Gather-Sum-Apply iteration on the graph with default settings
 * (no configuration options).
 *
 * @param <M> the intermediate message/partial-value type
 * @param gatherFunction gathers a partial value from each neighbor
 * @param sumFunction combines the gathered partial values
 * @param applyFunction applies the combined value to update the vertex
 * @param maximumNumberOfIterations the iteration cap
 * @return a new graph whose vertices hold the converged values
 */
public <M> Graph<K, VV, EV> runGatherSumApplyIteration(
        org.apache.flink.graph.gsa.GatherFunction<VV, EV, M> gatherFunction, SumFunction<VV, EV, M> sumFunction,
        ApplyFunction<K, VV, M> applyFunction, int maximumNumberOfIterations) {
    // Delegate to the configurable overload; null means "use defaults".
    return this.runGatherSumApplyIteration(gatherFunction, sumFunction, applyFunction, maximumNumberOfIterations, null);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.