idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
15,500
/**
 * Returns the {@link TypeInfoFactory} for the given type, or {@code null} if none exists.
 * The explicit factory registry is consulted first; otherwise the type's {@code @TypeInfo}
 * annotation is used. Throws {@code InvalidTypesException} when the annotation names a class
 * that is not a {@code TypeInfoFactory}. The cast to {@code TypeInfoFactory<OUT>} is unchecked.
 */
public static < OUT > TypeInfoFactory < OUT > getTypeInfoFactory ( Type t ) { final Class < ? > factoryClass ; if ( registeredTypeInfoFactories . containsKey ( t ) ) { factoryClass = registeredTypeInfoFactories . get ( t ) ; } else { if ( ! isClassType ( t ) || ! typeToClass ( t ) . isAnnotationPresent ( TypeInfo . class ) ) { return null ; } final TypeInfo typeInfoAnnotation = typeToClass ( t ) . getAnnotation ( TypeInfo . class ) ; factoryClass = typeInfoAnnotation . value ( ) ; if ( ! TypeInfoFactory . class . isAssignableFrom ( factoryClass ) ) { throw new InvalidTypesException ( "TypeInfo annotation does not specify a valid TypeInfoFactory." ) ; } } return ( TypeInfoFactory < OUT > ) InstantiationUtil . instantiate ( factoryClass ) ; }
Returns the type information factory for a type using the factory registry or annotations .
15,501
/**
 * Walks up the generic superclass chain starting at {@code t}, recording each visited type in
 * {@code typeHierarchy}, until a {@link TypeInfoFactory} is found or {@code Object} is reached.
 * Returns the closest factory, or {@code null} if none was found. Note the raw
 * {@code TypeInfoFactory} local — the returned factory is only known to handle a supertype of OUT.
 */
private static < OUT > TypeInfoFactory < ? super OUT > getClosestFactory ( ArrayList < Type > typeHierarchy , Type t ) { TypeInfoFactory factory = null ; while ( factory == null && isClassType ( t ) && ! ( typeToClass ( t ) . equals ( Object . class ) ) ) { typeHierarchy . add ( t ) ; factory = getTypeInfoFactory ( t ) ; t = typeToClass ( t ) . getGenericSuperclass ( ) ; if ( t == null ) { break ; } } return factory ; }
Traverses the type hierarchy up until a type information factory can be found .
15,502
/**
 * Checks whether the given class is a "proper" class, i.e. instantiable in the ordinary sense:
 * neither abstract, nor an interface, nor carrying the native modifier bit.
 * Note: primitive types report the abstract modifier and therefore also return {@code false}.
 */
public static boolean isProperClass ( Class < ? > clazz ) {
    final int modifiers = clazz . getModifiers ( ) ;
    final boolean improper =
            Modifier . isAbstract ( modifiers )
                    || Modifier . isInterface ( modifiers )
                    || Modifier . isNative ( modifiers ) ;
    return ! improper ;
}
Checks whether the class is a proper class i . e . not abstract or an interface and not a primitive type .
15,503
/**
 * After the build phase ends, attempts to switch the hash table into dense mode, where bucket
 * addresses are a direct array indexed by (key - minKey). Dense mode is only attempted when
 * nothing was spilled and is only chosen when the key range is small relative to the record
 * count (range <= 4 * records) or to a fraction of the segment size. Every dense bucket slot is
 * pre-filled with INVALID_ADDRESS; if memory for the dense buckets cannot be obtained, the
 * partially-acquired segments are returned and the table stays in non-dense mode.
 * NOTE(review): assumes buildSpillRetBufferNumbers == 0 here (asserted via RuntimeException).
 */
private void tryDenseMode ( ) { if ( numSpillFiles != 0 ) { return ; } long minKey = Long . MAX_VALUE ; long maxKey = Long . MIN_VALUE ; long recordCount = 0 ; for ( LongHashPartition p : this . partitionsBeingBuilt ) { long partitionRecords = p . getBuildSideRecordCount ( ) ; recordCount += partitionRecords ; if ( partitionRecords > 0 ) { if ( p . getMinKey ( ) < minKey ) { minKey = p . getMinKey ( ) ; } if ( p . getMaxKey ( ) > maxKey ) { maxKey = p . getMaxKey ( ) ; } } } if ( buildSpillRetBufferNumbers != 0 ) { throw new RuntimeException ( "buildSpillRetBufferNumbers should be 0: " + buildSpillRetBufferNumbers ) ; } long range = maxKey - minKey + 1 ; if ( range <= recordCount * 4 || range <= segmentSize / 8 ) { int buffers = ( int ) Math . ceil ( ( ( double ) ( range * 8 ) ) / segmentSize ) ; MemorySegment [ ] denseBuckets = new MemorySegment [ buffers ] ; for ( int i = 0 ; i < buffers ; i ++ ) { MemorySegment seg = getNextBuffer ( ) ; if ( seg == null ) { returnAll ( Arrays . asList ( denseBuckets ) ) ; return ; } denseBuckets [ i ] = seg ; for ( int j = 0 ; j < segmentSize ; j += 8 ) { seg . putLong ( j , INVALID_ADDRESS ) ; } } denseMode = true ; LOG . info ( "LongHybridHashTable: Use dense mode!" ) ; this . minKey = minKey ; this . maxKey = maxKey ; buildSpillReturnBuffers . drainTo ( availableMemory ) ; ArrayList < MemorySegment > dataBuffers = new ArrayList < > ( ) ; long addressOffset = 0 ; for ( LongHashPartition p : this . partitionsBeingBuilt ) { p . iteratorToDenseBucket ( denseBuckets , addressOffset , minKey ) ; p . updateDenseAddressOffset ( addressOffset ) ; dataBuffers . addAll ( Arrays . asList ( p . getPartitionBuffers ( ) ) ) ; addressOffset += ( p . getPartitionBuffers ( ) . length << segmentSizeBits ) ; returnAll ( Arrays . asList ( p . getBuckets ( ) ) ) ; } this . denseBuckets = denseBuckets ; this . densePartition = new LongHashPartition ( this , buildSideSerializer , dataBuffers . toArray ( new MemorySegment [ dataBuffers . 
size ( ) ] ) ) ; freeCurrent ( ) ; } }
After build end try to use dense mode .
15,504
/**
 * Restores all key-group data referenced by the registered state handles into a freshly opened
 * RocksDB instance. Only {@code KeyGroupsStateHandle}s are supported; any other handle type is
 * an {@code IllegalStateException}. Null handles in the collection are silently skipped.
 *
 * @return the restore result wrapping the opened DB and its default column family
 */
public RocksDBRestoreResult restore ( ) throws IOException , StateMigrationException , RocksDBException { openDB ( ) ; for ( KeyedStateHandle keyedStateHandle : restoreStateHandles ) { if ( keyedStateHandle != null ) { if ( ! ( keyedStateHandle instanceof KeyGroupsStateHandle ) ) { throw new IllegalStateException ( "Unexpected state handle type, " + "expected: " + KeyGroupsStateHandle . class + ", but found: " + keyedStateHandle . getClass ( ) ) ; } this . currentKeyGroupsStateHandle = ( KeyGroupsStateHandle ) keyedStateHandle ; restoreKeyGroupsInStateHandle ( ) ; } } return new RocksDBRestoreResult ( this . db , defaultColumnFamilyHandle , nativeMetricMonitor , - 1 , null , null ) ; }
Restores all key - groups data that is referenced by the passed state handles .
15,505
/**
 * Restores the single key-groups state handle currently pointed to by
 * {@code currentKeyGroupsStateHandle}: opens its input stream, registers it with the cancel
 * registry so a concurrent cancel can close it, reads the KV-state metadata and then the data.
 * The stream is closed quietly in the finally block, but only if it is still registered
 * (i.e. was not already closed by cancellation).
 */
private void restoreKeyGroupsInStateHandle ( ) throws IOException , StateMigrationException , RocksDBException { try { currentStateHandleInStream = currentKeyGroupsStateHandle . openInputStream ( ) ; cancelStreamRegistry . registerCloseable ( currentStateHandleInStream ) ; currentStateHandleInView = new DataInputViewStreamWrapper ( currentStateHandleInStream ) ; restoreKVStateMetaData ( ) ; restoreKVStateData ( ) ; } finally { if ( cancelStreamRegistry . unregisterCloseable ( currentStateHandleInStream ) ) { IOUtils . closeQuietly ( currentStateHandleInStream ) ; } } }
Restore one key groups state handle .
15,506
/**
 * Submits the given job in detached mode: the call returns once the submission has been
 * acknowledged, without waiting for job completion.
 *
 * @param job the job graph to submit, must not be null
 * @throws JobExecutionException if the submission itself fails
 * @throws InterruptedException if the thread is interrupted while awaiting the acknowledgement
 */
public void runDetached ( JobGraph job ) throws JobExecutionException , InterruptedException {
    checkNotNull ( job , "job is null" ) ;
    try {
        // Block only until the submission future completes, not until the job finishes.
        submitJob ( job ) . get ( ) ;
    } catch ( ExecutionException e ) {
        throw new JobExecutionException ( job . getJobID ( ) , ExceptionUtils . stripExecutionException ( e ) ) ;
    }
}
This method executes a job in detached mode . The method returns immediately after the job has been added to the cluster , without waiting for the job to complete .
15,507
/**
 * Runs a job in blocking mode: submits it, then waits for the terminal {@link JobResult} and
 * converts it to a {@link JobExecutionResult} using the current thread's context class loader
 * (needed to deserialize user-defined accumulator types). Submission and result-retrieval
 * failures are rethrown as {@code JobExecutionException} with the original cause unwrapped.
 */
public JobExecutionResult executeJobBlocking ( JobGraph job ) throws JobExecutionException , InterruptedException { checkNotNull ( job , "job is null" ) ; final CompletableFuture < JobSubmissionResult > submissionFuture = submitJob ( job ) ; final CompletableFuture < JobResult > jobResultFuture = submissionFuture . thenCompose ( ( JobSubmissionResult ignored ) -> requestJobResult ( job . getJobID ( ) ) ) ; final JobResult jobResult ; try { jobResult = jobResultFuture . get ( ) ; } catch ( ExecutionException e ) { throw new JobExecutionException ( job . getJobID ( ) , "Could not retrieve JobResult." , ExceptionUtils . stripExecutionException ( e ) ) ; } try { return jobResult . toJobExecutionResult ( Thread . currentThread ( ) . getContextClassLoader ( ) ) ; } catch ( IOException | ClassNotFoundException e ) { throw new JobExecutionException ( job . getJobID ( ) , e ) ; } }
This method runs a job in blocking mode . The method returns only after the job completed successfully or after it failed terminally .
15,508
/**
 * Factory method creating the metric registry for the mini cluster from the given
 * configuration. Both the registry settings and the reporter setups are derived
 * from the same {@code Configuration} instance.
 */
protected MetricRegistryImpl createMetricRegistry ( Configuration config ) {
    return new MetricRegistryImpl (
            MetricRegistryConfiguration . fromConfiguration ( config ) ,
            ReporterSetup . fromConfiguration ( config ) ) ;
}
Factory method to create the metric registry for the mini cluster .
15,509
/**
 * Factory method instantiating the Akka-based RPC service for the mini cluster.
 * When remote communication is enabled, the actor system binds to the given address with an
 * ephemeral port (0); otherwise a local-only Akka config is used. A test dispatcher config is
 * layered on top (with the derived config as fallback) before the actor system is created.
 */
protected RpcService createRpcService ( AkkaRpcServiceConfiguration akkaRpcServiceConfig , boolean remoteEnabled , String bindAddress ) {
    final Config baseConfig = remoteEnabled
            ? AkkaUtils . getAkkaConfig ( akkaRpcServiceConfig . getConfiguration ( ) , bindAddress , 0 )
            : AkkaUtils . getAkkaConfig ( akkaRpcServiceConfig . getConfiguration ( ) ) ;
    final Config effectiveConfig = AkkaUtils . testDispatcherConfig ( ) . withFallback ( baseConfig ) ;
    final ActorSystem actorSystem = AkkaUtils . createActorSystem ( effectiveConfig ) ;
    return new AkkaRpcService ( actorSystem , akkaRpcServiceConfig ) ;
}
Factory method to instantiate the RPC service .
15,510
/**
 * Generates a hash for the given node and registers it in {@code hashes}.
 *
 * <p>If the node has no user-specified transformation UID, a deterministic hash is computed —
 * which requires all input nodes to be hashed already; if any input hash is missing the method
 * returns {@code false} so the caller can retry later. If a UID is present, a hash is derived
 * from it and checked against all previously registered hashes for collisions.
 *
 * @return {@code true} if a hash was generated and registered, {@code false} if an input hash
 *         was still missing
 * @throws IllegalArgumentException on a user-specified ID hash collision
 * @throws IllegalStateException if the node was already hashed (generator bug)
 */
private boolean generateNodeHash ( StreamNode node , HashFunction hashFunction , Map < Integer , byte [ ] > hashes , boolean isChainingEnabled , StreamGraph streamGraph ) {
    final String userSpecifiedHash = node . getTransformationUID ( ) ;
    final byte [ ] hash ;
    if ( userSpecifiedHash == null ) {
        // Deterministic hashing needs the hashes of all inputs; bail out if any is missing.
        for ( StreamEdge inEdge : node . getInEdges ( ) ) {
            if ( ! hashes . containsKey ( inEdge . getSourceId ( ) ) ) {
                return false ;
            }
        }
        hash = generateDeterministicHash ( node , hashFunction . newHasher ( ) , hashes , isChainingEnabled , streamGraph ) ;
    } else {
        hash = generateUserSpecifiedHash ( node , hashFunction . newHasher ( ) ) ;
        // User-specified IDs must be unique across the whole graph.
        for ( byte [ ] previousHash : hashes . values ( ) ) {
            if ( Arrays . equals ( previousHash , hash ) ) {
                throw new IllegalArgumentException ( "Hash collision on user-specified ID " + "\"" + userSpecifiedHash + "\". " + "Most likely cause is a non-unique ID. Please check that all IDs " + "specified via `uid(String)` are unique." ) ;
            }
        }
    }
    // Registration check was previously duplicated in both branches; hoisted here once.
    if ( hashes . put ( node . getId ( ) , hash ) != null ) {
        throw new IllegalStateException ( "Unexpected state. Tried to add node hash " + "twice. This is probably a bug in the JobGraph generator." ) ;
    }
    return true ;
}
Generates a hash for the node and returns whether the operation was successful .
15,511
/**
 * Generates a hash from the node's user-specified transformation UID.
 * Uses {@code StandardCharsets.UTF_8} instead of {@code Charset.forName("UTF-8")}:
 * same charset, but no lookup and no possible unchecked exception.
 */
private byte [ ] generateUserSpecifiedHash ( StreamNode node , Hasher hasher ) {
    hasher . putString ( node . getTransformationUID ( ) , java . nio . charset . StandardCharsets . UTF_8 ) ;
    return hasher . hash ( ) . asBytes ( ) ;
}
Generates a hash from a user - specified ID .
15,512
/**
 * Generates a deterministic hash from node-local properties (the node's position in the graph,
 * approximated by {@code hashes.size()}) plus one extra local-hash round per chainable output
 * edge, then mixes in each input node's hash byte-wise via {@code hash[j] = hash[j] * 37 ^
 * otherherHash[j]}. All input hashes must already be present in {@code hashes}; a missing one is
 * an {@code IllegalStateException}. Logs the resulting hash at debug level.
 */
private byte [ ] generateDeterministicHash ( StreamNode node , Hasher hasher , Map < Integer , byte [ ] > hashes , boolean isChainingEnabled , StreamGraph streamGraph ) { generateNodeLocalHash ( hasher , hashes . size ( ) ) ; for ( StreamEdge outEdge : node . getOutEdges ( ) ) { if ( isChainable ( outEdge , isChainingEnabled , streamGraph ) ) { generateNodeLocalHash ( hasher , hashes . size ( ) ) ; } } byte [ ] hash = hasher . hash ( ) . asBytes ( ) ; for ( StreamEdge inEdge : node . getInEdges ( ) ) { byte [ ] otherHash = hashes . get ( inEdge . getSourceId ( ) ) ; if ( otherHash == null ) { throw new IllegalStateException ( "Missing hash for input node " + streamGraph . getSourceVertex ( inEdge ) + ". Cannot generate hash for " + node + "." ) ; } for ( int j = 0 ; j < hash . length ; j ++ ) { hash [ j ] = ( byte ) ( hash [ j ] * 37 ^ otherHash [ j ] ) ; } } if ( LOG . isDebugEnabled ( ) ) { String udfClassName = "" ; if ( node . getOperator ( ) instanceof AbstractUdfStreamOperator ) { udfClassName = ( ( AbstractUdfStreamOperator < ? , ? > ) node . getOperator ( ) ) . getUserFunction ( ) . getClass ( ) . getName ( ) ; } LOG . debug ( "Generated hash '" + byteToHexString ( hash ) + "' for node " + "'" + node . toString ( ) + "' {id: " + node . getId ( ) + ", " + "parallelism: " + node . getParallelism ( ) + ", " + "user function: " + udfClassName + "}" ) ; } return hash ; }
Generates a deterministic hash from node - local properties and input and output edges .
15,513
/**
 * Hashes an integer value using MurmurHash3-style mixing (constants 0xcc9e2d51/0x1b873593 and
 * the rotate/multiply rounds match the 32-bit Murmur3 body; {@code code ^= 4} folds in the
 * 4-byte input length), followed by {@code bitMix} finalization. The result is forced
 * non-negative: negatives are negated, and {@code Integer.MIN_VALUE} (which has no positive
 * counterpart) maps to 0.
 */
public static int murmurHash ( int code ) { code *= 0xcc9e2d51 ; code = Integer . rotateLeft ( code , 15 ) ; code *= 0x1b873593 ; code = Integer . rotateLeft ( code , 13 ) ; code = code * 5 + 0xe6546b64 ; code ^= 4 ; code = bitMix ( code ) ; if ( code >= 0 ) { return code ; } else if ( code != Integer . MIN_VALUE ) { return - code ; } else { return 0 ; } }
This function hashes an integer value .
15,514
/**
 * Rounds the given number up to the next power of two (a power of two is returned unchanged).
 * Matches the classic bit-smearing implementation: inputs <= 0 yield 0, and values above
 * 2^30 overflow to {@code Integer.MIN_VALUE}.
 */
public static int roundUpToPowerOfTwo ( int x ) {
    if ( x <= 0 ) {
        return 0 ;
    }
    // ceil(log2(x)) via the leading-zero count of (x - 1); x == 1 shifts by zero.
    return 1 << ( 32 - Integer . numberOfLeadingZeros ( x - 1 ) ) ;
}
Round the given number to the next power of two .
15,515
/**
 * Requests a given number of stack trace samples of a task thread and returns a future
 * that completes with the collected samples.
 *
 * @param task the task to sample, must not be null
 * @param numSamples how many samples to take, must be positive
 * @param delayBetweenSamples pause between consecutive samples, must not be null
 * @param maxStackTraceDepth maximum depth of each collected stack trace
 */
public CompletableFuture < List < StackTraceElement [ ] > > requestStackTraceSample ( final StackTraceSampleableTask task , final int numSamples , final Time delayBetweenSamples , final int maxStackTraceDepth ) {
    checkNotNull ( task , "task must not be null" ) ;
    checkArgument ( numSamples > 0 , "numSamples must be positive" ) ;
    checkNotNull ( delayBetweenSamples , "delayBetweenSamples must not be null" ) ;
    // Seed the recursive sampling with an empty sample list and a fresh result future.
    final List < StackTraceElement [ ] > collectedSamples = new ArrayList < > ( numSamples ) ;
    final CompletableFuture < List < StackTraceElement [ ] > > resultFuture = new CompletableFuture < > ( ) ;
    return requestStackTraceSample ( task , numSamples , delayBetweenSamples , maxStackTraceDepth , collectedSamples , resultFuture ) ;
}
Returns a future that completes with a given number of stack trace samples of a task thread .
15,516
/**
 * Ensures that exactly {@code required} bytes are available in the buffer, reading from the
 * underlying source until that many bytes have been filled. Resets {@code position} to 0 and
 * sets {@code limit} to {@code required}, i.e. never prefetches beyond what was asked for.
 *
 * @throws KryoException if the buffer is smaller than {@code required}, or EOF is hit first
 */
protected int require ( int required ) throws KryoException { if ( required > capacity ) { throw new KryoException ( "Buffer too small: capacity: " + capacity + ", " + "required: " + required ) ; } position = 0 ; int bytesRead = 0 ; int count ; while ( true ) { count = fill ( buffer , bytesRead , required - bytesRead ) ; if ( count == - 1 ) { throw new KryoException ( new EOFException ( "No more bytes left." ) ) ; } bytesRead += count ; if ( bytesRead == required ) { break ; } } limit = required ; return required ; }
Require makes sure that at least required number of bytes are kept in the buffer . If not then it will load exactly the difference between required and currently available number of bytes . Thus it will only load the data which is required and never prefetch data .
15,517
/**
 * Acknowledges the session IDs of every snapshot taken at or before the given checkpoint,
 * removing the acknowledged entries from {@code sessionIdsPerSnapshot}.
 *
 * @param checkpointId the id of the completed checkpoint
 * @param uniqueIds unused here; part of the template-method signature
 */
protected final void acknowledgeIDs ( long checkpointId , Set < UId > uniqueIds ) {
    LOG . debug ( "Acknowledging ids for checkpoint {}" , checkpointId ) ;
    for ( Iterator < Tuple2 < Long , List < SessionId > > > it = sessionIdsPerSnapshot . iterator ( ) ; it . hasNext ( ) ; ) {
        final Tuple2 < Long , List < SessionId > > snapshot = it . next ( ) ;
        if ( snapshot . f0 <= checkpointId ) {
            acknowledgeSessionIDs ( snapshot . f1 ) ;
            it . remove ( ) ;
        }
    }
}
Acknowledges the session ids .
15,518
/**
 * Returns a unique, code-generation-safe identifier for this function: the canonical class
 * name with '.' replaced by '$', followed by '$' and the MD5 hex digest of the function's
 * serialized form (so two differently-configured instances get different identifiers).
 */
public final String functionIdentifier ( ) {
    final String serialized = EncodingUtils . encodeObjectToString ( this ) ;
    final String md5 = EncodingUtils . hex ( EncodingUtils . md5 ( serialized ) ) ;
    return getClass ( ) . getCanonicalName ( ) . replace ( '.' , '$' ) + "$" + md5 ;
}
Returns a unique serialized representation for this function .
15,519
/**
 * Parses a JobID from its 32-character hexadecimal string representation.
 *
 * @param hexString the string to parse
 * @throws IllegalArgumentException if the string is not a valid JobID representation
 */
public static JobID fromHexString ( String hexString ) {
    try {
        final byte [ ] bytes = StringUtils . hexStringToByte ( hexString ) ;
        return new JobID ( bytes ) ;
    } catch ( Exception e ) {
        throw new IllegalArgumentException ( "Cannot parse JobID from \"" + hexString + "\". The expected format is [0-9a-fA-F]{32}, e.g. fd72014d4c864993a2e5a9287b4a9c5d." , e ) ;
    }
}
Parses a JobID from the given string .
15,520
/**
 * Initializes the rank end for the current key. For a constant rank end the cached value is
 * returned directly. Otherwise the value is fetched from keyed state; on first access the value
 * computed from the current row is stored. If a stored value disagrees with the value derived
 * from the current row, the stored one wins and the invalid counter is incremented.
 */
protected long initRankEnd ( BaseRow row ) throws Exception { if ( isConstantRankEnd ) { return rankEnd ; } else { Long rankEndValue = rankEndState . value ( ) ; long curRankEnd = rankEndFetcher . apply ( row ) ; if ( rankEndValue == null ) { rankEnd = curRankEnd ; rankEndState . update ( rankEnd ) ; return rankEnd ; } else { rankEnd = rankEndValue ; if ( rankEnd != curRankEnd ) { invalidCounter . inc ( ) ; } return rankEnd ; } } }
Initialize rank end .
15,521
/**
 * Decides whether a record with the given sort key should be put into the buffer:
 * always when the buffer is empty, when the key sorts strictly before the current worst key,
 * or when the buffer has not yet reached the default top-N size.
 */
protected boolean checkSortKeyInBufferRange ( BaseRow sortKey , TopNBuffer buffer ) {
    final Map . Entry < BaseRow , Collection < BaseRow > > worstEntry = buffer . lastEntry ( ) ;
    if ( worstEntry == null ) {
        // Empty buffer: every record is in range.
        return true ;
    }
    if ( buffer . getSortKeyComparator ( ) . compare ( sortKey , worstEntry . getKey ( ) ) < 0 ) {
        return true ;
    }
    return buffer . getCurrentTopNum ( ) < getDefaultTopNSize ( ) ;
}
Checks whether the record should be put into the buffer .
15,522
/**
 * Initialization method for the NFA: opens every {@code IterativeCondition} attached to any
 * state transition, wiring in the given runtime context and configuration. Called once before
 * any element is processed.
 */
public void open ( RuntimeContext cepRuntimeContext , Configuration conf ) throws Exception {
    for ( State < T > nfaState : getStates ( ) ) {
        for ( StateTransition < T > stateTransition : nfaState . getStateTransitions ( ) ) {
            final IterativeCondition condition = stateTransition . getCondition ( ) ;
            FunctionUtils . setFunctionRuntimeContext ( condition , cepRuntimeContext ) ;
            FunctionUtils . openFunction ( condition , conf ) ;
        }
    }
}
Initialization method for the NFA . It is called before any element is passed and thus suitable for one time setup work .
15,523
/**
 * Tear-down method for the NFA: closes every {@code IterativeCondition} attached to any
 * state transition, releasing resources acquired in {@code open}.
 */
public void close ( ) throws Exception {
    for ( State < T > nfaState : getStates ( ) ) {
        for ( StateTransition < T > stateTransition : nfaState . getStateTransitions ( ) ) {
            FunctionUtils . closeFunction ( stateTransition . getCondition ( ) ) ;
        }
    }
}
Tear - down method for the NFA .
15,524
/**
 * Processes the next input event against the NFA state. The event is registered with the shared
 * buffer via an {@code EventWrapper} whose try-with-resources close releases the buffer entry
 * even if processing throws. Returns the matched event sequences (state name -> matched events)
 * produced by {@code doProcess}.
 */
public Collection < Map < String , List < T > > > process ( final SharedBufferAccessor < T > sharedBufferAccessor , final NFAState nfaState , final T event , final long timestamp , final AfterMatchSkipStrategy afterMatchSkipStrategy , final TimerService timerService ) throws Exception { try ( EventWrapper eventWrapper = new EventWrapper ( event , timestamp , sharedBufferAccessor ) ) { return doProcess ( sharedBufferAccessor , nfaState , eventWrapper , afterMatchSkipStrategy , timerService ) ; } }
Processes the next input event . If some of the computations reach a final state then the resulting event sequences are returned . If computations time out and timeout handling is activated then the timed out event patterns are returned .
15,525
/**
 * Extracts the single event sequence leading from the start to the given computation state,
 * as a map from state name to the events matched in that state. Returns an empty map when the
 * computation state has no previous buffer entry or no paths were extracted. Exactly one path
 * is expected for the state's version (enforced via {@code Preconditions.checkState}).
 */
private Map < String , List < EventId > > extractCurrentMatches ( final SharedBufferAccessor < T > sharedBufferAccessor , final ComputationState computationState ) throws Exception { if ( computationState . getPreviousBufferEntry ( ) == null ) { return new HashMap < > ( ) ; } List < Map < String , List < EventId > > > paths = sharedBufferAccessor . extractPatterns ( computationState . getPreviousBufferEntry ( ) , computationState . getVersion ( ) ) ; if ( paths . isEmpty ( ) ) { return new HashMap < > ( ) ; } Preconditions . checkState ( paths . size ( ) == 1 ) ; return paths . get ( 0 ) ; }
Extracts all the sequences of events from the start to the given computation state . An event sequence is returned as a map which contains the events and the names of the states to which the events were mapped .
15,526
/**
 * Converts a JSON schema string into Flink type information. The parser is configured leniently
 * (comments, unquoted field names, single quotes allowed). An unparsable schema raises
 * {@code IllegalArgumentException} with the underlying I/O cause; the recursive conversion
 * starts at the synthetic location "&lt;root&gt;".
 */
@ SuppressWarnings ( "unchecked" ) public static < T > TypeInformation < T > convert ( String jsonSchema ) { Preconditions . checkNotNull ( jsonSchema , "JSON schema" ) ; final ObjectMapper mapper = new ObjectMapper ( ) ; mapper . getFactory ( ) . enable ( JsonParser . Feature . ALLOW_COMMENTS ) . enable ( JsonParser . Feature . ALLOW_UNQUOTED_FIELD_NAMES ) . enable ( JsonParser . Feature . ALLOW_SINGLE_QUOTES ) ; final JsonNode node ; try { node = mapper . readTree ( jsonSchema ) ; } catch ( IOException e ) { throw new IllegalArgumentException ( "Invalid JSON schema." , e ) ; } return ( TypeInformation < T > ) convertType ( "<root>" , node , node ) ; }
Converts a JSON schema into Flink's type information . Throws an exception if the schema cannot be converted because of loss of precision or a too flexible schema .
15,527
/**
 * Gets the attached exception, deserialized with the given user-code class loader.
 * Returns {@code null} when this status update carries no exception.
 */
public Throwable getError ( ClassLoader userCodeClassloader ) {
    return this . throwable == null
            ? null
            : this . throwable . deserializeError ( userCodeClassloader ) ;
}
Gets the attached exception which is in serialized form . Returns null if the status update is no failure with an associated exception .
15,528
/**
 * Tries to resolve the given name as a credential-provider alias, returning the first matching
 * credential found, or {@code null} if no provider knows the alias.
 *
 * @param name the alias to resolve
 * @throws IOException if a provider lookup fails or the provider path is misconfigured
 */
protected char [ ] getPasswordFromCredentialProviders ( String name ) throws IOException {
    char [ ] pass = null ;
    try {
        List < CredentialProvider > providers = CredentialProviderFactory . getProviders ( this ) ;
        if ( providers != null ) {
            for ( CredentialProvider provider : providers ) {
                try {
                    CredentialEntry entry = provider . getCredentialEntry ( name ) ;
                    if ( entry != null ) {
                        pass = entry . getCredential ( ) ;
                        break ;
                    }
                } catch ( IOException ioe ) {
                    // Fixed message: previously "key provider" + "of type:" concatenated
                    // without a space, producing "key providerof type:".
                    throw new IOException ( "Can't get key " + name + " from key provider " + "of type: " + provider . getClass ( ) . getName ( ) + "." , ioe ) ;
                }
            }
        }
    } catch ( IOException ioe ) {
        throw new IOException ( "Configuration problem with provider path." , ioe ) ;
    }
    return pass ;
}
Try and resolve the provided element name as a credential provider alias .
15,529
/**
 * Closes the iteration, defining the end of the iterative program part: the given data set
 * becomes the result fed back into the next iteration (or returned after the last one).
 *
 * @param iterationResult the data set that concludes one iteration step
 */
public DataSet < T > closeWith ( DataSet < T > iterationResult ) {
    final BulkIterationResultSet < T > resultSet =
            new BulkIterationResultSet < T > ( getExecutionEnvironment ( ) , getType ( ) , this , iterationResult ) ;
    return resultSet ;
}
Closes the iteration . This method defines the end of the iterative program part .
15,530
/**
 * Gets the bytes underlying this ID: the lower part in the first 8 bytes,
 * the upper part in the following 8, each serialized big-endian.
 */
public byte [ ] getBytes ( ) {
    final byte [ ] result = new byte [ SIZE ] ;
    longToByteArray ( lowerPart , result , 0 ) ;
    longToByteArray ( upperPart , result , SIZE_OF_LONG ) ;
    return result ;
}
Gets the bytes underlying this ID .
15,531
/**
 * Returns the hexadecimal String representation of this ID, computing and caching it lazily
 * on first use. Final so that anything built on this representation (paths etc.) stays stable.
 * NOTE(review): the lazy cache is not synchronized — concurrent first calls may each compute
 * the (identical) value; presumably acceptable since the result is deterministic.
 */
public final String toHexString ( ) {
    String cached = this . hexString ;
    if ( cached == null ) {
        final byte [ ] bytes = new byte [ SIZE ] ;
        longToByteArray ( this . lowerPart , bytes , 0 ) ;
        longToByteArray ( this . upperPart , bytes , SIZE_OF_LONG ) ;
        cached = StringUtils . byteToHexString ( bytes ) ;
        this . hexString = cached ;
    }
    return cached ;
}
Returns pure String representation of the ID in hexadecimal . This method should be used to construct things like paths etc . that require a stable representation and is therefore final .
15,532
/**
 * Converts {@code SIZE_OF_LONG} bytes starting at {@code offset} into a long,
 * interpreting the bytes big-endian (most significant byte first).
 */
private static long byteArrayToLong ( byte [ ] ba , int offset ) {
    long result = 0 ;
    for ( int i = 0 ; i < SIZE_OF_LONG ; ++ i ) {
        // Shift in one byte at a time, most significant first.
        result = ( result << 8 ) | ( ba [ offset + i ] & 0xffL ) ;
    }
    return result ;
}
Converts the given byte array to a long .
15,533
/**
 * Writes the given long into {@code ba} at {@code offset} as {@code SIZE_OF_LONG} bytes
 * in big-endian order (most significant byte first).
 */
private static void longToByteArray ( long l , byte [ ] ba , int offset ) {
    for ( int i = SIZE_OF_LONG - 1 ; i >= 0 ; -- i ) {
        // Byte i of the value (counting from the least significant) lands at the mirrored index.
        ba [ offset + SIZE_OF_LONG - 1 - i ] = ( byte ) ( l >>> ( i << 3 ) ) ;
    }
}
Converts a long to a byte array .
15,534
/**
 * Returns the index of the target subtask a Kafka partition is assigned to. The topic hash
 * picks a start index (masked with 0x7FFFFFFF to stay non-negative before the modulo), and
 * partitions of the same topic are spread round-robin from there.
 */
public static int assign ( KafkaTopicPartition partition , int numParallelSubtasks ) {
    final int topicHash = partition . getTopic ( ) . hashCode ( ) * 31 ;
    final int startIndex = ( topicHash & 0x7FFFFFFF ) % numParallelSubtasks ;
    return ( startIndex + partition . getPartition ( ) ) % numParallelSubtasks ;
}
Returns the index of the target subtask that a specific Kafka partition should be assigned to .
15,535
/**
 * Creates a new gather-sum-apply iteration operator for graphs.
 *
 * @param edges the edge data set the iteration operates on
 * @param gather the gather function applied per neighbor
 * @param sum the function combining gathered values
 * @param apply the function updating vertex values
 * @param maximumNumberOfIterations upper bound on the number of supersteps
 */
public static < K , VV , EV , M > GatherSumApplyIteration < K , VV , EV , M > withEdges ( DataSet < Edge < K , EV > > edges , GatherFunction < VV , EV , M > gather , SumFunction < VV , EV , M > sum , ApplyFunction < K , VV , M > apply , int maximumNumberOfIterations ) {
    return new GatherSumApplyIteration < > ( gather , sum , apply , edges , maximumNumberOfIterations ) ;
}
Creates a new gather - sum - apply iteration operator for graphs .
15,536
/**
 * Appends a record into the buffer under its sort key, creating the per-key collection on
 * first use, and returns the size of that key's collection after the insert.
 */
public int put ( BaseRow sortKey , BaseRow value ) {
    currentTopNum += 1 ;
    final Collection < BaseRow > bucket = treeMap . computeIfAbsent ( sortKey , key -> valueSupplier . get ( ) ) ;
    bucket . add ( value ) ;
    return bucket . size ( ) ;
}
Appends a record into the buffer .
15,537
/**
 * Removes the whole record list stored under the given sort key (if any) and decreases the
 * buffered record count accordingly.
 */
void removeAll ( BaseRow sortKey ) {
    final Collection < BaseRow > removed = treeMap . remove ( sortKey ) ;
    if ( removed != null ) {
        currentTopNum -= removed . size ( ) ;
    }
}
Removes all record list from the buffer under the sortKey .
15,538
/**
 * Removes and returns the last record of the last entry in the buffer, or {@code null} if the
 * buffer is empty or the last entry's collection yields no last element. The buffered record
 * count is only decremented when the element was actually removed from the collection, and the
 * map entry itself is dropped once its collection becomes empty.
 */
BaseRow removeLast ( ) { Map . Entry < BaseRow , Collection < BaseRow > > last = treeMap . lastEntry ( ) ; BaseRow lastElement = null ; if ( last != null ) { Collection < BaseRow > list = last . getValue ( ) ; lastElement = getLastElement ( list ) ; if ( lastElement != null ) { if ( list . remove ( lastElement ) ) { currentTopNum -= 1 ; } if ( list . size ( ) == 0 ) { treeMap . remove ( last . getKey ( ) ) ; } } } return lastElement ; }
Removes the last record of the last Entry in the buffer .
15,539
/**
 * Gets the record at the given 1-based rank by walking the buffer in sort order,
 * or {@code null} when the buffer holds fewer than {@code rank} records
 * (or {@code rank} is not positive).
 */
BaseRow getElement ( int rank ) {
    int seen = 0 ;
    for ( Map . Entry < BaseRow , Collection < BaseRow > > entry : treeMap . entrySet ( ) ) {
        for ( BaseRow row : entry . getValue ( ) ) {
            seen += 1 ;
            if ( seen == rank ) {
                return row ;
            }
        }
    }
    return null ;
}
Gets record which rank is given value .
15,540
/**
 * Partitions a tuple DataStream on the specified field using a custom partitioner.
 *
 * @param partitioner the custom partitioner, invoked with the key at the given position
 * @param field the tuple field position to partition on
 */
public < K > DataStream < T > partitionCustom ( Partitioner < K > partitioner , int field ) {
    final int [ ] fields = new int [ ] { field } ;
    final Keys . ExpressionKeys < T > keys = new Keys . ExpressionKeys < > ( fields , getType ( ) ) ;
    return partitionCustom ( partitioner , keys ) ;
}
Partitions a tuple DataStream on the specified key fields using a custom partitioner . This method takes the key position to partition on and a partitioner that accepts the key type .
15,541
/**
 * Partitions a DataStream using a custom partitioner on the key produced by the given
 * key selector.
 *
 * @param partitioner the custom partitioner accepting the selected key type
 * @param keySelector extracts the partitioning key from each element
 */
public < K > DataStream < T > partitionCustom ( Partitioner < K > partitioner , KeySelector < T , K > keySelector ) {
    final CustomPartitionerWrapper < K , T > wrapper =
            new CustomPartitionerWrapper < > ( clean ( partitioner ) , clean ( keySelector ) ) ;
    return setConnectionType ( wrapper ) ;
}
Partitions a DataStream on the key returned by the selector using a custom partitioner . This method takes the key selector to get the key to partition on and a partitioner that accepts the key type .
15,542
/**
 * Private helper for custom partitioning: derives a single-key selector from the given keys
 * descriptor and installs the wrapped partitioner on the stream.
 */
private < K > DataStream < T > partitionCustom ( Partitioner < K > partitioner , Keys < T > keys ) {
    final KeySelector < T , K > selector =
            KeySelectorUtil . getSelectorForOneKey ( keys , partitioner , getType ( ) , getExecutionConfig ( ) ) ;
    final CustomPartitionerWrapper < K , T > wrapper =
            new CustomPartitionerWrapper < > ( clean ( partitioner ) , clean ( selector ) ) ;
    return setConnectionType ( wrapper ) ;
}
private helper method for custom partitioning
15,543
/**
 * Extracts a timestamp from each element and assigns it as the element's internal timestamp
 * (used e.g. for event-time window operations). The resulting operator keeps the parallelism
 * of the input transformation.
 */
public SingleOutputStreamOperator < T > assignTimestamps ( TimestampExtractor < T > extractor ) {
    final int inputParallelism = getTransformation ( ) . getParallelism ( ) ;
    final ExtractTimestampsOperator < T > extractOperator =
            new ExtractTimestampsOperator < > ( clean ( extractor ) ) ;
    return transform ( "ExtractTimestamps" , getTransformation ( ) . getOutputType ( ) , extractOperator )
            . setParallelism ( inputParallelism ) ;
}
Extracts a timestamp from an element and assigns it as the internal timestamp of that element . The internal timestamps are for example used for event - time window operations .
15,544
/**
 * Assigns timestamps to the elements in the data stream and periodically emits watermarks to
 * signal event-time progress. The operator keeps the parallelism of the input transformation.
 */
public SingleOutputStreamOperator < T > assignTimestampsAndWatermarks ( AssignerWithPeriodicWatermarks < T > timestampAndWatermarkAssigner ) {
    final int inputParallelism = getTransformation ( ) . getParallelism ( ) ;
    final TimestampsAndPeriodicWatermarksOperator < T > watermarkOperator =
            new TimestampsAndPeriodicWatermarksOperator < > ( clean ( timestampAndWatermarkAssigner ) ) ;
    return transform ( "Timestamps/Watermarks" , getTransformation ( ) . getOutputType ( ) , watermarkOperator )
            . setParallelism ( inputParallelism ) ;
}
Assigns timestamps to the elements in the data stream and periodically creates watermarks to signal event time progress .
15,545
/**
 * Assigns timestamps to the elements in the data stream and emits watermarks based on the
 * elements themselves (punctuated). The operator keeps the parallelism of the input
 * transformation.
 */
public SingleOutputStreamOperator < T > assignTimestampsAndWatermarks ( AssignerWithPunctuatedWatermarks < T > timestampAndWatermarkAssigner ) {
    final int inputParallelism = getTransformation ( ) . getParallelism ( ) ;
    final TimestampsAndPunctuatedWatermarksOperator < T > watermarkOperator =
            new TimestampsAndPunctuatedWatermarksOperator < > ( clean ( timestampAndWatermarkAssigner ) ) ;
    return transform ( "Timestamps/Watermarks" , getTransformation ( ) . getOutputType ( ) , watermarkOperator )
            . setParallelism ( inputParallelism ) ;
}
Assigns timestamps to the elements in the data stream and creates watermarks to signal event time progress based on the elements themselves .
15,546
/**
 * Writes the DataStream as CSV to the file at the given path, using the given write mode,
 * row delimiter, and field delimiter. Only valid for data streams of tuples.
 *
 * @throws IllegalArgumentException if the stream's type is not a tuple type
 */
@ SuppressWarnings ( "unchecked" )
public < X extends Tuple > DataStreamSink < T > writeAsCsv ( String path , WriteMode writeMode , String rowDelimiter , String fieldDelimiter ) {
    Preconditions . checkArgument ( getType ( ) . isTupleType ( ) , "The writeAsCsv() method can only be used on data streams of tuples." ) ;
    final CsvOutputFormat < X > format = new CsvOutputFormat < > ( new Path ( path ) , rowDelimiter , fieldDelimiter ) ;
    if ( writeMode != null ) {
        format . setWriteMode ( writeMode ) ;
    }
    return writeUsingOutputFormat ( ( OutputFormat < T > ) format ) ;
}
Writes a DataStream to the file specified by the path parameter , using the given write mode , row delimiter and field delimiter .
15,547
/**
 * Writes the DataStream into the sink described by the given {@link OutputFormat},
 * by wrapping the format in an {@code OutputFormatSinkFunction}.
 */
public DataStreamSink < T > writeUsingOutputFormat ( OutputFormat < T > format ) {
    final OutputFormatSinkFunction < T > sinkFunction = new OutputFormatSinkFunction < > ( format ) ;
    return addSink ( sinkFunction ) ;
}
Writes the dataStream into an output described by an OutputFormat .
15,548
/**
 * Transforms the DataStream with a user-defined one-input operator, producing a stream of the
 * given output type. The leading {@code transformation.getOutputType()} call forces lazy output
 * type resolution before the transformation is wrapped. The new transformation is registered
 * with the execution environment so it participates in job graph generation.
 */
public < R > SingleOutputStreamOperator < R > transform ( String operatorName , TypeInformation < R > outTypeInfo , OneInputStreamOperator < T , R > operator ) { transformation . getOutputType ( ) ; OneInputTransformation < T , R > resultTransform = new OneInputTransformation < > ( this . transformation , operatorName , operator , outTypeInfo , environment . getParallelism ( ) ) ; @ SuppressWarnings ( { "unchecked" , "rawtypes" } ) SingleOutputStreamOperator < R > returnStream = new SingleOutputStreamOperator ( environment , resultTransform ) ; getExecutionEnvironment ( ) . addOperator ( resultTransform ) ; return returnStream ; }
Method for passing user defined operators along with the type information that will transform the DataStream .
15,549
/**
 * Internal helper installing the given partitioner on this DataStream by wrapping the current
 * transformation in a {@code PartitionTransformation}.
 */
protected DataStream < T > setConnectionType ( StreamPartitioner < T > partitioner ) {
    final PartitionTransformation < T > partitioned =
            new PartitionTransformation < > ( this . getTransformation ( ) , partitioner ) ;
    return new DataStream < > ( this . getExecutionEnvironment ( ) , partitioned ) ;
}
Internal function for setting the partitioner for the DataStream .
15,550
/**
 * Creates the next random event. With probability proportional to how few state machines are
 * live (p * 1000 >= states.size()), a new state machine is started for a random IP in
 * [minIp, maxIp) — retrying recursively if that IP is already in use. Otherwise an existing
 * machine is picked by skipping a bounded random number of entries (<= 20), removed from the
 * map, and advanced: with probability {@code errorProb} via an invalid transition (the machine
 * is dropped), else via a valid transition, re-inserting the machine unless it reached a
 * terminal state.
 */
public Event next ( int minIp , int maxIp ) { final double p = rnd . nextDouble ( ) ; if ( p * 1000 >= states . size ( ) ) { final int nextIP = rnd . nextInt ( maxIp - minIp ) + minIp ; if ( ! states . containsKey ( nextIP ) ) { EventTypeAndState eventAndState = State . Initial . randomTransition ( rnd ) ; states . put ( nextIP , eventAndState . state ) ; return new Event ( eventAndState . eventType , nextIP ) ; } else { return next ( minIp , maxIp ) ; } } else { int numToSkip = Math . min ( 20 , rnd . nextInt ( states . size ( ) ) ) ; Iterator < Entry < Integer , State > > iter = states . entrySet ( ) . iterator ( ) ; for ( int i = numToSkip ; i > 0 ; -- i ) { iter . next ( ) ; } Entry < Integer , State > entry = iter . next ( ) ; State currentState = entry . getValue ( ) ; int address = entry . getKey ( ) ; iter . remove ( ) ; if ( p < errorProb ) { EventType event = currentState . randomInvalidTransition ( rnd ) ; return new Event ( event , address ) ; } else { EventTypeAndState eventAndState = currentState . randomTransition ( rnd ) ; if ( ! eventAndState . state . isTerminal ( ) ) { states . put ( address , eventAndState . state ) ; } return new Event ( eventAndState . eventType , address ) ; } } }
Creates a new random event . This method randomly picks either one of its currently running state machines or starts a new state machine for a random IP address .
15,551
/**
 * Returns the size of the physical memory in bytes on Windows by summing the capacities
 * reported by {@code wmic memorychip get capacity}, or -1 if the size cannot be determined.
 * The reader is managed by try-with-resources (previously a manual finally block with a
 * quietly-swallowed close); any failure — including a failing close — is logged and
 * reported as -1.
 */
private static long getSizeOfPhysicalMemoryForWindows ( ) {
    try {
        // Fixed constant command, so Runtime.exec(String) is acceptable here.
        final Process proc = Runtime . getRuntime ( ) . exec ( "wmic memorychip get capacity" ) ;
        try ( BufferedReader reader = new BufferedReader ( new InputStreamReader ( proc . getInputStream ( ) ) ) ) {
            String line = reader . readLine ( ) ;
            // Expect a "Capacity" header line first; anything else means unparsable output.
            if ( line == null || ! line . startsWith ( "Capacity" ) ) {
                return - 1L ;
            }
            long sizeOfPhysicalMemory = 0L ;
            while ( ( line = reader . readLine ( ) ) != null ) {
                if ( line . isEmpty ( ) ) {
                    continue ;
                }
                sizeOfPhysicalMemory += Long . parseLong ( line . replaceAll ( " " , "" ) ) ;
            }
            return sizeOfPhysicalMemory ;
        }
    } catch ( Throwable t ) {
        LOG . error ( "Cannot determine the size of the physical memory for Windows host " + "(using 'wmic memorychip')" , t ) ;
        return - 1L ;
    }
}
Returns the size of the physical memory in bytes on Windows .
15,552
/**
 * Applies a Map transformation on this DataSet; the given {@link MapFunction}
 * is invoked once per element.
 *
 * @param mapper the map function, must not be null
 * @return the resulting MapOperator
 */
public < R > MapOperator < T , R > map ( MapFunction < T , R > mapper ) {
    if ( mapper == null ) {
        throw new NullPointerException ( "Map function must not be null." ) ;
    }
    final String where = Utils . getCallLocationName ( ) ;
    final TypeInformation < R > returnType =
            TypeExtractor . getMapReturnTypes ( mapper , getType ( ) , where , true ) ;
    return new MapOperator < > ( this , returnType , clean ( mapper ) , where ) ;
}
Applies a Map transformation on this DataSet .
15,553
/**
 * Applies a Map-style operation to each entire parallel partition of the data;
 * the function is called once per partition with an iterator over all its elements.
 *
 * @param mapPartition the partition mapping function, must not be null
 * @return the resulting MapPartitionOperator
 */
public < R > MapPartitionOperator < T , R > mapPartition ( MapPartitionFunction < T , R > mapPartition ) {
    if ( mapPartition == null ) {
        throw new NullPointerException ( "MapPartition function must not be null." ) ;
    }
    final String where = Utils . getCallLocationName ( ) ;
    final TypeInformation < R > returnType =
            TypeExtractor . getMapPartitionReturnTypes ( mapPartition , getType ( ) , where , true ) ;
    return new MapPartitionOperator < > ( this , returnType , clean ( mapPartition ) , where ) ;
}
Applies a Map - style operation to the entire partition of the data . The function is called once per parallel partition of the data and the entire partition is available through the given Iterator . The number of elements that each instance of the MapPartition function sees is non - deterministic and depends on the parallelism of the operation .
15,554
// Convenience method that triggers job execution and returns this DataSet's
// elements as a List: the elements are gathered into a serialized-list
// accumulator under a fresh unique id via a CollectHelper output, then
// deserialized on the client after execute() returns. Throws RuntimeException
// if the accumulator is missing or deserialization fails. Use with caution
// for large DataSets.
public List < T > collect ( ) throws Exception { final String id = new AbstractID ( ) . toString ( ) ; final TypeSerializer < T > serializer = getType ( ) . createSerializer ( getExecutionEnvironment ( ) . getConfig ( ) ) ; this . output ( new Utils . CollectHelper < > ( id , serializer ) ) . name ( "collect()" ) ; JobExecutionResult res = getExecutionEnvironment ( ) . execute ( ) ; ArrayList < byte [ ] > accResult = res . getAccumulatorResult ( id ) ; if ( accResult != null ) { try { return SerializedListAccumulator . deserializeList ( accResult , serializer ) ; } catch ( ClassNotFoundException e ) { throw new RuntimeException ( "Cannot find type class of collected data type." , e ) ; } catch ( IOException e ) { throw new RuntimeException ( "Serialization error while deserializing collected data" , e ) ; } } else { throw new RuntimeException ( "The call to collect() could not retrieve the DataSet." ) ; } }
Convenience method to get the elements of a DataSet as a List . As DataSet can contain a lot of data this method should be used with caution .
15,555
/**
 * Selects an element with the minimum value over the given tuple fields.
 * Only applicable to Tuple DataSets.
 *
 * @param fields positions of the tuple fields to compare on
 * @return a ReduceOperator that selects the minimum element
 */
@ SuppressWarnings ( { "unchecked" , "rawtypes" } )
public ReduceOperator < T > minBy ( int ... fields ) {
    if ( ! getType ( ) . isTupleType ( ) ) {
        throw new InvalidProgramException ( "DataSet#minBy(int...) only works on Tuple types." ) ;
    }
    final SelectByMinFunction minSelector = new SelectByMinFunction ( ( TupleTypeInfo ) getType ( ) , fields ) ;
    return new ReduceOperator < > ( this , minSelector , Utils . getCallLocationName ( ) ) ;
}
Selects an element with minimum value .
15,556
/**
 * Selects an element with the maximum value over the given tuple fields.
 * Only applicable to Tuple DataSets.
 *
 * @param fields positions of the tuple fields to compare on
 * @return a ReduceOperator that selects the maximum element
 */
@ SuppressWarnings ( { "unchecked" , "rawtypes" } )
public ReduceOperator < T > maxBy ( int ... fields ) {
    if ( ! getType ( ) . isTupleType ( ) ) {
        throw new InvalidProgramException ( "DataSet#maxBy(int...) only works on Tuple types." ) ;
    }
    final SelectByMaxFunction maxSelector = new SelectByMaxFunction ( ( TupleTypeInfo ) getType ( ) , fields ) ;
    return new ReduceOperator < > ( this , maxSelector , Utils . getCallLocationName ( ) ) ;
}
Selects an element with maximum value .
15,557
/**
 * Initiates a CoGroup transformation with the given DataSet.
 *
 * @param other the second input of the CoGroup
 * @return the first stage of the CoGroup builder
 */
public < R > CoGroupOperator . CoGroupOperatorSets < T , R > coGroup ( DataSet < R > other ) {
    final CoGroupOperator . CoGroupOperatorSets < T , R > sets =
            new CoGroupOperator . CoGroupOperatorSets < > ( this , other ) ;
    return sets ;
}
Initiates a CoGroup transformation .
15,558
/**
 * Creates a union of this DataSet with another DataSet of the same data type.
 *
 * @param other the DataSet to union with this one
 * @return the union operator
 */
public UnionOperator < T > union ( DataSet < T > other ) {
    final String where = Utils . getCallLocationName ( ) ;
    return new UnionOperator < > ( this , other , where ) ;
}
Creates a union of this DataSet with an other DataSet . The other DataSet must be of the same data type .
15,559
/**
 * Hash-partitions this DataSet on the specified key fields.
 *
 * @param fields the tuple field positions to partition on
 * @return the partition operator
 */
public PartitionOperator < T > partitionByHash ( int ... fields ) {
    final Keys . ExpressionKeys < T > keys = new Keys . ExpressionKeys < > ( fields , getType ( ) ) ;
    return new PartitionOperator < > ( this , PartitionMethod . HASH , keys , Utils . getCallLocationName ( ) ) ;
}
Hash - partitions a DataSet on the specified key fields .
15,560
/**
 * Hash-partitions this DataSet on the key extracted by the given KeySelector.
 *
 * @param keyExtractor selector producing the partitioning key per element
 * @return the partition operator
 */
public < K extends Comparable < K > > PartitionOperator < T > partitionByHash ( KeySelector < T , K > keyExtractor ) {
    final TypeInformation < K > keyType = TypeExtractor . getKeySelectorTypes ( keyExtractor , getType ( ) ) ;
    final Keys . SelectorFunctionKeys < T , K > keys =
            new Keys . SelectorFunctionKeys < > ( clean ( keyExtractor ) , this . getType ( ) , keyType ) ;
    return new PartitionOperator < > ( this , PartitionMethod . HASH , keys , Utils . getCallLocationName ( ) ) ;
}
Partitions a DataSet using the specified KeySelector .
15,561
/**
 * Range-partitions this DataSet on the specified key fields.
 *
 * @param fields the tuple field positions to partition on
 * @return the partition operator
 */
public PartitionOperator < T > partitionByRange ( int ... fields ) {
    final Keys . ExpressionKeys < T > keys = new Keys . ExpressionKeys < > ( fields , getType ( ) ) ;
    return new PartitionOperator < > ( this , PartitionMethod . RANGE , keys , Utils . getCallLocationName ( ) ) ;
}
Range - partitions a DataSet on the specified key fields .
15,562
/**
 * Partitions a tuple DataSet on the specified key field using a custom partitioner.
 *
 * @param partitioner the partitioner assigning elements to partitions by key
 * @param field the tuple field position to partition on
 * @return the partition operator
 */
public < K > PartitionOperator < T > partitionCustom ( Partitioner < K > partitioner , int field ) {
    final Keys . ExpressionKeys < T > keys = new Keys . ExpressionKeys < > ( new int [ ] { field } , getType ( ) ) ;
    return new PartitionOperator < > ( this , keys , clean ( partitioner ) , Utils . getCallLocationName ( ) ) ;
}
Partitions a tuple DataSet on the specified key fields using a custom partitioner . This method takes the key position to partition on and a partitioner that accepts the key type .
15,563
/**
 * Partitions a DataSet on the key returned by the selector, using a custom partitioner.
 *
 * @param partitioner the partitioner assigning elements to partitions by key
 * @param keyExtractor selector producing the partitioning key per element
 * @return the partition operator
 */
public < K extends Comparable < K > > PartitionOperator < T > partitionCustom ( Partitioner < K > partitioner , KeySelector < T , K > keyExtractor ) {
    final TypeInformation < K > keyType = TypeExtractor . getKeySelectorTypes ( keyExtractor , getType ( ) ) ;
    // Clean the key extractor's closure, consistent with the other key-selector
    // based methods (partitionByHash(KeySelector), sortPartition), which all
    // pass the selector through clean(); previously only the partitioner was cleaned.
    return new PartitionOperator < > ( this , new Keys . SelectorFunctionKeys < > ( clean ( keyExtractor ) , getType ( ) , keyType ) , clean ( partitioner ) , Utils . getCallLocationName ( ) ) ;
}
Partitions a DataSet on the key returned by the selector using a custom partitioner . This method takes the key selector to get the key to partition on and a partitioner that accepts the key type .
15,564
/**
 * Locally sorts each partition of the DataSet on the key extracted by the given
 * KeySelector, in the specified order. Multiple sort values can be expressed by
 * returning a tuple from the selector.
 *
 * @param keyExtractor selector producing the sort key per element
 * @param order ascending or descending sort order
 * @return the sort-partition operator
 */
public < K > SortPartitionOperator < T > sortPartition ( KeySelector < T , K > keyExtractor , Order order ) {
    final TypeInformation < K > keyType = TypeExtractor . getKeySelectorTypes ( keyExtractor , getType ( ) ) ;
    final Keys . SelectorFunctionKeys < T , K > sortKey =
            new Keys . SelectorFunctionKeys < > ( clean ( keyExtractor ) , getType ( ) , keyType ) ;
    return new SortPartitionOperator < > ( this , sortKey , order , Utils . getCallLocationName ( ) ) ;
}
Locally sorts the partitions of the DataSet on the extracted key in the specified order . The DataSet can be sorted on multiple values by returning a tuple from the KeySelector .
15,565
/**
 * Returns a copy of the default value, or null if no default value was set.
 *
 * @throws IllegalStateException if a default value exists but the serializer
 *     has not been initialized yet
 */
public T getDefaultValue ( ) {
    if ( defaultValue == null ) {
        return null ;
    }
    if ( serializer == null ) {
        throw new IllegalStateException ( "Serializer not yet initialized." ) ;
    }
    // Hand out a copy so callers cannot mutate the stored default.
    return serializer . copy ( defaultValue ) ;
}
Returns the default value .
15,566
/**
 * Sets the name under which state created from this descriptor can be queried.
 *
 * @param queryableStateName the registration name, must not be null
 * @throws IllegalStateException if a queryable name was already set
 */
public void setQueryable ( String queryableStateName ) {
    // Queryable state and TTL are mutually exclusive at the moment.
    Preconditions . checkArgument (
            ttlConfig . getUpdateType ( ) == StateTtlConfig . UpdateType . Disabled ,
            "Queryable state is currently not supported with TTL" ) ;
    if ( this . queryableStateName != null ) {
        throw new IllegalStateException ( "Queryable state name already set" ) ;
    }
    this . queryableStateName = Preconditions . checkNotNull ( queryableStateName , "Registration name" ) ;
}
Sets the name for queries of state created from this descriptor .
15,567
/**
 * Lazily creates the serializer from the stored type information, unless a
 * serializer was already set; the type information is dropped afterwards.
 */
public void initializeSerializerUnlessSet ( ExecutionConfig executionConfig ) {
    if ( serializer != null ) {
        return ;
    }
    checkState ( typeInfo != null , "no serializer and no type info" ) ;
    serializer = typeInfo . createSerializer ( executionConfig ) ;
    // Once the serializer exists, the type info is no longer needed.
    typeInfo = null ;
}
Initializes the serializer unless it has been initialized before .
15,568
// Fills the given Row with the fields of the current JDBC result-set row
// (result-set columns are read 1-based) and advances the cursor; returns null
// once the result set is exhausted. SQLExceptions and null result-set access
// are wrapped in IOException — the NPE catch appears deliberate, shielding
// callers when the result set was never opened.
public Row nextRecord ( Row row ) throws IOException { try { if ( ! hasNext ) { return null ; } for ( int pos = 0 ; pos < row . getArity ( ) ; pos ++ ) { row . setField ( pos , resultSet . getObject ( pos + 1 ) ) ; } hasNext = resultSet . next ( ) ; return row ; } catch ( SQLException se ) { throw new IOException ( "Couldn't read data - " + se . getMessage ( ) , se ) ; } catch ( NullPointerException npe ) { throw new IOException ( "Couldn't access resultSet" , npe ) ; } }
Stores the next resultSet row in a tuple .
15,569
// Validates the partitioned table and the given partition spec: the spec may
// not define more keys than the table, and the spec's keys must cover the
// leading (prefix) partition keys of the table — the loop checks that each of
// the first spec.size() table partition keys appears in the spec.
private CatalogTable validatePartitionSpec ( ObjectPath tablePath , CatalogPartitionSpec partitionSpec ) throws TableNotExistException , TableNotPartitionedException , PartitionSpecInvalidException { CatalogTable table = validatePartitionedTable ( tablePath ) ; List < String > partitionKeys = table . getPartitionKeys ( ) ; Map < String , String > spec = partitionSpec . getPartitionSpec ( ) ; if ( partitionKeys . size ( ) < spec . size ( ) ) { throw new PartitionSpecInvalidException ( catalogName , partitionKeys , tablePath , partitionSpec ) ; } else { int size = spec . size ( ) ; for ( int i = 0 ; i < size ; i ++ ) { if ( ! spec . containsKey ( partitionKeys . get ( i ) ) ) { throw new PartitionSpecInvalidException ( catalogName , partitionKeys , tablePath , partitionSpec ) ; } } } return table ; }
Validate the partitioned table and partitionSpec .
15,570
/**
 * Checks that the table at the given path is a CatalogTable and is partitioned.
 *
 * @param tablePath path of the table to validate
 * @return the validated, partitioned CatalogTable
 * @throws TableNotExistException if no table exists at the path
 * @throws TableNotPartitionedException if the table is not partitioned
 */
private CatalogTable validatePartitionedTable ( ObjectPath tablePath )
        throws TableNotExistException , TableNotPartitionedException {
    final CatalogBaseTable baseTable = getTable ( tablePath ) ;
    if ( ! ( baseTable instanceof CatalogTable ) ) {
        throw new CatalogException (
                String . format ( "%s in Catalog %s is not a CatalogTable" , tablePath . getFullName ( ) , catalogName ) ) ;
    }
    final CatalogTable catalogTable = ( CatalogTable ) baseTable ;
    if ( ! catalogTable . isPartitioned ( ) ) {
        throw new TableNotPartitionedException ( catalogName , tablePath ) ;
    }
    return catalogTable ;
}
Validate the partitioned table .
15,571
/**
 * Increases the backlog count by one after a non-event buffer was added to
 * this subpartition. Must be called while holding the lock on {@code buffers}.
 */
protected void increaseBuffersInBacklog ( BufferConsumer buffer ) {
    assert Thread . holdsLock ( buffers ) ;
    // Only actual data buffers count towards the backlog, not events.
    final boolean isDataBuffer = buffer != null && buffer . isBuffer ( ) ;
    if ( isDataBuffer ) {
        buffersInBacklog ++ ;
    }
}
Increases the number of non - event buffers by one after adding a non - event buffer into this subpartition .
15,572
// Looks up completion hints for a syntactically correct SELECT that has been
// parsed into an expression tree. If no identifier info was recorded at the
// given position (or it has no scope), hints are derived from the FROM clause;
// otherwise name completion runs in the recorded identifier's scope.
void lookupSelectHints ( SqlSelect select , SqlParserPos pos , Collection < SqlMoniker > hintList ) { IdInfo info = idPositions . get ( pos . toString ( ) ) ; if ( ( info == null ) || ( info . scope == null ) ) { SqlNode fromNode = select . getFrom ( ) ; final SqlValidatorScope fromScope = getFromScope ( select ) ; lookupFromHints ( fromNode , fromScope , pos , hintList ) ; } else { lookupNameCompletionHints ( info . scope , info . id . names , info . id . getParserPosition ( ) , hintList ) ; } }
Looks up completion hints for a syntactically correct select SQL that has been parsed into an expression tree .
15,573
// Populates hintList with all valid alternatives for a (possibly qualified)
// identifier. For a qualified name, the prefix (all but the last component)
// is resolved step by step — first in the scope, then via namespace children —
// and the resolved namespace's columns plus matching function names are
// offered. For an unqualified name, aliases in scope, the single child's
// columns (if exactly one), and matching UDF names are offered.
public final void lookupNameCompletionHints ( SqlValidatorScope scope , List < String > names , SqlParserPos pos , Collection < SqlMoniker > hintList ) { List < String > subNames = Util . skipLast ( names ) ; if ( subNames . size ( ) > 0 ) { SqlValidatorNamespace ns = null ; for ( String name : subNames ) { if ( ns == null ) { final SqlValidatorScope . ResolvedImpl resolved = new SqlValidatorScope . ResolvedImpl ( ) ; final SqlNameMatcher nameMatcher = catalogReader . nameMatcher ( ) ; scope . resolve ( ImmutableList . of ( name ) , nameMatcher , false , resolved ) ; if ( resolved . count ( ) == 1 ) { ns = resolved . only ( ) . namespace ; } } else { ns = ns . lookupChild ( name ) ; } if ( ns == null ) { break ; } } if ( ns != null ) { RelDataType rowType = ns . getRowType ( ) ; if ( rowType . isStruct ( ) ) { for ( RelDataTypeField field : rowType . getFieldList ( ) ) { hintList . add ( new SqlMonikerImpl ( field . getName ( ) , SqlMonikerType . COLUMN ) ) ; } } } findAllValidFunctionNames ( names , this , hintList , pos ) ; } else { scope . findAliases ( hintList ) ; SelectScope selectScope = SqlValidatorUtil . getEnclosingSelectScope ( scope ) ; if ( ( selectScope != null ) && ( selectScope . getChildren ( ) . size ( ) == 1 ) ) { RelDataType rowType = selectScope . getChildren ( ) . get ( 0 ) . getRowType ( ) ; for ( RelDataTypeField field : rowType . getFieldList ( ) ) { hintList . add ( new SqlMonikerImpl ( field . getName ( ) , SqlMonikerType . COLUMN ) ) ; } } } findAllValidUdfNames ( names , this , hintList ) ; }
Populates a list of all the valid alternatives for an identifier .
15,574
/**
 * Validates a namespace and, if it has an associated parse-tree node, records
 * the namespace's derived type on that node.
 */
protected void validateNamespace ( final SqlValidatorNamespace namespace , RelDataType targetRowType ) {
    namespace . validate ( targetRowType ) ;
    final SqlNode node = namespace . getNode ( ) ;
    if ( node != null ) {
        setValidatedNodeType ( node , namespace . getType ( ) ) ;
    }
}
Validates a namespace .
15,575
// Creates the SELECT statement that putatively feeds rows into an UPDATE
// statement: selects '*' plus one ordinal-aliased item per source expression,
// from the (optionally aliased) target table, filtered by the UPDATE's
// condition.
protected SqlSelect createSourceSelectForUpdate ( SqlUpdate call ) { final SqlNodeList selectList = new SqlNodeList ( SqlParserPos . ZERO ) ; selectList . add ( SqlIdentifier . star ( SqlParserPos . ZERO ) ) ; int ordinal = 0 ; for ( SqlNode exp : call . getSourceExpressionList ( ) ) { String alias = SqlUtil . deriveAliasFromOrdinal ( ordinal ) ; selectList . add ( SqlValidatorUtil . addAlias ( exp , alias ) ) ; ++ ordinal ; } SqlNode sourceTable = call . getTargetTable ( ) ; if ( call . getAlias ( ) != null ) { sourceTable = SqlValidatorUtil . addAlias ( sourceTable , call . getAlias ( ) . getSimple ( ) ) ; } return new SqlSelect ( SqlParserPos . ZERO , null , selectList , sourceTable , call . getCondition ( ) , null , null , null , null , null , null ) ; }
Creates the SELECT statement that putatively feeds rows into an UPDATE statement to be updated .
15,576
/**
 * Creates the SELECT statement that putatively feeds rows into a DELETE
 * statement: selects '*' from the (optionally aliased) target table,
 * filtered by the DELETE's condition.
 */
protected SqlSelect createSourceSelectForDelete ( SqlDelete call ) {
    final SqlNodeList selectList = new SqlNodeList ( SqlParserPos . ZERO ) ;
    selectList . add ( SqlIdentifier . star ( SqlParserPos . ZERO ) ) ;
    final SqlNode sourceTable = call . getAlias ( ) == null
            ? call . getTargetTable ( )
            : SqlValidatorUtil . addAlias ( call . getTargetTable ( ) , call . getAlias ( ) . getSimple ( ) ) ;
    return new SqlSelect ( SqlParserPos . ZERO , null , selectList , sourceTable ,
            call . getCondition ( ) , null , null , null , null , null , null ) ;
}
Creates the SELECT statement that putatively feeds rows into a DELETE statement to be deleted .
15,577
// Derives the row type of a VALUES table constructor: builds one struct type
// per ROW operand (deriving an alias and type per column) and, for multiple
// rows, takes the least restrictive common type. Returns null if there is no
// common type, e.g. if the rows have a different number of columns.
RelDataType getTableConstructorRowType ( SqlCall values , SqlValidatorScope scope ) { final List < SqlNode > rows = values . getOperandList ( ) ; assert rows . size ( ) >= 1 ; final List < RelDataType > rowTypes = new ArrayList < > ( ) ; for ( final SqlNode row : rows ) { assert row . getKind ( ) == SqlKind . ROW ; SqlCall rowConstructor = ( SqlCall ) row ; final List < String > aliasList = new ArrayList < > ( ) ; final List < RelDataType > typeList = new ArrayList < > ( ) ; for ( Ord < SqlNode > column : Ord . zip ( rowConstructor . getOperandList ( ) ) ) { final String alias = deriveAlias ( column . e , column . i ) ; aliasList . add ( alias ) ; final RelDataType type = deriveType ( scope , column . e ) ; typeList . add ( type ) ; } rowTypes . add ( typeFactory . createStructType ( typeList , aliasList ) ) ; } if ( rows . size ( ) == 1 ) { return rowTypes . get ( 0 ) ; } return typeFactory . leastRestrictive ( rowTypes ) ; }
Returns null if there is no common type . E . g . if the rows have a different number of columns .
15,578
/**
 * Derives the type of a node via the type-derivation visitor, applying the
 * scope's nullability adjustment; never returns null.
 */
RelDataType deriveTypeImpl ( SqlValidatorScope scope , SqlNode operand ) {
    final DeriveTypeVisitor visitor = new DeriveTypeVisitor ( scope ) ;
    final RelDataType derived = operand . accept ( visitor ) ;
    final RelDataType adjusted = scope . nullifyType ( operand , derived ) ;
    return Objects . requireNonNull ( adjusted ) ;
}
Derives the type of a node never null .
15,579
// Adds an expression to a select list, ensuring that its alias does not clash
// with any existing expression on the list: the alias is uniquified and, if it
// changed, the expression is re-aliased before its (alias, type) pair is
// recorded in fieldList and the expression appended to list.
protected void addToSelectList ( List < SqlNode > list , Set < String > aliases , List < Map . Entry < String , RelDataType > > fieldList , SqlNode exp , SqlValidatorScope scope , final boolean includeSystemVars ) { String alias = SqlValidatorUtil . getAlias ( exp , - 1 ) ; String uniqueAlias = SqlValidatorUtil . uniquify ( alias , aliases , SqlValidatorUtil . EXPR_SUGGESTER ) ; if ( ! alias . equals ( uniqueAlias ) ) { exp = SqlValidatorUtil . addAlias ( exp , uniqueAlias ) ; } fieldList . add ( Pair . of ( uniqueAlias , deriveType ( scope , exp ) ) ) ; list . add ( exp ) ; }
Adds an expression to a select list ensuring that its alias does not clash with any existing expressions on the list .
15,580
/**
 * Registers a new namespace, and adds it as a child of its parent scope if one
 * was supplied. Derived classes can override this method to tinker with
 * namespaces as they are created.
 */
protected void registerNamespace ( SqlValidatorScope usingScope , String alias ,
        SqlValidatorNamespace ns , boolean forceNullable ) {
    namespaces . put ( ns . getNode ( ) , ns ) ;
    if ( usingScope == null ) {
        return ;
    }
    usingScope . addChild ( ns , alias , forceNullable ) ;
}
Registers a new namespace and adds it as a child of its parent scope . Derived class can override this method to tinker with namespaces as they are created .
15,581
/**
 * If there is at least one call to an aggregate function in the select list,
 * returns the first; prefers the expanded select list when available.
 */
private SqlNode getAgg ( SqlSelect select ) {
    final SelectScope scope = getRawSelectScope ( select ) ;
    if ( scope != null ) {
        final List < SqlNode > expanded = scope . getExpandedSelectList ( ) ;
        if ( expanded != null ) {
            return aggFinder . findAgg ( expanded ) ;
        }
    }
    return aggFinder . findAgg ( select . getSelectList ( ) ) ;
}
If there is at least one call to an aggregate function returns the first .
15,582
// Registers any sub-queries inside a given call operand. If the operand is a
// query and the operator requires a scalar at that position, the operand is
// first wrapped in a SCALAR_QUERY call and written back into the parent call.
private void registerOperandSubQueries ( SqlValidatorScope parentScope , SqlCall call , int operandOrdinal ) { SqlNode operand = call . operand ( operandOrdinal ) ; if ( operand == null ) { return ; } if ( operand . getKind ( ) . belongsTo ( SqlKind . QUERY ) && call . getOperator ( ) . argumentMustBeScalar ( operandOrdinal ) ) { operand = SqlStdOperatorTable . SCALAR_QUERY . createCall ( operand . getParserPosition ( ) , operand ) ; call . setOperand ( operandOrdinal , operand ) ; } registerSubQueries ( parentScope , operand ) ; }
Registers any sub - queries inside a given call operand and converts the operand to a scalar sub - query if the operator requires it .
15,583
// Throws a validation error if the given clause contains an aggregate or
// windowed aggregate, choosing an error message specific to the offending
// call: OVER (windowed aggregate), group/group-auxiliary function, or a plain
// aggregate. Does nothing when no aggregate is found.
private void validateNoAggs ( AggFinder aggFinder , SqlNode node , String clause ) { final SqlCall agg = aggFinder . findAgg ( node ) ; if ( agg == null ) { return ; } final SqlOperator op = agg . getOperator ( ) ; if ( op == SqlStdOperatorTable . OVER ) { throw newValidationError ( agg , RESOURCE . windowedAggregateIllegalInClause ( clause ) ) ; } else if ( op . isGroup ( ) || op . isGroupAuxiliary ( ) ) { throw newValidationError ( agg , RESOURCE . groupFunctionMustAppearInGroupByClause ( op . getName ( ) ) ) ; } else { throw newValidationError ( agg , RESOURCE . aggregateIllegalInClause ( clause ) ) ; } }
Throws an error if there is an aggregate or windowed aggregate in the given clause .
15,584
// Validates a SELECT statement end-to-end: checks DISTINCT feature support,
// short-circuits '*'-only select lists to the target row type, rejects
// duplicate FROM aliases (case-normalized when the matcher is insensitive),
// then validates FROM, WHERE, GROUP BY, HAVING, WINDOW and OFFSET/FETCH in
// order, derives and records the select row type, validates ORDER BY, and
// finally runs roll-up checks on the individual clauses when applicable.
protected void validateSelect ( SqlSelect select , RelDataType targetRowType ) { assert targetRowType != null ; final SelectNamespace ns = getNamespace ( select ) . unwrap ( SelectNamespace . class ) ; assert ns . rowType == null ; if ( select . isDistinct ( ) ) { validateFeature ( RESOURCE . sQLFeature_E051_01 ( ) , select . getModifierNode ( SqlSelectKeyword . DISTINCT ) . getParserPosition ( ) ) ; } final SqlNodeList selectItems = select . getSelectList ( ) ; RelDataType fromType = unknownType ; if ( selectItems . size ( ) == 1 ) { final SqlNode selectItem = selectItems . get ( 0 ) ; if ( selectItem instanceof SqlIdentifier ) { SqlIdentifier id = ( SqlIdentifier ) selectItem ; if ( id . isStar ( ) && ( id . names . size ( ) == 1 ) ) { fromType = targetRowType ; } } } final SelectScope fromScope = ( SelectScope ) getFromScope ( select ) ; List < String > names = fromScope . getChildNames ( ) ; if ( ! catalogReader . nameMatcher ( ) . isCaseSensitive ( ) ) { names = Lists . transform ( names , s -> s . toUpperCase ( Locale . ROOT ) ) ; } final int duplicateAliasOrdinal = Util . firstDuplicate ( names ) ; if ( duplicateAliasOrdinal >= 0 ) { final ScopeChild child = fromScope . children . get ( duplicateAliasOrdinal ) ; throw newValidationError ( child . namespace . getEnclosingNode ( ) , RESOURCE . fromAliasDuplicate ( child . name ) ) ; } if ( select . getFrom ( ) == null ) { if ( conformance . isFromRequired ( ) ) { throw newValidationError ( select , RESOURCE . selectMissingFrom ( ) ) ; } } else { validateFrom ( select . getFrom ( ) , fromType , fromScope ) ; } validateWhereClause ( select ) ; validateGroupClause ( select ) ; validateHavingClause ( select ) ; validateWindowClause ( select ) ; handleOffsetFetch ( select . getOffset ( ) , select . getFetch ( ) ) ; final RelDataType rowType = validateSelectList ( selectItems , select , targetRowType ) ; ns . setType ( rowType ) ; validateOrderList ( select ) ; if ( shouldCheckForRollUp ( select . 
getFrom ( ) ) ) { checkRollUpInSelectList ( select ) ; checkRollUp ( null , select , select . getWhere ( ) , getWhereScope ( select ) ) ; checkRollUp ( null , select , select . getHaving ( ) , getHavingScope ( select ) ) ; checkRollUpInWindowDecl ( select ) ; checkRollUpInGroupBy ( select ) ; checkRollUpInOrderBy ( select ) ; } }
Validates a SELECT statement .
15,585
/**
 * Returns true iff the given column is valid inside the given aggregate call.
 * Columns that cannot be resolved to a table/column pair, or whose table
 * cannot be found, are considered valid.
 */
private boolean isRolledUpColumnAllowedInAgg ( SqlIdentifier identifier , SqlValidatorScope scope ,
        SqlCall aggCall , SqlNode parent ) {
    final Pair < String , String > pair = findTableColumnPair ( identifier , scope ) ;
    if ( pair == null ) {
        return true ;
    }
    final Table table = findTable ( pair . left ) ;
    if ( table == null ) {
        return true ;
    }
    return table . rolledUpColumnValidInsideAgg ( pair . right , aggCall , parent , catalogReader . getConfig ( ) ) ;
}
Returns true iff the given column is valid inside the given aggCall .
15,586
/**
 * Returns true iff the given column is actually rolled up. Columns that cannot
 * be resolved to a table/column pair, or whose table cannot be found, are not
 * considered rolled up.
 */
private boolean isRolledUpColumn ( SqlIdentifier identifier , SqlValidatorScope scope ) {
    final Pair < String , String > pair = findTableColumnPair ( identifier , scope ) ;
    if ( pair == null ) {
        return false ;
    }
    final Table table = findTable ( pair . left ) ;
    return table != null && table . isRolledUp ( pair . right ) ;
}
Returns true iff the given column is actually rolled up .
15,587
// Validates that a query can deliver the modality it promises. Only called on
// the top-most SELECT or set operator in the tree: VALUES cannot be streamed,
// and every input of a set operation must have the same modality as the
// operation itself (each input is validated recursively).
private void validateModality ( SqlNode query ) { final SqlModality modality = deduceModality ( query ) ; if ( query instanceof SqlSelect ) { final SqlSelect select = ( SqlSelect ) query ; validateModality ( select , modality , true ) ; } else if ( query . getKind ( ) == SqlKind . VALUES ) { switch ( modality ) { case STREAM : throw newValidationError ( query , Static . RESOURCE . cannotStreamValues ( ) ) ; } } else { assert query . isA ( SqlKind . SET_QUERY ) ; final SqlCall call = ( SqlCall ) query ; for ( SqlNode operand : call . getOperandList ( ) ) { if ( deduceModality ( operand ) != modality ) { throw newValidationError ( operand , Static . RESOURCE . streamSetOpInconsistentInputs ( ) ) ; } validateModality ( operand ) ; } } }
Validates that a query can deliver the modality it promises . Only called on the top - most SELECT or set operator in the tree .
15,588
/**
 * Returns the intended modality of a SELECT or set-op: STREAM when a SELECT
 * carries the STREAM keyword, RELATION for VALUES and plain SELECTs; set
 * operations inherit the modality of their first input.
 */
private SqlModality deduceModality ( SqlNode query ) {
    if ( query instanceof SqlSelect ) {
        final SqlSelect select = ( SqlSelect ) query ;
        if ( select . getModifierNode ( SqlSelectKeyword . STREAM ) != null ) {
            return SqlModality . STREAM ;
        }
        return SqlModality . RELATION ;
    }
    if ( query . getKind ( ) == SqlKind . VALUES ) {
        return SqlModality . RELATION ;
    }
    assert query . isA ( SqlKind . SET_QUERY ) ;
    final SqlCall call = ( SqlCall ) query ;
    return deduceModality ( call . getOperandList ( ) . get ( 0 ) ) ;
}
Return the intended modality of a SELECT or set - op .
15,589
/**
 * Returns whether the prefix (first item) of the ORDER BY list is sorted
 * compatibly with the underlying data.
 */
private boolean hasSortedPrefix ( SelectScope scope , SqlNodeList orderList ) {
    final SqlNode firstItem = orderList . get ( 0 ) ;
    return isSortCompatible ( scope , firstItem , false ) ;
}
Returns whether the prefix is sorted .
15,590
// Validates the ORDER BY clause of a SELECT statement: does nothing without an
// ORDER BY; rejects intermediate ORDER BY when disallowed (unless the select
// is a declared cursor); expands each order item in the order scope, writes
// the expanded list back onto the select, then validates every expanded item.
protected void validateOrderList ( SqlSelect select ) { SqlNodeList orderList = select . getOrderList ( ) ; if ( orderList == null ) { return ; } if ( ! shouldAllowIntermediateOrderBy ( ) ) { if ( ! cursorSet . contains ( select ) ) { throw newValidationError ( select , RESOURCE . invalidOrderByPos ( ) ) ; } } final SqlValidatorScope orderScope = getOrderScope ( select ) ; Objects . requireNonNull ( orderScope ) ; List < SqlNode > expandList = new ArrayList < > ( ) ; for ( SqlNode orderItem : orderList ) { SqlNode expandedOrderItem = expand ( orderItem , orderScope ) ; expandList . add ( expandedOrderItem ) ; } SqlNodeList expandedOrderList = new SqlNodeList ( expandList , orderList . getParserPosition ( ) ) ; select . setOrderBy ( expandedOrderList ) ; for ( SqlNode orderItem : expandedOrderList ) { validateOrderItem ( select , orderItem ) ; } }
Validates the ORDER BY clause of a SELECT statement .
15,591
/**
 * Validates an item in the GROUP BY clause of a SELECT statement, in the
 * select's group scope.
 */
private void validateGroupByItem ( SqlSelect select , SqlNode groupByItem ) {
    getGroupScope ( select ) . validateExpr ( groupByItem ) ;
}
Validates an item in the GROUP BY clause of a SELECT statement .
15,592
// Validates an item in the ORDER BY clause of a SELECT statement. A DESC item
// triggers a conformance feature check and is unwrapped and validated
// recursively; all other items are validated as expressions in the order scope.
private void validateOrderItem ( SqlSelect select , SqlNode orderItem ) { switch ( orderItem . getKind ( ) ) { case DESCENDING : validateFeature ( RESOURCE . sQLConformance_OrderByDesc ( ) , orderItem . getParserPosition ( ) ) ; validateOrderItem ( select , ( ( SqlCall ) orderItem ) . operand ( 0 ) ) ; return ; } final SqlValidatorScope orderScope = getOrderScope ( select ) ; validateExpr ( orderItem , orderScope ) ; }
Validates an item in the ORDER BY clause of a SELECT statement .
15,593
// Validates the GROUP BY clause of a SELECT statement; called even if no GROUP
// BY clause is present (returns immediately then). Rejects aggregates inside
// the clause, infers unknown operand types, expands each group item and writes
// the expanded list back onto the select, validates every item (GROUPING SETS
// / ROLLUP / CUBE nodes via validate(), others via validateExpr()), then runs
// per-item group validation against the aggregating select scope, and finally
// re-checks the expanded list for aggregates.
protected void validateGroupClause ( SqlSelect select ) { SqlNodeList groupList = select . getGroup ( ) ; if ( groupList == null ) { return ; } final String clause = "GROUP BY" ; validateNoAggs ( aggOrOverFinder , groupList , clause ) ; final SqlValidatorScope groupScope = getGroupScope ( select ) ; inferUnknownTypes ( unknownType , groupScope , groupList ) ; List < SqlNode > expandedList = new ArrayList < > ( ) ; for ( SqlNode groupItem : groupList ) { SqlNode expandedItem = expandGroupByOrHavingExpr ( groupItem , groupScope , select , false ) ; expandedList . add ( expandedItem ) ; } groupList = new SqlNodeList ( expandedList , groupList . getParserPosition ( ) ) ; select . setGroupBy ( groupList ) ; for ( SqlNode groupItem : expandedList ) { validateGroupByItem ( select , groupItem ) ; } for ( SqlNode node : groupList ) { switch ( node . getKind ( ) ) { case GROUPING_SETS : case ROLLUP : case CUBE : node . validate ( this , groupScope ) ; break ; default : node . validateExpr ( this , groupScope ) ; } } final SqlValidatorScope selectScope = getSelectScope ( select ) ; AggregatingSelectScope aggregatingScope = null ; if ( selectScope instanceof AggregatingSelectScope ) { aggregatingScope = ( AggregatingSelectScope ) selectScope ; } for ( SqlNode groupItem : groupList ) { if ( groupItem instanceof SqlNodeList && ( ( SqlNodeList ) groupItem ) . size ( ) == 0 ) { continue ; } validateGroupItem ( groupScope , aggregatingScope , groupItem ) ; } SqlNode agg = aggFinder . findAgg ( groupList ) ; if ( agg != null ) { throw new newValidationError ( agg , RESOURCE . aggregateIllegalInClause ( clause ) ) ; } }
Validates the GROUP BY clause of a SELECT statement . This method is called even if no GROUP BY clause is present .
15,594
// Processes a sub-query found in a select list: checks that it is actually a
// scalar (single-column) sub-query, then makes the proper entries in the three
// lists used to build the final row type — the expanded select items, the
// alias list, and the (alias, type) field list. The single column's type is
// made nullable, since a scalar sub-query may produce no row.
private void handleScalarSubQuery ( SqlSelect parentSelect , SqlSelect selectItem , List < SqlNode > expandedSelectItems , Set < String > aliasList , List < Map . Entry < String , RelDataType > > fieldList ) { if ( 1 != selectItem . getSelectList ( ) . size ( ) ) { throw newValidationError ( selectItem , RESOURCE . onlyScalarSubQueryAllowed ( ) ) ; } expandedSelectItems . add ( selectItem ) ; final String alias = deriveAlias ( selectItem , aliasList . size ( ) ) ; aliasList . add ( alias ) ; final SelectScope scope = ( SelectScope ) getWhereScope ( parentSelect ) ; final RelDataType type = deriveType ( scope , selectItem ) ; setValidatedNodeType ( selectItem , type ) ; assert type instanceof RelRecordType ; RelRecordType rec = ( RelRecordType ) type ; RelDataType nodeType = rec . getFieldList ( ) . get ( 0 ) . getType ( ) ; nodeType = typeFactory . createTypeWithNullability ( nodeType , true ) ; fieldList . add ( Pair . of ( alias , nodeType ) ) ; }
Processes SubQuery found in Select list . Checks that is actually Scalar sub - query and makes proper entries in each of the 3 lists used to create the final rowType entry .
15,595
// Derives a row type for INSERT and UPDATE operations. With no target column
// list, the table's full row type is returned. Otherwise, when 'append' is
// set all table fields are first added under ordinal-derived aliases, then
// each listed target column is resolved; unknown or duplicate target columns
// raise validation errors.
protected RelDataType createTargetRowType ( SqlValidatorTable table , SqlNodeList targetColumnList , boolean append ) { RelDataType baseRowType = table . getRowType ( ) ; if ( targetColumnList == null ) { return baseRowType ; } List < RelDataTypeField > targetFields = baseRowType . getFieldList ( ) ; final List < Map . Entry < String , RelDataType > > fields = new ArrayList < > ( ) ; if ( append ) { for ( RelDataTypeField targetField : targetFields ) { fields . add ( Pair . of ( SqlUtil . deriveAliasFromOrdinal ( fields . size ( ) ) , targetField . getType ( ) ) ) ; } } final Set < Integer > assignedFields = new HashSet < > ( ) ; final RelOptTable relOptTable = table instanceof RelOptTable ? ( ( RelOptTable ) table ) : null ; for ( SqlNode node : targetColumnList ) { SqlIdentifier id = ( SqlIdentifier ) node ; RelDataTypeField targetField = SqlValidatorUtil . getTargetField ( baseRowType , typeFactory , id , catalogReader , relOptTable ) ; if ( targetField == null ) { throw newValidationError ( id , RESOURCE . unknownTargetColumn ( id . toString ( ) ) ) ; } if ( ! assignedFields . add ( targetField . getIndex ( ) ) ) { throw newValidationError ( id , RESOURCE . duplicateTargetColumn ( targetField . getName ( ) ) ) ; } fields . add ( targetField ) ; } return typeFactory . createStructType ( fields ) ; }
Derives a row - type for INSERT and UPDATE operations .
15,596
// Validates INSERT values against the column constraints of a modifiable view:
// only applies when the target unwraps to a ModifiableViewTable and the source
// is a call (VALUES). For each target column that is both assigned and
// constrained by the view, every source row's value at that column is checked
// against the view's constraint, raising a view-constraint-not-satisfied
// error on violation.
private void checkConstraint ( SqlValidatorTable validatorTable , SqlNode source , RelDataType targetRowType ) { final ModifiableViewTable modifiableViewTable = validatorTable . unwrap ( ModifiableViewTable . class ) ; if ( modifiableViewTable != null && source instanceof SqlCall ) { final Table table = modifiableViewTable . unwrap ( Table . class ) ; final RelDataType tableRowType = table . getRowType ( typeFactory ) ; final List < RelDataTypeField > tableFields = tableRowType . getFieldList ( ) ; final Map < Integer , RelDataTypeField > tableIndexToTargetField = SqlValidatorUtil . getIndexToFieldMap ( tableFields , targetRowType ) ; final Map < Integer , RexNode > projectMap = RelOptUtil . getColumnConstraints ( modifiableViewTable , targetRowType , typeFactory ) ; final ImmutableBitSet targetColumns = ImmutableBitSet . of ( tableIndexToTargetField . keySet ( ) ) ; final ImmutableBitSet constrainedColumns = ImmutableBitSet . of ( projectMap . keySet ( ) ) ; final ImmutableBitSet constrainedTargetColumns = targetColumns . intersect ( constrainedColumns ) ; final List < SqlNode > values = ( ( SqlCall ) source ) . getOperandList ( ) ; for ( final int colIndex : constrainedTargetColumns . asList ( ) ) { final String colName = tableFields . get ( colIndex ) . getName ( ) ; final RelDataTypeField targetField = tableIndexToTargetField . get ( colIndex ) ; for ( SqlNode row : values ) { final SqlCall call = ( SqlCall ) row ; final SqlNode sourceValue = call . operand ( targetField . getIndex ( ) ) ; final ValidationError validationError = new ValidationError ( sourceValue , RESOURCE . viewConstraintNotSatisfied ( colName , Util . last ( validatorTable . getQualifiedName ( ) ) ) ) ; RelOptUtil . validateValueAgainstConstraint ( sourceValue , projectMap . get ( colIndex ) , validationError ) ; } } } }
Validates insert values against the constraint of a modifiable view .
15,597
/**
 * Validates UPDATE assignments against the constraints of a modifiable view.
 *
 * <p>Only applies when the target table unwraps to a {@link ModifiableViewTable};
 * otherwise this is a no-op. Each (target column, source expression) pair of the
 * UPDATE is checked against the view constraint on that column, if any.
 *
 * @param validatorTable the target table of the UPDATE
 * @param update         the UPDATE statement being validated
 * @param targetRowType  the row type the UPDATE writes to
 */
private void checkConstraint(
        SqlValidatorTable validatorTable, SqlUpdate update, RelDataType targetRowType) {
    final ModifiableViewTable viewTable = validatorTable.unwrap(ModifiableViewTable.class);
    if (viewTable == null) {
        // Not a modifiable view: no view constraints to enforce.
        return;
    }

    final Table underlyingTable = viewTable.unwrap(Table.class);
    final RelDataType underlyingRowType = underlyingTable.getRowType(typeFactory);

    // Constraints imposed by the view, keyed by underlying column index.
    final Map<Integer, RexNode> constraintByIndex =
            RelOptUtil.getColumnConstraints(viewTable, targetRowType, typeFactory);
    final Map<String, Integer> indexByName =
            SqlValidatorUtil.mapNameToIndex(underlyingRowType.getFieldList());

    final List<SqlNode> targetColumns = update.getTargetColumnList().getList();
    final List<SqlNode> sourceExprs = update.getSourceExpressionList().getList();
    for (final Pair<SqlNode, SqlNode> assignment : Pair.zip(targetColumns, sourceExprs)) {
        final String columnName = ((SqlIdentifier) assignment.left).getSimple();
        final Integer columnIndex = indexByName.get(columnName);
        if (!constraintByIndex.containsKey(columnIndex)) {
            continue; // This column carries no view constraint.
        }
        final RexNode constraint = constraintByIndex.get(columnIndex);
        final ValidationError error =
                new ValidationError(
                        assignment.right,
                        RESOURCE.viewConstraintNotSatisfied(
                                columnName, Util.last(validatorTable.getQualifiedName())));
        RelOptUtil.validateValueAgainstConstraint(assignment.right, constraint, error);
    }
}
Validates UPDATE assignments against the constraints of a modifiable view.
15,598
/**
 * Locates the n-th expression within an INSERT or UPDATE query, for use as the
 * position to report a validation error against.
 *
 * <p>Recurses from the statement into its source until a node that directly
 * carries {@code ordinal} can be found; falls back to returning the query node
 * itself when no finer-grained position is available.
 *
 * @param query       the query to search
 * @param ordinal     zero-based index of the expression to locate
 * @param sourceCount number of expressions the source is expected to supply;
 *                    used to confirm a SELECT list actually matches
 * @return the located expression, or {@code query} if it cannot be pinpointed
 */
private SqlNode getNthExpr(SqlNode query, int ordinal, int sourceCount) {
    if (query instanceof SqlInsert) {
        SqlInsert insert = (SqlInsert) query;
        SqlNodeList insertColumns = insert.getTargetColumnList();
        // Prefer the explicit column list; otherwise descend into the source.
        return insertColumns != null
                ? insertColumns.get(ordinal)
                : getNthExpr(insert.getSource(), ordinal, sourceCount);
    }
    if (query instanceof SqlUpdate) {
        SqlUpdate update = (SqlUpdate) query;
        if (update.getTargetColumnList() != null) {
            return update.getTargetColumnList().get(ordinal);
        }
        if (update.getSourceExpressionList() != null) {
            return update.getSourceExpressionList().get(ordinal);
        }
        return getNthExpr(update.getSourceSelect(), ordinal, sourceCount);
    }
    if (query instanceof SqlSelect) {
        SqlSelect select = (SqlSelect) query;
        // Only trust the select list when it supplies exactly the expected count.
        if (select.getSelectList().size() == sourceCount) {
            return select.getSelectList().get(ordinal);
        }
    }
    // No finer position available; report against the query as a whole.
    return query;
}
Locates the n-th expression in an INSERT or UPDATE query.
15,599
/**
 * Validates that the required kind of access to a table is permitted.
 *
 * <p>A null table is silently accepted (nothing to check).
 *
 * @param node           the parse-tree node to report an error against
 * @param table          the table being accessed, may be null
 * @param requiredAccess the kind of access the statement needs (e.g. SELECT, INSERT)
 * @throws org.apache.calcite.runtime.CalciteContextException if the table does
 *     not allow the required access
 */
private void validateAccess(
        SqlNode node, SqlValidatorTable table, SqlAccessEnum requiredAccess) {
    if (table == null) {
        return;
    }
    SqlAccessType allowed = table.getAllowedAccess();
    if (!allowed.allowsAccess(requiredAccess)) {
        throw newValidationError(
                node,
                RESOURCE.accessNotAllowed(
                        requiredAccess.name(), table.getQualifiedName().toString()));
    }
}
Validates that the required kind of access to a table is allowed.