idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
20,200
/**
 * Returns the position of {@code field} within this relation.
 *
 * @throws IllegalArgumentException if the field is not part of this relation
 */
public int indexOf(Field field)
{
    requireNonNull(field, "field cannot be null");
    Integer position = fieldIndexes.get(field);
    checkArgument(position != null, "Field %s not found", field);
    return position;
}
Gets the index of the specified field .
20,201
/**
 * Returns the field stored at {@code fieldIndex}.
 *
 * @throws IndexOutOfBoundsException if the index is out of range
 */
public Field getFieldByIndex(int fieldIndex)
{
    // bounds-check first so a bad index produces a descriptive error
    checkElementIndex(fieldIndex, allFields.size(), "fieldIndex");
    return allFields.get(fieldIndex);
}
Gets the field at the specified index .
20,202
/**
 * Returns every field of this relation that the given qualified name can
 * resolve to, in field order.
 */
public List<Field> resolveFields(QualifiedName name)
{
    ImmutableList.Builder<Field> matches = ImmutableList.builder();
    for (Field candidate : allFields) {
        if (candidate.canResolve(name)) {
            matches.add(candidate);
        }
    }
    return matches.build();
}
Gets all fields matching the specified name
20,203
/**
 * Creates a new relation type containing all fields of this relation followed
 * by all fields of {@code other}.
 */
public RelationType joinWith(RelationType other)
{
    ImmutableList.Builder<Field> combined = ImmutableList.builder();
    combined.addAll(this.allFields);
    combined.addAll(other.allFields);
    return new RelationType(combined.build());
}
Creates a new tuple descriptor containing all fields from this tuple descriptor and all fields from the specified tuple descriptor .
20,204
/**
 * Creates a new tuple descriptor with the relation aliased and, when
 * {@code columnAliases} is non-null, the visible columns renamed as well.
 * With explicit column aliases, hidden fields are dropped from the result.
 */
public RelationType withAlias(String relationAlias, List<String> columnAliases)
{
    if (columnAliases != null) {
        // aliases are validated against visible columns only
        checkArgument(columnAliases.size() == visibleFields.size(),
                "Column alias list has %s entries but '%s' has %s columns available",
                columnAliases.size(), relationAlias, visibleFields.size());
    }
    ImmutableList.Builder<Field> fieldsBuilder = ImmutableList.builder();
    for (int i = 0; i < allFields.size(); i++) {
        Field field = allFields.get(i);
        Optional<String> columnAlias = field.getName();
        if (columnAliases == null) {
            // no column aliases: keep the original name, just qualify it with the relation alias
            fieldsBuilder.add(Field.newQualified(QualifiedName.of(relationAlias), columnAlias, field.getType(), field.isHidden(), field.getOriginTable(), field.getOriginColumnName(), field.isAliased()));
        }
        else if (!field.isHidden()) {
            // NOTE(review): columnAliases is indexed by the position in allFields;
            // this assumes hidden fields appear only after all visible ones -- confirm
            // against how allFields is constructed
            columnAlias = Optional.of(columnAliases.get(i));
            fieldsBuilder.add(Field.newQualified(QualifiedName.of(relationAlias), columnAlias, field.getType(), false, field.getOriginTable(), field.getOriginColumnName(), field.isAliased()));
        }
    }
    return new RelationType(fieldsBuilder.build());
}
Creates a new tuple descriptor with the relation and optionally the columns aliased .
20,205
/**
 * Returns a copy of {@code node} with its children replaced, or the original
 * node unchanged when every child is already identical (by reference).
 */
public static PlanNode replaceChildren(PlanNode node, List<PlanNode> children)
{
    boolean changed = false;
    for (int i = 0; i < node.getSources().size(); i++) {
        if (children.get(i) != node.getSources().get(i)) {
            changed = true;
            break;
        }
    }
    return changed ? node.replaceChildren(children) : node;
}
Return an identical copy of the given node with its children replaced
20,206
/**
 * Emits bytecode that reads the value at "position" from a Block and pushes it
 * onto the operand stack, choosing the Type accessor that matches the Java
 * stack type of {@code parameter} (long, double, boolean, Slice, or Object).
 *
 * Assumes that there is a variable named "position" in the scope holding the
 * current row index.
 */
private static void pushStackType(Scope scope, BytecodeBlock block, Type sqlType, BytecodeBlock getBlockBytecode, Class<?> parameter, CallSiteBinder callSiteBinder)
{
    Variable position = scope.getVariable("position");
    if (parameter == long.class) {
        block.comment("%s.getLong(block, position)", sqlType.getTypeSignature())
                .append(constantType(callSiteBinder, sqlType))
                .append(getBlockBytecode)
                .append(position)
                .invokeInterface(Type.class, "getLong", long.class, Block.class, int.class);
    }
    else if (parameter == double.class) {
        block.comment("%s.getDouble(block, position)", sqlType.getTypeSignature())
                .append(constantType(callSiteBinder, sqlType))
                .append(getBlockBytecode)
                .append(position)
                .invokeInterface(Type.class, "getDouble", double.class, Block.class, int.class);
    }
    else if (parameter == boolean.class) {
        block.comment("%s.getBoolean(block, position)", sqlType.getTypeSignature())
                .append(constantType(callSiteBinder, sqlType))
                .append(getBlockBytecode)
                .append(position)
                .invokeInterface(Type.class, "getBoolean", boolean.class, Block.class, int.class);
    }
    else if (parameter == Slice.class) {
        block.comment("%s.getSlice(block, position)", sqlType.getTypeSignature())
                .append(constantType(callSiteBinder, sqlType))
                .append(getBlockBytecode)
                .append(position)
                .invokeInterface(Type.class, "getSlice", Slice.class, Block.class, int.class);
    }
    else {
        // any other parameter type is read as a boxed Object
        block.comment("%s.getObject(block, position)", sqlType.getTypeSignature())
                .append(constantType(callSiteBinder, sqlType))
                .append(getBlockBytecode)
                .append(position)
                .invokeInterface(Type.class, "getObject", Object.class, Block.class, int.class);
    }
}
Assumes that there is a variable named position in the block which is the current index
20,207
/**
 * Generates a bytecode for-loop over every position of the variable "block".
 * The given {@code loopBody} is executed only for non-null positions.
 */
private static BytecodeBlock generateBlockNonNullPositionForLoop(Scope scope, Variable positionVariable, BytecodeBlock loopBody)
{
    // rows = block.getPositionCount()
    Variable rowsVariable = scope.declareVariable(int.class, "rows");
    Variable blockVariable = scope.getVariable("block");
    BytecodeBlock block = new BytecodeBlock()
            .append(blockVariable)
            .invokeInterface(Block.class, "getPositionCount", int.class)
            .putVariable(rowsVariable);
    // guard: run loopBody only when !block.isNull(position)
    IfStatement ifStatement = new IfStatement("if(!block.isNull(position))")
            .condition(new BytecodeBlock()
                    .append(blockVariable)
                    .append(positionVariable)
                    .invokeInterface(Block.class, "isNull", boolean.class, int.class))
            .ifFalse(loopBody);
    // for (position = 0; position < rows; position++) { guard }
    block.append(new ForLoop()
            .initialize(positionVariable.set(constantInt(0)))
            .condition(new BytecodeBlock()
                    .append(positionVariable)
                    .append(rowsVariable)
                    .invokeStatic(CompilerOperations.class, "lessThan", boolean.class, int.class, int.class))
            .update(new BytecodeBlock().incrementVariable(positionVariable, (byte) 1))
            .body(ifStatement));
    return block;
}
loopBody will only be executed for non - null positions in the Block
20,208
/**
 * Delegates to the wrapped MBean server, but when an MBean is already
 * registered under the given name, returns the existing instance instead of
 * failing. Loops to handle the race where the existing MBean is unregistered
 * between the register attempt and the lookup.
 */
public ObjectInstance registerMBean(Object object, ObjectName name)
        throws MBeanRegistrationException, NotCompliantMBeanException
{
    while (true) {
        try {
            return mbeanServer.registerMBean(object, name);
        }
        catch (InstanceAlreadyExistsException ignored) {
            // fall through: someone registered this name first -- look it up below
        }
        try {
            ObjectInstance objectInstance = mbeanServer.getObjectInstance(name);
            log.debug("%s already bound to %s", name, objectInstance);
            return objectInstance;
        }
        catch (InstanceNotFoundException ignored) {
            // it was unregistered in the meantime; retry registration
        }
    }
}
Delegates to the wrapped mbean server but if a mbean is already registered with the specified name the existing instance is returned .
20,209
/**
 * Sets the element of the given big array at the specified index.
 */
public static void set(final int[][] array, final long index, int value)
{
    // locate the segment, then the displacement within that segment
    final int[] page = array[segment(index)];
    page[displacement(index)] = value;
}
Sets the element of the given big array at the specified index .
20,210
/**
 * Swaps the elements of the given big array at the specified indices.
 */
public static void swap(final int[][] array, final long first, final long second)
{
    final int firstSegment = segment(first);
    final int firstOffset = displacement(first);
    final int secondSegment = segment(second);
    final int secondOffset = displacement(second);
    final int saved = array[firstSegment][firstOffset];
    array[firstSegment][firstOffset] = array[secondSegment][secondOffset];
    array[secondSegment][secondOffset] = saved;
}
Swaps the elements of the given big array at the specified indices .
20,211
/**
 * Merges the given string statistics into this builder. This method can only
 * be used when merging stats: unlike per-value updates, either min or max may
 * be null here (but not both).
 */
private void addStringStatistics(long valueCount, StringStatistics value)
{
    requireNonNull(value, "value is null");
    checkArgument(valueCount > 0, "valueCount is 0");
    checkArgument(value.getMin() != null || value.getMax() != null, "min and max cannot both be null");
    if (nonNullValueCount == 0) {
        // first batch: adopt the incoming bounds as-is
        checkState(minimum == null && maximum == null);
        minimum = value.getMin();
        maximum = value.getMax();
    }
    else {
        // once a bound becomes null it stays null: the bound is unknown from then on
        if (minimum != null && (value.getMin() == null || minimum.compareTo(value.getMin()) > 0)) {
            minimum = value.getMin();
        }
        if (maximum != null && (value.getMax() == null || maximum.compareTo(value.getMax()) < 0)) {
            maximum = value.getMax();
        }
    }
    nonNullValueCount += valueCount;
    // addExact so an overflowing sum fails loudly instead of wrapping
    sum = addExact(sum, value.getSum());
}
This method can only be used when merging stats . It assumes min or max may be null .
20,212
/**
 * SQL function "fail": decodes a serialized FailureInfo from JSON and rethrows
 * it wrapped as a generic user error. Never returns normally.
 */
@Description("Decodes json to an exception and throws it")
@ScalarFunction(value = "fail", hidden = true)
@SqlType("unknown")
public static boolean failWithException(@SqlType(StandardTypes.JSON) Slice failureInfoSlice)
{
    FailureInfo decodedFailure = JSON_CODEC.fromJson(failureInfoSlice.getBytes());
    throw new PrestoException(StandardErrorCode.GENERIC_USER_ERROR, decodedFailure.toException());
}
We shouldn't be using UNKNOWN as an explicit type . This will be fixed when we fix type inference
20,213
/**
 * Reads {@code n} bytes from the stream and assembles them into a long in
 * big-endian order (the first byte read becomes the most significant).
 *
 * @throws EOFException if the stream ends before {@code n} bytes are read
 * @throws IOException if the underlying stream fails
 */
private static long bytesToLongBE(InputStream input, int n)
        throws IOException
{
    long out = 0;
    while (n > 0) {
        n--;
        int val = input.read();
        if (val == -1) {
            // the original OR'ed the EOF marker (-1) into the result,
            // silently corrupting the value on a truncated stream
            throw new EOFException("Unexpected end of stream");
        }
        out |= ((long) val << (n * 8));
    }
    return out;
}
Read n bytes in big endian order and convert to long .
20,214
/**
 * Returns the number of fractional digits (DECIMAL_DIGITS) for a decimal type,
 * or null for any other type.
 */
private static Integer decimalDigits(Type type)
{
    if (!(type instanceof DecimalType)) {
        return null;
    }
    return ((DecimalType) type).getScale();
}
DECIMAL_DIGITS is the number of fractional digits
20,215
/**
 * Loads the internal schema document for a table. If no schema entry exists
 * but the backing collection does, a schema document is inferred from the
 * collection, stored (with a unique index on the table name), and returned.
 *
 * @throws TableNotFoundException if neither a schema entry nor a collection exists
 */
private Document getTableMetadata(SchemaTableName schemaTableName)
        throws TableNotFoundException
{
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    MongoDatabase db = client.getDatabase(schemaName);
    MongoCollection<Document> schema = db.getCollection(schemaCollection);
    Document existing = schema.find(new Document(TABLE_NAME_KEY, tableName)).first();
    if (existing != null) {
        return existing;
    }
    if (!collectionExists(db, tableName)) {
        throw new TableNotFoundException(schemaTableName);
    }
    // no schema entry yet: infer one from the collection and persist it
    Document metadata = new Document(TABLE_NAME_KEY, tableName);
    metadata.append(FIELDS_KEY, guessTableFields(schemaTableName));
    schema.createIndex(new Document(TABLE_NAME_KEY, 1), new IndexOptions().unique(true));
    schema.insertOne(metadata);
    return metadata;
}
Internal Schema management
20,216
/**
 * Returns whether this aggregation node corresponds to a SQL DISTINCT: no
 * aggregate functions, and the outputs are exactly the grouping keys.
 */
private static boolean isDistinct(AggregationNode node)
{
    if (!node.getAggregations().isEmpty()) {
        return false;
    }
    if (node.getOutputSymbols().size() != node.getGroupingKeys().size()) {
        return false;
    }
    return node.getOutputSymbols().containsAll(node.getGroupingKeys());
}
Whether this node corresponds to a DISTINCT operation in SQL
20,217
/**
 * Creates an array block directly from columnar nulls, offsets, and element
 * values. A null array position must have zero entries.
 *
 * @throws IllegalArgumentException if offsets are not monotonically ascending
 *         or a null position has entries
 */
public static Block fromElementBlock(int positionCount, Optional<boolean[]> valueIsNull, int[] arrayOffset, Block values)
{
    boolean[] nulls = valueIsNull.orElse(null);
    validateConstructorArguments(0, positionCount, nulls, arrayOffset, values);
    for (int position = 0; position < positionCount; position++) {
        int entryCount = arrayOffset[position + 1] - arrayOffset[position];
        if (entryCount < 0) {
            throw new IllegalArgumentException(format("Offset is not monotonically ascending. offsets[%s]=%s, offsets[%s]=%s", position, arrayOffset[position], position + 1, arrayOffset[position + 1]));
        }
        if (nulls != null && nulls[position] && entryCount != 0) {
            throw new IllegalArgumentException("A null array must have zero entries");
        }
    }
    return new ArrayBlock(0, positionCount, nulls, arrayOffset, values);
}
Create an array block directly from columnar nulls values and offsets into the values . A null array must have no entries .
20,218
/**
 * Looks up a child column, first by exact name, then case-insensitively.
 * Parquet column names are case-sensitive while Hive lowercases names, so the
 * case-insensitive pass catches columns the exact lookup misses.
 * Returns null when no child matches.
 */
public static ColumnIO lookupColumnByName(GroupColumnIO groupColumnIO, String columnName)
{
    ColumnIO exactMatch = groupColumnIO.getChild(columnName);
    if (exactMatch != null) {
        return exactMatch;
    }
    for (int child = 0; child < groupColumnIO.getChildrenCount(); child++) {
        ColumnIO candidate = groupColumnIO.getChild(child);
        if (candidate.getName().equalsIgnoreCase(columnName)) {
            return candidate;
        }
    }
    return null;
}
Parquet column names are case - sensitive unlike Hive which converts all column names to lowercase . Therefore when we look up columns we first check for exact match and if that fails we look for a case - insensitive match .
20,219
/**
 * Adds all counts from {@code other} into {@code map} and returns the
 * estimated growth in retained size of {@code map}, in bytes.
 */
private static int mergeMaps(Map<String, Integer> map, Map<String, Integer> other)
{
    int deltaSize = 0;
    for (Map.Entry<String, Integer> entry : other.entrySet()) {
        if (!map.containsKey(entry.getKey())) {
            // new key: account for the key's bytes plus one int for the value
            // NOTE(review): getBytes() uses the platform default charset -- confirm
            // whether an explicit charset (e.g. UTF-8) is intended for this estimate
            deltaSize += entry.getKey().getBytes().length + SIZE_OF_INT;
        }
        map.put(entry.getKey(), map.getOrDefault(entry.getKey(), 0) + other.getOrDefault(entry.getKey(), 0));
    }
    return deltaSize;
}
Returns the estimated memory increase in map
20,220
/**
 * Determines whether the table pre-existed the transaction or was created in
 * it. This method can only be called when the table is known to exist.
 *
 * @throws TableNotFoundException if the table was dropped in this transaction
 */
@GuardedBy("this")
private TableSource getTableSource(String databaseName, String tableName)
{
    checkHoldsLock();
    checkReadable();
    Action<TableAndMore> tableAction = tableActions.get(new SchemaTableName(databaseName, tableName));
    if (tableAction == null) {
        // no pending action: the table existed before the transaction
        return TableSource.PRE_EXISTING_TABLE;
    }
    switch (tableAction.getType()) {
        case ADD:
            return TableSource.CREATED_IN_THIS_TRANSACTION;
        case ALTER:
            throw new IllegalStateException("Tables are never altered in the current implementation");
        case DROP:
            throw new TableNotFoundException(new SchemaTableName(databaseName, tableName));
        case INSERT_EXISTING:
            return TableSource.PRE_EXISTING_TABLE;
        default:
            throw new IllegalStateException("Unknown action type");
    }
}
This method can only be called when the table is known to exist
20,221
/**
 * Builds compaction sets for the table; all provided shards are considered,
 * grouped into day buckets first.
 */
public Set<OrganizationSet> createCompactionSets(Table tableInfo, Collection<ShardIndexInfo> shards)
{
    ImmutableSet.Builder<OrganizationSet> result = ImmutableSet.builder();
    for (Collection<ShardIndexInfo> bucket : getShardsByDaysBuckets(tableInfo, shards, temporalFunction)) {
        result.addAll(buildCompactionSets(tableInfo, ImmutableSet.copyOf(bucket)));
    }
    return result.build();
}
All shards provided to this method will be considered for creating a compaction set .
20,222
/**
 * Creates a row block directly from columnar nulls and field blocks. Offsets
 * are derived by skipping one field-block entry for each null row.
 */
public static Block fromFieldBlocks(int positionCount, Optional<boolean[]> rowIsNull, Block[] fieldBlocks)
{
    boolean[] nulls = rowIsNull.orElse(null);
    int[] offsets = new int[positionCount + 1];
    for (int position = 0; position < positionCount; position++) {
        boolean isNull = nulls != null && nulls[position];
        offsets[position + 1] = offsets[position] + (isNull ? 0 : 1);
    }
    validateConstructorArguments(0, positionCount, nulls, offsets, fieldBlocks);
    return new RowBlock(0, positionCount, nulls, offsets, fieldBlocks);
}
Create a row block directly from columnar nulls and field blocks .
20,223
/**
 * Returns whether the file starts with the ORC magic bytes.
 */
private static boolean isValidHeaderMagic(OrcDataSource source)
        throws IOException
{
    byte[] header = new byte[MAGIC.length()];
    source.readFully(0, header);
    return MAGIC.equals(Slices.wrappedBuffer(header));
}
Does the file start with the ORC magic bytes?
20,224
/**
 * Flattens this subplan and returns all PlanFragments in the tree, this
 * fragment first, then each child's fragments depth-first.
 */
public List<PlanFragment> getAllFragments()
{
    ImmutableList.Builder<PlanFragment> result = ImmutableList.builder();
    result.add(getFragment());
    for (SubPlan child : getChildren()) {
        result.addAll(child.getAllFragments());
    }
    return result.build();
}
Flattens the subplan and returns all PlanFragments in the tree
20,225
/**
 * Returns all remote tasks across every task set. Used for query-info
 * building, which is independent of scheduling work.
 */
public List<RemoteTask> getAllTasks()
{
    ImmutableList.Builder<RemoteTask> allTasks = ImmutableList.builder();
    for (Set<RemoteTask> taskSet : tasks.values()) {
        allTasks.addAll(taskSet);
    }
    return allTasks.build();
}
this is used for query info building which should be independent of scheduling work
20,226
/**
 * Returns the grouping columns common to every grouping set, in terms of
 * output symbols.
 */
public Set<Symbol> getCommonGroupingColumns()
{
    // start from the first set and intersect with each remaining one
    Set<Symbol> common = new HashSet<>(groupingSets.get(0));
    for (int setIndex = 1; setIndex < groupingSets.size(); setIndex++) {
        common.retainAll(groupingSets.get(setIndex));
    }
    return ImmutableSet.copyOf(common);
}
returns the common grouping columns in terms of output symbols
20,227
/**
 * Computes the deep size of an instance of {@code clazz}, assuming all
 * reference fields are non-null, by adding the shallow instance size to the
 * deep sizes of every reference field's declared type.
 *
 * @throws IllegalArgumentException for arrays, interfaces, abstract classes,
 *         or classes extending anything other than Object
 */
private static int deepInstanceSize(Class<?> clazz)
{
    if (clazz.isArray()) {
        // array length is per-instance, so a static size cannot be computed
        throw new IllegalArgumentException(format("Cannot determine size of %s because it contains an array", clazz.getSimpleName()));
    }
    if (clazz.isInterface()) {
        throw new IllegalArgumentException(format("%s is an interface", clazz.getSimpleName()));
    }
    if (Modifier.isAbstract(clazz.getModifiers())) {
        throw new IllegalArgumentException(format("%s is abstract", clazz.getSimpleName()));
    }
    if (!clazz.getSuperclass().equals(Object.class)) {
        throw new IllegalArgumentException(format("Cannot determine size of a subclass. %s extends from %s", clazz.getSimpleName(), clazz.getSuperclass().getSimpleName()));
    }
    int size = ClassLayout.parseClass(clazz).instanceSize();
    for (Field field : clazz.getDeclaredFields()) {
        if (!field.getType().isPrimitive()) {
            // recurse into the declared type of each reference field
            size += deepInstanceSize(field.getType());
        }
    }
    return size;
}
Computes the size of an instance of this class assuming that all reference fields are non - null
20,228
/**
 * If there is no buffered data, attempts to load pages from the supplier.
 * Returns true when data was added or the supplier is exhausted.
 */
private boolean loadPagesIfNecessary(PagesSupplier pagesSupplier, DataSize maxSize)
{
    // NOTE(review): the dereference below is deliberately done outside the lock,
    // presumably because it can trigger callbacks -- hence this guard
    checkState(!Thread.holdsLock(this), "Can not load pages while holding a lock on this");
    boolean dataAddedOrNoMorePages;
    List<SerializedPageReference> pageReferences;
    synchronized (this) {
        if (noMorePages) {
            return false;
        }
        if (!pages.isEmpty()) {
            // data is already buffered; nothing to load
            return false;
        }
        pageReferences = pagesSupplier.getPages(maxSize);
        addPages(pageReferences);
        if (!pagesSupplier.mayHaveMorePages()) {
            noMorePages = true;
        }
        dataAddedOrNoMorePages = !pageReferences.isEmpty() || noMorePages;
    }
    // release the supplier's reference to each page outside the lock
    pageReferences.forEach(SerializedPageReference::dereferencePage);
    return dataAddedOrNoMorePages;
}
If there is no data , attempt to load some from the pages supplier .
20,229
/**
 * Drops all pages up to (but not including) the given sequence id and releases
 * their references. Stale acknowledgements are ignored.
 */
public void acknowledgePages(long sequenceId)
{
    checkArgument(sequenceId >= 0, "Invalid sequence id");
    // collect removed pages under the lock, dereference them after releasing it
    List<SerializedPageReference> removedPages = new ArrayList<>();
    synchronized (this) {
        if (destroyed.get()) {
            return;
        }
        long oldCurrentSequenceId = currentSequenceId.get();
        if (sequenceId < oldCurrentSequenceId) {
            // out-of-date acknowledgement; nothing to do
            return;
        }
        int pagesToRemove = toIntExact(sequenceId - oldCurrentSequenceId);
        checkArgument(pagesToRemove <= pages.size(), "Invalid sequence id");
        long bytesRemoved = 0;
        for (int i = 0; i < pagesToRemove; i++) {
            SerializedPageReference removedPage = pages.removeFirst();
            removedPages.add(removedPage);
            bytesRemoved += removedPage.getRetainedSizeInBytes();
        }
        // advance the sequence id and shrink the buffered byte count accordingly
        verify(currentSequenceId.compareAndSet(oldCurrentSequenceId, oldCurrentSequenceId + pagesToRemove));
        verify(bufferedBytes.addAndGet(-bytesRemoved) >= 0);
    }
    removedPages.forEach(SerializedPageReference::dereferencePage);
}
Drops pages up to the specified sequence id
20,230
/**
 * Finds the beginning of the first full sync sequence that starts within
 * [offset, offset + length) of the data source, or -1 if none does.
 */
public static long findFirstSyncPosition(RcFileDataSource dataSource, long offset, long length, long syncFirst, long syncSecond)
        throws IOException
{
    requireNonNull(dataSource, "dataSource is null");
    checkArgument(offset >= 0, "offset is negative");
    checkArgument(length >= 1, "length must be at least 1");
    checkArgument(offset + length <= dataSource.getSize(), "offset plus length is greater than data size");
    // build the sync pattern: a -1 int marker followed by the two sync longs
    Slice sync = Slices.allocate(SIZE_OF_INT + SIZE_OF_LONG + SIZE_OF_LONG);
    sync.setInt(0, 0xFFFF_FFFF);
    sync.setLong(SIZE_OF_INT, syncFirst);
    sync.setLong(SIZE_OF_INT + SIZE_OF_LONG, syncSecond);
    // scan in chunks of up to 4MB, advancing by (chunk - (sync length - 1)) so a
    // sequence straddling a chunk boundary is still found
    byte[] buffer = new byte[toIntExact(min(1 << 22, length + (SYNC_SEQUENCE_LENGTH - 1)))];
    Slice bufferSlice = Slices.wrappedBuffer(buffer);
    for (long position = 0; position < length; position += bufferSlice.length() - (SYNC_SEQUENCE_LENGTH - 1)) {
        // a sync may start inside the range but extend past it, so read a bit beyond
        int bufferSize = toIntExact(min(buffer.length, length + (SYNC_SEQUENCE_LENGTH - 1) - position));
        // ...but never past the end of the file
        bufferSize = toIntExact(min(bufferSize, dataSource.getSize() - offset - position));
        dataSource.readFully(offset + position, buffer, 0, bufferSize);
        int index = bufferSlice.indexOf(sync);
        if (index >= 0) {
            if (position + index < length) {
                long startOfSyncSequence = offset + position + index;
                return startOfSyncSequence;
            }
            else {
                // the first match begins after the requested range
                return -1;
            }
        }
    }
    return -1;
}
Find the beginning of the first full sync sequence that starts within the specified range .
20,231
/**
 * Adds the value at {@code position} of {@code block} to the table if not
 * already present, and returns a pointer that is identical for all instances
 * of an equal value.
 */
public int addAndGetPosition(Type type, Block block, int position, long valueHash)
{
    if (values.getPositionCount() >= maxFill) {
        rehash();
    }
    int bucketId = getBucketId(valueHash, mask);
    int valuePointer;
    // open addressing: probe until we find the value or an empty bucket
    int probeCount = 1;
    int originalBucketId = bucketId;
    while (true) {
        checkState(probeCount < bucketCount, "could not find match for value nor empty slot in %s buckets", bucketCount);
        valuePointer = buckets.get(bucketId);
        if (valuePointer == EMPTY_BUCKET) {
            // empty slot: append the value and use its position as the pointer
            valuePointer = values.getPositionCount();
            valueHashes.set(valuePointer, (int) valueHash);
            type.appendTo(block, position, values);
            buckets.set(bucketId, valuePointer);
            return valuePointer;
        }
        else if (type.equalTo(block, position, values, valuePointer)) {
            // value already present: return the existing pointer
            return valuePointer;
        }
        else {
            // collision: advance to the next probe bucket
            int probe = nextProbe(probeCount);
            bucketId = nextBucketId(originalBucketId, mask, probe);
            probeCount++;
        }
    }
}
This will add an item if not already in the system . It returns a pointer that is unique for multiple instances of the value . If item present returns the pointer into the system
20,232
/**
 * Collects the data for the file footer: metadata, footer, postscript, and the
 * trailing one-byte postscript length. These are not the serialized bytes
 * themselves but OrcDataOutput functions that know how to write the data.
 */
private List<OrcDataOutput> bufferFileFooter()
        throws IOException
{
    List<OrcDataOutput> outputData = new ArrayList<>();
    // per-stripe statistics metadata section
    Metadata metadata = new Metadata(closedStripes.stream()
            .map(ClosedStripe::getStatistics)
            .collect(toList()));
    Slice metadataSlice = metadataWriter.writeMetadata(metadata);
    outputData.add(createDataOutput(metadataSlice));
    long numberOfRows = closedStripes.stream()
            .mapToLong(stripe -> stripe.getStripeInformation().getNumberOfRows())
            .sum();
    // file-level column statistics merged from the stripe statistics
    List<ColumnStatistics> fileStats = toFileStats(closedStripes.stream()
            .map(ClosedStripe::getStatistics)
            .map(StripeStatistics::getColumnStatistics)
            .collect(toList()));
    recordValidation(validation -> validation.setFileStatistics(fileStats));
    Map<String, Slice> userMetadata = this.userMetadata.entrySet().stream()
            .collect(Collectors.toMap(Entry::getKey, entry -> utf8Slice(entry.getValue())));
    Footer footer = new Footer(numberOfRows, rowGroupMaxRowCount, closedStripes.stream()
            .map(ClosedStripe::getStripeInformation)
            .collect(toList()), orcTypes, fileStats, userMetadata);
    // the footer now owns the stripe information; release the buffered stripes
    closedStripes.clear();
    closedStripesRetainedBytes = 0;
    Slice footerSlice = metadataWriter.writeFooter(footer);
    outputData.add(createDataOutput(footerSlice));
    recordValidation(validation -> validation.setVersion(metadataWriter.getOrcMetadataVersion()));
    Slice postscriptSlice = metadataWriter.writePostscript(footerSlice.length(), metadataSlice.length(), compression, maxCompressionBufferSize);
    outputData.add(createDataOutput(postscriptSlice));
    // the very last byte of the file is the postscript length
    outputData.add(createDataOutput(Slices.wrappedBuffer((byte) postscriptSlice.length())));
    return outputData;
}
Collects the data for the file footer . This is not the actual data , but instead functions that know how to write the data .
20,233
/**
 * Iterates over all non-null nodes of the given group, invoking the reader for
 * each node pointer in linked-list order.
 */
private void iterateGroupNodes(long groupId, NodeReader nodeReader)
{
    int node = (int) headPointers.get(groupId);
    checkArgument(node != NULL, "valid group must have non-null head pointer");
    while (node != NULL) {
        // guard against following a corrupt pointer past the allocated nodes
        checkState(node < nextNodePointer, "error, corrupt pointer; max valid %s, found %s", nextNodePointer, node);
        nodeReader.read(node);
        node = nextPointers.get(node);
    }
}
used to iterate over all non - null nodes in the data structure
20,234
/**
 * Grows every per-node parallel array so each can hold at least
 * {@code newBucketCount} entries.
 */
private void resizeNodeArrays(int newBucketCount)
{
    // all of these arrays are indexed by node pointer and must stay the same size
    counts.ensureCapacity(newBucketCount);
    valuePositions.ensureCapacity(newBucketCount);
    nextPointers.ensureCapacity(newBucketCount);
    valueAndGroupHashes.ensureCapacity(newBucketCount);
    groupIds.ensureCapacity(newBucketCount);
}
Resizes the parallel arrays that hold per - node data
20,235
/**
 * Finds the parsed aggregation on {@code clazz} whose signature matches the
 * given return and argument types exactly. General-purpose function matching
 * is done through FunctionRegistry.
 *
 * @throws IllegalArgumentException if no definition matches
 */
public static ParametricAggregation parseFunctionDefinitionWithTypesConstraint(Class<?> clazz, TypeSignature returnType, List<TypeSignature> argumentTypes)
{
    requireNonNull(returnType, "returnType is null");
    requireNonNull(argumentTypes, "argumentTypes is null");
    for (ParametricAggregation candidate : parseFunctionDefinitions(clazz)) {
        if (!candidate.getSignature().getReturnType().equals(returnType)) {
            continue;
        }
        if (!candidate.getSignature().getArgumentTypes().equals(argumentTypes)) {
            continue;
        }
        return candidate;
    }
    throw new IllegalArgumentException(String.format("No method with return type %s and arguments %s", returnType, argumentTypes));
}
General purpose function matching is done through FunctionRegistry .
20,236
/**
 * Schedules one driver runner per driver instance for every task-lifecycle
 * factory, then marks those factories as having no more runners.
 */
private void scheduleDriversForTaskLifeCycle()
{
    List<DriverSplitRunner> driverRunners = new ArrayList<>();
    for (DriverSplitRunnerFactory factory : driverRunnerFactoriesWithTaskLifeCycle) {
        int instances = factory.getDriverInstances().orElse(1);
        for (int instance = 0; instance < instances; instance++) {
            driverRunners.add(factory.createDriverRunner(null, Lifespan.taskWide()));
        }
    }
    enqueueDriverSplitRunner(true, driverRunners);
    // no further runners will ever be created for the task-wide lifespan
    for (DriverSplitRunnerFactory factory : driverRunnerFactoriesWithTaskLifeCycle) {
        factory.noMoreDriverRunner(ImmutableList.of(Lifespan.taskWide()));
        verify(factory.isNoMoreDriverRunner());
    }
}
They also have a few differences making it more convenient to keep the two methods separate .
20,237
/**
 * For tests only: resolves symbols to constants and optimizes the result.
 * Production code should not replace symbols; use the other interface.
 */
public Object optimize(SymbolResolver inputs)
{
    checkState(optimize, "optimize(SymbolResolver) not allowed for interpreter");
    Object value = expression.accept(visitor, inputs);
    if (value instanceof RowExpression) {
        // partially-resolved expressions get a second optimization pass
        return new ExpressionOptimizer(metadata.getFunctionManager(), session).optimize((RowExpression) value);
    }
    return value;
}
For test only ; convenient to replace symbol with constants . Production code should not replace any symbols ; use the interface above
20,238
/**
 * Adds the magnitudes of left and right, ignoring their signs, packs the
 * in-range bits into {@code result} with the requested sign, and returns the
 * overflow value.
 */
private static long addUnsignedReturnOverflow(Slice left, Slice right, Slice result, boolean resultNegative)
{
    // unpack both operands into four 32-bit limbs
    int l0 = getInt(left, 0);
    int l1 = getInt(left, 1);
    int l2 = getInt(left, 2);
    int l3 = getInt(left, 3);
    int r0 = getInt(right, 0);
    int r1 = getInt(right, 1);
    int r2 = getInt(right, 2);
    int r3 = getInt(right, 3);
    // ripple-carry addition one limb at a time; the high half of
    // intermediateResult carries into the next limb
    long intermediateResult;
    intermediateResult = (l0 & LONG_MASK) + (r0 & LONG_MASK);
    int z0 = (int) intermediateResult;
    intermediateResult = (l1 & LONG_MASK) + (r1 & LONG_MASK) + (intermediateResult >>> 32);
    int z1 = (int) intermediateResult;
    intermediateResult = (l2 & LONG_MASK) + (r2 & LONG_MASK) + (intermediateResult >>> 32);
    int z2 = (int) intermediateResult;
    intermediateResult = (l3 & LONG_MASK) + (r3 & LONG_MASK) + (intermediateResult >>> 32);
    // the sign bit of the top limb is reserved; it is returned as overflow instead
    int z3 = (int) intermediateResult & (~SIGN_INT_MASK);
    pack(result, z0, z1, z2, z3, resultNegative);
    return intermediateResult >> 31;
}
This method ignores signs of the left and right . Returns overflow value .
20,239
/**
 * Extracts the S3 bucket name from a URI. Works around java.net.URI setting
 * the host to null for bucket names containing '_' (legal only in us-east-1):
 * for those, the bucket is recovered from the authority instead.
 *
 * @throws IllegalArgumentException if the URI carries user info, which makes
 *         the authority unusable as a bucket name
 */
public static String getBucketName(URI uri)
{
    String host = uri.getHost();
    if (host != null) {
        return host;
    }
    if (uri.getUserInfo() != null) {
        throw new IllegalArgumentException("Unable to determine S3 bucket from URI.");
    }
    return uri.getAuthority();
}
Helper function used to work around the fact that if you use an S3 bucket with an _ that java . net . URI behaves differently and sets the host value to null whereas S3 buckets without _ have a properly set host field . _ is only allowed in S3 bucket names in us - east - 1 .
20,240
/**
 * Creates a new local system-memory context tagged with
 * {@code allocationTag}. The caller owns the returned context and must close
 * it, as it is a new context.
 */
public LocalMemoryContext newLocalSystemMemoryContext(String allocationTag)
{
    return new InternalLocalMemoryContext(
            operatorMemoryContext.newSystemMemoryContext(allocationTag),
            memoryFuture,
            this::updatePeakMemoryReservations,
            true);
}
The caller should close this context , as it is a new context
20,241
/**
 * Invoked on every memory allocation to keep the peak user, system, and total
 * memory reservations up to date.
 */
private void updatePeakMemoryReservations()
{
    long user = operatorMemoryContext.getUserMemory();
    long system = operatorMemoryContext.getSystemMemory();
    long total = user + system;
    peakUserMemoryReservation.accumulateAndGet(user, Math::max);
    peakSystemMemoryReservation.accumulateAndGet(system, Math::max);
    peakTotalMemoryReservation.accumulateAndGet(total, Math::max);
}
listen to all memory allocations and update the peak memory reservations accordingly
20,242
/**
 * Requests that the operator revoke its revocable memory and returns how much
 * memory will be revoked (0 when revoking was already requested or nothing is
 * revocable).
 */
public long requestMemoryRevoking()
{
    long revokedMemory = 0L;
    Runnable listener = null;
    synchronized (this) {
        if (!isMemoryRevokingRequested() && operatorMemoryContext.getRevocableMemory() > 0) {
            memoryRevokingRequested = true;
            revokedMemory = operatorMemoryContext.getRevocableMemory();
            listener = memoryRevocationRequestListener;
        }
    }
    if (listener != null) {
        // run the listener outside the synchronized block to avoid invoking
        // external code while holding the lock
        runListener(listener);
    }
    return revokedMemory;
}
Returns how much revocable memory will be revoked by the operator
20,243
/**
 * Returns the column cardinality summed over all given range values. Exact
 * ranges are fetched from the cache in bulk; the remainder individually. May
 * reach out to the Accumulo metrics table to populate new cache entries.
 */
public long getColumnCardinality(String schema, String table, Authorizations auths, String family, String qualifier, Collection<Range> colValues)
        throws ExecutionException
{
    LOG.debug("Getting cardinality for %s:%s", family, qualifier);
    // split the requested ranges: exact ones can be looked up in bulk
    List<CacheKey> exactKeys = colValues.stream()
            .filter(ColumnCardinalityCache::isExact)
            .map(range -> new CacheKey(schema, table, family, qualifier, range, auths))
            .collect(Collectors.toList());
    LOG.debug("Column values contain %s exact ranges of %s", exactKeys.size(), colValues.size());
    long cardinality = cache.getAll(exactKeys).values().stream().mapToLong(Long::longValue).sum();
    if (exactKeys.size() != colValues.size()) {
        // fetch the non-exact ranges one at a time
        for (Range range : colValues) {
            if (!isExact(range)) {
                cardinality += cache.get(new CacheKey(schema, table, family, qualifier, range, auths));
            }
        }
    }
    return cardinality;
}
Gets the column cardinality for all of the given range values . May reach out to the metrics table in Accumulo to retrieve new cache elements .
20,244
/**
 * Creates the sink that ORC file bytes are written to. Protected so a
 * subclass can substitute a different data sink implementation.
 */
protected OrcDataSink createOrcDataSink(ConnectorSession session, FileSystem fileSystem, Path path)
        throws IOException
{
    return new OutputStreamOrcDataSink(fileSystem.create(path));
}
Allow subclass to replace data sink implementation .
20,245
/**
 * Moves the task directly to the failed state after a failure in this task.
 */
private void failTask(Throwable cause)
{
    TaskStatus currentStatus = getTaskStatus();
    if (!currentStatus.getState().isDone()) {
        // only log when the failure actually changes the task's fate
        log.debug(cause, "Remote task %s failed with %s", currentStatus.getSelf(), cause);
    }
    abort(failWith(getTaskStatus(), FAILED, ImmutableList.of(toFailure(cause))));
}
Move the task directly to the failed state if there was a failure in this task
20,246
/**
 * Serializes a Hive object into the block builder, dispatching on the object
 * inspector's category.
 *
 * @param filterNullMapKeys whether to drop map entries that contain null map keys
 *        (in production, null map keys are not allowed)
 * @return the block built for LIST, MAP and STRUCT values; null for primitives,
 *         which are written directly into {@code builder}
 */
public static Block serializeObject(Type type, BlockBuilder builder, Object object, ObjectInspector inspector, boolean filterNullMapKeys)
{
    switch (inspector.getCategory()) {
        case PRIMITIVE:
            serializePrimitive(type, builder, object, (PrimitiveObjectInspector) inspector);
            return null;
        case LIST:
            return serializeList(type, builder, object, (ListObjectInspector) inspector);
        case MAP:
            return serializeMap(type, builder, object, (MapObjectInspector) inspector, filterNullMapKeys);
        case STRUCT:
            return serializeStruct(type, builder, object, (StructObjectInspector) inspector);
    }
    throw new RuntimeException("Unknown object inspector category: " + inspector.getCategory());
}
Serializes the given object, optionally filtering out map entries that contain null map keys. In production, null map keys are not allowed.
20,247
/**
 * Returns the element of this big array at the specified absolute index.
 */
@SuppressWarnings("unchecked")
public T get(long index)
{
    int segmentIndex = segment(index);
    int offsetInSegment = offset(index);
    return (T) array[segmentIndex][offsetInSegment];
}
Returns the element of this big array at specified index .
20,248
/**
 * Returns true if the given value falls within any of the Ranges in the collection.
 */
private static boolean inRange(Text text, Collection<Range> ranges)
{
    Key key = new Key(text);
    for (Range range : ranges) {
        boolean atOrAfterStart = !range.beforeStartKey(key);
        boolean atOrBeforeEnd = !range.afterEndKey(key);
        if (atOrAfterStart && atOrBeforeEnd) {
            return true;
        }
    }
    return false;
}
Gets a Boolean value indicating if the given value is in one of the Ranges in the given collection
20,249
/**
 * Returns the minimal DiskRange that encloses both this range and
 * {@code otherDiskRange}; any gap between the two is covered as well.
 */
public DiskRange span(DiskRange otherDiskRange)
{
    requireNonNull(otherDiskRange, "otherDiskRange is null");
    long spanStart = Math.min(this.offset, otherDiskRange.getOffset());
    long spanEnd = Math.max(getEnd(), otherDiskRange.getEnd());
    return new DiskRange(spanStart, toIntExact(spanEnd - spanStart));
}
Returns the minimal DiskRange that encloses both this DiskRange and otherDiskRange . If there was a gap between the ranges the new range will cover that gap .
20,250
/**
 * Loads table metadata from the remote Thrift service.
 * <p>
 * This makes an actual network request and should only be invoked by the cache
 * load method, not called directly.
 *
 * @return empty if the service reports no such table
 * @throws PrestoException if the service returns metadata for a different table name
 */
private Optional<ThriftTableMetadata> getTableMetadataInternal(SchemaTableName schemaTableName)
{
    requireNonNull(schemaTableName, "schemaTableName is null");
    PrestoThriftNullableTableMetadata thriftTableMetadata = getTableMetadata(schemaTableName);
    if (thriftTableMetadata.getTableMetadata() == null) {
        return Optional.empty();
    }
    ThriftTableMetadata tableMetadata = new ThriftTableMetadata(thriftTableMetadata.getTableMetadata(), typeManager);
    // sanity-check that the service answered for the table we asked about
    if (!Objects.equals(schemaTableName, tableMetadata.getSchemaTableName())) {
        throw new PrestoException(THRIFT_SERVICE_INVALID_RESPONSE, "Requested and actual table names are different");
    }
    return Optional.of(tableMetadata);
}
This method makes an actual Thrift request and should only be called by the cache load method.
20,251
/**
 * Returns the map of column names to their constraint domains.
 */
@ThriftField(value = 1, requiredness = OPTIONAL)
public Map<String, PrestoThriftDomain> getDomains()
{
    return domains;
}
Return a map of column names to constraints .
20,252
/**
 * Decodes Hive table parameters into additional Presto table properties.
 * <p>
 * Default implementation contributes no additional properties.
 */
public Map<String, Object> decode(Map<String, String> tableParameters)
{
    return ImmutableMap.of();
}
Decode Hive table parameters into additional Presto table properties .
20,253
/**
 * Encodes additional Presto table properties into Hive table parameters.
 * <p>
 * Default implementation contributes no additional parameters.
 */
public Map<String, String> encode(Map<String, Object> tableProperties)
{
    return ImmutableMap.of();
}
Encode additional Presto table properties into Hive table parameters .
20,254
/**
 * Re-registers (or removes) this group in its parent's set of subgroups eligible to
 * start queries, then propagates the update up the tree.
 * <p>
 * Must be called whenever this group's eligibility to run more queries may have
 * changed. Caller must hold the lock on {@code root}.
 */
private void updateEligibility()
{
    checkState(Thread.holdsLock(root), "Must hold lock to update eligibility");
    synchronized (root) {
        if (!parent.isPresent()) {
            // the root has no parent to notify
            return;
        }
        if (isEligibleToStartNext()) {
            parent.get().addOrUpdateSubGroup(this);
        }
        else {
            parent.get().eligibleSubGroups.remove(this);
            lastStartMillis = 0;
        }
        // propagate: our eligibility change may affect the parent's eligibility
        parent.get().updateEligibility();
    }
}
This method must be called whenever the group's eligibility to run more queries may have changed.
20,255
/**
 * Recomputes the cached memory usage of this group and, recursively, of any dirty
 * subgroups.
 * <p>
 * Memory usage stats are expensive to maintain, so this must be called periodically
 * rather than on every allocation. Caller must hold the lock on {@code root}.
 */
protected void internalRefreshStats()
{
    checkState(Thread.holdsLock(root), "Must hold lock to refresh stats");
    synchronized (root) {
        if (subGroups.isEmpty()) {
            // leaf group: sum reservations of the running queries directly
            cachedMemoryUsageBytes = 0;
            for (ManagedQueryExecution query : runningQueries) {
                cachedMemoryUsageBytes += query.getUserMemoryReservation().toBytes();
            }
        }
        else {
            // internal group: refresh only subgroups marked dirty, adjusting our total
            // by each subgroup's usage delta
            for (Iterator<InternalResourceGroup> iterator = dirtySubGroups.iterator(); iterator.hasNext(); ) {
                InternalResourceGroup subGroup = iterator.next();
                long oldMemoryUsageBytes = subGroup.cachedMemoryUsageBytes;
                cachedMemoryUsageBytes -= oldMemoryUsageBytes;
                subGroup.internalRefreshStats();
                cachedMemoryUsageBytes += subGroup.cachedMemoryUsageBytes;
                if (!subGroup.isDirty()) {
                    iterator.remove();
                }
                if (oldMemoryUsageBytes != subGroup.cachedMemoryUsageBytes) {
                    // usage changed, which may change the subgroup's eligibility
                    subGroup.updateEligibility();
                }
            }
        }
    }
}
Memory usage stats are expensive to maintain so this method must be called periodically to update them
20,256
/**
 * Casts the parser's current JSON token to a varchar slice.
 *
 * @return null for a JSON null; otherwise the token rendered as text
 * @throws JsonCastException for token types that cannot be cast to varchar
 */
public static Slice currentTokenAsVarchar(JsonParser parser)
        throws IOException
{
    switch (parser.currentToken()) {
        case VALUE_NULL:
            return null;
        case VALUE_STRING:
        case FIELD_NAME:
            return Slices.utf8Slice(parser.getText());
        case VALUE_NUMBER_FLOAT:
            // floats are routed through the double-to-varchar cast
            return DoubleOperators.castToVarchar(parser.getDoubleValue());
        case VALUE_NUMBER_INT:
            // integers use the exact textual representation
            return Slices.utf8Slice(parser.getText());
        case VALUE_TRUE:
            return BooleanOperators.castToVarchar(true);
        case VALUE_FALSE:
            return BooleanOperators.castToVarchar(false);
        default:
            throw new JsonCastException(format("Unexpected token when cast to %s: %s", StandardTypes.VARCHAR, parser.getText()));
    }
}
utility classes and functions for cast from JSON
20,257
/**
 * Casts the parser's current JSON token to a BigDecimal with the given scale,
 * rounding HALF_UP.
 *
 * @return null for a JSON null
 * @throws PrestoException if the value does not fit in the target precision
 * @throws JsonCastException for token types that cannot be cast to a decimal
 */
private static BigDecimal currentTokenAsJavaDecimal(JsonParser parser, int precision, int scale)
        throws IOException
{
    BigDecimal result;
    switch (parser.getCurrentToken()) {
        case VALUE_NULL:
            return null;
        case VALUE_STRING:
        case FIELD_NAME:
            result = new BigDecimal(parser.getText());
            result = result.setScale(scale, HALF_UP);
            break;
        case VALUE_NUMBER_FLOAT:
        case VALUE_NUMBER_INT:
            result = parser.getDecimalValue();
            result = result.setScale(scale, HALF_UP);
            break;
        case VALUE_TRUE:
            // true maps to 1 at the requested scale
            result = BigDecimal.ONE.setScale(scale, HALF_UP);
            break;
        case VALUE_FALSE:
            // false maps to 0 at the requested scale
            result = BigDecimal.ZERO.setScale(scale, HALF_UP);
            break;
        default:
            throw new JsonCastException(format("Unexpected token when cast to DECIMAL(%s,%s): %s", precision, scale, parser.getText()));
    }
    if (result.precision() > precision) {
        // scale was already adjusted above, so only precision overflow remains
        throw new PrestoException(INVALID_CAST_ARGUMENT, format("Cannot cast input json to DECIMAL(%s,%s)", precision, scale));
    }
    return result;
}
Parses the current JSON token into a decimal value by calling the corresponding cast-to-decimal function, similar to the other JSON cast functions.
20,258
/**
 * Parses the current JSON value (an array or an object) into a single row block.
 * <p>
 * For a JSON array, fields are appended positionally. For a JSON object, fields
 * are matched by lower-cased name via {@code fieldNameToIndex}: unknown fields are
 * skipped, duplicate fields are rejected, and fields missing from the object are
 * filled with nulls.
 */
public static void parseJsonToSingleRowBlock(JsonParser parser, SingleRowBlockWriter singleRowBlockWriter, BlockBuilderAppender[] fieldAppenders, Optional<Map<String, Integer>> fieldNameToIndex)
        throws IOException
{
    if (parser.getCurrentToken() == START_ARRAY) {
        for (int i = 0; i < fieldAppenders.length; i++) {
            parser.nextToken();
            fieldAppenders[i].append(parser, singleRowBlockWriter);
        }
        if (parser.nextToken() != JsonToken.END_ARRAY) {
            throw new JsonCastException(format("Expected json array ending, but got %s", parser.getText()));
        }
    }
    else {
        verify(parser.getCurrentToken() == START_OBJECT);
        if (!fieldNameToIndex.isPresent()) {
            // positional mapping is impossible for an object without field names
            throw new JsonCastException("Cannot cast a JSON object to anonymous row type. Input must be a JSON array.");
        }
        boolean[] fieldWritten = new boolean[fieldAppenders.length];
        int numFieldsWritten = 0;
        while (parser.nextToken() != JsonToken.END_OBJECT) {
            if (parser.currentToken() != FIELD_NAME) {
                throw new JsonCastException(format("Expected a json field name, but got %s", parser.getText()));
            }
            // field names are matched case-insensitively (lower-cased lookup)
            String fieldName = parser.getText().toLowerCase(Locale.ENGLISH);
            Integer fieldIndex = fieldNameToIndex.get().get(fieldName);
            parser.nextToken();
            if (fieldIndex != null) {
                if (fieldWritten[fieldIndex]) {
                    throw new JsonCastException("Duplicate field: " + fieldName);
                }
                fieldWritten[fieldIndex] = true;
                numFieldsWritten++;
                fieldAppenders[fieldIndex].append(parser, singleRowBlockWriter.getFieldBlockBuilder(fieldIndex));
            }
            else {
                // field does not exist in the target row type: skip its value entirely
                parser.skipChildren();
            }
        }
        if (numFieldsWritten != fieldAppenders.length) {
            // fill nulls for fields absent from the JSON object
            for (int i = 0; i < fieldWritten.length; i++) {
                if (!fieldWritten[i]) {
                    singleRowBlockWriter.getFieldBlockBuilder(i).appendNull();
                }
            }
        }
    }
}
There will be a single call to this method, so it can be inlined.
20,259
/**
 * Registers a memory-revocation listener on every active operator that unblocks
 * this driver when revocation is requested.
 * <p>
 * NOTE(review): presumably this must run before the driver is published to
 * another thread, which would otherwise cause unsafe publication of this
 * instance — confirm against callers.
 */
private void initialize()
{
    activeOperators.stream()
            .map(Operator::getOperatorContext)
            .forEach(operatorContext -> operatorContext.setMemoryRevocationRequestListener(() -> driverBlockedFuture.get().set(null)));
}
Must run before this instance is published to another thread; otherwise it would cause unsafe publication of this instance.
20,260
/**
 * If this DereferenceExpression looks like a QualifiedName, returns the
 * QualifiedName; otherwise returns null.
 */
public static QualifiedName getQualifiedName(DereferenceExpression expression)
{
    String fieldName = expression.field.getValue().toLowerCase(Locale.ENGLISH);
    List<String> parts = tryParseParts(expression.base, fieldName);
    if (parts == null) {
        return null;
    }
    return QualifiedName.of(parts);
}
If this DereferenceExpression looks like a QualifiedName return QualifiedName . Otherwise return null
20,261
/**
 * Degrades a RowExpression wrapper back to the Expression it holds.
 */
public static Expression castToExpression(RowExpression rowExpression)
{
    checkArgument(isExpression(rowExpression));
    OriginalExpression wrapper = (OriginalExpression) rowExpression;
    return wrapper.getExpression();
}
Degrade to Expression
20,262
/**
 * Calculates consistent stats for a symbol: caps the distinct-values count by the
 * low/high value range, by the output row count, and by the number of non-null
 * values (splitting any disagreement evenly between NDV and the null fraction).
 */
private SymbolStatsEstimate normalizeSymbolStats(Symbol symbol, SymbolStatsEstimate symbolStats, PlanNodeStatsEstimate stats, TypeProvider types)
{
    if (symbolStats.isUnknown()) {
        return SymbolStatsEstimate.unknown();
    }
    double outputRowCount = stats.getOutputRowCount();
    checkArgument(outputRowCount > 0, "outputRowCount must be greater than zero: %s", outputRowCount);
    double distinctValuesCount = symbolStats.getDistinctValuesCount();
    double nullsFraction = symbolStats.getNullsFraction();
    if (!isNaN(distinctValuesCount)) {
        Type type = requireNonNull(types.get(symbol), () -> "type is missing for symbol " + symbol);
        // NDV cannot exceed the number of representable values in [low, high]
        double maxDistinctValuesByLowHigh = maxDistinctValuesByLowHigh(symbolStats, type);
        if (distinctValuesCount > maxDistinctValuesByLowHigh) {
            distinctValuesCount = maxDistinctValuesByLowHigh;
        }
        // NDV cannot exceed the total row count
        if (distinctValuesCount > outputRowCount) {
            distinctValuesCount = outputRowCount;
        }
        double nonNullValues = outputRowCount * (1 - nullsFraction);
        if (distinctValuesCount > nonNullValues) {
            // NDV and non-null count disagree: meet in the middle and recompute nulls
            double difference = distinctValuesCount - nonNullValues;
            distinctValuesCount -= difference / 2;
            nonNullValues += difference / 2;
            nullsFraction = 1 - nonNullValues / outputRowCount;
        }
    }
    if (distinctValuesCount == 0.0) {
        return SymbolStatsEstimate.zero();
    }
    return SymbolStatsEstimate.buildFrom(symbolStats)
            .setDistinctValuesCount(distinctValuesCount)
            .setNullsFraction(nullsFraction)
            .build();
}
Calculates consistent stats for a symbol .
20,263
/**
 * Prunes extraneous info from the oldest queries once the history exceeds
 * {@code maxQueryHistory}.
 */
private void pruneExpiredQueries()
{
    if (expirationQueue.size() <= maxQueryHistory) {
        return;
    }
    int pruned = 0;
    for (T query : expirationQueue) {
        // stop once the remaining (unpruned) portion fits in the history limit
        boolean remainderFits = expirationQueue.size() - pruned <= maxQueryHistory;
        if (remainderFits) {
            break;
        }
        query.pruneInfo();
        pruned++;
    }
}
Prune extraneous info from old queries
20,264
/**
 * Removes completed queries after a waiting period ({@code minQueryExpireAge}) once
 * the history exceeds {@code maxQueryHistory}.
 * <p>
 * Fix: the original code executed {@code continue} when the head of the queue had no
 * end time, but since {@code peek()} does not remove the head, the loop spun forever
 * on the same element. An entry without an end time cannot be expired, so we bail
 * out of the loop instead.
 */
private void removeExpiredQueries()
{
    DateTime timeHorizon = DateTime.now().minus(minQueryExpireAge.toMillis());
    while (expirationQueue.size() > maxQueryHistory) {
        T query = expirationQueue.peek();
        if (query == null) {
            return;
        }
        Optional<DateTime> endTime = query.getEndTime();
        if (!endTime.isPresent()) {
            // head has not completed; it cannot be expired, and `continue` here
            // would loop forever on the same peeked element
            return;
        }
        if (endTime.get().isAfter(timeHorizon)) {
            // youngest removable query is still within the waiting period
            return;
        }
        QueryId queryId = query.getQueryId();
        log.debug("Remove query %s", queryId);
        queries.remove(queryId);
        expirationQueue.remove(query);
    }
}
Remove completed queries after a waiting period
20,265
/**
 * Produces rows matching the join condition for the current probe position.
 *
 * @return true when all rows for the current probe position have been produced;
 *         false if the operator yielded or the output page filled up — in that case
 *         {@code joinPosition} is retained so a later call resumes where this one
 *         stopped without repeating rows
 */
private boolean joinCurrentPosition(LookupSource lookupSource, DriverYieldSignal yieldSignal)
{
    while (joinPosition >= 0) {
        if (lookupSource.isJoinPositionEligible(joinPosition, probe.getPosition(), probe.getPage())) {
            currentProbePositionProducedRow = true;
            pageBuilder.appendRow(probe, lookupSource, joinPosition);
            joinSourcePositions++;
        }
        // advance before the yield/full check so the produced row is not repeated on resume
        joinPosition = lookupSource.getNextJoinPosition(joinPosition, probe.getPosition(), probe.getPage());
        if (yieldSignal.isSet() || tryBuildPage()) {
            return false;
        }
    }
    return true;
}
Produce rows matching the join condition for the current probe position. If this method was called previously for the current probe position, calling it again will produce only rows that were not produced in previous invocations.
20,266
/**
 * Appends one output row: the index for the probe side and a copy of the matching
 * build-side row at {@code joinPosition}.
 */
public void appendRow(JoinProbe probe, LookupSource lookupSource, long joinPosition)
{
    appendProbeIndex(probe);
    buildPageBuilder.declarePosition();
    lookupSource.appendTo(joinPosition, buildPageBuilder, 0);
}
append the index for the probe and copy the row for the build
20,267
/**
 * Appends the index for the probe side and nulls for every build-side channel
 * (used for probe rows with no build match).
 */
public void appendNullForBuild(JoinProbe probe)
{
    appendProbeIndex(probe);
    buildPageBuilder.declarePosition();
    int channel = 0;
    while (channel < buildOutputChannelCount) {
        buildPageBuilder.getBlockBuilder(channel).appendNull();
        channel++;
    }
}
append the index for the probe and append nulls for the build
20,268
/**
 * Creates a RowType with an explicit type signature.
 * <p>
 * Only {@code RowParametricType.createType} should call this method.
 */
public static RowType createWithTypeSignature(TypeSignature typeSignature, List<Field> fields)
{
    return new RowType(typeSignature, fields);
}
Only RowParametricType . createType should call this method
20,269
/**
 * Fetches the next batch of Redis keys for this split and resets {@code keysIterator}.
 * <p>
 * STRING key types are found by scanning Redis with a SCAN cursor; ZSET key types
 * read the split's slice of the sorted set via ZRANGE.
 *
 * @return false if the key data type is unsupported
 */
private boolean fetchKeys()
{
    try (Jedis jedis = jedisPool.getResource()) {
        switch (split.getKeyDataType()) {
            case STRING: {
                // resume from the previous SCAN cursor, if any
                String cursor = SCAN_POINTER_START;
                if (redisCursor != null) {
                    cursor = redisCursor.getStringCursor();
                }
                log.debug("Scanning new Redis keys from cursor %s . %d values read so far", cursor, totalValues);
                redisCursor = jedis.scan(cursor, scanParms);
                List<String> keys = redisCursor.getResult();
                keysIterator = keys.iterator();
            }
            break;
            case ZSET:
                Set<String> keys = jedis.zrange(split.getKeyName(), split.getStart(), split.getEnd());
                keysIterator = keys.iterator();
                break;
            default:
                log.debug("Redis type of key %s is unsupported", split.getKeyDataFormat());
                return false;
        }
    }
    return true;
}
Otherwise they need to be found by scanning Redis
20,270
/**
 * Jackson deserialization only.
 */
public static NullableValue fromSerializable(@JsonProperty("serializable") Serializable serializable)
{
    Type type = serializable.getType();
    Block block = serializable.getBlock();
    // a null block encodes a null native value
    return new NullableValue(type, block == null ? null : Utils.blockToNativeValue(type, block));
}
Jackson deserialization only
20,271
/**
 * Jackson serialization only.
 */
public Serializable getSerializable()
{
    // a null native value is encoded as a null block
    return new Serializable(type, value == null ? null : Utils.nativeValueToBlock(type, value));
}
Jackson serialization only
20,272
/**
 * Adds a page to the buffer. Returns a ListenableFuture that completes when the
 * next page can be added.
 */
public synchronized ListenableFuture<?> add(Page page)
{
    checkState(!isFull(), "PageBuffer is full!");
    pages.offer(page);
    if (!isFull()) {
        return NOT_FULL;
    }
    // buffer just filled up: hand out a shared future completed on the next poll
    if (settableFuture == null) {
        settableFuture = SettableFuture.create();
    }
    return settableFuture;
}
Adds a page to the buffer . Returns a ListenableFuture that is marked as done when the next page can be added .
20,273
/**
 * Returns a page from the buffer, or null if none exists; unblocks any producer
 * waiting for space.
 */
public synchronized Page poll()
{
    boolean hasWaiter = settableFuture != null;
    if (hasWaiter) {
        settableFuture.set(null);
        settableFuture = null;
    }
    return pages.poll();
}
Return a page from the buffer or null if none exists
20,274
/**
 * Returns the column itself when it is exposed in {@code columns}; otherwise tries
 * to rewrite it to an equivalent symbol (via the equi-join clauses) that is exposed.
 */
public static Optional<Symbol> filterOrRewrite(Collection<Symbol> columns, Collection<JoinNode.EquiJoinClause> equalities, Symbol column)
{
    if (columns.contains(column)) {
        return Optional.of(column);
    }
    for (JoinNode.EquiJoinClause clause : equalities) {
        Symbol left = clause.getLeft();
        Symbol right = clause.getRight();
        if (left.equals(column) && columns.contains(right)) {
            return Optional.of(right);
        }
        if (right.equals(column) && columns.contains(left)) {
            return Optional.of(left);
        }
    }
    return Optional.empty();
}
to the other symbol if that s exposed instead .
20,275
/**
 * Rewrites the aggregation over an outer join so that unmatched rows yield the
 * aggregation-over-null result: cross joins the outer join with an aggregation over
 * a single null row, then coalesces each aggregate output with its over-null
 * counterpart.
 *
 * @return empty if the over-null aggregation cannot be created
 */
private Optional<PlanNode> coalesceWithNullAggregation(AggregationNode aggregationNode, PlanNode outerJoin, SymbolAllocator symbolAllocator, PlanNodeIdAllocator idAllocator, Lookup lookup)
{
    // build the same aggregation evaluated over a single null row
    Optional<MappedAggregationInfo> aggregationOverNullInfoResultNode = createAggregationOverNull(aggregationNode, symbolAllocator, idAllocator, lookup);
    if (!aggregationOverNullInfoResultNode.isPresent()) {
        return Optional.empty();
    }
    MappedAggregationInfo aggregationOverNullInfo = aggregationOverNullInfoResultNode.get();
    AggregationNode aggregationOverNull = aggregationOverNullInfo.getAggregation();
    Map<Symbol, Symbol> sourceAggregationToOverNullMapping = aggregationOverNullInfo.getSymbolMapping();
    // cross join (inner join with no criteria) the outer join with the over-null aggregation
    JoinNode crossJoin = new JoinNode(
            idAllocator.getNextId(),
            JoinNode.Type.INNER,
            outerJoin,
            aggregationOverNull,
            ImmutableList.of(),
            ImmutableList.<Symbol>builder()
                    .addAll(outerJoin.getOutputSymbols())
                    .addAll(aggregationOverNull.getOutputSymbols())
                    .build(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty());
    // project COALESCE(aggregate, aggregateOverNull) for each aggregation output;
    // pass all other symbols through unchanged
    Assignments.Builder assignmentsBuilder = Assignments.builder();
    for (Symbol symbol : outerJoin.getOutputSymbols()) {
        if (aggregationNode.getAggregations().containsKey(symbol)) {
            assignmentsBuilder.put(symbol, new CoalesceExpression(symbol.toSymbolReference(), sourceAggregationToOverNullMapping.get(symbol).toSymbolReference()));
        }
        else {
            assignmentsBuilder.put(symbol, symbol.toSymbolReference());
        }
    }
    return Optional.of(new ProjectNode(idAllocator.getNextId(), crossJoin, assignmentsBuilder.build()));
}
Cross joins the outer join with the same aggregation computed over a single null row, and coalesces each aggregation output with its counterpart from the null row.
20,276
/**
 * Attempts to rewrite an Expression in terms of the symbols allowed by the symbol
 * scope, given the known equalities.
 *
 * @return the rewritten expression, or null if unsuccessful
 * @throws IllegalArgumentException if the expression is non-deterministic
 */
public Expression rewriteExpression(Expression expression, Predicate<Symbol> symbolScope)
{
    checkArgument(isDeterministic(expression), "Only deterministic expressions may be considered for rewrite");
    return rewriteExpression(expression, symbolScope, true);
}
Attempts to rewrite an Expression in terms of the symbols allowed by the symbol scope, given the known equalities. Returns null if unsuccessful. This method rejects non-deterministic expressions.
20,277
/**
 * Attempts to rewrite an Expression in terms of the symbols allowed by the symbol
 * scope, given the known equalities. Unlike {@code rewriteExpression}, this variant
 * does not reject non-deterministic expressions.
 *
 * @return the rewritten expression, or null if unsuccessful
 */
public Expression rewriteExpressionAllowNonDeterministic(Expression expression, Predicate<Symbol> symbolScope)
{
    return rewriteExpression(expression, symbolScope, true);
}
Attempts to rewrite an Expression in terms of the symbols allowed by the symbol scope given the known equalities . Returns null if unsuccessful . This method allows rewriting non - deterministic expressions .
20,278
/**
 * Returns the most preferable expression (per CANONICAL_ORDERING) to serve as the
 * canonical expression, or null for an empty input.
 */
private static Expression getCanonical(Iterable<Expression> expressions)
{
    return Iterables.isEmpty(expressions) ? null : CANONICAL_ORDERING.min(expressions);
}
Returns the most preferable expression to be used as the canonical expression.
20,279
/**
 * Returns a predicate determining whether an Expression may be successfully applied
 * to the equality inference: a deterministic, null-rejecting equality between two
 * distinct sub-expressions. Single-value IN predicates are first normalized into
 * equalities.
 */
public static Predicate<Expression> isInferenceCandidate()
{
    return expression -> {
        expression = normalizeInPredicateToEquality(expression);
        if (expression instanceof ComparisonExpression && isDeterministic(expression) && !mayReturnNullOnNonNullInput(expression)) {
            ComparisonExpression comparison = (ComparisonExpression) expression;
            if (comparison.getOperator() == ComparisonExpression.Operator.EQUAL) {
                // an equality of an expression with itself carries no information
                return !comparison.getLeft().equals(comparison.getRight());
            }
        }
        return false;
    };
}
Determines whether an Expression may be successfully applied to the equality inference
20,280
/**
 * Rewrites a single-value InPredicate ("x IN (y)") as an equality ("x = y") when
 * possible; all other expressions are returned unchanged.
 */
private static Expression normalizeInPredicateToEquality(Expression expression)
{
    if (!(expression instanceof InPredicate)) {
        return expression;
    }
    InPredicate inPredicate = (InPredicate) expression;
    if (!(inPredicate.getValueList() instanceof InListExpression)) {
        return expression;
    }
    InListExpression valueList = (InListExpression) inPredicate.getValueList();
    if (valueList.getValues().size() != 1) {
        return expression;
    }
    return new ComparisonExpression(ComparisonExpression.Operator.EQUAL, inPredicate.getValue(), Iterables.getOnlyElement(valueList.getValues()));
}
Rewrite single value InPredicates as equality if possible
20,281
/**
 * Provides a convenience Iterable of the expression's conjuncts that are not
 * equality-inference candidates (and thus were not added to the inference).
 */
public static Iterable<Expression> nonInferrableConjuncts(Expression expression)
{
    return filter(extractConjuncts(expression), not(isInferenceCandidate()));
}
Provides a convenience Iterable of Expression conjuncts which have not been added to the inference
20,282
/**
 * Reserves the given number of bytes to spill, throwing an exceeded-local-limit
 * exception when the reservation would reach or exceed the maximum.
 * <p>
 * NOTE(review): the comparison is {@code >=}, so reserving exactly up to
 * {@code maxBytes} also throws — confirm this boundary is intended.
 */
public synchronized ListenableFuture<?> reserve(long bytes)
{
    checkArgument(bytes >= 0, "bytes is negative");
    if ((currentBytes + bytes) >= maxBytes) {
        throw exceededLocalLimit(succinctBytes(maxBytes));
    }
    currentBytes += bytes;
    return NOT_BLOCKED;
}
Reserves the given number of bytes to spill . If more than the maximum throws an exception .
20,283
/**
 * Reserves the given number of bytes for the query. The caller should wait on the
 * returned future before allocating more memory.
 */
public ListenableFuture<?> reserve(QueryId queryId, String allocationTag, long bytes)
{
    checkArgument(bytes >= 0, "bytes is negative");
    ListenableFuture<?> result;
    synchronized (this) {
        if (bytes != 0) {
            queryMemoryReservations.merge(queryId, bytes, Long::sum);
            updateTaggedMemoryAllocations(queryId, allocationTag, bytes);
        }
        reservedBytes += bytes;
        if (getFreeBytes() <= 0) {
            // pool exhausted: hand out a shared future to wait on
            if (future == null) {
                future = NonCancellableMemoryFuture.create();
            }
            checkState(!future.isDone(), "future is already completed");
            result = future;
        }
        else {
            result = NOT_BLOCKED;
        }
    }
    // invoked outside the lock
    onMemoryReserved();
    return result;
}
Reserves the given number of bytes . Caller should wait on the returned future before allocating more memory .
20,284
/**
 * Tries to reserve the given number of bytes. The return value indicates whether
 * the caller may use the requested memory; when false, nothing was reserved.
 */
public boolean tryReserve(QueryId queryId, String allocationTag, long bytes)
{
    checkArgument(bytes >= 0, "bytes is negative");
    synchronized (this) {
        if (getFreeBytes() - bytes < 0) {
            return false;
        }
        reservedBytes += bytes;
        if (bytes != 0) {
            queryMemoryReservations.merge(queryId, bytes, Long::sum);
            updateTaggedMemoryAllocations(queryId, allocationTag, bytes);
        }
    }
    // invoked outside the lock, only on successful reservation
    onMemoryReserved();
    return true;
}
Try to reserve the given number of bytes . Return value indicates whether the caller may use the requested memory .
20,285
/**
 * Factory method for JSON serde only!
 */
public static Partitioning jsonCreate(@JsonProperty("handle") PartitioningHandle handle, @JsonProperty("arguments") List<ArgumentBinding> arguments)
{
    return new Partitioning(handle, arguments);
}
Factory method for JSON serde only!
20,286
/**
 * Returns whether this partitioning is refined over {@code right}.
 * The refined-over relation is reflexive: equal handles qualify without
 * consulting the metadata.
 */
public boolean isRefinedPartitioningOver(Partitioning right, Metadata metadata, Session session)
{
    if (!handle.equals(right.handle) && !metadata.isRefinedPartitioningOver(session, handle, right.handle)) {
        return false;
    }
    // handles are compatible; the argument bindings must still match exactly
    return arguments.equals(right.arguments);
}
Refined - over relation is reflexive .
20,287
/**
 * Extracts all column constraints that require exactly one value or only null in
 * their respective Domains.
 *
 * @return empty if the TupleDomain is "none"
 */
public static <T> Optional<Map<T, NullableValue>> extractFixedValues(TupleDomain<T> tupleDomain)
{
    if (!tupleDomain.getDomains().isPresent()) {
        return Optional.empty();
    }
    return Optional.of(tupleDomain.getDomains().get()
            .entrySet().stream()
            // keep only domains constrained to a single (possibly null) value
            .filter(entry -> entry.getValue().isNullableSingleValue())
            .collect(toLinkedMap(Map.Entry::getKey, entry -> new NullableValue(entry.getValue().getType(), entry.getValue().getNullableSingleValue()))));
}
Extract all column constraints that require exactly one value or only null in their respective Domains . Returns an empty Optional if the Domain is none .
20,288
/**
 * Converts a map of columns to values into the TupleDomain that requires those
 * columns to be fixed to those values. Null is allowed as a fixed value.
 */
public static <T> TupleDomain<T> fromFixedValues(Map<T, NullableValue> fixedValues)
{
    return TupleDomain.withColumnDomains(fixedValues.entrySet().stream()
            .collect(toLinkedMap(
                    Map.Entry::getKey,
                    entry -> {
                        Type type = entry.getValue().getType();
                        Object value = entry.getValue().getValue();
                        // a null fixed value constrains the column to only null
                        return value == null ? Domain.onlyNull(type) : Domain.singleValue(type, value);
                    })));
}
Convert a map of columns to values into the TupleDomain which requires those columns to be fixed to those values . Null is allowed as a fixed value .
20,289
/**
 * Returns the strict intersection of the TupleDomains: the resulting TupleDomain
 * represents the set of tuples that would be valid in both inputs.
 * <p>
 * Improvement: the manual get-then-put per key is replaced by the idiomatic
 * {@code Map.merge}, which inserts the domain when absent and otherwise intersects
 * it with the existing one — identical behavior, less code.
 */
public TupleDomain<T> intersect(TupleDomain<T> other)
{
    // "none" is absorbing for intersection
    if (this.isNone() || other.isNone()) {
        return none();
    }
    Map<T, Domain> intersected = new LinkedHashMap<>(this.getDomains().get());
    for (Map.Entry<T, Domain> entry : other.getDomains().get().entrySet()) {
        intersected.merge(entry.getKey(), entry.getValue(), Domain::intersect);
    }
    return withColumnDomains(intersected);
}
Returns the strict intersection of the TupleDomains . The resulting TupleDomain represents the set of tuples that would be valid in both TupleDomains .
20,290
/**
 * Computes the number of elements in the collection starting before
 * {@code nextIndex}, scanning repetition levels until the next
 * collection-beginning marker or the end of input.
 * <p>
 * Only called for non-empty collections, hence size starts at 1.
 */
private static int getCollectionSize(int[] repetitionLevels, int maxRepetitionLevel, int nextIndex)
{
    int size = 1;
    while (hasMoreElements(repetitionLevels, nextIndex) && !isCollectionBeginningMarker(repetitionLevels, maxRepetitionLevel, nextIndex)) {
        // levels above maxRepetitionLevel belong to nested collections, not this one
        if (repetitionLevels[nextIndex] <= maxRepetitionLevel) {
            size++;
        }
        nextIndex++;
    }
    return size;
}
This method is only called for non - empty collections
20,291
/**
 * The synthesized column indicating the bucket id. When table bucketing differs
 * from partition bucketing, this column indicates which bucket the row falls in
 * under the table bucketing scheme.
 */
public static HiveColumnHandle bucketColumnHandle()
{
    return new HiveColumnHandle(BUCKET_COLUMN_NAME, BUCKET_HIVE_TYPE, BUCKET_TYPE_SIGNATURE, BUCKET_COLUMN_INDEX, SYNTHESIZED, Optional.empty());
}
The column indicating the bucket id . When table bucketing differs from partition bucketing this column indicates what bucket the row will fall in under the table bucketing scheme .
20,292
/**
 * For an identifier of the form "a.b.c.d", returns "a.b.c".
 * For a single-part identifier "a", returns absent.
 */
public Optional<QualifiedName> getPrefix()
{
    if (parts.size() == 1) {
        return Optional.empty();
    }
    List<String> subList = parts.subList(0, parts.size() - 1);
    // NOTE(review): the same list is passed for both constructor arguments
    // (presumably parts and originalParts) — confirm this is intended
    return Optional.of(new QualifiedName(subList, subList));
}
For an identifier of the form a . b . c . d returns a . b . c For an identifier of the form a returns absent
20,293
/**
 * Marks this task as destroyed and drains all running and queued splits.
 *
 * @return any remaining splits; the caller must destroy these
 */
public synchronized List<PrioritizedSplitRunner> destroy()
{
    destroyed = true;
    ImmutableList.Builder<PrioritizedSplitRunner> builder = ImmutableList.builder();
    builder.addAll(runningIntermediateSplits);
    builder.addAll(runningLeafSplits);
    builder.addAll(queuedLeafSplits);
    runningIntermediateSplits.clear();
    runningLeafSplits.clear();
    queuedLeafSplits.clear();
    return builder.build();
}
Returns any remaining splits . The caller must destroy these .
20,294
/**
 * Parses a string parameter value into a sorted set of distinct tokens.
 * <p>
 * Fix: the regex {@code [\s+]} is a character class matching a SINGLE whitespace
 * or '+' character, so consecutive delimiters ("a  b") produced empty strings
 * that were added to the result set. Empty tokens are now skipped. Splitting on
 * '+' is retained for backward compatibility with form-encoded values.
 *
 * @param values the raw parameter value; may be null or blank
 * @return a sorted set of the non-empty tokens (never null)
 */
public static Set<String> parseParameterList(String values)
{
    Set<String> result = new TreeSet<String>();
    if (values != null && values.trim().length() > 0) {
        String[] tokens = values.split("[\\s+]");
        for (String token : tokens) {
            if (!token.isEmpty()) {
                result.add(token);
            }
        }
    }
    return result;
}
Parses a string parameter value into a set of strings .
20,295
/**
 * Formats a collection of string values into a single space-delimited form value,
 * or null when the input is null.
 */
public static String formatParameterList(Collection<String> value)
{
    if (value == null) {
        return null;
    }
    return StringUtils.collectionToDelimitedString(value, " ");
}
Formats a set of string values into a format appropriate for sending as a single - valued form value .
20,296
/**
 * Extracts a map from a query string of the form "k1=v1&amp;k2=v2".
 */
public static Map<String, String> extractMap(String query)
{
    Map<String, String> map = new HashMap<String, String>();
    // split on '&' into "k=v" entries, then on '=' into properties
    Properties properties = StringUtils.splitArrayElementsIntoProperties(StringUtils.delimitedListToStringArray(query, "&"), "=");
    if (properties != null) {
        for (Object key : properties.keySet()) {
            map.put(key.toString(), properties.get(key).toString());
        }
    }
    return map;
}
Extract a map from a query string .
20,297
/**
 * Compares two sets and checks that {@code target} contains every member of
 * {@code members}.
 */
public static boolean containsAll(Set<String> target, Set<String> members)
{
    // intersect a defensive copy with members; every member is present exactly
    // when the intersection is the same size as members
    Set<String> intersection = new HashSet<String>(target);
    intersection.retainAll(members);
    return intersection.size() == members.size();
}
Compare 2 sets and check that one contains all members of the other .
20,298
/**
 * Finds the place in the filter chain to insert the Spring Security OAuth filters:
 * immediately after the ExceptionTranslationFilter, or at the end of the chain when
 * it is not present.
 * <p>
 * Fix: {@code BeanDefinition.getBeanClassName()} may return null (e.g. for beans
 * defined by factory method), which previously threw a NullPointerException;
 * comparing with the constant on the left is null-safe.
 */
private int insertIndex(List<BeanMetadataElement> filterChain)
{
    String exceptionTranslationFilterName = ExceptionTranslationFilter.class.getName();
    for (int i = 0; i < filterChain.size(); i++) {
        BeanMetadataElement filter = filterChain.get(i);
        if (filter instanceof BeanDefinition) {
            String beanName = ((BeanDefinition) filter).getBeanClassName();
            if (exceptionTranslationFilterName.equals(beanName)) {
                return i + 1;
            }
        }
    }
    return filterChain.size();
}
Attempts to find the place in the filter chain to insert the spring security oauth filters . Currently these filters are inserted after the ExceptionTranslationFilter .
20,299
/**
 * Obtains a new access token for the specified resource using the refresh token,
 * delegating to the first provider in the chain that supports refresh.
 *
 * @throws OAuth2AccessDeniedException if no provider in the chain supports refresh
 *         for the resource
 */
public OAuth2AccessToken refreshAccessToken(OAuth2ProtectedResourceDetails resource, OAuth2RefreshToken refreshToken, AccessTokenRequest request)
        throws UserRedirectRequiredException
{
    for (AccessTokenProvider tokenProvider : chain) {
        if (tokenProvider.supportsRefresh(resource)) {
            DefaultOAuth2AccessToken refreshedAccessToken = new DefaultOAuth2AccessToken(tokenProvider.refreshAccessToken(resource, refreshToken, request));
            if (refreshedAccessToken.getRefreshToken() == null) {
                // the provider may omit the refresh token; carry the old one forward
                refreshedAccessToken.setRefreshToken(refreshToken);
            }
            return refreshedAccessToken;
        }
    }
    throw new OAuth2AccessDeniedException("Unable to obtain a new access token for resource '" + resource.getId() + "'. The provider manager is not configured to support it.", resource);
}
Obtain a new access token for the specified resource using the refresh token .