idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
20,100
/**
 * Adds alpha times a compressed sparse double vector X (indices in {@code pointers})
 * to the full-storage vector Y, via the CBLAS sparse axpy routine.
 */
protected void daxpyi(long N, double alpha, INDArray X, DataBuffer pointers, INDArray Y) {
    DoublePointer x = (DoublePointer) X.data().addressPointer();
    IntPointer indices = (IntPointer) pointers.addressPointer();
    DoublePointer y = (DoublePointer) Y.data().addressPointer();
    cblas_daxpyi((int) N, alpha, x, indices, y);
}
Adds a scalar multiple of double compressed sparse vector to a full - storage vector .
20,101
/**
 * Adds alpha times a compressed sparse float vector X (indices in {@code pointers})
 * to the full-storage vector Y; alpha is narrowed to float for the single-precision routine.
 */
protected void saxpyi(long N, double alpha, INDArray X, DataBuffer pointers, INDArray Y) {
    FloatPointer x = (FloatPointer) X.data().addressPointer();
    IntPointer indices = (IntPointer) pointers.addressPointer();
    FloatPointer y = (FloatPointer) Y.data().addressPointer();
    cblas_saxpyi((int) N, (float) alpha, x, indices, y);
}
Adds a scalar multiple of float compressed sparse vector to a full - storage vector .
20,102
/** Scales the double vector X in place (X = a * X) with element stride {@code incx}. */
protected void dscal(long N, double a, INDArray X, int incx) {
    DoublePointer x = (DoublePointer) X.data().addressPointer();
    cblas_dscal((int) N, a, x, incx);
}
Computes the product of a double vector by a scalar .
20,103
/** Scales the float vector X in place (X = a * X) with element stride {@code incx}; a is narrowed to float. */
protected void sscal(long N, double a, INDArray X, int incx) {
    FloatPointer x = (FloatPointer) X.data().addressPointer();
    cblas_sscal((int) N, (float) a, x, incx);
}
Computes the product of a float vector by a scalar .
20,104
/**
 * Ensures the device cache contains a {@link CacheHolder} for the given device/shape pair,
 * lazily creating the per-device map and the per-shape holder (double-checked in both cases).
 *
 * Fixes: the original wrapped a synchronized block in a needless try/catch that re-wrapped
 * RuntimeExceptions, and silently swallowed ALL exceptions (including InterruptedException)
 * around the semaphore-guarded section, losing the thread's interrupt status.
 */
protected void ensureDeviceCacheHolder(Integer deviceId, AllocationShape shape) {
    if (!deviceCache.containsKey(deviceId)) {
        synchronized (this) {
            if (!deviceCache.containsKey(deviceId)) {
                deviceCache.put(deviceId, new ConcurrentHashMap<AllocationShape, CacheHolder>());
            }
        }
    }
    if (!deviceCache.get(deviceId).containsKey(shape)) {
        try {
            singleLock.acquire();
            if (!deviceCache.get(deviceId).containsKey(shape)) {
                deviceCache.get(deviceId).put(shape, new CacheHolder(shape, deviceCachedAmount.get(deviceId)));
            }
        }
        catch (InterruptedException e) {
            // Do not swallow interruption: restore the flag so callers can observe it.
            Thread.currentThread().interrupt();
        }
        finally {
            singleLock.release();
        }
    }
}
This method checks if storage contains holder for specified shape
20,105
/**
 * Returns a view of {@code paramsView} reshaped to {@code shape} using the given
 * flattening order; the underlying values are not modified.
 */
public static INDArray reshapeWeights(long[] shape, INDArray paramsView, char flatteningOrder) {
    return paramsView.reshape(flatteningOrder, shape);
}
Reshape the parameters view without modifying the paramsView array values .
20,106
/**
 * Computes the output schema for this transformation: every String column becomes a
 * Double column of the same name; all other columns pass through unchanged.
 */
public Schema transform(Schema inputSchema) {
    Schema.Builder builder = new Schema.Builder();
    for (int col = 0; col < inputSchema.numColumns(); col++) {
        if (inputSchema.getType(col) == ColumnType.String) {
            builder.addColumnDouble(inputSchema.getMetaData(col).getName());
        }
        else {
            builder.addColumn(inputSchema.getMetaData(col));
        }
    }
    return builder.build();
}
Get the output schema for this transformation given an input schema
20,107
/**
 * Returns the first element that occurs more than once in {@code array},
 * or {@code null} if all elements are distinct.
 */
private static Integer findDuplicate(int[] array) {
    Set<Integer> seen = new LinkedHashSet<>();
    for (int value : array) {
        // Set.add returns false when the value was already present.
        if (!seen.add(value)) {
            return value;
        }
    }
    return null;
}
Returns the first duplicate element inside an array, or null if there are no duplicates.
20,108
/** Returns true when any of this type's resource bindings requires Resources for the given SDK level. */
private boolean hasResourceBindingsNeedingResource(int sdk) {
    for (ResourceBinding binding : resourceBindings) {
        if (binding.requiresResources(sdk)) {
            return true;
        }
    }
    return false;
}
True when this type's bindings use Resource directly instead of Context.
20,109
/**
 * Returns true when the method's return value should be propagated, false when the
 * method returns void (the caller should use a default instead).
 *
 * @throws IllegalStateException if the return type is neither void nor {@code expected}
 */
private static boolean validateReturnType(Method method, Class<?> expected) {
    Class<?> actual = method.getReturnType();
    if (actual == void.class) {
        return false;
    }
    if (actual == expected) {
        return true;
    }
    String expectedType = "'" + expected.getName() + "'";
    if (expected != void.class) {
        expectedType = "'void' or " + expectedType;
    }
    throw new IllegalStateException(
            method.getDeclaringClass().getName() + "." + method.getName()
                    + " must have return type of " + expectedType);
}
Returns true when the return value should be propagated . Use a default otherwise .
20,110
public synchronized SpiderMonitor register ( Spider ... spiders ) throws JMException { for ( Spider spider : spiders ) { MonitorSpiderListener monitorSpiderListener = new MonitorSpiderListener ( ) ; if ( spider . getSpiderListeners ( ) == null ) { List < SpiderListener > spiderListeners = new ArrayList < SpiderListener > ( ) ; spiderListeners . add ( monitorSpiderListener ) ; spider . setSpiderListeners ( spiderListeners ) ; } else { spider . getSpiderListeners ( ) . add ( monitorSpiderListener ) ; } SpiderStatusMXBean spiderStatusMBean = getSpiderStatusMBean ( spider , monitorSpiderListener ) ; registerMBean ( spiderStatusMBean ) ; spiderStatuses . add ( spiderStatusMBean ) ; } return this ; }
Register spider for monitor .
20,111
/** Convenience one-shot download of a url with the given charset, returning the page's html. */
public Html download(String url, String charset) {
    Page page = download(new Request(url), Site.me().setCharset(charset).toTask());
    return (Html) page.getHtml();
}
A simple method to download a url .
20,112
/** Lazily builds and caches the Html representation of this page's raw text. */
public Html getHtml() {
    if (html == null) {
        html = new Html(rawText, request.getUrl());
    }
    return html;
}
get html content of page
20,113
/**
 * Adds urls to fetch, skipping blanks, bare "#" anchors and javascript: pseudo-links;
 * relative urls are canonicalized against this page's url first.
 */
public void addTargetRequests(List<String> requests) {
    for (String candidate : requests) {
        boolean skip = StringUtils.isBlank(candidate)
                || candidate.equals("#")
                || candidate.startsWith("javascript:");
        if (skip) {
            continue;
        }
        String canonical = UrlUtils.canonicalizeUrl(candidate, url.toString());
        targetRequests.add(new Request(canonical));
    }
}
add urls to fetch
20,114
/**
 * Adds a single url to fetch, ignoring blanks and bare "#" anchors; the url is
 * canonicalized against this page's url before queueing.
 */
public void addTargetRequest(String requestString) {
    if (StringUtils.isBlank(requestString) || requestString.equals("#")) {
        return;
    }
    String canonical = UrlUtils.canonicalizeUrl(requestString, url.toString());
    targetRequests.add(new Request(canonical));
}
add url to fetch
20,115
/** Strips a JSONP padding (e.g. {@code callback(...)}) from the first source text and returns the inner JSON. */
public Json removePadding(String padding) {
    XTokenQueue queue = new XTokenQueue(getFirstSourceText());
    queue.consumeWhitespace();
    queue.consume(padding);
    queue.consumeWhitespace();
    // Everything inside the balanced parentheses (quotes respected) is the payload.
    String payload = queue.chompBalancedNotInQuotes('(', ')');
    return new Json(payload);
}
remove padding for JSONP
20,116
/**
 * Replaces the scheduler, draining any requests still queued in the old scheduler
 * into the new one so nothing is lost.
 */
public Spider setScheduler(Scheduler scheduler) {
    checkIfRunning();
    Scheduler previous = this.scheduler;
    this.scheduler = scheduler;
    if (previous != null) {
        for (Request request = previous.poll(this); request != null; request = previous.poll(this)) {
            this.scheduler.push(request, this);
        }
    }
    return this;
}
set scheduler for Spider
20,117
/**
 * Downloads the given urls synchronously and returns the collected results.
 * Temporarily disables url spawning and exit-time destruction while running,
 * restoring both flags afterwards.
 */
public <T> List<T> getAll(Collection<String> urls) {
    destroyWhenExit = false;
    spawnUrl = false;
    if (startRequests != null) {
        startRequests.clear();
    }
    for (Request request : UrlUtils.convertToRequests(urls)) {
        addRequest(request);
    }
    CollectorPipeline collectorPipeline = getCollectorPipeline();
    pipelines.add(collectorPipeline);
    run();
    spawnUrl = true;
    destroyWhenExit = true;
    return collectorPipeline.getCollected();
}
Downloads the given URLs synchronously.
20,118
/**
 * Configures the spider to run with the given executor service and thread count.
 *
 * Fixes: validate {@code threadNum} BEFORE mutating state — the original assigned
 * {@code this.threadNum} first, leaving the spider in an inconsistent state when
 * the IllegalArgumentException was thrown.
 *
 * @throws IllegalArgumentException if threadNum is not positive
 */
public Spider thread(ExecutorService executorService, int threadNum) {
    checkIfRunning();
    if (threadNum <= 0) {
        throw new IllegalArgumentException("threadNum should be more than one!");
    }
    this.threadNum = threadNum;
    this.executorService = executorService;
    return this;
}
Start with more than one thread.
20,119
/** Adds a cookie scoped to the given domain, creating the per-domain map on first use. */
public Site addCookie(String domain, String name, String value) {
    Map<String, String> domainCookies = cookies.get(domain);
    if (domainCookies == null) {
        domainCookies = new HashMap<String, String>();
        cookies.put(domain, domainCookies);
    }
    domainCookies.put(name, value);
    return this;
}
Add a cookie with specific domain .
20,120
/**
 * Extracts the ORDER BY elements from the select body and clears them on the original
 * (so pagination can be applied without the redundant sort). For WITH items it recurses
 * into the inner body; for set operations (UNION etc.) it recurses into the last select.
 * Returns null when there is nothing to extract.
 */
public static List<OrderByElement> extraOrderBy(SelectBody selectBody) {
    if (selectBody instanceof PlainSelect) {
        PlainSelect plainSelect = (PlainSelect) selectBody;
        List<OrderByElement> orderByElements = plainSelect.getOrderByElements();
        plainSelect.setOrderByElements(null);
        return orderByElements;
    }
    if (selectBody instanceof WithItem) {
        WithItem withItem = (WithItem) selectBody;
        if (withItem.getSelectBody() != null) {
            return extraOrderBy(withItem.getSelectBody());
        }
        return null;
    }
    // NOTE(review): assumes any remaining body is a SetOperationList, as the original did.
    SetOperationList operationList = (SetOperationList) selectBody;
    List<SelectBody> selects = operationList.getSelects();
    if (selects != null && selects.size() > 0) {
        // Only the trailing select of a set operation carries the effective ORDER BY.
        return extraOrderBy(selects.get(selects.size() - 1));
    }
    return null;
}
extra order by and set default orderby to null
20,121
/**
 * Decrements the reference count of {@code key} by 1 and returns the updated count,
 * removing the entry entirely when the count reaches zero.
 */
public int decrementAndGet(Object key) {
    long hashCode = getHashCode(key);
    int previousCount = addTo(hashCode, -1);
    if (previousCount == 1) {
        // Last reference released: drop the entry.
        remove(hashCode);
    }
    return previousCount - 1;
}
Decrements the reference count of an object by 1 and returns the updated reference count
20,122
/** Returns the retained size of this map in bytes: instance overhead plus the key, value and used arrays. */
public long sizeOf() {
    return INSTANCE_SIZE
            + SizeOf.sizeOf(key)
            + SizeOf.sizeOf(value)
            + SizeOf.sizeOf(used);
}
Returns the size of this map in bytes .
20,123
/**
 * Builds a 64-bit identity hash for {@code key}: the JVM identity hash code in the
 * high 32 bits plus a type-specific size/length discriminator in the low 32 bits,
 * so two distinct objects with colliding identity hashes rarely collide overall.
 *
 * @throws IllegalArgumentException for unsupported key types
 */
private static long getHashCode(Object key) {
    int extraIdentity;
    if (key == null) {
        extraIdentity = 0;
    }
    else if (key instanceof Block) {
        extraIdentity = (int) ((Block) key).getRetainedSizeInBytes();
    }
    else if (key instanceof Slice) {
        extraIdentity = (int) ((Slice) key).getRetainedSize();
    }
    else if (key.getClass().isArray()) {
        extraIdentity = getLength(key);
    }
    else if (key instanceof AbstractMapBlock.HashTables) {
        extraIdentity = (int) ((AbstractMapBlock.HashTables) key).getRetainedSizeInBytes();
    }
    else {
        throw new IllegalArgumentException(format("Unsupported type for %s", key));
    }
    return (((long) System.identityHashCode(key)) << Integer.SIZE) + extraIdentity;
}
Get the 64 - bit hash code for an object
20,124
/**
 * Returns true when {@code left} is more specific than {@code right}: an invocation
 * handled by {@code left} could also be bound to {@code right}'s declared signature.
 */
private boolean isMoreSpecificThan(ApplicableFunction left, ApplicableFunction right) {
    List<TypeSignatureProvider> resolvedTypes =
            fromTypeSignatures(left.getBoundSignature().getArgumentTypes());
    SignatureBinder binder = new SignatureBinder(typeManager, right.getDeclaredSignature(), true);
    return binder.bindVariables(resolvedTypes).isPresent();
}
One method is more specific than another if invocation handled by the first method could be passed on to the other one
20,125
/**
 * Translates the TupleDomain constraint summary into Kudu predicates on the scan
 * token builder. Returns false when the constraint is provably unsatisfiable
 * (a NONE tuple domain or a NONE column domain), true otherwise.
 *
 * Fixes: in the upper-bound branch the comparison op was derived from
 * {@code low.getBound()} instead of {@code high.getBound()}, so LESS vs LESS_EQUAL
 * could be chosen incorrectly for range constraints.
 */
private boolean addConstraintPredicates(KuduTable table, KuduScanToken.KuduScanTokenBuilder builder, TupleDomain<ColumnHandle> constraintSummary) {
    if (constraintSummary.isNone()) {
        return false;
    }
    if (!constraintSummary.isAll()) {
        Schema schema = table.getSchema();
        for (TupleDomain.ColumnDomain<ColumnHandle> columnDomain : constraintSummary.getColumnDomains().get()) {
            int position = ((KuduColumnHandle) columnDomain.getColumn()).getOrdinalPosition();
            ColumnSchema columnSchema = schema.getColumnByIndex(position);
            Domain domain = columnDomain.getDomain();
            if (domain.isNone()) {
                return false;
            }
            else if (domain.isAll()) {
                // No restriction on this column.
            }
            else if (domain.isOnlyNull()) {
                builder.addPredicate(KuduPredicate.newIsNullPredicate(columnSchema));
            }
            else if (domain.getValues().isAll() && domain.isNullAllowed()) {
                builder.addPredicate(KuduPredicate.newIsNotNullPredicate(columnSchema));
            }
            else if (domain.isSingleValue()) {
                builder.addPredicate(createEqualsPredicate(columnSchema, domain.getSingleValue()));
            }
            else {
                ValueSet valueSet = domain.getValues();
                if (valueSet instanceof EquatableValueSet) {
                    DiscreteValues discreteValues = valueSet.getDiscreteValues();
                    builder.addPredicate(createInListPredicate(columnSchema, discreteValues));
                }
                else if (valueSet instanceof SortedRangeSet) {
                    Ranges ranges = ((SortedRangeSet) valueSet).getRanges();
                    Range span = ranges.getSpan();
                    Marker low = span.getLow();
                    if (!low.isLowerUnbounded()) {
                        KuduPredicate.ComparisonOp op = (low.getBound() == Marker.Bound.ABOVE)
                                ? KuduPredicate.ComparisonOp.GREATER
                                : KuduPredicate.ComparisonOp.GREATER_EQUAL;
                        builder.addPredicate(createComparisonPredicate(columnSchema, op, low.getValue()));
                    }
                    Marker high = span.getHigh();
                    if (!high.isUpperUnbounded()) {
                        // BUG FIX: the op must be derived from the HIGH marker's bound, not the low one.
                        KuduPredicate.ComparisonOp op = (high.getBound() == Marker.Bound.BELOW)
                                ? KuduPredicate.ComparisonOp.LESS
                                : KuduPredicate.ComparisonOp.LESS_EQUAL;
                        builder.addPredicate(createComparisonPredicate(columnSchema, op, high.getValue()));
                    }
                }
                else {
                    throw new IllegalStateException("Unexpected domain: " + domain);
                }
            }
        }
    }
    return true;
}
translates TupleDomain to KuduPredicates .
20,126
/**
 * Populates the temporary data structures used to build resource group specs and
 * selectors from the database: records are indexed by id, parentless records become
 * roots (with an id template), and children are grouped under their parent id.
 */
private synchronized void populateFromDbHelper(Map<Long, ResourceGroupSpecBuilder> recordMap, Set<Long> rootGroupIds, Map<Long, ResourceGroupIdTemplate> resourceGroupIdTemplateMap, Map<Long, Set<Long>> subGroupIdsToBuild) {
    List<ResourceGroupSpecBuilder> records = dao.getResourceGroups(environment);
    for (ResourceGroupSpecBuilder record : records) {
        recordMap.put(record.getId(), record);
        if (!record.getParentId().isPresent()) {
            rootGroupIds.add(record.getId());
            resourceGroupIdTemplateMap.put(
                    record.getId(),
                    new ResourceGroupIdTemplate(record.getNameTemplate().toString()));
        }
        else {
            subGroupIdsToBuild
                    .computeIfAbsent(record.getParentId().get(), k -> new HashSet<>())
                    .add(record.getId());
        }
    }
}
Populate temporary data structures to build resource group specs and selectors from db
20,127
/**
 * Builds the next MemoryPoolAssignmentsRequest for the cluster. Once all previous
 * assignments have propagated, if the reserved pool is empty while the general pool
 * has blocked nodes, the largest non-overcommit query is promoted to the reserved pool.
 */
private synchronized MemoryPoolAssignmentsRequest updateAssignments(Iterable<QueryExecution> queries) {
    ClusterMemoryPool reservedPool = pools.get(RESERVED_POOL);
    ClusterMemoryPool generalPool = pools.get(GENERAL_POOL);
    verify(generalPool != null, "generalPool is null");
    verify(reservedPool != null, "reservedPool is null");
    long version = memoryPoolAssignmentsVersion.incrementAndGet();
    if (allAssignmentsHavePropagated(queries)) {
        if (reservedPool.getAssignedQueries() == 0 && generalPool.getBlockedNodes() > 0) {
            // Pick the query with the largest memory reservation, skipping overcommit queries.
            QueryExecution biggestQuery = null;
            long maxMemory = -1;
            for (QueryExecution queryExecution : queries) {
                if (resourceOvercommit(queryExecution.getSession())) {
                    continue;
                }
                long bytesUsed = getQueryMemoryReservation(queryExecution);
                if (bytesUsed > maxMemory) {
                    biggestQuery = queryExecution;
                    maxMemory = bytesUsed;
                }
            }
            if (biggestQuery != null) {
                log.info("Moving query %s to the reserved pool", biggestQuery.getQueryId());
                biggestQuery.setMemoryPool(new VersionedMemoryPoolId(RESERVED_POOL, version));
            }
        }
    }
    ImmutableList.Builder<MemoryPoolAssignment> assignments = ImmutableList.builder();
    for (QueryExecution queryExecution : queries) {
        assignments.add(new MemoryPoolAssignment(queryExecution.getQueryId(), queryExecution.getMemoryPool().getId()));
    }
    return new MemoryPoolAssignmentsRequest(coordinatorId, version, assignments.build());
}
RemoteNodeMemory, as we don't need to POST anything.
20,128
/**
 * Chooses the dictionary column whose conversion to direct encoding yields the best
 * predicted stripe compression ratio. For each candidate we estimate the row count at
 * which a stripe flush limit (dictionary memory, stripe bytes, stripe rows) would be
 * hit if that column went direct, then compare predicted uncompressed vs compressed
 * sizes at that limit.
 */
private DictionaryCompressionProjection selectDictionaryColumnToConvert(int totalNonDictionaryBytes, int stripeRowCount) {
    checkState(!directConversionCandidates.isEmpty());
    int totalNonDictionaryBytesPerRow = totalNonDictionaryBytes / stripeRowCount;
    // Aggregate current totals over all still-dictionary-encoded writers.
    long totalDictionaryRawBytes = 0;
    long totalDictionaryBytes = 0;
    long totalDictionaryIndexBytes = 0;
    long totalDictionaryRawBytesPerRow = 0;
    long totalDictionaryBytesPerNewRow = 0;
    long totalDictionaryIndexBytesPerRow = 0;
    for (DictionaryColumnManager column : allWriters) {
        if (!column.isDirectEncoded()) {
            totalDictionaryRawBytes += column.getRawBytes();
            totalDictionaryBytes += column.getDictionaryBytes();
            totalDictionaryIndexBytes += column.getIndexBytes();
            totalDictionaryRawBytesPerRow += column.getRawBytesPerRow();
            totalDictionaryBytesPerNewRow += column.getDictionaryBytesPerFutureRow();
            totalDictionaryIndexBytesPerRow += column.getIndexBytesPerRow();
        }
    }
    long totalUncompressedBytesPerRow = totalNonDictionaryBytesPerRow + totalDictionaryRawBytesPerRow;
    DictionaryCompressionProjection maxProjectedCompression = null;
    for (DictionaryColumnManager column : directConversionCandidates) {
        // Sizes as they would be if this column were flipped to direct encoding.
        long currentRawBytes = totalNonDictionaryBytes + column.getRawBytes();
        long currentDictionaryBytes = totalDictionaryBytes - column.getDictionaryBytes();
        long currentIndexBytes = totalDictionaryIndexBytes - column.getIndexBytes();
        long currentTotalBytes = currentRawBytes + currentDictionaryBytes + currentIndexBytes;
        double rawBytesPerFutureRow = totalNonDictionaryBytesPerRow + column.getRawBytesPerRow();
        double dictionaryBytesPerFutureRow = totalDictionaryBytesPerNewRow - column.getDictionaryBytesPerFutureRow();
        double indexBytesPerFutureRow = totalDictionaryIndexBytesPerRow - column.getIndexBytesPerRow();
        double totalBytesPerFutureRow = rawBytesPerFutureRow + dictionaryBytesPerFutureRow + indexBytesPerFutureRow;
        // Predict how many more rows fit before each flush limit is reached.
        long rowsToDictionaryMemoryLimit = (long) ((dictionaryMemoryMaxBytesLow - currentDictionaryBytes) / dictionaryBytesPerFutureRow);
        long rowsToStripeMemoryLimit = (long) ((stripeMaxBytes - currentTotalBytes) / totalBytesPerFutureRow);
        long rowsToStripeRowLimit = stripeMaxRowCount - stripeRowCount;
        long rowsToLimit = Longs.min(rowsToDictionaryMemoryLimit, rowsToStripeMemoryLimit, rowsToStripeRowLimit);
        long predictedUncompressedSizeAtLimit = totalNonDictionaryBytes + totalDictionaryRawBytes + (totalUncompressedBytesPerRow * rowsToLimit);
        long predictedCompressedSizeAtLimit = (long) (currentTotalBytes + (totalBytesPerFutureRow * rowsToLimit));
        double predictedCompressionRatioAtLimit = 1.0 * predictedUncompressedSizeAtLimit / predictedCompressedSizeAtLimit;
        if (maxProjectedCompression == null || maxProjectedCompression.getPredictedFileCompressionRatio() < predictedCompressionRatioAtLimit) {
            maxProjectedCompression = new DictionaryCompressionProjection(column, predictedCompressionRatioAtLimit);
        }
    }
    return maxProjectedCompression;
}
Choose a dictionary column to convert to direct encoding. We do this by predicting the compression ratio of the stripe if a single column is flipped to direct. So for each column we try to predict the row count when we will hit a stripe flush limit if that column were converted to direct. Once we know the row count we calculate the predicted compression ratio.
20,129
/**
 * Returns a simplified Domain when this one has too many discrete components
 * (more than 32 ranges collapse to their span; more than 32 discrete values
 * collapse to ALL); otherwise returns an equivalent Domain unchanged.
 */
public Domain simplify() {
    ValueSet simplifiedValueSet = values.getValuesProcessor().<Optional<ValueSet>>transform(
            ranges -> {
                if (ranges.getOrderedRanges().size() <= 32) {
                    return Optional.empty();
                }
                return Optional.of(ValueSet.ofRanges(ranges.getSpan()));
            },
            discreteValues -> {
                if (discreteValues.getValues().size() <= 32) {
                    return Optional.empty();
                }
                return Optional.of(ValueSet.all(values.getType()));
            },
            allOrNone -> Optional.empty())
            .orElse(values);
    return Domain.create(simplifiedValueSet, nullAllowed);
}
Reduces the number of discrete components in the Domain if there are too many .
20,130
/**
 * Comparator preferring the match layout that satisfies the most requirements:
 * position by position, a present match beats an absent one, and between two
 * present matches the one covering more columns wins. Both layouts must have
 * the same length.
 */
private static <T> Comparator<List<Optional<LocalProperty<T>>>> matchedLayoutPreference() {
    return (matchLayout1, matchLayout2) -> {
        Iterator<Optional<LocalProperty<T>>> match1Iterator = matchLayout1.iterator();
        Iterator<Optional<LocalProperty<T>>> match2Iterator = matchLayout2.iterator();
        while (match1Iterator.hasNext() && match2Iterator.hasNext()) {
            Optional<LocalProperty<T>> match1 = match1Iterator.next();
            Optional<LocalProperty<T>> match2 = match2Iterator.next();
            if (match1.isPresent() && match2.isPresent()) {
                return Integer.compare(match1.get().getColumns().size(), match2.get().getColumns().size());
            }
            else if (match1.isPresent()) {
                return 1;
            }
            else if (match2.isPresent()) {
                return -1;
            }
        }
        checkState(!match1Iterator.hasNext() && !match2Iterator.hasNext());
        return 0;
    };
}
Prefer the match result that satisfied the most requirements
20,131
/**
 * Dumps the valid entries from the queue back into the values/weights bucket arrays
 * and re-sorts them, so values end up in increasing order.
 */
private void store(PriorityQueue<Entry> queue) {
    nextIndex = 0;
    for (Entry entry : queue) {
        if (entry.isValid()) {
            values[nextIndex] = entry.getValue();
            weights[nextIndex] = entry.getWeight();
            nextIndex++;
        }
    }
    sort(values, weights, nextIndex);
}
Dump the entries in the queue back into the bucket arrays The values are guaranteed to be sorted in increasing order after this method completes
20,132
/**
 * Copies the first {@code firstLength} elements of {@code first}, then the first
 * {@code secondLength} elements of {@code second}, back-to-back into {@code target}
 * starting at offset 0.
 */
private static void concat(double[] target, double[] first, int firstLength, double[] second, int secondLength) {
    System.arraycopy(first, 0, target, 0, firstLength);
    System.arraycopy(second, 0, target, firstLength, secondLength);
}
Copy two arrays back - to - back onto the target array starting at offset 0
20,133
/**
 * Sorts the first {@code nextIndex} buckets by value, then compacts consecutive
 * buckets with equal values by summing their weights. Returns the new bucket count.
 */
private static int mergeSameBuckets(double[] values, double[] weights, int nextIndex) {
    sort(values, weights, nextIndex);
    int write = 0;
    for (int read = 1; read < nextIndex; read++) {
        if (values[write] == values[read]) {
            // Same value: fold the weight into the bucket at the write cursor.
            weights[write] += weights[read];
        }
        else {
            write++;
            values[write] = values[read];
            weights[write] = weights[read];
        }
    }
    return write + 1;
}
Simple pass that merges entries with the same value
20,134
/**
 * Creates a priority queue with one entry per bucket, ordered by penalty with respect
 * to the bucket to its right (the last bucket has no right neighbor). Inputs must be
 * sorted by value in increasing order. Entries are linked right-to-left so each
 * bucket knows its neighbor.
 */
private static PriorityQueue<Entry> initializeQueue(double[] values, double[] weights, int nextIndex) {
    checkArgument(nextIndex > 0, "nextIndex must be > 0");
    PriorityQueue<Entry> queue = new PriorityQueue<>(nextIndex);
    // Seed with the rightmost bucket, then walk leftwards linking each entry to its right neighbor.
    Entry right = new Entry(nextIndex - 1, values[nextIndex - 1], weights[nextIndex - 1], null);
    queue.add(right);
    for (int i = nextIndex - 2; i >= 0; i--) {
        Entry current = new Entry(i, values[i], weights[i], right);
        queue.add(current);
        right = current;
    }
    return queue;
}
Create a priority queue with an entry for each bucket ordered by the penalty score with respect to the bucket to its right The inputs must be sorted by value in increasing order The last bucket has a penalty of infinity Entries are doubly - linked to keep track of the relative position of each bucket
20,135
/**
 * Binary search over the links array: returns the index of the first position in
 * [first, last) for which the search function matches (i.e. the first element NOT
 * smaller than the probe position), or {@code last} when none matches.
 */
private int lowerBound(JoinFilterFunction searchFunction, int[] links, int first, int last, int probePosition, Page allProbeChannelsPage) {
    int count = last - first;
    while (count > 0) {
        int step = count / 2;
        int middle = first + step;
        if (!applySearchFunction(searchFunction, links, middle, probePosition, allProbeChannelsPage)) {
            // Match is strictly to the right of middle.
            first = middle + 1;
            count -= step + 1;
        }
        else {
            count = step;
        }
    }
    return first;
}
Find the first element in position links that is NOT smaller than probePosition
20,136
/**
 * Estimates semi-join output stats: treats the low/high value ranges of the source
 * and filtering source join symbols as identical and intersects only on NDVs
 * (the smaller of the two distinct-value counts).
 */
public static PlanNodeStatsEstimate computeSemiJoin(PlanNodeStatsEstimate sourceStats, PlanNodeStatsEstimate filteringSourceStats, Symbol sourceJoinSymbol, Symbol filteringSourceJoinSymbol) {
    return compute(
            sourceStats,
            filteringSourceStats,
            sourceJoinSymbol,
            filteringSourceJoinSymbol,
            (sourceJoinSymbolStats, filteringSourceJoinSymbolStats) ->
                    min(filteringSourceJoinSymbolStats.getDistinctValuesCount(),
                            sourceJoinSymbolStats.getDistinctValuesCount()));
}
Basically it works as if the low and high values were the same for source and filteringSource, and just looks at NDVs.
20,137
/**
 * Subtracts subset stats from superset stats, assuming every NDV in the subset has a
 * matching NDV in the superset. Row counts, null counts and distinct-value counts are
 * subtracted per symbol (clamped at zero); low/high bounds and row size are taken
 * from the superset. Returns unknown stats when either input row count is unknown,
 * and zero stats when the difference is empty.
 */
public static PlanNodeStatsEstimate subtractSubsetStats(PlanNodeStatsEstimate superset, PlanNodeStatsEstimate subset) {
    if (superset.isOutputRowCountUnknown() || subset.isOutputRowCountUnknown()) {
        return PlanNodeStatsEstimate.unknown();
    }
    double supersetRowCount = superset.getOutputRowCount();
    double subsetRowCount = subset.getOutputRowCount();
    double outputRowCount = max(supersetRowCount - subsetRowCount, 0);
    if (outputRowCount == 0) {
        return createZeroStats(superset);
    }
    PlanNodeStatsEstimate.Builder result = PlanNodeStatsEstimate.builder();
    result.setOutputRowCount(outputRowCount);
    superset.getSymbolsWithKnownStatistics().forEach(symbol -> {
        SymbolStatsEstimate supersetSymbolStats = superset.getSymbolStatistics(symbol);
        SymbolStatsEstimate subsetSymbolStats = subset.getSymbolStatistics(symbol);
        SymbolStatsEstimate.Builder newSymbolStats = SymbolStatsEstimate.builder();
        newSymbolStats.setAverageRowSize(supersetSymbolStats.getAverageRowSize());
        // Null counts subtract directly, clamped at zero and capped by the output row count.
        double supersetNullsCount = supersetSymbolStats.getNullsFraction() * supersetRowCount;
        double subsetNullsCount = subsetSymbolStats.getNullsFraction() * subsetRowCount;
        double newNullsCount = max(supersetNullsCount - subsetNullsCount, 0);
        newSymbolStats.setNullsFraction(min(newNullsCount, outputRowCount) / outputRowCount);
        double supersetDistinctValues = supersetSymbolStats.getDistinctValuesCount();
        double subsetDistinctValues = subsetSymbolStats.getDistinctValuesCount();
        double newDistinctValuesCount;
        if (isNaN(supersetDistinctValues) || isNaN(subsetDistinctValues)) {
            newDistinctValuesCount = NaN;
        }
        else if (supersetDistinctValues == 0) {
            newDistinctValuesCount = 0;
        }
        else if (subsetDistinctValues == 0) {
            newDistinctValuesCount = supersetDistinctValues;
        }
        else {
            // Only subtract NDVs when the subset is at least as "dense" per distinct value;
            // otherwise removing subset rows likely left all superset distinct values present.
            double supersetNonNullsCount = supersetRowCount - supersetNullsCount;
            double subsetNonNullsCount = subsetRowCount - subsetNullsCount;
            double supersetValuesPerDistinctValue = supersetNonNullsCount / supersetDistinctValues;
            double subsetValuesPerDistinctValue = subsetNonNullsCount / subsetDistinctValues;
            if (supersetValuesPerDistinctValue <= subsetValuesPerDistinctValue) {
                newDistinctValuesCount = max(supersetDistinctValues - subsetDistinctValues, 0);
            }
            else {
                newDistinctValuesCount = supersetDistinctValues;
            }
        }
        newSymbolStats.setDistinctValuesCount(newDistinctValuesCount);
        newSymbolStats.setLowValue(supersetSymbolStats.getLowValue());
        newSymbolStats.setHighValue(supersetSymbolStats.getHighValue());
        result.addSymbolStatistics(symbol, newSymbolStats.build());
    });
    return result.build();
}
Subtracts subset stats from superset's stats. It is assumed that each NDV from subset has a matching NDV in superset.
20,138
/**
 * Returns a copy of the stream descriptor with its sequence replaced by the given
 * value, recursing into all nested streams so the whole tree carries the new sequence.
 */
private static StreamDescriptor copyStreamDescriptorWithSequence(StreamDescriptor streamDescriptor, int sequence) {
    List<StreamDescriptor> nestedCopies = streamDescriptor.getNestedStreams().stream()
            .map(stream -> copyStreamDescriptorWithSequence(stream, sequence))
            .collect(toImmutableList());
    return new StreamDescriptor(
            streamDescriptor.getStreamName(),
            streamDescriptor.getStreamId(),
            streamDescriptor.getFieldName(),
            streamDescriptor.getStreamType(),
            streamDescriptor.getOrcDataSource(),
            nestedCopies,
            sequence);
}
Creates StreamDescriptor which is a copy of this one with the value of sequence changed to the value passed in . Recursively calls itself on the nested streams .
20,139
/**
 * Tests whether a predicate value is likely contained in the bloom filter, dispatching
 * on the SQL type. Unsupported types conservatively return true (cannot rule out a match).
 */
public static boolean checkInBloomFilter(BloomFilter bloomFilter, Object predicateValue, Type sqlType) {
    if (sqlType == TINYINT || sqlType == SMALLINT || sqlType == INTEGER || sqlType == BIGINT) {
        return bloomFilter.testLong(((Number) predicateValue).longValue());
    }
    if (sqlType == DOUBLE) {
        return bloomFilter.testDouble((Double) predicateValue);
    }
    if (sqlType instanceof VarcharType || sqlType instanceof VarbinaryType) {
        return bloomFilter.test(((Slice) predicateValue).getBytes());
    }
    return true;
}
checks whether a value part of the effective predicate is likely to be part of this bloom filter
20,140
private static List < StateField > enumerateFields ( Class < ? > clazz , Map < String , Type > fieldTypes ) { ImmutableList . Builder < StateField > builder = ImmutableList . builder ( ) ; final Set < Class < ? > > primitiveClasses = ImmutableSet . of ( byte . class , boolean . class , long . class , double . class , int . class ) ; Set < Class < ? > > supportedClasses = getSupportedFieldTypes ( ) ; for ( Method method : clazz . getMethods ( ) ) { if ( method . getName ( ) . equals ( "getEstimatedSize" ) ) { continue ; } if ( method . getName ( ) . startsWith ( "get" ) ) { Class < ? > type = method . getReturnType ( ) ; checkArgument ( supportedClasses . contains ( type ) , type . getName ( ) + " is not supported" ) ; String name = method . getName ( ) . substring ( 3 ) ; builder . add ( new StateField ( name , type , getInitialValue ( method ) , method . getName ( ) , Optional . ofNullable ( fieldTypes . get ( name ) ) ) ) ; } if ( method . getName ( ) . startsWith ( "is" ) ) { Class < ? > type = method . getReturnType ( ) ; checkArgument ( type == boolean . class , "Only boolean is support for 'is' methods" ) ; String name = method . getName ( ) . substring ( 2 ) ; builder . add ( new StateField ( name , type , getInitialValue ( method ) , method . getName ( ) , Optional . of ( BOOLEAN ) ) ) ; } } Ordering < StateField > ordering = new Ordering < StateField > ( ) { public int compare ( StateField left , StateField right ) { if ( primitiveClasses . contains ( left . getType ( ) ) && ! primitiveClasses . contains ( right . getType ( ) ) ) { return - 1 ; } if ( primitiveClasses . contains ( right . getType ( ) ) && ! primitiveClasses . contains ( left . getType ( ) ) ) { return 1 ; } return left . getName ( ) . compareTo ( right . getName ( ) ) ; } } ; List < StateField > fields = ordering . sortedCopy ( builder . build ( ) ) ; checkInterface ( clazz , fields ) ; return fields ; }
Enumerates all the fields in this state interface .
20,141
/**
 * Lists all views in the given schema, or across every schema when the filter is absent.
 */
private List<SchemaTableName> listViews(Optional<String> filterSchema) {
    ImmutableList.Builder<SchemaTableName> builder = ImmutableList.builder();
    if (filterSchema.isPresent()) {
        String schemaName = filterSchema.get();
        for (String view : client.getViewNames(schemaName)) {
            builder.add(new SchemaTableName(schemaName, view));
        }
    }
    else {
        for (String schemaName : client.getSchemaNames()) {
            for (String view : client.getViewNames(schemaName)) {
                builder.add(new SchemaTableName(schemaName, view));
            }
        }
    }
    return builder.build();
}
Gets all views in the given schema or all schemas if null .
20,142
/**
 * Attempts to resolve the expression within the given plan: returns the rewritten
 * expression when the rewrite changed it, or empty when it could not be resolved.
 */
private static Optional<Expression> tryResolveMissingExpression(PlanBuilder subPlan, Expression expression) {
    Expression rewritten = subPlan.rewrite(expression);
    // Reference inequality signals that the rewrite actually resolved something.
    return rewritten != expression ? Optional.of(rewritten) : Optional.empty();
}
Checks if the given reference expression can be resolved within the given plan.
20,143
public WorkProcessor < Page > merge ( List < Type > keyTypes , List < Type > allTypes , List < WorkProcessor < Page > > channels , DriverYieldSignal driverYieldSignal ) { InterpretedHashGenerator hashGenerator = createHashGenerator ( keyTypes ) ; return mergeSortedPages ( channels , createHashPageWithPositionComparator ( hashGenerator ) , IntStream . range ( 0 , allTypes . size ( ) ) . boxed ( ) . collect ( toImmutableList ( ) ) , allTypes , keepSameHashValuesWithinSinglePage ( hashGenerator ) , true , memoryContext , driverYieldSignal ) ; }
Rows with same hash value are guaranteed to be in the same result page .
20,144
/**
 * Creates a QueryStateMachine using the system ticker for elapsed-time tracking.
 * Created QueryStateMachines must be transitioned to a terminal state to clean up
 * resources.
 */
public static QueryStateMachine begin ( String query , Session session , URI self , ResourceGroupId resourceGroup , Optional < QueryType > queryType , boolean transactionControl , TransactionManager transactionManager , AccessControl accessControl , Executor executor , Metadata metadata , WarningCollector warningCollector ) { return beginWithTicker ( query , session , self , resourceGroup , queryType , transactionControl , transactionManager , accessControl , executor , Ticker . systemTicker ( ) , metadata , warningCollector ) ; }
Created QueryStateMachines must be transitioned to terminal states to clean up resources .
20,145
/**
 * Polls pageBuffer until a page with a non-zero position count is found.
 *
 * @return the first non-empty page, or null when the buffer is exhausted
 */
private static Page extractNonEmptyPage(PageBuffer pageBuffer)
{
    for (Page candidate = pageBuffer.poll(); candidate != null; candidate = pageBuffer.poll()) {
        if (candidate.getPositionCount() != 0) {
            return candidate;
        }
    }
    return null;
}
Returns the next page from pageBuffer that has a non-zero position count, or null if none is available.
20,146
/**
 * Adds the key/value pair only when the key has not been seen before; duplicate keys
 * are silently ignored. A null value is recorded via appendNull.
 */
public void add ( Block key , Block value , int keyPosition , int valuePosition ) { if ( ! keyExists ( key , keyPosition ) ) { addKey ( key , keyPosition ) ; if ( value . isNull ( valuePosition ) ) { valueBlockBuilder . appendNull ( ) ; } else { valueType . appendTo ( value , valuePosition , valueBlockBuilder ) ; } } }
Only add this key-value pair if we haven't seen this key before; otherwise ignore it.
20,147
/**
 * Converts the specified instance to SMILE-encoded bytes.
 *
 * @throws IllegalArgumentException if the instance cannot be serialized
 */
public byte[] toBytes(T instance)
        throws IllegalArgumentException
{
    try {
        return mapper.writeValueAsBytes(instance);
    }
    catch (IOException cause) {
        String message = format("%s could not be converted to SMILE", instance.getClass().getName());
        throw new IllegalArgumentException(message, cause);
    }
}
Converts the specified instance to smile encoded bytes .
20,148
/**
 * Creates a SortedRangeSet from one or more discrete values; each value becomes an
 * equality range and the ranges are unioned together.
 */
static SortedRangeSet of(Type type, Object first, Object... rest)
{
    List<Range> equalityRanges = new ArrayList<>(rest.length + 1);
    equalityRanges.add(Range.equal(type, first));
    for (int i = 0; i < rest.length; i++) {
        equalityRanges.add(Range.equal(type, rest[i]));
    }
    return copyOf(type, equalityRanges);
}
Provides discrete values that are unioned together to form the SortedRangeSet.
20,149
/**
 * Translates the properties as much as possible, truncating the list at the first
 * non-translatable property. A ConstantProperty that fails to translate is skipped
 * rather than truncating the list — presumably because constants do not invalidate
 * the remaining prefix (NOTE(review): confirm against LocalProperty semantics).
 */
public static < X , Y > List < LocalProperty < Y > > translate ( List < ? extends LocalProperty < X > > properties , Function < X , Optional < Y > > translator ) { properties = normalizeAndPrune ( properties ) ; ImmutableList . Builder < LocalProperty < Y > > builder = ImmutableList . builder ( ) ; for ( LocalProperty < X > property : properties ) { Optional < LocalProperty < Y > > translated = property . translate ( translator ) ; if ( translated . isPresent ( ) ) { builder . add ( translated . get ( ) ) ; } else if ( ! ( property instanceof ConstantProperty ) ) { break ; } } return builder . build ( ) ; }
Translates the properties as much as possible and truncates at the first non - translatable property
20,150
/**
 * Normalizes local properties by dropping symbols already known to be constant, while
 * keeping each property at its original position (an absent entry marks a property
 * that became fully redundant).
 */
public static <T> List<Optional<LocalProperty<T>>> normalize(List<? extends LocalProperty<T>> localProperties)
{
    Set<T> knownConstants = new HashSet<>();
    List<Optional<LocalProperty<T>>> normalized = new ArrayList<>(localProperties.size());
    for (LocalProperty<T> property : localProperties) {
        normalized.add(property.withConstants(knownConstants));
        // every column seen so far is constant for the properties that follow
        knownConstants.addAll(property.getColumns());
    }
    return normalized;
}
Normalizes the local properties by removing redundant symbols, while retaining the original local property positions.
20,151
/**
 * Adjusts the metastore NDV when all three statistics are available; otherwise empty.
 * (Hive counts null as a distinct value, which the long-valued overload corrects.)
 */
private static OptionalLong fromMetastoreDistinctValuesCount(OptionalLong distinctValuesCount, OptionalLong nullsCount, OptionalLong rowCount)
{
    if (!distinctValuesCount.isPresent() || !nullsCount.isPresent() || !rowCount.isPresent()) {
        return OptionalLong.empty();
    }
    return OptionalLong.of(fromMetastoreDistinctValuesCount(
            distinctValuesCount.getAsLong(),
            nullsCount.getAsLong(),
            rowCount.getAsLong()));
}
Hive calculates NDV considering null as a distinct value
20,152
/**
 * Copies the non-null long values of the array block into buffer (assumed long enough)
 * and sorts them in place; returns the count of non-null elements. The pre-scan for an
 * out-of-order pair avoids the sort entirely when the values are already ascending.
 */
private static int sortAbstractLongArray ( Block array , long [ ] buffer , AbstractType type ) { int arraySize = array . getPositionCount ( ) ; int nonNullSize = 0 ; for ( int i = 0 ; i < arraySize ; i ++ ) { if ( ! array . isNull ( i ) ) { buffer [ nonNullSize ++ ] = type . getLong ( array , i ) ; } } for ( int i = 1 ; i < nonNullSize ; i ++ ) { if ( buffer [ i - 1 ] > buffer [ i ] ) { Arrays . sort ( buffer , 0 , nonNullSize ) ; break ; } } return nonNullSize ; }
Assumes the buffer is long enough; returns the count of non-null elements.
20,153
/**
 * Recurses through a chain of ExchangeNodes and ProjectNodes looking for PARTIAL
 * aggregations, inserting a gathering intermediate above each one found. Returns empty
 * as soon as any source path does not terminate at a PARTIAL aggregation, in which case
 * the plan is left unchanged.
 */
private Optional < PlanNode > recurseToPartial ( PlanNode node , Lookup lookup , PlanNodeIdAllocator idAllocator ) { if ( node instanceof AggregationNode && ( ( AggregationNode ) node ) . getStep ( ) == PARTIAL ) { return Optional . of ( addGatheringIntermediate ( ( AggregationNode ) node , idAllocator ) ) ; } if ( ! ( node instanceof ExchangeNode ) && ! ( node instanceof ProjectNode ) ) { return Optional . empty ( ) ; } ImmutableList . Builder < PlanNode > builder = ImmutableList . builder ( ) ; for ( PlanNode source : node . getSources ( ) ) { Optional < PlanNode > planNode = recurseToPartial ( lookup . resolve ( source ) , lookup , idAllocator ) ; if ( ! planNode . isPresent ( ) ) { return Optional . empty ( ) ; } builder . add ( planNode . get ( ) ) ; } return Optional . of ( node . replaceChildren ( builder . build ( ) ) ) ; }
Recurse through a series of preceding ExchangeNodes and ProjectNodes to find the preceding PARTIAL aggregation
20,154
/**
 * Validates the given metadata for a series of conditions to ensure the table is
 * well-formed: columns, locality groups, and (for non-external tables) the internal
 * table constraints.
 */
private void validateCreateTable(ConnectorTableMetadata meta)
{
    validateColumns(meta);
    validateLocalityGroups(meta);
    boolean internalTable = !AccumuloTableProperties.isExternal(meta.getProperties());
    if (internalTable) {
        validateInternalTable(meta);
    }
}
Validates the given metadata for a series of conditions to ensure the table is well - formed .
20,155
/**
 * Gets the row ID column from the table properties, defaulting to the name of the
 * first column. The result is lower-cased for case-insensitive handling.
 */
private static String getRowIdColumn(ConnectorTableMetadata meta)
{
    return AccumuloTableProperties.getRowId(meta.getProperties())
            // orElseGet: only touch the column list when no explicit row ID was set,
            // so the fallback getColumns().get(0) is not evaluated eagerly
            .orElseGet(() -> meta.getColumns().get(0).getName())
            .toLowerCase(Locale.ENGLISH);
}
Gets the row ID based on a table properties or the first column name .
20,156
/**
 * Searches the given locality groups for the one containing this column, matching
 * case-insensitively; empty when the column belongs to no group or no groups exist.
 */
private static Optional<String> getColumnLocalityGroup(String columnName, Optional<Map<String, Set<String>>> groups)
{
    if (!groups.isPresent()) {
        return Optional.empty();
    }
    String lowerCaseName = columnName.toLowerCase(Locale.ENGLISH);
    for (Map.Entry<String, Set<String>> group : groups.get().entrySet()) {
        if (group.getValue().contains(lowerCaseName)) {
            return Optional.of(group.getKey());
        }
    }
    return Optional.empty();
}
Searches through the given locality groups to find if this column has a locality group .
20,157
/**
 * Gets the TabletServer hostname hosting the given key in the given table, by scanning
 * the "loc" column family of accumulo.metadata over this table's row range. When key is
 * null, any tablet's location is returned. Falls back to the default tablet location
 * when no match is found, and to empty on any error (caller treats this as best-effort).
 */
private Optional<String> getTabletLocation(String table, Key key)
{
    try {
        String tableId = connector.tableOperations().tableIdMap().get(table);
        Scanner scanner = connector.createScanner("accumulo.metadata", auths);
        try {
            scanner.fetchColumnFamily(new Text("loc"));
            // Metadata rows for this table run from tableId up through the default
            // tablet row, which is tableId followed by '<'
            Key defaultTabletRow = new Key(tableId + '<');
            Key start = new Key(tableId);
            Key end = defaultTabletRow.followingKey(PartialKey.ROW);
            scanner.setRange(new Range(start, end));

            Optional<String> location = Optional.empty();
            if (key == null) {
                // No key given: any tablet will do, so take the first location found
                Iterator<Entry<Key, Value>> iter = scanner.iterator();
                if (iter.hasNext()) {
                    location = Optional.of(iter.next().getValue().toString());
                }
            }
            else {
                Text splitCompareKey = new Text();
                key.getRow(splitCompareKey);
                Text scannedCompareKey = new Text();
                for (Entry<Key, Value> entry : scanner) {
                    byte[] keyBytes = entry.getKey().getRow().copyBytes();
                    if (keyBytes[keyBytes.length - 1] == '<') {
                        // Default tablet row hosts everything past the last split point
                        location = Optional.of(entry.getValue().toString());
                        break;
                    }
                    // Strip the table-id prefix (3 bytes) to compare against the split point
                    scannedCompareKey.set(keyBytes, 3, keyBytes.length - 3);
                    if (scannedCompareKey.getLength() > 0) {
                        int compareTo = splitCompareKey.compareTo(scannedCompareKey);
                        if (compareTo <= 0) {
                            location = Optional.of(entry.getValue().toString());
                        }
                        else {
                            break;
                        }
                    }
                }
            }
            return location.isPresent() ? location : getDefaultTabletLocation(table);
        }
        finally {
            // Bug fix: the scanner was previously closed only on the key != null path,
            // leaking it for null keys and on any exception
            scanner.close();
        }
    }
    catch (Exception e) {
        LOG.error("Failed to get tablet location, returning dummy location", e);
        return Optional.empty();
    }
}
Gets the TabletServer hostname for where the given key is located in the given table
20,158
/**
 * Maps the column constraints of the given Presto Domain to Accumulo Range objects.
 * An absent domain yields the single unbounded Range (full-table scan).
 */
public static Collection < Range > getRangesFromDomain ( Optional < Domain > domain , AccumuloRowSerializer serializer ) throws TableNotFoundException { if ( ! domain . isPresent ( ) ) { return ImmutableSet . of ( new Range ( ) ) ; } ImmutableSet . Builder < Range > rangeBuilder = ImmutableSet . builder ( ) ; for ( com . facebook . presto . spi . predicate . Range range : domain . get ( ) . getValues ( ) . getRanges ( ) . getOrderedRanges ( ) ) { rangeBuilder . add ( getRangeFromPrestoRange ( range , serializer ) ) ; } return rangeBuilder . build ( ) ; }
Gets a collection of Accumulo Range objects from the given Presto domain . This maps the column constraints of the given Domain to an Accumulo Range scan .
20,159
/**
 * Compiles the row expression to bytecode within the given scope. When
 * outputBlockVariable is present the result is written to that block; otherwise the
 * value will be left on the stack.
 */
public BytecodeNode compile ( RowExpression rowExpression , Scope scope , Optional < Variable > outputBlockVariable , Optional < Class > lambdaInterface ) { return rowExpression . accept ( new Visitor ( ) , new Context ( scope , outputBlockVariable , lambdaInterface ) ) ; }
Otherwise the value will be left on the stack.
20,160
/**
 * Gets the values at the specified position as a single-element page. Each channel's
 * block is copied via getSingleValueBlock, so the result is independent of this page.
 */
public Page getSingleValuePage(int position)
{
    int channelCount = this.blocks.length;
    Block[] copies = new Block[channelCount];
    for (int channel = 0; channel < channelCount; channel++) {
        copies[channel] = this.blocks[channel].getSingleValueBlock(position);
    }
    return new Page(1, copies);
}
Gets the values at the specified position as a single element page . The method creates independent copy of the data .
20,161
/**
 * Checks that the given field is one of the provided types.
 *
 * @throws IllegalArgumentException when the field's type matches none of the candidates
 */
private void checkFieldType(int field, Type... expected)
{
    Type actual = getType(field);
    for (Type candidate : expected) {
        if (actual.equals(candidate)) {
            return;
        }
    }
    throw new IllegalArgumentException(
            format("Expected field %s to be a type of %s but is %s", field, StringUtils.join(expected, ","), actual));
}
Checks that the given field is one of the provided types .
20,162
/**
 * Returns the position (a, b, or c) holding the median of the three values, using only
 * the three pairwise comparison results (classic quicksort median-of-three selection).
 */
private int median3 ( PagesIndex pagesIndex , int a , int b , int c ) { int ab = comparator . compareTo ( pagesIndex , a , b ) ; int ac = comparator . compareTo ( pagesIndex , a , c ) ; int bc = comparator . compareTo ( pagesIndex , b , c ) ; return ( ab < 0 ? ( bc < 0 ? b : ac < 0 ? c : a ) : ( bc > 0 ? b : ac > 0 ? c : a ) ) ; }
Returns the index of the median of the three positions .
20,163
/**
 * Generates a function call with null handling, automatic binding of the session
 * parameter, etc. When the implementation requires an instance, it is materialized once
 * through the cached-instance binder and read from a field on "this".
 */
public BytecodeNode generateCall ( String name , ScalarFunctionImplementation function , List < BytecodeNode > arguments , Optional < OutputBlockVariableAndType > outputBlockVariableAndType ) { Optional < BytecodeNode > instance = Optional . empty ( ) ; if ( function . getInstanceFactory ( ) . isPresent ( ) ) { FieldDefinition field = cachedInstanceBinder . getCachedInstance ( function . getInstanceFactory ( ) . get ( ) ) ; instance = Optional . of ( scope . getThis ( ) . getField ( field ) ) ; } return generateInvocation ( scope , name , function , instance , arguments , callSiteBinder , outputBlockVariableAndType ) ; }
Generates a function call with null handling automatic binding of session parameter etc .
20,164
/**
 * Transforms a plan like P -> C -> X into C -> P -> X by swapping parent and child.
 */
public static PlanNode transpose(PlanNode parent, PlanNode child)
{
    // The parent adopts the child's sources, then becomes the child's only source
    PlanNode newParent = parent.replaceChildren(child.getSources());
    return child.replaceChildren(ImmutableList.of(newParent));
}
Transforms a plan like P - > C - > X to C - > P - > X
20,165
/**
 * Creates a map block directly from columnar nulls, keys, values, and offsets into the
 * keys and values. A null map must have no entries. The key hash table starts empty and
 * is built lazily.
 */
public static MapBlock fromKeyValueBlock ( Optional < boolean [ ] > mapIsNull , int [ ] offsets , Block keyBlock , Block valueBlock , MapType mapType , MethodHandle keyBlockNativeEquals , MethodHandle keyNativeHashCode , MethodHandle keyBlockHashCode ) { validateConstructorArguments ( 0 , offsets . length - 1 , mapIsNull . orElse ( null ) , offsets , keyBlock , valueBlock , mapType . getKeyType ( ) , keyBlockNativeEquals , keyNativeHashCode ) ; int mapCount = offsets . length - 1 ; return createMapBlockInternal ( 0 , mapCount , mapIsNull , offsets , keyBlock , valueBlock , new HashTables ( Optional . empty ( ) , keyBlock . getPositionCount ( ) * HASH_MULTIPLIER ) , mapType . getKeyType ( ) , keyBlockNativeEquals , keyNativeHashCode , keyBlockHashCode ) ; }
Create a map block directly from columnar nulls keys values and offsets into the keys and values . A null map must have no entries .
20,166
/**
 * Compares only this group's own configuration (subgroups are not included); used to
 * determine whether a group needs to be reconfigured. A null argument is never equal.
 */
public boolean sameConfig ( ResourceGroupSpec other ) { if ( other == null ) { return false ; } return ( name . equals ( other . name ) && softMemoryLimit . equals ( other . softMemoryLimit ) && maxQueued == other . maxQueued && softConcurrencyLimit . equals ( other . softConcurrencyLimit ) && hardConcurrencyLimit == other . hardConcurrencyLimit && schedulingPolicy . equals ( other . schedulingPolicy ) && schedulingWeight . equals ( other . schedulingWeight ) && jmxExport . equals ( other . jmxExport ) && softCpuLimit . equals ( other . softCpuLimit ) && hardCpuLimit . equals ( other . hardCpuLimit ) ) ; }
Subgroups are not included; used to determine whether a group needs to be reconfigured.
20,167
/**
 * Extracts all qualified names (including prefixes) referenced by the expression,
 * skipping nodes already identified as column references.
 */
public static Set<QualifiedName> extractNames(Expression expression, Set<NodeRef<Expression>> columnReferences)
{
    ImmutableSet.Builder<QualifiedName> names = ImmutableSet.builder();
    new QualifiedNameBuilderVisitor(columnReferences).process(expression, names);
    return names.build();
}
Used to extract qualified names, including prefixes.
20,168
/**
 * Rewrites the split schema to the projected column names/types and prunes the Thrift
 * DDL to match; without the pruned SERIALIZATION_DDL the SerDe could not correctly
 * deserialize the S3 Select output into row data.
 */
static Properties updateSplitSchema ( Properties splitSchema , List < HiveColumnHandle > columns ) { requireNonNull ( splitSchema , "splitSchema is null" ) ; requireNonNull ( columns , "columns is null" ) ; Properties updatedSchema = new Properties ( ) ; updatedSchema . putAll ( splitSchema ) ; updatedSchema . setProperty ( LIST_COLUMNS , buildColumns ( columns ) ) ; updatedSchema . setProperty ( LIST_COLUMN_TYPES , buildColumnTypes ( columns ) ) ; ThriftTable thriftTable = parseThriftDdl ( splitSchema . getProperty ( SERIALIZATION_DDL ) ) ; updatedSchema . setProperty ( SERIALIZATION_DDL , thriftTableToDdl ( pruneThriftTable ( thriftTable , columns ) ) ) ; return updatedSchema ; }
Otherwise the SerDe could not correctly deserialize the S3 Select output into row data.
20,169
/**
 * Ensures the given Accumulo namespace exists, creating it if necessary (the "default"
 * namespace always exists and is skipped). A concurrent creation, surfacing as
 * NamespaceExistsException, is logged and suppressed.
 */
public void ensureNamespace ( String schema ) { try { if ( ! schema . equals ( DEFAULT ) && ! connector . namespaceOperations ( ) . exists ( schema ) ) { connector . namespaceOperations ( ) . create ( schema ) ; } } catch ( AccumuloException | AccumuloSecurityException e ) { throw new PrestoException ( UNEXPECTED_ACCUMULO_ERROR , "Failed to check for existence or create Accumulo namespace" , e ) ; } catch ( NamespaceExistsException e ) { LOG . warn ( "NamespaceExistsException suppressed when creating " + schema ) ; } }
Ensures the given Accumulo namespace exist creating it if necessary
20,170
/**
 * Computes the GROUPING() bitmask for a grouping set: the bit for each column (in
 * column order, most-significant first) is 1 when the column is absent from the
 * grouping set and 0 when present. Used with GROUPING SETS, ROLLUP, and CUBE.
 */
static long calculateGrouping(Set<Integer> groupingSet, List<Integer> columns)
{
    // start with every column marked absent (all bits set)
    long grouping = (1L << columns.size()) - 1;
    for (int index = 0; index < columns.size(); index++) {
        if (groupingSet.contains(columns.get(index))) {
            // clear the bit of a column present in this grouping set
            grouping &= ~(1L << (columns.size() - 1 - index));
        }
    }
    return grouping;
}
The grouping function is used in conjunction with GROUPING SETS ROLLUP and CUBE to indicate which columns are present in that grouping .
20,171
/**
 * Computes how much of the pending-split queue may be filled by splits whose network
 * topology distance to the node is splitAffinity out of totalDepth: zero affinity fills
 * half the queue, an exact match fills the whole queue.
 */
private int calculateMaxPendingSplits(int splitAffinity, int totalDepth)
{
    if (totalDepth == 0) {
        return maxPendingSplitsPerTask;
    }
    // linearly interpolate between 50% (no affinity) and 100% (full affinity)
    double fillableFraction = 0.5 * (1.0 + splitAffinity / (double) totalDepth);
    return (int) Math.ceil(maxPendingSplitsPerTask * fillableFraction);
}
Computes how much of the queue can be filled by splits with the network topology distance to a node given by splitAffinity . A split with zero affinity can only fill half the queue whereas one that matches exactly can fill the entire queue .
20,172
/**
 * Enforces memory limits at the query level: collects every query currently in the
 * RUNNING state and hands them to the memory manager for processing.
 */
private void enforceMemoryLimits ( ) { List < QueryExecution > runningQueries = queryTracker . getAllQueries ( ) . stream ( ) . filter ( query -> query . getState ( ) == RUNNING ) . collect ( toImmutableList ( ) ) ; memoryManager . process ( runningQueries , this :: getQueries ) ; }
Enforce memory limits at the query level
20,173
/**
 * Enforces query CPU time limits: a query whose total CPU time exceeds the lesser of
 * the global and session limits is failed with ExceededCpuLimitException.
 */
private void enforceCpuLimits()
{
    for (QueryExecution query : queryTracker.getAllQueries()) {
        Duration sessionLimit = getQueryMaxCpuTime(query.getSession());
        // the effective limit is the stricter of the global and session limits
        Duration effectiveLimit = Ordering.natural().min(maxQueryCpuTime, sessionLimit);
        if (query.getTotalCpuTime().compareTo(effectiveLimit) > 0) {
            query.fail(new ExceededCpuLimitException(effectiveLimit));
        }
    }
}
Enforce query CPU time limits
20,174
/**
 * Appends a log4j event to java.util.logging at the corresponding JUL level, carrying
 * over the timestamp and (when available) the source class and method names.
 * NOTE(review): JUL's Logger.getLogger is documented never to return null, so the null
 * guard appears purely defensive — confirm before removing.
 */
protected void append ( LoggingEvent loggingEvent ) { java . util . logging . Logger logger = java . util . logging . Logger . getLogger ( loggingEvent . getLoggerName ( ) ) ; if ( logger == null ) { LogLog . warn ( format ( "Cannot obtain JUL %s. Verify that this appender is used while an appropriate LogManager is active." , loggingEvent . getLoggerName ( ) ) ) ; return ; } Level level = loggingEvent . getLevel ( ) ; java . util . logging . Level julLevel = convertLog4jLevel ( level ) ; LogRecord record = new LogRecord ( julLevel , loggingEvent . getRenderedMessage ( ) ) ; record . setMillis ( loggingEvent . getTimeStamp ( ) ) ; LocationInfo location = loggingEvent . getLocationInformation ( ) ; if ( location != null ) { record . setSourceClassName ( location . getClassName ( ) ) ; record . setSourceMethodName ( location . getMethodName ( ) ) ; } logger . log ( record ) ; }
Append a log event at the appropriate JUL level depending on the log4j level .
20,175
/**
 * Checks whether the exception's causal chain contains a SQLException with the given
 * vendor error code whose stack trace additionally matches isMySQLException — guarding
 * against another driver reusing the same numeric code.
 */
private static boolean mySqlErrorCodeMatches ( Exception e , int errorCode ) { return Throwables . getCausalChain ( e ) . stream ( ) . filter ( SQLException . class :: isInstance ) . map ( SQLException . class :: cast ) . filter ( t -> t . getErrorCode ( ) == errorCode ) . map ( Throwable :: getStackTrace ) . anyMatch ( isMySQLException ( ) ) ; }
Check if an exception is caused by a MySQL exception of certain error code
20,176
/**
 * Unpacks an array of big-endian ints from the given bytes.
 * (ByteBuffer reads big-endian by default; a trailing partial int throws
 * BufferUnderflowException, matching the original behavior.)
 */
public static List<Integer> intArrayFromBytes(byte[] bytes)
{
    ByteBuffer buffer = ByteBuffer.wrap(bytes);
    ImmutableList.Builder<Integer> values = ImmutableList.builder();
    while (buffer.hasRemaining()) {
        values.add(buffer.getInt());
    }
    return values.build();
}
Unpack an array of big endian ints .
20,177
/**
 * Packs a collection of ints as big-endian bytes (ByteBuffer's default byte order).
 */
public static byte[] intArrayToBytes(Collection<Integer> values)
{
    ByteBuffer packed = ByteBuffer.allocate(values.size() * Integer.BYTES);
    values.forEach(packed::putInt);
    return packed.array();
}
Pack an array of ints as big endian .
20,178
/**
 * Fills vector[offset .. offset + batchSize) with true for every UNSET bit read from
 * the stream, and returns the number of unset bits encountered.
 */
public int getUnsetBits ( int batchSize , boolean [ ] vector , int offset ) throws IOException { int count = 0 ; for ( int i = offset ; i < batchSize + offset ; i ++ ) { vector [ i ] = ! nextBit ( ) ; count += vector [ i ] ? 1 : 0 ; } return count ; }
Sets the vector element to true for the batchSize number of elements starting at offset if the bit is not set .
20,179
/**
 * Reads batchSize bits from the stream and returns how many of them are unset.
 */
public int getUnsetBits(int batchSize)
        throws IOException
{
    int unset = 0;
    for (int i = 0; i < batchSize; i++) {
        if (!nextBit()) {
            unset++;
        }
    }
    return unset;
}
Return the number of unset bits
20,180
/**
 * Looks up the key equal to nativeValue in this map entry using the precomputed open
 * hash table with linear probing; returns the value position (keyPosition * 2 + 1), or
 * -1 when the map is empty or the key is absent. Same structure as the generic seek,
 * except MethodHandle.invoke is replaced with invokeExact.
 */
public int seekKeyExact ( long nativeValue ) { if ( positionCount == 0 ) { return - 1 ; } mapBlock . ensureHashTableLoaded ( ) ; int [ ] hashTable = mapBlock . getHashTables ( ) . get ( ) . get ( ) ; long hashCode ; try { hashCode = ( long ) mapBlock . keyNativeHashCode . invokeExact ( nativeValue ) ; } catch ( Throwable throwable ) { throw handleThrowable ( throwable ) ; } int hashTableOffset = offset / 2 * HASH_MULTIPLIER ; int hashTableSize = positionCount / 2 * HASH_MULTIPLIER ; int position = computePosition ( hashCode , hashTableSize ) ; while ( true ) { int keyPosition = hashTable [ hashTableOffset + position ] ; if ( keyPosition == - 1 ) { return - 1 ; } Boolean match ; try { match = ( Boolean ) mapBlock . keyBlockNativeEquals . invokeExact ( mapBlock . getRawKeyBlock ( ) , offset / 2 + keyPosition , nativeValue ) ; } catch ( Throwable throwable ) { throw handleThrowable ( throwable ) ; } checkNotIndeterminate ( match ) ; if ( match ) { return keyPosition * 2 + 1 ; } position ++ ; if ( position == hashTableSize ) { position = 0 ; } } }
except MethodHandle . invoke is replaced with invokeExact .
20,181
/**
 * Appends the block's null flags as a bit stream (MSB-first, 8 positions per byte),
 * preceded by a boolean indicating whether any nulls may be present; nothing further is
 * written when mayHaveNull is false. The final partial byte is zero-padded.
 */
@ SuppressWarnings ( { "NarrowingCompoundAssignment" , "ImplicitNumericConversion" } ) public static void encodeNullsAsBits ( SliceOutput sliceOutput , Block block ) { boolean mayHaveNull = block . mayHaveNull ( ) ; sliceOutput . writeBoolean ( mayHaveNull ) ; if ( ! mayHaveNull ) { return ; } int positionCount = block . getPositionCount ( ) ; for ( int position = 0 ; position < ( positionCount & ~ 0b111 ) ; position += 8 ) { byte value = 0 ; value |= block . isNull ( position ) ? 0b1000_0000 : 0 ; value |= block . isNull ( position + 1 ) ? 0b0100_0000 : 0 ; value |= block . isNull ( position + 2 ) ? 0b0010_0000 : 0 ; value |= block . isNull ( position + 3 ) ? 0b0001_0000 : 0 ; value |= block . isNull ( position + 4 ) ? 0b0000_1000 : 0 ; value |= block . isNull ( position + 5 ) ? 0b0000_0100 : 0 ; value |= block . isNull ( position + 6 ) ? 0b0000_0010 : 0 ; value |= block . isNull ( position + 7 ) ? 0b0000_0001 : 0 ; sliceOutput . appendByte ( value ) ; } if ( ( positionCount & 0b111 ) > 0 ) { byte value = 0 ; int mask = 0b1000_0000 ; for ( int position = positionCount & ~ 0b111 ; position < positionCount ; position ++ ) { value |= block . isNull ( position ) ? mask : 0 ; mask >>>= 1 ; } sliceOutput . appendByte ( value ) ; } }
Append null values for the block as a stream of bits .
20,182
/**
 * Decodes the bit stream produced by encodeNullsAsBits: returns empty when the leading
 * boolean indicates no nulls, otherwise reads positionCount bits (MSB-first, 8 per
 * byte, trailing partial byte last) into a boolean array.
 */
public static Optional < boolean [ ] > decodeNullBits ( SliceInput sliceInput , int positionCount ) { if ( ! sliceInput . readBoolean ( ) ) { return Optional . empty ( ) ; } boolean [ ] valueIsNull = new boolean [ positionCount ] ; for ( int position = 0 ; position < ( positionCount & ~ 0b111 ) ; position += 8 ) { byte value = sliceInput . readByte ( ) ; valueIsNull [ position ] = ( ( value & 0b1000_0000 ) != 0 ) ; valueIsNull [ position + 1 ] = ( ( value & 0b0100_0000 ) != 0 ) ; valueIsNull [ position + 2 ] = ( ( value & 0b0010_0000 ) != 0 ) ; valueIsNull [ position + 3 ] = ( ( value & 0b0001_0000 ) != 0 ) ; valueIsNull [ position + 4 ] = ( ( value & 0b0000_1000 ) != 0 ) ; valueIsNull [ position + 5 ] = ( ( value & 0b0000_0100 ) != 0 ) ; valueIsNull [ position + 6 ] = ( ( value & 0b0000_0010 ) != 0 ) ; valueIsNull [ position + 7 ] = ( ( value & 0b0000_0001 ) != 0 ) ; } if ( ( positionCount & 0b111 ) > 0 ) { byte value = sliceInput . readByte ( ) ; int mask = 0b1000_0000 ; for ( int position = positionCount & ~ 0b111 ; position < positionCount ; position ++ ) { valueIsNull [ position ] = ( ( value & mask ) != 0 ) ; mask >>>= 1 ; } } return Optional . of ( valueIsNull ) ; }
Decode the bit stream created by encodeNullsAsBits .
20,183
/**
 * Returns a slice containing values in the specified range of the specified slice.
 * The input slice itself is returned when it is already compact and the range covers
 * it entirely; otherwise a copy is returned.
 */
static Slice compactSlice(Slice slice, int index, int length)
{
    boolean coversWholeSlice = index == 0 && length == slice.length();
    if (coversWholeSlice && slice.isCompact()) {
        return slice;
    }
    return Slices.copyOf(slice, index, length);
}
Returns a slice containing values in the specified range of the specified slice . If the range matches the entire slice the input slice will be returned . Otherwise a copy will be returned .
20,184
/**
 * Returns an array containing elements in the specified range of the specified array.
 * The input array itself is returned when the range covers it entirely; otherwise a
 * copy is returned.
 */
static boolean[] compactArray(boolean[] array, int index, int length)
{
    boolean coversEntireArray = (index == 0) && (length == array.length);
    return coversEntireArray ? array : Arrays.copyOfRange(array, index, index + length);
}
Returns an array containing elements in the specified range of the specified array . If the range matches the entire array the input array will be returned . Otherwise a copy will be returned .
20,185
/**
 * Resolves a function using the SQL path and implicit type coercions.
 * NOTE(review): the session argument is currently unused — resolution is delegated
 * entirely to the static function namespace; confirm whether session-scoped resolution
 * is intended here.
 */
public FunctionHandle resolveFunction ( Session session , QualifiedName name , List < TypeSignatureProvider > parameterTypes ) { return staticFunctionNamespace . resolveFunction ( name , parameterTypes ) ; }
Resolves a function using the SQL path and implicit type coercions .
20,186
/**
 * Decodes the ordinal-encoded fixed-bit-width value to the actual bit width: ordinals
 * for 1-24 map directly (n + 1); larger widths are the discrete steps 26, 28, 30, 32,
 * 40, 48, 56; anything beyond is treated as 64.
 */
public static int decodeBitWidth ( int n ) { if ( n >= ONE . ordinal ( ) && n <= TWENTY_FOUR . ordinal ( ) ) { return n + 1 ; } else if ( n == TWENTY_SIX . ordinal ( ) ) { return 26 ; } else if ( n == TWENTY_EIGHT . ordinal ( ) ) { return 28 ; } else if ( n == THIRTY . ordinal ( ) ) { return 30 ; } else if ( n == THIRTY_TWO . ordinal ( ) ) { return 32 ; } else if ( n == FORTY . ordinal ( ) ) { return 40 ; } else if ( n == FORTY_EIGHT . ordinal ( ) ) { return 48 ; } else if ( n == FIFTY_SIX . ordinal ( ) ) { return 56 ; } else { return 64 ; } }
Decodes the ordinal fixed bit value to actual fixed bit width value .
20,187
/**
 * Gets the closest supported fixed bit width at or above the specified width:
 * 1-24 map to themselves, then the discrete steps 26, 28, 30, 32, 40, 48, 56, 64.
 * A width of 0 maps to 1; any width outside these ranges maps to 64.
 */
public static int getClosestFixedBits(int width)
{
    if (width == 0) {
        return 1;
    }
    if (1 <= width && width <= 24) {
        return width;
    }
    if (24 < width && width <= 26) {
        return 26;
    }
    if (26 < width && width <= 28) {
        return 28;
    }
    if (28 < width && width <= 30) {
        return 30;
    }
    if (30 < width && width <= 32) {
        return 32;
    }
    if (32 < width && width <= 40) {
        return 40;
    }
    if (40 < width && width <= 48) {
        return 48;
    }
    if (48 < width && width <= 56) {
        return 56;
    }
    return 64;
}
Gets the closest supported fixed bit width for the specified bit width .
20,188
/**
 * Converts a double to a sortable long via its IEEE 754 bit layout: for negatives
 * (sign bit set) the 63 value bits are flipped so their ordering becomes ascending;
 * positives are left untouched.
 */
public static long doubleToSortableLong(double value)
{
    long bits = Double.doubleToLongBits(value);
    // (bits >> 63) is all-ones for negatives, zero otherwise; masking with MAX_VALUE
    // keeps the sign bit itself intact while flipping the value bits
    long valueBitsMask = (bits >> 63) & Long.MAX_VALUE;
    return bits ^ valueBitsMask;
}
Converts a double value to a sortable long . The value is converted by getting their IEEE 754 floating - point bit layout . Some bits are swapped to be able to compare the result as long .
20,189
/**
 * Converts a sortable long back to a double — the exact inverse of
 * doubleToSortableLong (the same XOR mask round-trips the bits).
 */
public static double sortableLongToDouble(long value)
{
    long valueBitsMask = (value >> 63) & Long.MAX_VALUE;
    return Double.longBitsToDouble(value ^ valueBitsMask);
}
Converts a sortable long to double .
20,190
/**
 * Converts a float to a sortable int via its IEEE 754 bit layout: for negatives the
 * 31 value bits are flipped so their ordering becomes ascending; positives are left
 * untouched.
 */
public static int floatToSortableInt(float value)
{
    int bits = Float.floatToIntBits(value);
    int valueBitsMask = (bits >> 31) & Integer.MAX_VALUE;
    return bits ^ valueBitsMask;
}
Converts a float value to a sortable int .
20,191
/**
 * Converts a sortable int back to a float — the exact inverse of floatToSortableInt.
 */
public static float sortableIntToFloat(int value)
{
    int valueBitsMask = (value >> 31) & Integer.MAX_VALUE;
    return Float.intBitsToFloat(value ^ valueBitsMask);
}
Converts a sortable int to float.
20,192
/**
 * Returns the output-to-input symbol mapping for the given source channel, pairing
 * each output symbol with its sourceIndex-th input symbol.
 */
public Map < Symbol , Symbol > sourceSymbolMap ( int sourceIndex ) { ImmutableMap . Builder < Symbol , Symbol > builder = ImmutableMap . builder ( ) ; for ( Map . Entry < Symbol , Collection < Symbol > > entry : outputToInputs . asMap ( ) . entrySet ( ) ) { builder . put ( entry . getKey ( ) , Iterables . get ( entry . getValue ( ) , sourceIndex ) ) ; } return builder . build ( ) ; }
Returns the output to input symbol mapping for the given source channel
20,193
/**
 * Returns the input-to-output symbol mapping for the given source channel. A single
 * input symbol can map to multiple output symbols, hence the Multimap (built by
 * inverting the output-to-input map for this channel).
 */
public Multimap < Symbol , Symbol > outputSymbolMap ( int sourceIndex ) { return FluentIterable . from ( getOutputSymbols ( ) ) . toMap ( outputSymbol -> outputToInputs . get ( outputSymbol ) . get ( sourceIndex ) ) . asMultimap ( ) . inverse ( ) ; }
Returns the input to output symbol mapping for the given source channel . A single input symbol can map to multiple output symbols thus requiring a Multimap .
20,194
/**
 * Gets the iterator settings to be added to the metric table for the given Accumulo
 * table: a SummingCombiner over the cardinality columns of every locality group, plus
 * min/max byte-array combiners for the first/last row columns.
 * NOTE(review): new String(byte[]) here uses the platform default charset — consider
 * an explicit charset; confirm how the metric column constants are encoded.
 */
public static Collection < IteratorSetting > getMetricIterators ( AccumuloTable table ) { String cardQualifier = new String ( CARDINALITY_CQ ) ; String rowsFamily = new String ( METRICS_TABLE_ROWS_CF . array ( ) ) ; StringBuilder cardBuilder = new StringBuilder ( rowsFamily + ":" + cardQualifier + "," ) ; for ( String s : getLocalityGroups ( table ) . keySet ( ) ) { cardBuilder . append ( s ) . append ( ":" ) . append ( cardQualifier ) . append ( ',' ) ; } cardBuilder . deleteCharAt ( cardBuilder . length ( ) - 1 ) ; String firstRowColumn = rowsFamily + ":" + new String ( METRICS_TABLE_FIRST_ROW_CQ . array ( ) ) ; String lastRowColumn = rowsFamily + ":" + new String ( METRICS_TABLE_LAST_ROW_CQ . array ( ) ) ; IteratorSetting s1 = new IteratorSetting ( 1 , SummingCombiner . class , ImmutableMap . of ( "columns" , cardBuilder . toString ( ) , "type" , "STRING" ) ) ; IteratorSetting s2 = new IteratorSetting ( 2 , MinByteArrayCombiner . class , ImmutableMap . of ( "columns" , firstRowColumn ) ) ; IteratorSetting s3 = new IteratorSetting ( 3 , MaxByteArrayCombiner . class , ImmutableMap . of ( "columns" , lastRowColumn ) ) ; return ImmutableList . of ( s1 , s2 , s3 ) ; }
Gets a collection of iterator settings that should be added to the metric table for the given Accumulo table.
20,195
/**
 * Builds the index-table column family from the data table's column family and
 * qualifier, joined by an underscore.
 */
public static ByteBuffer getIndexColumnFamily ( byte [ ] columnFamily , byte [ ] columnQualifier ) { return wrap ( ArrayUtils . addAll ( ArrayUtils . add ( columnFamily , UNDERSCORE ) , columnQualifier ) ) ; }
Gets the column family of the index table based on the given column family and qualifier .
20,196
/**
 * Gets the fully-qualified index table name for the given table: the "_idx" suffix is
 * appended, and a non-default schema is prepended with a dot.
 */
public static String getIndexTableName(String schema, String table)
{
    String indexTable = table + "_idx";
    return schema.equals("default") ? indexTable : schema + '.' + indexTable;
}
Gets the fully - qualified index table name for the given table .
20,197
/**
 * Gets the fully-qualified index metrics table name for the given table: the
 * "_idx_metrics" suffix is appended, and a non-default schema is prepended with a dot.
 */
public static String getMetricsTableName(String schema, String table)
{
    String metricsTable = table + "_idx_metrics";
    return schema.equals("default") ? metricsTable : schema + '.' + metricsTable;
}
Gets the fully - qualified index metrics table name for the given table .
20,198
/**
 * Adjacency is defined by two Markers being infinitesimally close to each other:
 * they must share the same (bounded) value and exactly one of them must be EXACTLY
 * (the other being ABOVE/BELOW the same value).
 */
public boolean isAdjacent(Marker other)
{
    checkTypeCompatibility(other);
    if (isUpperUnbounded() || isLowerUnbounded() || other.isUpperUnbounded() || other.isLowerUnbounded()) {
        return false;
    }
    if (type.compareTo(valueBlock.get(), 0, other.valueBlock.get(), 0) != 0) {
        return false;
    }
    // adjacent iff exactly one side is EXACTLY
    return (bound == Bound.EXACTLY) != (other.bound == Bound.EXACTLY);
}
Adjacency is defined by two Markers being infinitesimally close to each other . This means they must share the same value and have adjacent Bounds .
20,199
/**
 * Converts an object to a RowExpression: an existing RowExpression passes through,
 * null becomes a typed null constant, and anything else becomes a constant of the
 * given type. Unlike toExpression, this is straightforward since the object is
 * already serializable.
 */
public static RowExpression toRowExpression(Object object, Type type)
{
    requireNonNull(type, "type is null");
    if (object instanceof RowExpression) {
        return (RowExpression) object;
    }
    return (object == null) ? constantNull(type) : constant(object, type);
}
Unlike toExpression, toRowExpression should be very straightforward, given that the object is serializable.