idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
18,900 | public static PeriodGranularity toQueryGranularity ( final TimeUnitRange timeUnitRange , final DateTimeZone timeZone ) { final Period period = PERIOD_MAP . get ( timeUnitRange ) ; if ( period == null ) { return null ; } return new PeriodGranularity ( period , null , timeZone ) ; } | Returns the Druid QueryGranularity corresponding to a Calcite TimeUnitRange or null if there is none . |
18,901 | public static ColumnSelectorFactory makeColumnSelectorFactory ( final VirtualColumns virtualColumns , final AggregatorFactory agg , final Supplier < InputRow > in , final boolean deserializeComplexMetrics ) { final RowBasedColumnSelectorFactory baseSelectorFactory = RowBasedColumnSelectorFactory . create ( in , null ) ; class IncrementalIndexInputRowColumnSelectorFactory implements ColumnSelectorFactory { public ColumnValueSelector < ? > makeColumnValueSelector ( final String column ) { final String typeName = agg . getTypeName ( ) ; boolean isComplexMetric = GuavaUtils . getEnumIfPresent ( ValueType . class , StringUtils . toUpperCase ( typeName ) ) == null || typeName . equalsIgnoreCase ( ValueType . COMPLEX . name ( ) ) ; if ( ! isComplexMetric || ! deserializeComplexMetrics ) { return baseSelectorFactory . makeColumnValueSelector ( column ) ; } else { final ComplexMetricSerde serde = ComplexMetrics . getSerdeForType ( typeName ) ; if ( serde == null ) { throw new ISE ( "Don't know how to handle type[%s]" , typeName ) ; } final ComplexMetricExtractor extractor = serde . getExtractor ( ) ; return new ColumnValueSelector ( ) { public boolean isNull ( ) { return in . get ( ) . getMetric ( column ) == null ; } public long getLong ( ) { Number metric = in . get ( ) . getMetric ( column ) ; assert NullHandling . replaceWithDefault ( ) || metric != null ; return DimensionHandlerUtils . nullToZero ( metric ) . longValue ( ) ; } public float getFloat ( ) { Number metric = in . get ( ) . getMetric ( column ) ; assert NullHandling . replaceWithDefault ( ) || metric != null ; return DimensionHandlerUtils . nullToZero ( metric ) . floatValue ( ) ; } public double getDouble ( ) { Number metric = in . get ( ) . getMetric ( column ) ; assert NullHandling . replaceWithDefault ( ) || metric != null ; return DimensionHandlerUtils . nullToZero ( metric ) . doubleValue ( ) ; } public Class classOfObject ( ) { return extractor . 
extractedClass ( ) ; } public Object getObject ( ) { return extractor . extractValue ( in . get ( ) , column , agg ) ; } public void inspectRuntimeShape ( RuntimeShapeInspector inspector ) { inspector . visit ( "in" , in ) ; inspector . visit ( "extractor" , extractor ) ; } } ; } } public DimensionSelector makeDimensionSelector ( DimensionSpec dimensionSpec ) { return baseSelectorFactory . makeDimensionSelector ( dimensionSpec ) ; } public ColumnCapabilities getColumnCapabilities ( String columnName ) { return baseSelectorFactory . getColumnCapabilities ( columnName ) ; } } return virtualColumns . wrap ( new IncrementalIndexInputRowColumnSelectorFactory ( ) ) ; } | Column selector used at ingestion time for inputs to aggregators . |
18,902 | private static void tokenToAuthCookie ( HttpServletResponse resp , String token , String domain , String path , long expires , boolean isCookiePersistent , boolean isSecure ) { resp . addHeader ( "Set-Cookie" , tokenToCookieString ( token , domain , path , expires , isCookiePersistent , isSecure ) ) ; } | Creates the Hadoop authentication HTTP cookie . |
18,903 | public static Pair < String , String > splitColumnName ( String columnName ) { final int i = columnName . indexOf ( '.' ) ; if ( i < 0 ) { return Pair . of ( columnName , null ) ; } else { return Pair . of ( columnName . substring ( 0 , i ) , columnName . substring ( i + 1 ) ) ; } } | Split a dot - style columnName into the main columnName and the subColumn name after the dot . Useful for columns that support dot notation . |
18,904 | public ColumnValueSelector < ? > makeColumnValueSelector ( String columnName , ColumnSelectorFactory factory ) { final VirtualColumn virtualColumn = getVirtualColumn ( columnName ) ; if ( virtualColumn == null ) { throw new IAE ( "No such virtual column[%s]" , columnName ) ; } else { final ColumnValueSelector < ? > selector = virtualColumn . makeColumnValueSelector ( columnName , factory ) ; Preconditions . checkNotNull ( selector , "selector" ) ; return selector ; } } | Create a column value selector . |
18,905 | public CacheKeyBuilder appendStrings ( Collection < String > input ) { appendItem ( STRING_LIST_KEY , stringCollectionToByteArray ( input , true ) ) ; return this ; } | Add a collection of strings to the cache key . Strings in the collection are concatenated with a separator of 0xFF and they appear in the cache key in their input order . |
18,906 | public CacheKeyBuilder appendStringsIgnoringOrder ( Collection < String > input ) { appendItem ( STRING_LIST_KEY , stringCollectionToByteArray ( input , false ) ) ; return this ; } | Add a collection of strings to the cache key . Strings in the collection are sorted by their byte representation and concatenated with a separator of 0xFF . |
18,907 | public CacheKeyBuilder appendCacheables ( Collection < ? extends Cacheable > input ) { appendItem ( CACHEABLE_LIST_KEY , cacheableCollectionToByteArray ( input , true ) ) ; return this ; } | Add a collection of Cacheables to the cache key . Cacheables in the collection are concatenated without any separator and they appear in the cache key in their input order . |
18,908 | public CacheKeyBuilder appendCacheablesIgnoringOrder ( Collection < ? extends Cacheable > input ) { appendItem ( CACHEABLE_LIST_KEY , cacheableCollectionToByteArray ( input , false ) ) ; return this ; } | Add a collection of Cacheables to the cache key . Cacheables in the collection are sorted by their byte representation and concatenated without any separator . |
18,909 | public RelDataType getRelDataType ( final RelDataTypeFactory typeFactory ) { final RelDataTypeFactory . Builder builder = typeFactory . builder ( ) ; for ( final String columnName : columnNames ) { final ValueType columnType = getColumnType ( columnName ) ; final RelDataType type ; if ( ColumnHolder . TIME_COLUMN_NAME . equals ( columnName ) ) { type = Calcites . createSqlType ( typeFactory , SqlTypeName . TIMESTAMP ) ; } else { switch ( columnType ) { case STRING : type = Calcites . createSqlTypeWithNullability ( typeFactory , SqlTypeName . VARCHAR , true ) ; break ; case LONG : type = Calcites . createSqlType ( typeFactory , SqlTypeName . BIGINT ) ; break ; case FLOAT : type = Calcites . createSqlType ( typeFactory , SqlTypeName . FLOAT ) ; break ; case DOUBLE : type = Calcites . createSqlType ( typeFactory , SqlTypeName . DOUBLE ) ; break ; case COMPLEX : type = Calcites . createSqlTypeWithNullability ( typeFactory , SqlTypeName . OTHER , true ) ; break ; default : throw new ISE ( "WTF?! valueType[%s] not translatable?" , columnType ) ; } } builder . add ( columnName , type ) ; } return builder . build ( ) ; } | Returns a Calcite RelDataType corresponding to this row signature . |
18,910 | public static String generateUuid ( String ... extraData ) { String extra = null ; if ( extraData != null && extraData . length > 0 ) { final ArrayList < String > extraStrings = new ArrayList < > ( extraData . length ) ; for ( String extraString : extraData ) { if ( ! Strings . isNullOrEmpty ( extraString ) ) { extraStrings . add ( extraString ) ; } } if ( ! extraStrings . isEmpty ( ) ) { extra = Joiner . on ( UUID_DELIM ) . join ( extraStrings ) ; } } final String uuid = StringUtils . removeChar ( UUID . randomUUID ( ) . toString ( ) , '-' ) ; return extra == null ? uuid : ( extra + UUID_DELIM + uuid ) ; } | Generates a universally unique identifier . |
18,911 | public static List < PostAggregator > prepareAggregations ( List < String > otherOutputNames , List < AggregatorFactory > aggFactories , List < PostAggregator > postAggs ) { Preconditions . checkNotNull ( otherOutputNames , "otherOutputNames cannot be null" ) ; Preconditions . checkNotNull ( aggFactories , "aggregations cannot be null" ) ; final Set < String > combinedOutputNames = new HashSet < > ( ) ; combinedOutputNames . addAll ( otherOutputNames ) ; final Map < String , AggregatorFactory > aggsFactoryMap = new HashMap < > ( ) ; for ( AggregatorFactory aggFactory : aggFactories ) { Preconditions . checkArgument ( combinedOutputNames . add ( aggFactory . getName ( ) ) , "[%s] already defined" , aggFactory . getName ( ) ) ; aggsFactoryMap . put ( aggFactory . getName ( ) , aggFactory ) ; } if ( postAggs != null && ! postAggs . isEmpty ( ) ) { List < PostAggregator > decorated = Lists . newArrayListWithExpectedSize ( postAggs . size ( ) ) ; for ( final PostAggregator postAgg : postAggs ) { final Set < String > dependencies = postAgg . getDependentFields ( ) ; final Set < String > missing = Sets . difference ( dependencies , combinedOutputNames ) ; Preconditions . checkArgument ( missing . isEmpty ( ) , "Missing fields [%s] for postAggregator [%s]" , missing , postAgg . getName ( ) ) ; Preconditions . checkArgument ( combinedOutputNames . add ( postAgg . getName ( ) ) , "[%s] already defined" , postAgg . getName ( ) ) ; decorated . add ( postAgg . decorate ( aggsFactoryMap ) ) ; } return decorated ; } return postAggs ; } | Returns decorated post - aggregators based on original un - decorated post - aggregators . In addition this method also verifies that there are no output name collisions and that all of the post - aggregators required input fields are present . |
18,912 | private long calculateLimit ( ScanQuery query , Map < String , Object > responseContext ) { if ( query . getOrder ( ) . equals ( ScanQuery . Order . NONE ) ) { return query . getLimit ( ) - ( long ) responseContext . get ( ScanQueryRunnerFactory . CTX_COUNT ) ; } return query . getLimit ( ) ; } | If we're performing time - ordering we want to scan through the first limit rows in each segment ignoring the number of rows already counted on other segments . |
18,913 | public void setValues ( int [ ] values , int size ) { if ( size < 0 || size > values . length ) { throw new IAE ( "Size[%d] should be between 0 and %d" , size , values . length ) ; } ensureSize ( size ) ; System . arraycopy ( values , 0 , expansion , 0 , size ) ; this . size = size ; } | Sets the values from the given array . The given values array is not reused and not prone to be mutated later . Instead the values from this array are copied into an array which is internal to ArrayBasedIndexedInts . |
18,914 | public static double computeJointSegmentsCost ( final DataSegment segmentA , final DataSegment segmentB ) { final Interval intervalA = segmentA . getInterval ( ) ; final Interval intervalB = segmentB . getInterval ( ) ; final double t0 = intervalA . getStartMillis ( ) ; final double t1 = ( intervalA . getEndMillis ( ) - t0 ) / MILLIS_FACTOR ; final double start = ( intervalB . getStartMillis ( ) - t0 ) / MILLIS_FACTOR ; final double end = ( intervalB . getEndMillis ( ) - t0 ) / MILLIS_FACTOR ; final double multiplier = segmentA . getDataSource ( ) . equals ( segmentB . getDataSource ( ) ) ? 2.0 : 1.0 ; return INV_LAMBDA_SQUARE * intervalCost ( t1 , start , end ) * multiplier ; } | This defines the unnormalized cost function between two segments . |
18,915 | public static double intervalCost ( double x1 , double y0 , double y1 ) { if ( x1 == 0 || y1 == y0 ) { return 0 ; } if ( y0 < 0 ) { double tmp = x1 ; x1 = y1 - y0 ; y1 = tmp - y0 ; y0 = - y0 ; } if ( y0 < x1 ) { final double beta ; final double gamma ; if ( y1 <= x1 ) { beta = y1 - y0 ; gamma = x1 - y0 ; } else { beta = x1 - y0 ; gamma = y1 - y0 ; } return intervalCost ( y0 , y0 , y1 ) + intervalCost ( beta , beta , gamma ) + 2 * ( beta + FastMath . exp ( - beta ) - 1 ) ; } else { final double exy0 = FastMath . exp ( x1 - y0 ) ; final double exy1 = FastMath . exp ( x1 - y1 ) ; final double ey0 = FastMath . exp ( 0f - y0 ) ; final double ey1 = FastMath . exp ( 0f - y1 ) ; return ( ey1 - ey0 ) - ( exy1 - exy0 ) ; } } | Computes the joint cost of two intervals X = [ x_0 = 0 x_1 ) and Y = [ y_0 y_1 ) |
18,916 | public double calculateInitialTotalCost ( final List < ServerHolder > serverHolders ) { double cost = 0 ; for ( ServerHolder server : serverHolders ) { Iterable < DataSegment > segments = server . getServer ( ) . getLazyAllSegments ( ) ; for ( DataSegment s : segments ) { cost += computeJointSegmentsCost ( s , segments ) ; } } return cost ; } | Calculates the initial cost of the Druid segment configuration . |
18,917 | protected Pair < Double , ServerHolder > chooseBestServer ( final DataSegment proposalSegment , final Iterable < ServerHolder > serverHolders , final boolean includeCurrentServer ) { Pair < Double , ServerHolder > bestServer = Pair . of ( Double . POSITIVE_INFINITY , null ) ; List < ListenableFuture < Pair < Double , ServerHolder > > > futures = new ArrayList < > ( ) ; for ( final ServerHolder server : serverHolders ) { futures . add ( exec . submit ( ( ) -> Pair . of ( computeCost ( proposalSegment , server , includeCurrentServer ) , server ) ) ) ; } final ListenableFuture < List < Pair < Double , ServerHolder > > > resultsFuture = Futures . allAsList ( futures ) ; final List < Pair < Double , ServerHolder > > bestServers = new ArrayList < > ( ) ; bestServers . add ( bestServer ) ; try { for ( Pair < Double , ServerHolder > server : resultsFuture . get ( ) ) { if ( server . lhs <= bestServers . get ( 0 ) . lhs ) { if ( server . lhs < bestServers . get ( 0 ) . lhs ) { bestServers . clear ( ) ; } bestServers . add ( server ) ; } } bestServer = bestServers . get ( ThreadLocalRandom . current ( ) . nextInt ( bestServers . size ( ) ) ) ; } catch ( Exception e ) { log . makeAlert ( e , "Cost Balancer Multithread strategy wasn't able to complete cost computation." ) . emit ( ) ; } return bestServer ; } | For assignment we want to move to the lowest cost server that isn t already serving the segment . |
18,918 | public ConnectionFactory build ( ) { return new DefaultConnectionFactory ( ) { public NodeLocator createLocator ( List < MemcachedNode > nodes ) { switch ( locator ) { case ARRAY_MOD : return new ArrayModNodeLocator ( nodes , getHashAlg ( ) ) ; case CONSISTENT : return new KetamaNodeLocator ( nodes , getHashAlg ( ) , new DefaultKetamaNodeLocatorConfiguration ( ) { public int getNodeRepetitions ( ) { return repetitions ; } } ) ; default : throw new IllegalStateException ( "Unhandled locator type: " + locator ) ; } } public BlockingQueue < Operation > createOperationQueue ( ) { return opQueueFactory == null ? super . createOperationQueue ( ) : opQueueFactory . create ( ) ; } public BlockingQueue < Operation > createReadOperationQueue ( ) { return readQueueFactory == null ? super . createReadOperationQueue ( ) : readQueueFactory . create ( ) ; } public BlockingQueue < Operation > createWriteOperationQueue ( ) { return writeQueueFactory == null ? super . createReadOperationQueue ( ) : writeQueueFactory . create ( ) ; } public Transcoder < Object > getDefaultTranscoder ( ) { return transcoder == null ? super . getDefaultTranscoder ( ) : transcoder ; } public FailureMode getFailureMode ( ) { return failureMode == null ? super . getFailureMode ( ) : failureMode ; } public HashAlgorithm getHashAlg ( ) { return hashAlg == null ? super . getHashAlg ( ) : hashAlg ; } public Collection < ConnectionObserver > getInitialObservers ( ) { return initialObservers ; } public OperationFactory getOperationFactory ( ) { return opFact == null ? super . getOperationFactory ( ) : opFact ; } public long getOperationTimeout ( ) { return opTimeout == - 1 ? super . getOperationTimeout ( ) : opTimeout ; } public int getReadBufSize ( ) { return readBufSize == - 1 ? super . 
getReadBufSize ( ) : readBufSize ; } public boolean isDaemon ( ) { return isDaemon ; } public boolean shouldOptimize ( ) { return shouldOptimize ; } public boolean useNagleAlgorithm ( ) { return useNagle ; } public long getMaxReconnectDelay ( ) { return maxReconnectDelay ; } public AuthDescriptor getAuthDescriptor ( ) { return authDescriptor ; } public long getOpQueueMaxBlockTime ( ) { return opQueueMaxBlockTime > - 1 ? opQueueMaxBlockTime : super . getOpQueueMaxBlockTime ( ) ; } public int getTimeoutExceptionThreshold ( ) { return timeoutExceptionThreshold ; } public MetricType enableMetrics ( ) { return metricType == null ? super . enableMetrics ( ) : metricType ; } public MetricCollector getMetricCollector ( ) { return collector == null ? super . getMetricCollector ( ) : collector ; } public ExecutorService getListenerExecutorService ( ) { return executorService == null ? super . getListenerExecutorService ( ) : executorService ; } public boolean isDefaultExecutorService ( ) { return executorService == null ; } public long getAuthWaitTime ( ) { return authWaitTime ; } } ; } | borrowed from ConnectionFactoryBuilder to allow setting number of repetitions for KetamaNodeLocator |
18,919 | protected Map < String , String > getLagPerPartition ( Map < String , String > currentOffsets ) { return ImmutableMap . of ( ) ; } | not yet supported ; will be implemented in the future |
18,920 | private int findMinGrandChild ( Comparator comparator , int index ) { int leftChildIndex = getLeftChildIndex ( index ) ; if ( leftChildIndex < 0 ) { return - 1 ; } return findMin ( comparator , getLeftChildIndex ( leftChildIndex ) , 4 ) ; } | Returns the minimum grand child or - 1 if no grand child exists . |
18,921 | private int findMaxElementIndex ( ) { switch ( heapSize ) { case 1 : return 0 ; case 2 : return 1 ; default : int offset1 = buf . getInt ( 1 * Integer . BYTES ) ; int offset2 = buf . getInt ( 2 * Integer . BYTES ) ; return maxComparator . compare ( offset1 , offset2 ) <= 0 ? 1 : 2 ; } } | Returns the index of the max element . |
18,922 | public static void register ( Binder binder , Annotation annotation ) { registerKey ( binder , Key . get ( new TypeLiteral < DruidNode > ( ) { } , annotation ) ) ; } | Requests that the annotated DruidNode instance be injected and published as part of the lifecycle . |
18,923 | public static void registerKey ( Binder binder , Key < DruidNode > key ) { DruidBinders . discoveryAnnouncementBinder ( binder ) . addBinding ( ) . toInstance ( new KeyHolder < > ( key ) ) ; LifecycleModule . register ( binder , ServiceAnnouncer . class ) ; } | Requests that the keyed DruidNode instance be injected and published as part of the lifecycle . |
18,924 | public Set < DataSegment > getInsertedSegments ( final String taskid ) { final Set < DataSegment > segments = new HashSet < > ( ) ; for ( final TaskAction action : storage . getAuditLogs ( taskid ) ) { if ( action instanceof SegmentInsertAction ) { segments . addAll ( ( ( SegmentInsertAction ) action ) . getSegments ( ) ) ; } else if ( action instanceof SegmentTransactionalInsertAction ) { segments . addAll ( ( ( SegmentTransactionalInsertAction ) action ) . getSegments ( ) ) ; } } return segments ; } | Returns all segments created by this task . |
18,925 | public static List < String > objectToStrings ( final Object inputValue ) { if ( inputValue == null ) { return Collections . emptyList ( ) ; } else if ( inputValue instanceof List ) { return ( ( List < ? > ) inputValue ) . stream ( ) . map ( String :: valueOf ) . collect ( Collectors . toList ( ) ) ; } else if ( inputValue instanceof byte [ ] ) { return Collections . singletonList ( StringUtils . encodeBase64String ( ( byte [ ] ) inputValue ) ) ; } else { return Collections . singletonList ( String . valueOf ( inputValue ) ) ; } } | Convert an object to a list of strings . |
18,926 | private int computeRequiredBufferNum ( int numChildNodes , int combineDegree ) { final int numChildrenForLastNode = numChildNodes % combineDegree ; final int numCurLevelNodes = numChildNodes / combineDegree + ( numChildrenForLastNode > 1 ? 1 : 0 ) ; final int numChildOfParentNodes = numCurLevelNodes + ( numChildrenForLastNode == 1 ? 1 : 0 ) ; if ( numChildOfParentNodes == 1 ) { return numCurLevelNodes ; } else { return numCurLevelNodes + computeRequiredBufferNum ( numChildOfParentNodes , intermediateCombineDegree ) ; } } | Recursively compute the number of required buffers for a combining tree in a bottom - up manner . Since each node of the combining tree represents a combining task and each combining task requires one buffer the number of required buffers is the number of nodes of the combining tree . |
18,927 | private Pair < List < CloseableIterator < Entry < KeyType > > > , List < Future > > buildCombineTree ( List < ? extends CloseableIterator < Entry < KeyType > > > childIterators , Supplier < ByteBuffer > bufferSupplier , AggregatorFactory [ ] combiningFactories , int combineDegree , List < String > dictionary ) { final int numChildLevelIterators = childIterators . size ( ) ; final List < CloseableIterator < Entry < KeyType > > > childIteratorsOfNextLevel = new ArrayList < > ( ) ; final List < Future > combineFutures = new ArrayList < > ( ) ; for ( int i = 0 ; i < numChildLevelIterators ; i += combineDegree ) { if ( i < numChildLevelIterators - 1 ) { final List < ? extends CloseableIterator < Entry < KeyType > > > subIterators = childIterators . subList ( i , Math . min ( i + combineDegree , numChildLevelIterators ) ) ; final Pair < CloseableIterator < Entry < KeyType > > , Future > iteratorAndFuture = runCombiner ( subIterators , bufferSupplier . get ( ) , combiningFactories , dictionary ) ; childIteratorsOfNextLevel . add ( iteratorAndFuture . lhs ) ; combineFutures . add ( iteratorAndFuture . rhs ) ; } else { childIteratorsOfNextLevel . add ( childIterators . get ( i ) ) ; } } if ( childIteratorsOfNextLevel . size ( ) == 1 ) { return Pair . of ( childIteratorsOfNextLevel , combineFutures ) ; } else { final Pair < List < CloseableIterator < Entry < KeyType > > > , List < Future > > parentIteratorsAndFutures = buildCombineTree ( childIteratorsOfNextLevel , bufferSupplier , combiningFactories , intermediateCombineDegree , dictionary ) ; combineFutures . addAll ( parentIteratorsAndFutures . rhs ) ; return Pair . of ( parentIteratorsAndFutures . lhs , combineFutures ) ; } } | Recursively build a combining tree in a bottom - up manner . Each node of the tree is a task that combines input iterators asynchronously . |
18,928 | public final T getSpecializedOrDefault ( T defaultInstance ) { T specialized = getSpecialized ( ) ; return specialized != null ? specialized : defaultInstance ; } | Returns an instance of specialized version of query processing algorithm if available defaultInstance otherwise . |
18,929 | public static float [ ] decode ( final String encodedCoordinate ) { if ( encodedCoordinate == null ) { return null ; } final ImmutableList < String > parts = ImmutableList . copyOf ( SPLITTER . split ( encodedCoordinate ) ) ; final float [ ] coordinate = new float [ parts . size ( ) ] ; for ( int i = 0 ; i < coordinate . length ; i ++ ) { final Float floatPart = tryParseFloat ( parts . get ( i ) ) ; if ( floatPart == null ) { return null ; } else { coordinate [ i ] = floatPart ; } } return coordinate ; } | Decodes encodedCoordinate . |
18,930 | private List < Pair < DataSegment , Boolean > > getDataSegmentsOverlappingInterval ( final String dataSource , final Interval interval ) { return connector . inReadOnlyTransaction ( ( handle , status ) -> handle . createQuery ( StringUtils . format ( "SELECT used, payload FROM %1$s WHERE dataSource = :dataSource AND start < :end AND %2$send%2$s > :start" , getSegmentsTable ( ) , connector . getQuoteString ( ) ) ) . setFetchSize ( connector . getStreamingFetchSize ( ) ) . bind ( "dataSource" , dataSource ) . bind ( "start" , interval . getStart ( ) . toString ( ) ) . bind ( "end" , interval . getEnd ( ) . toString ( ) ) . map ( this :: usedPayloadMapper ) . list ( ) ) ; } | Gets a list of all datasegments that overlap the provided interval along with their used status . |
18,931 | private VersionedIntervalTimeline < String , DataSegment > buildVersionedIntervalTimeline ( final String dataSource , final Collection < Interval > intervals , final Handle handle ) { return VersionedIntervalTimeline . forSegments ( intervals . stream ( ) . flatMap ( interval -> handle . createQuery ( StringUtils . format ( "SELECT payload FROM %1$s WHERE dataSource = :dataSource AND start < :end AND %2$send%2$s > :start AND used = true" , getSegmentsTable ( ) , connector . getQuoteString ( ) ) ) . setFetchSize ( connector . getStreamingFetchSize ( ) ) . bind ( "dataSource" , dataSource ) . bind ( "start" , interval . getStart ( ) . toString ( ) ) . bind ( "end" , interval . getEnd ( ) . toString ( ) ) . map ( ( i , resultSet , context ) -> { try { return jsonMapper . readValue ( resultSet . getBytes ( "payload" ) , DataSegment . class ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } ) . list ( ) . stream ( ) ) . iterator ( ) ) ; } | Builds a VersionedIntervalTimeline containing used segments that overlap the intervals passed . |
18,932 | protected int findBucket ( final boolean allowNewBucket , final int buckets , final ByteBuffer targetTableBuffer , final ByteBuffer keyBuffer , final int keyHash ) { final int startBucket = keyHash % buckets ; int bucket = startBucket ; outer : while ( true ) { final int bucketOffset = bucket * bucketSizeWithHash ; if ( ( targetTableBuffer . get ( bucketOffset ) & 0x80 ) == 0 ) { return allowNewBucket ? bucket : - 1 ; } for ( int i = bucketOffset + HASH_SIZE , j = keyBuffer . position ( ) ; j < keyBuffer . position ( ) + keySize ; i ++ , j ++ ) { if ( targetTableBuffer . get ( i ) != keyBuffer . get ( j ) ) { bucket += 1 ; if ( bucket == buckets ) { bucket = 0 ; } if ( bucket == startBucket ) { return - 1 ; } continue outer ; } } return bucket ; } } | Finds the bucket into which we should insert a key . |
18,933 | public LockResult lock ( final TaskLockType lockType , final Task task , final Interval interval ) throws InterruptedException { giant . lockInterruptibly ( ) ; try { LockResult lockResult ; while ( ! ( lockResult = tryLock ( lockType , task , interval ) ) . isOk ( ) ) { if ( lockResult . isRevoked ( ) ) { return lockResult ; } lockReleaseCondition . await ( ) ; } return lockResult ; } finally { giant . unlock ( ) ; } } | Acquires a lock on behalf of a task . Blocks until the lock is acquired . |
18,934 | public LockResult lock ( final TaskLockType lockType , final Task task , final Interval interval , long timeoutMs ) throws InterruptedException { long nanos = TimeUnit . MILLISECONDS . toNanos ( timeoutMs ) ; giant . lockInterruptibly ( ) ; try { LockResult lockResult ; while ( ! ( lockResult = tryLock ( lockType , task , interval ) ) . isOk ( ) ) { if ( nanos <= 0 || lockResult . isRevoked ( ) ) { return lockResult ; } nanos = lockReleaseCondition . awaitNanos ( nanos ) ; } return lockResult ; } finally { giant . unlock ( ) ; } } | Acquires a lock on behalf of a task waiting up to the specified wait time if necessary . |
18,935 | public LockResult tryLock ( final TaskLockType lockType , final Task task , final Interval interval ) { giant . lock ( ) ; try { if ( ! activeTasks . contains ( task . getId ( ) ) ) { throw new ISE ( "Unable to grant lock to inactive Task [%s]" , task . getId ( ) ) ; } Preconditions . checkArgument ( interval . toDurationMillis ( ) > 0 , "interval empty" ) ; final TaskLockPosse posseToUse = createOrFindLockPosse ( task , interval , lockType ) ; if ( posseToUse != null && ! posseToUse . getTaskLock ( ) . isRevoked ( ) ) { if ( posseToUse . addTask ( task ) ) { log . info ( "Added task[%s] to TaskLock[%s]" , task . getId ( ) , posseToUse . getTaskLock ( ) . getGroupId ( ) ) ; try { taskStorage . addLock ( task . getId ( ) , posseToUse . getTaskLock ( ) ) ; return LockResult . ok ( posseToUse . getTaskLock ( ) ) ; } catch ( Exception e ) { log . makeAlert ( "Failed to persist lock in storage" ) . addData ( "task" , task . getId ( ) ) . addData ( "dataSource" , posseToUse . getTaskLock ( ) . getDataSource ( ) ) . addData ( "interval" , posseToUse . getTaskLock ( ) . getInterval ( ) ) . addData ( "version" , posseToUse . getTaskLock ( ) . getVersion ( ) ) . emit ( ) ; unlock ( task , interval ) ; return LockResult . fail ( false ) ; } } else { log . info ( "Task[%s] already present in TaskLock[%s]" , task . getId ( ) , posseToUse . getTaskLock ( ) . getGroupId ( ) ) ; return LockResult . ok ( posseToUse . getTaskLock ( ) ) ; } } else { final boolean lockRevoked = posseToUse != null && posseToUse . getTaskLock ( ) . isRevoked ( ) ; return LockResult . fail ( lockRevoked ) ; } } finally { giant . unlock ( ) ; } } | Attempt to acquire a lock for a task without removing it from the queue . Can safely be called multiple times on the same task until the lock is preempted . |
18,936 | public < T > T doInCriticalSection ( Task task , List < Interval > intervals , CriticalAction < T > action ) throws Exception { giant . lock ( ) ; try { return action . perform ( isTaskLocksValid ( task , intervals ) ) ; } finally { giant . unlock ( ) ; } } | Perform the given action with a guarantee that the locks of the task are not revoked in the middle of action . This method first checks that all locks for the given task and intervals are valid and perform the right action . |
18,937 | public List < TaskLock > findLocksForTask ( final Task task ) { giant . lock ( ) ; try { return Lists . transform ( findLockPossesForTask ( task ) , new Function < TaskLockPosse , TaskLock > ( ) { public TaskLock apply ( TaskLockPosse taskLockPosse ) { return taskLockPosse . getTaskLock ( ) ; } } ) ; } finally { giant . unlock ( ) ; } } | Return the currently - active locks for some task . |
18,938 | public void unlock ( final Task task , final Interval interval ) { giant . lock ( ) ; try { final String dataSource = task . getDataSource ( ) ; final NavigableMap < DateTime , SortedMap < Interval , List < TaskLockPosse > > > dsRunning = running . get ( task . getDataSource ( ) ) ; if ( dsRunning == null || dsRunning . isEmpty ( ) ) { return ; } final SortedMap < Interval , List < TaskLockPosse > > intervalToPosses = dsRunning . get ( interval . getStart ( ) ) ; if ( intervalToPosses == null || intervalToPosses . isEmpty ( ) ) { return ; } final List < TaskLockPosse > possesHolder = intervalToPosses . get ( interval ) ; if ( possesHolder == null || possesHolder . isEmpty ( ) ) { return ; } final List < TaskLockPosse > posses = possesHolder . stream ( ) . filter ( posse -> posse . containsTask ( task ) ) . collect ( Collectors . toList ( ) ) ; for ( TaskLockPosse taskLockPosse : posses ) { final TaskLock taskLock = taskLockPosse . getTaskLock ( ) ; log . info ( "Removing task[%s] from TaskLock[%s]" , task . getId ( ) , taskLock . getGroupId ( ) ) ; final boolean removed = taskLockPosse . removeTask ( task ) ; if ( taskLockPosse . isTasksEmpty ( ) ) { log . info ( "TaskLock is now empty: %s" , taskLock ) ; possesHolder . remove ( taskLockPosse ) ; } if ( possesHolder . isEmpty ( ) ) { intervalToPosses . remove ( interval ) ; } if ( intervalToPosses . isEmpty ( ) ) { dsRunning . remove ( interval . getStart ( ) ) ; } if ( running . get ( dataSource ) . size ( ) == 0 ) { running . remove ( dataSource ) ; } lockReleaseCondition . signalAll ( ) ; try { taskStorage . removeLock ( task . getId ( ) , taskLock ) ; } catch ( Exception e ) { log . makeAlert ( e , "Failed to clean up lock from storage" ) . addData ( "task" , task . getId ( ) ) . addData ( "dataSource" , taskLock . getDataSource ( ) ) . addData ( "interval" , taskLock . getInterval ( ) ) . addData ( "version" , taskLock . getVersion ( ) ) . emit ( ) ; } if ( ! removed ) { log . 
makeAlert ( "Lock release without acquire" ) . addData ( "task" , task . getId ( ) ) . addData ( "interval" , interval ) . emit ( ) ; } } } finally { giant . unlock ( ) ; } } | Release lock held for a task on a particular interval . Does nothing if the task does not currently hold the mentioned lock . |
18,939 | public void remove ( final Task task ) { giant . lock ( ) ; try { try { log . info ( "Removing task[%s] from activeTasks" , task . getId ( ) ) ; for ( final TaskLockPosse taskLockPosse : findLockPossesForTask ( task ) ) { unlock ( task , taskLockPosse . getTaskLock ( ) . getInterval ( ) ) ; } } finally { activeTasks . remove ( task . getId ( ) ) ; } } finally { giant . unlock ( ) ; } } | Release all locks for a task and remove task from set of active tasks . Does nothing if the task is not currently locked or not an active task . |
18,940 | private List < TaskLockPosse > findLockPossesForTask ( final Task task ) { giant . lock ( ) ; try { final NavigableMap < DateTime , SortedMap < Interval , List < TaskLockPosse > > > dsRunning = running . get ( task . getDataSource ( ) ) ; if ( dsRunning == null ) { return ImmutableList . of ( ) ; } else { return dsRunning . values ( ) . stream ( ) . flatMap ( map -> map . values ( ) . stream ( ) ) . flatMap ( Collection :: stream ) . filter ( taskLockPosse -> taskLockPosse . containsTask ( task ) ) . collect ( Collectors . toList ( ) ) ; } } finally { giant . unlock ( ) ; } } | Return the currently - active lock posses for some task . |
18,941 | private List < TaskLockPosse > findLockPossesOverlapsInterval ( final String dataSource , final Interval interval ) { giant . lock ( ) ; try { final NavigableMap < DateTime , SortedMap < Interval , List < TaskLockPosse > > > dsRunning = running . get ( dataSource ) ; if ( dsRunning == null ) { return Collections . emptyList ( ) ; } else { final NavigableSet < DateTime > dsLockbox = dsRunning . navigableKeySet ( ) ; final Iterable < DateTime > searchStartTimes = Iterables . concat ( Collections . singletonList ( dsLockbox . floor ( interval . getStart ( ) ) ) , dsLockbox . subSet ( interval . getStart ( ) , false , interval . getEnd ( ) , false ) ) ; return StreamSupport . stream ( searchStartTimes . spliterator ( ) , false ) . filter ( java . util . Objects :: nonNull ) . map ( dsRunning :: get ) . filter ( java . util . Objects :: nonNull ) . flatMap ( sortedMap -> sortedMap . entrySet ( ) . stream ( ) ) . filter ( entry -> entry . getKey ( ) . overlaps ( interval ) ) . flatMap ( entry -> entry . getValue ( ) . stream ( ) ) . collect ( Collectors . toList ( ) ) ; } } finally { giant . unlock ( ) ; } } | Return all locks that overlap some search interval . |
18,942 | public static < T > T retryCloudFilesOperation ( Task < T > f , final int maxTries ) throws Exception { return RetryUtils . retry ( f , CLOUDFILESRETRY , maxTries ) ; } | Retries CloudFiles operations that fail due to I/O - related exceptions . |
18,943 | public RangeSet < String > getDimensionRangeSet ( String dimension ) { if ( field instanceof AndDimFilter ) { List < DimFilter > fields = ( ( AndDimFilter ) field ) . getFields ( ) ; return new OrDimFilter ( Lists . transform ( fields , NotDimFilter :: new ) ) . getDimensionRangeSet ( dimension ) ; } if ( field instanceof OrDimFilter ) { List < DimFilter > fields = ( ( OrDimFilter ) field ) . getFields ( ) ; return new AndDimFilter ( Lists . transform ( fields , NotDimFilter :: new ) ) . getDimensionRangeSet ( dimension ) ; } if ( field instanceof NotDimFilter ) { return ( ( NotDimFilter ) field ) . getField ( ) . getDimensionRangeSet ( dimension ) ; } RangeSet < String > rangeSet = field . getDimensionRangeSet ( dimension ) ; return rangeSet == null ? null : rangeSet . complement ( ) ; } | There are some special cases involving null that require special casing for And and Or instead of simply taking the complement |
18,944 | public FullResponseHolder go ( Request request , HttpResponseHandler < FullResponseHolder , FullResponseHolder > responseHandler ) throws IOException , InterruptedException { Preconditions . checkState ( lifecycleLock . awaitStarted ( 1 , TimeUnit . MILLISECONDS ) ) ; for ( int counter = 0 ; counter < MAX_RETRIES ; counter ++ ) { final FullResponseHolder fullResponseHolder ; try { try { fullResponseHolder = httpClient . go ( request , responseHandler ) . get ( ) ; } catch ( ExecutionException e ) { Throwables . propagateIfInstanceOf ( e . getCause ( ) , IOException . class ) ; Throwables . propagateIfInstanceOf ( e . getCause ( ) , ChannelException . class ) ; throw new RE ( e , "HTTP request to[%s] failed" , request . getUrl ( ) ) ; } } catch ( IOException | ChannelException ex ) { log . warn ( ex , "Request[%s] failed." , request . getUrl ( ) ) ; try { if ( request . getUrl ( ) . getQuery ( ) == null ) { request = withUrl ( request , new URL ( StringUtils . format ( "%s%s" , getCurrentKnownLeader ( false ) , request . getUrl ( ) . getPath ( ) ) ) ) ; } else { request = withUrl ( request , new URL ( StringUtils . format ( "%s%s?%s" , getCurrentKnownLeader ( false ) , request . getUrl ( ) . getPath ( ) , request . getUrl ( ) . getQuery ( ) ) ) ) ; } continue ; } catch ( MalformedURLException e ) { throw new ISE ( e , "failed to build url with path[%] and query string [%s]." , request . getUrl ( ) . getPath ( ) , request . getUrl ( ) . getQuery ( ) ) ; } } if ( HttpResponseStatus . TEMPORARY_REDIRECT . equals ( fullResponseHolder . getResponse ( ) . getStatus ( ) ) ) { String redirectUrlStr = fullResponseHolder . getResponse ( ) . headers ( ) . get ( "Location" ) ; if ( redirectUrlStr == null ) { throw new IOE ( "No redirect location is found in response from url[%s]." , request . getUrl ( ) ) ; } log . info ( "Request[%s] received redirect response to location [%s]." , request . 
getUrl ( ) , redirectUrlStr ) ; final URL redirectUrl ; try { redirectUrl = new URL ( redirectUrlStr ) ; } catch ( MalformedURLException ex ) { throw new IOE ( ex , "Malformed redirect location is found in response from url[%s], new location[%s]." , request . getUrl ( ) , redirectUrlStr ) ; } currentKnownLeader . set ( StringUtils . format ( "%s://%s:%s" , redirectUrl . getProtocol ( ) , redirectUrl . getHost ( ) , redirectUrl . getPort ( ) ) ) ; request = withUrl ( request , redirectUrl ) ; } else { return fullResponseHolder ; } } throw new IOE ( "Retries exhausted, couldn't fulfill request to [%s]." , request . getUrl ( ) ) ; } | Executes a Request object aimed at the leader . Throws IOException if the leader cannot be located . |
18,945 | public List < String > mergeAndGetDictionary ( ) { final Set < String > mergedDictionary = new HashSet < > ( ) ; mergedDictionary . addAll ( keySerde . getDictionary ( ) ) ; for ( File dictFile : dictionaryFiles ) { try ( final MappingIterator < String > dictIterator = spillMapper . readValues ( spillMapper . getFactory ( ) . createParser ( new LZ4BlockInputStream ( new FileInputStream ( dictFile ) ) ) , spillMapper . getTypeFactory ( ) . constructType ( String . class ) ) ) { while ( dictIterator . hasNext ( ) ) { mergedDictionary . add ( dictIterator . next ( ) ) ; } } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } return new ArrayList < > ( mergedDictionary ) ; } | Returns a dictionary of string keys added to this grouper . Note that the dictionary of keySerde is spilled on local storage whenever the inner grouper is spilled . If there are spilled dictionaries this method loads them from disk and returns a merged dictionary . |
18,946 | public static BoundDimFilter not ( final BoundDimFilter bound ) { if ( bound . getUpper ( ) != null && bound . getLower ( ) != null ) { return null ; } else if ( bound . getUpper ( ) != null ) { return new BoundDimFilter ( bound . getDimension ( ) , bound . getUpper ( ) , null , ! bound . isUpperStrict ( ) , false , null , bound . getExtractionFn ( ) , bound . getOrdering ( ) ) ; } else { return new BoundDimFilter ( bound . getDimension ( ) , null , bound . getLower ( ) , false , ! bound . isLowerStrict ( ) , null , bound . getExtractionFn ( ) , bound . getOrdering ( ) ) ; } } | Negates single - ended Bound filters . |
18,947 | private SegmentIdWithShardSpec getSegment ( final InputRow row , final String sequenceName , final boolean skipSegmentLineageCheck ) throws IOException { synchronized ( segments ) { final DateTime timestamp = row . getTimestamp ( ) ; final SegmentIdWithShardSpec existing = getAppendableSegment ( timestamp , sequenceName ) ; if ( existing != null ) { return existing ; } else { final SegmentsForSequence segmentsForSequence = segments . get ( sequenceName ) ; final SegmentIdWithShardSpec newSegment = segmentAllocator . allocate ( row , sequenceName , segmentsForSequence == null ? null : segmentsForSequence . lastSegmentId , skipSegmentLineageCheck ) ; if ( newSegment != null ) { for ( SegmentIdWithShardSpec identifier : appenderator . getSegments ( ) ) { if ( identifier . equals ( newSegment ) ) { throw new ISE ( "WTF?! Allocated segment[%s] which conflicts with existing segment[%s]." , newSegment , identifier ) ; } } log . info ( "New segment[%s] for row[%s] sequenceName[%s]." , newSegment , row , sequenceName ) ; addSegment ( sequenceName , newSegment ) ; } else { log . warn ( "Cannot allocate segment for timestamp[%s], sequenceName[%s]. " , timestamp , sequenceName ) ; } return newSegment ; } } } | Return a segment usable for timestamp . May return null if no segment can be allocated . |
18,948 | ListenableFuture < SegmentsAndMetadata > pushInBackground ( final WrappedCommitter wrappedCommitter , final Collection < SegmentIdWithShardSpec > segmentIdentifiers , final boolean useUniquePath ) { log . info ( "Pushing segments in background: [%s]" , Joiner . on ( ", " ) . join ( segmentIdentifiers ) ) ; return Futures . transform ( appenderator . push ( segmentIdentifiers , wrappedCommitter , useUniquePath ) , ( Function < SegmentsAndMetadata , SegmentsAndMetadata > ) segmentsAndMetadata -> { final Set < SegmentIdWithShardSpec > pushedSegments = segmentsAndMetadata . getSegments ( ) . stream ( ) . map ( SegmentIdWithShardSpec :: fromDataSegment ) . collect ( Collectors . toSet ( ) ) ; if ( ! pushedSegments . equals ( Sets . newHashSet ( segmentIdentifiers ) ) ) { log . warn ( "Removing segments from deep storage because sanity check failed: %s" , segmentsAndMetadata . getSegments ( ) ) ; segmentsAndMetadata . getSegments ( ) . forEach ( dataSegmentKiller :: killQuietly ) ; throw new ISE ( "WTF?! Pushed different segments than requested. Pushed[%s], requested[%s]." , pushedSegments , segmentIdentifiers ) ; } return segmentsAndMetadata ; } , executor ) ; } | Push the given segments in background . |
18,949 | private int tryReserveEventSizeAndLock ( long state , int size ) { Preconditions . checkArgument ( size > 0 ) ; int bufferWatermark = bufferWatermark ( state ) ; while ( true ) { if ( compareAndSetState ( state , state + size + PARTY ) ) { return bufferWatermark ; } state = getState ( ) ; if ( isSealed ( state ) ) { return - 1 ; } bufferWatermark = bufferWatermark ( state ) ; int newBufferWatermark = bufferWatermark + size ; Preconditions . checkState ( newBufferWatermark > 0 ) ; if ( newBufferWatermark > emitter . maxBufferWatermark ) { return - 1 ; } } } | Returns the buffer offset at which the caller has reserved the ability to write size bytes exclusively , or a negative number if the reservation attempt failed . |
18,950 | public static void serialize ( OutputStream out , BloomKFilter bloomFilter ) throws IOException { DataOutputStream dataOutputStream = new DataOutputStream ( out ) ; dataOutputStream . writeByte ( bloomFilter . k ) ; dataOutputStream . writeInt ( bloomFilter . getBitSet ( ) . length ) ; for ( long value : bloomFilter . getBitSet ( ) ) { dataOutputStream . writeLong ( value ) ; } } | Serialize a bloom filter |
18,951 | public static void serialize ( ByteBuffer out , int position , BloomKFilter bloomFilter ) { ByteBuffer view = out . duplicate ( ) . order ( ByteOrder . BIG_ENDIAN ) ; view . position ( position ) ; view . put ( ( byte ) bloomFilter . k ) ; view . putInt ( bloomFilter . getBitSet ( ) . length ) ; for ( long value : bloomFilter . getBitSet ( ) ) { view . putLong ( value ) ; } } | Serialize a bloom filter to a ByteBuffer . Does not mutate buffer position . |
18,952 | public static int computeSizeBytes ( long maxNumEntries ) { checkArgument ( maxNumEntries > 0 , "expectedEntries should be > 0" ) ; long numBits = optimalNumOfBits ( maxNumEntries , DEFAULT_FPP ) ; int nLongs = ( int ) Math . ceil ( ( double ) numBits / ( double ) Long . SIZE ) ; int padLongs = DEFAULT_BLOCK_SIZE - nLongs % DEFAULT_BLOCK_SIZE ; return START_OF_SERIALIZED_LONGS + ( ( nLongs + padLongs ) * Long . BYTES ) ; } | Calculate size in bytes of a BloomKFilter for a given number of entries |
18,953 | public void merge ( BloomKFilter that ) { if ( this != that && this . m == that . m && this . k == that . k ) { this . bitSet . putAll ( that . bitSet ) ; } else { throw new IllegalArgumentException ( "BloomKFilters are not compatible for merging." + " this - " + this + " that - " + that ) ; } } | Merge the specified bloom filter with current bloom filter . |
18,954 | public Boolean isHandOffComplete ( String dataSource , SegmentDescriptor descriptor ) { try { FullResponseHolder response = druidLeaderClient . go ( druidLeaderClient . makeRequest ( HttpMethod . GET , StringUtils . format ( "/druid/coordinator/v1/datasources/%s/handoffComplete?interval=%s&partitionNumber=%d&version=%s" , StringUtils . urlEncode ( dataSource ) , descriptor . getInterval ( ) , descriptor . getPartitionNumber ( ) , descriptor . getVersion ( ) ) ) ) ; if ( response . getStatus ( ) . equals ( HttpResponseStatus . NOT_FOUND ) ) { return null ; } if ( ! response . getStatus ( ) . equals ( HttpResponseStatus . OK ) ) { throw new ISE ( "Error while fetching serverView status[%s] content[%s]" , response . getStatus ( ) , response . getContent ( ) ) ; } return jsonMapper . readValue ( response . getContent ( ) , new TypeReference < Boolean > ( ) { } ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Checks whether the given segment is handed off or not . It can return null if the HTTP call returns 404 , which can happen during a rolling update . |
18,955 | public static boolean sortingOrderHasNonGroupingFields ( DefaultLimitSpec limitSpec , List < DimensionSpec > dimensions ) { for ( OrderByColumnSpec orderSpec : limitSpec . getColumns ( ) ) { int dimIndex = OrderByColumnSpec . getDimIndexForOrderBy ( orderSpec , dimensions ) ; if ( dimIndex < 0 ) { return true ; } } return false ; } | Check if a limitSpec has columns in the sorting order that are not part of the grouping fields represented by dimensions . |
18,956 | public static < T > MapBinder < String , T > optionBinder ( Binder binder , Key < T > interfaceKey ) { final TypeLiteral < T > interfaceType = interfaceKey . getTypeLiteral ( ) ; if ( interfaceKey . getAnnotation ( ) != null ) { return MapBinder . newMapBinder ( binder , TypeLiteral . get ( String . class ) , interfaceType , interfaceKey . getAnnotation ( ) ) ; } else if ( interfaceKey . getAnnotationType ( ) != null ) { Class < ? extends Annotation > annotationType = interfaceKey . getAnnotationType ( ) ; return MapBinder . newMapBinder ( binder , TypeLiteral . get ( String . class ) , interfaceType , annotationType ) ; } else { return MapBinder . newMapBinder ( binder , TypeLiteral . get ( String . class ) , interfaceType ) ; } } | Binds an option for a specific choice . The choice must already be registered on the injector for this to work . |
18,957 | public void offer ( float value ) { if ( value < min ) { min = value ; } if ( value > max ) { max = value ; } if ( binCount == 0 ) { positions [ 0 ] = value ; bins [ 0 ] = 1 ; count ++ ; binCount ++ ; return ; } final int index = Arrays . binarySearch ( positions , 0 , binCount , value ) ; if ( index >= 0 ) { bins [ index ] = ( bins [ index ] & APPROX_FLAG_BIT ) | ( ( bins [ index ] & COUNT_BITS ) + 1 ) ; count ++ ; return ; } final int insertAt = - ( index + 1 ) ; if ( binCount < size ) { shiftRight ( insertAt , binCount ) ; positions [ insertAt ] = value ; bins [ insertAt ] = 1 ; count ++ ; binCount ++ ; return ; } int minPos = minDeltaIndex ( ) ; float minDelta = minPos >= 0 ? positions [ minPos + 1 ] - positions [ minPos ] : Float . POSITIVE_INFINITY ; final float deltaRight = insertAt < binCount ? positions [ insertAt ] - value : Float . POSITIVE_INFINITY ; final float deltaLeft = insertAt > 0 ? value - positions [ insertAt - 1 ] : Float . POSITIVE_INFINITY ; boolean mergeValue = false ; if ( deltaRight < minDelta ) { minDelta = deltaRight ; minPos = insertAt ; mergeValue = true ; } if ( deltaLeft < minDelta ) { minPos = insertAt - 1 ; mergeValue = true ; } if ( mergeValue ) { final long k = bins [ minPos ] & COUNT_BITS ; positions [ minPos ] = ( positions [ minPos ] * k + value ) / ( k + 1 ) ; bins [ minPos ] = ( k + 1 ) | APPROX_FLAG_BIT ; count ++ ; } else { mergeInsert ( minPos , insertAt , value , 1 ) ; } } | Adds the given value to the histogram |
18,958 | protected void shiftRight ( int start , int end ) { float prevVal = positions [ start ] ; long prevCnt = bins [ start ] ; for ( int i = start + 1 ; i <= end ; ++ i ) { float tmpVal = positions [ i ] ; long tmpCnt = bins [ i ] ; positions [ i ] = prevVal ; bins [ i ] = prevCnt ; prevVal = tmpVal ; prevCnt = tmpCnt ; } } | Shifts the given range of histogram bins one slot to the right |
18,959 | protected void shiftLeft ( int start , int end ) { for ( int i = start ; i < end ; ++ i ) { positions [ i ] = positions [ i + 1 ] ; bins [ i ] = bins [ i + 1 ] ; } } | Shifts the given range of histogram bins one slot to the left |
18,960 | public ApproximateHistogram copy ( ApproximateHistogram h ) { if ( h . size > this . size ) { this . size = h . size ; this . positions = new float [ size ] ; this . bins = new long [ size ] ; } System . arraycopy ( h . positions , 0 , this . positions , 0 , h . binCount ) ; System . arraycopy ( h . bins , 0 , this . bins , 0 , h . binCount ) ; this . min = h . min ; this . max = h . max ; this . binCount = h . binCount ; this . count = h . count ; return this ; } | Copies histogram h into the current histogram . |
18,961 | protected ApproximateHistogram foldMin ( ApproximateHistogram h , float [ ] mergedPositions , long [ ] mergedBins , float [ ] deltas ) { float mergedMin = this . min < h . min ? this . min : h . min ; float mergedMax = this . max > h . max ? this . max : h . max ; long mergedCount = this . count + h . count ; int maxSize = this . binCount + h . binCount ; int [ ] next = new int [ maxSize ] ; int [ ] prev = new int [ maxSize ] ; if ( mergedPositions == null || mergedBins == null || deltas == null ) { mergedPositions = new float [ maxSize ] ; mergedBins = new long [ maxSize ] ; deltas = new float [ maxSize ] ; } else { Preconditions . checkArgument ( mergedPositions . length >= maxSize , "temp buffer [mergedPositions] too small: length must be at least [%s], got [%s]" , maxSize , mergedPositions . length ) ; Preconditions . checkArgument ( mergedBins . length >= maxSize , "temp buffer [mergedBins] too small: length must be at least [%s], got [%s]" , maxSize , mergedPositions . length ) ; Preconditions . checkArgument ( deltas . length >= maxSize , "temp buffer [deltas] too small: length must be at least [%s], got [%s]" , maxSize , mergedPositions . length ) ; } int mergedBinCount = combineBins ( this . binCount , this . positions , this . bins , h . binCount , h . positions , h . bins , mergedPositions , mergedBins , deltas ) ; if ( mergedBinCount == 0 ) { return this ; } int numMerge = mergedBinCount - this . size ; if ( numMerge < 0 ) { numMerge = 0 ; } mergeBins ( mergedBinCount , mergedPositions , mergedBins , deltas , numMerge , next , prev ) ; int i = 0 ; int k = 0 ; while ( i < mergedBinCount ) { this . positions [ k ] = mergedPositions [ i ] ; this . bins [ k ] = mergedBins [ i ] ; ++ k ; i = next [ i ] ; } this . binCount = mergedBinCount - numMerge ; this . min = mergedMin ; this . max = mergedMax ; this . count = mergedCount ; return this ; } | approximate histogram solution using min heap to store location of min deltas |
18,962 | private static void mergeBins ( int mergedBinCount , float [ ] mergedPositions , long [ ] mergedBins , float [ ] deltas , int numMerge , int [ ] next , int [ ] prev ) { int lastValidIndex = mergedBinCount - 1 ; for ( int i = 0 ; i < mergedBinCount ; ++ i ) { next [ i ] = i + 1 ; } for ( int i = 0 ; i < mergedBinCount ; ++ i ) { prev [ i ] = i - 1 ; } int heapSize = mergedBinCount - 1 ; int [ ] heap = new int [ heapSize ] ; int [ ] reverseIndex = new int [ heapSize ] ; for ( int i = 0 ; i < heapSize ; ++ i ) { heap [ i ] = i ; } for ( int i = 0 ; i < heapSize ; ++ i ) { reverseIndex [ i ] = i ; } heapify ( heap , reverseIndex , heapSize , deltas ) ; { int i = 0 ; while ( i < numMerge ) { int currentIndex = heap [ 0 ] ; final int nextIndex = next [ currentIndex ] ; final int prevIndex = prev [ currentIndex ] ; final long k0 = mergedBins [ currentIndex ] & COUNT_BITS ; final long k1 = mergedBins [ nextIndex ] & COUNT_BITS ; final float m0 = mergedPositions [ currentIndex ] ; final float m1 = mergedPositions [ nextIndex ] ; final float d1 = deltas [ nextIndex ] ; final long sum = k0 + k1 ; final float w = ( float ) k0 / ( float ) sum ; final float mm0 = ( m0 - m1 ) * w + m1 ; mergedPositions [ currentIndex ] = mm0 ; mergedBins [ currentIndex ] = sum | APPROX_FLAG_BIT ; if ( nextIndex == lastValidIndex ) { heapSize = heapDelete ( heap , reverseIndex , heapSize , reverseIndex [ currentIndex ] , deltas ) ; } else { heapSize = heapDelete ( heap , reverseIndex , heapSize , reverseIndex [ nextIndex ] , deltas ) ; deltas [ currentIndex ] = m1 - mm0 + d1 ; siftDown ( heap , reverseIndex , reverseIndex [ currentIndex ] , heapSize - 1 , deltas ) ; } if ( prevIndex >= 0 ) { deltas [ prevIndex ] = mm0 - mergedPositions [ prevIndex ] ; siftDown ( heap , reverseIndex , reverseIndex [ prevIndex ] , heapSize - 1 , deltas ) ; } if ( nextIndex == lastValidIndex ) { lastValidIndex = currentIndex ; } next [ currentIndex ] = next [ nextIndex ] ; if ( nextIndex < lastValidIndex ) { 
prev [ next [ nextIndex ] ] = currentIndex ; } ++ i ; } } } | mergeBins performs the given number of bin merge operations on the given histogram |
18,963 | private static void heapify ( int [ ] heap , int [ ] reverseIndex , int count , float [ ] values ) { int start = ( count - 2 ) / 2 ; while ( start >= 0 ) { siftDown ( heap , reverseIndex , start , count - 1 , values ) ; start -- ; } } | Builds a min - heap and a reverseIndex into the heap from the given array of values |
18,964 | private static void siftDown ( int [ ] heap , int [ ] reverseIndex , int start , int end , float [ ] values ) { int root = start ; while ( root * 2 + 1 <= end ) { int child = root * 2 + 1 ; int swap = root ; if ( values [ heap [ swap ] ] > values [ heap [ child ] ] ) { swap = child ; } if ( child + 1 <= end && values [ heap [ swap ] ] > values [ heap [ child + 1 ] ] ) { swap = child + 1 ; } if ( swap != root ) { int tmp = heap [ swap ] ; heap [ swap ] = heap [ root ] ; heap [ root ] = tmp ; reverseIndex [ heap [ swap ] ] = swap ; reverseIndex [ heap [ root ] ] = root ; root = swap ; } else { return ; } } } | Rebalances the min - heap by pushing values from the top down and simultaneously updating the reverse index |
18,965 | private static int heapDelete ( int [ ] heap , int [ ] reverseIndex , int count , int heapIndex , float [ ] values ) { int end = count - 1 ; reverseIndex [ heap [ heapIndex ] ] = - 1 ; heap [ heapIndex ] = heap [ end ] ; reverseIndex [ heap [ heapIndex ] ] = heapIndex ; end -- ; siftDown ( heap , reverseIndex , heapIndex , end , values ) ; return count - 1 ; } | Deletes an item from the min - heap and updates the reverse index |
18,966 | private static int combineBins ( int leftBinCount , float [ ] leftPositions , long [ ] leftBins , int rightBinCount , float [ ] rightPositions , long [ ] rightBins , float [ ] mergedPositions , long [ ] mergedBins , float [ ] deltas ) { int i = 0 ; int j = 0 ; int k = 0 ; while ( j < leftBinCount || k < rightBinCount ) { if ( j < leftBinCount && ( k == rightBinCount || leftPositions [ j ] < rightPositions [ k ] ) ) { mergedPositions [ i ] = leftPositions [ j ] ; mergedBins [ i ] = leftBins [ j ] ; ++ j ; } else if ( k < rightBinCount && ( j == leftBinCount || leftPositions [ j ] > rightPositions [ k ] ) ) { mergedPositions [ i ] = rightPositions [ k ] ; mergedBins [ i ] = rightBins [ k ] ; ++ k ; } else { mergedPositions [ i ] = leftPositions [ j ] ; mergedBins [ i ] = leftBins [ j ] + rightBins [ k ] ; ++ j ; ++ k ; } if ( deltas != null && i > 0 ) { deltas [ i - 1 ] = mergedPositions [ i ] - mergedPositions [ i - 1 ] ; } ++ i ; } return i ; } | Combines two sets of histogram bins using merge - sort and computes the delta between consecutive bin positions . Duplicate bins are merged together . |
18,967 | public byte [ ] toBytes ( ) { ByteBuffer buf = ByteBuffer . allocate ( getMinStorageSize ( ) ) ; toBytes ( buf ) ; return buf . array ( ) ; } | Returns a byte - array representation of this ApproximateHistogram object |
18,968 | public boolean canStoreCompact ( ) { final long exactCount = getExactCount ( ) ; return ( size <= Short . MAX_VALUE && exactCount <= Byte . MAX_VALUE && ( count - exactCount ) <= Byte . MAX_VALUE ) ; } | Checks whether this approximate histogram can be stored in a compact form |
18,969 | public void toBytes ( ByteBuffer buf ) { if ( canStoreCompact ( ) && getCompactStorageSize ( ) < getSparseStorageSize ( ) ) { toBytesCompact ( buf ) ; } else { toBytesSparse ( buf ) ; } } | Writes the representation of this ApproximateHistogram object to the given byte - buffer |
18,970 | public void toBytesDense ( ByteBuffer buf ) { buf . putInt ( size ) ; buf . putInt ( binCount ) ; buf . asFloatBuffer ( ) . put ( positions ) ; buf . position ( buf . position ( ) + Float . BYTES * positions . length ) ; buf . asLongBuffer ( ) . put ( bins ) ; buf . position ( buf . position ( ) + Long . BYTES * bins . length ) ; buf . putFloat ( min ) ; buf . putFloat ( max ) ; } | Writes the dense representation of this ApproximateHistogram object to the given byte - buffer |
18,971 | public void toBytesSparse ( ByteBuffer buf ) { buf . putInt ( size ) ; buf . putInt ( - 1 * binCount ) ; for ( int i = 0 ; i < binCount ; ++ i ) { buf . putFloat ( positions [ i ] ) ; } for ( int i = 0 ; i < binCount ; ++ i ) { buf . putLong ( bins [ i ] ) ; } buf . putFloat ( min ) ; buf . putFloat ( max ) ; } | Writes the sparse representation of this ApproximateHistogram object to the given byte - buffer |
18,972 | public void toBytesCompact ( ByteBuffer buf ) { Preconditions . checkState ( canStoreCompact ( ) , "Approximate histogram cannot be stored in compact form" ) ; buf . putShort ( ( short ) ( - 1 * size ) ) ; final long exactCount = getExactCount ( ) ; if ( exactCount != count ) { buf . put ( ( byte ) ( - 1 * ( count - exactCount ) ) ) ; for ( int i = 0 ; i < binCount ; ++ i ) { if ( ( bins [ i ] & APPROX_FLAG_BIT ) != 0 ) { for ( int k = 0 ; k < ( bins [ i ] & COUNT_BITS ) ; ++ k ) { buf . putFloat ( positions [ i ] ) ; } } } buf . putFloat ( min ) ; buf . putFloat ( max ) ; } buf . put ( ( byte ) exactCount ) ; for ( int i = 0 ; i < binCount ; ++ i ) { if ( ( bins [ i ] & APPROX_FLAG_BIT ) == 0 ) { for ( int k = 0 ; k < ( bins [ i ] & COUNT_BITS ) ; ++ k ) { buf . putFloat ( positions [ i ] ) ; } } } } | Returns a compact byte - buffer representation of this ApproximateHistogram object storing actual values as opposed to histogram bins |
18,973 | public static ApproximateHistogram fromBytes ( byte [ ] bytes ) { ByteBuffer buf = ByteBuffer . wrap ( bytes ) ; return fromBytes ( buf ) ; } | Constructs an Approximate Histogram object from the given byte - array representation |
18,974 | public static ApproximateHistogram fromBytesCompact ( ByteBuffer buf ) { short size = ( short ) ( - 1 * buf . getShort ( ) ) ; byte count = buf . get ( ) ; if ( count >= 0 ) { ApproximateHistogram histogram = new ApproximateHistogram ( size ) ; for ( int i = 0 ; i < count ; ++ i ) { histogram . offer ( buf . getFloat ( ) ) ; } return histogram ; } else { byte approxCount = ( byte ) ( - 1 * count ) ; Map < Float , Long > approx = new HashMap < > ( ) ; for ( int i = 0 ; i < approxCount ; ++ i ) { final float value = buf . getFloat ( ) ; if ( approx . containsKey ( value ) ) { approx . put ( value , approx . get ( value ) + 1 ) ; } else { approx . put ( value , 1L ) ; } } float min = buf . getFloat ( ) ; float max = buf . getFloat ( ) ; byte exactCount = buf . get ( ) ; Map < Float , Long > exact = new HashMap < > ( ) ; for ( int i = 0 ; i < exactCount ; ++ i ) { final float value = buf . getFloat ( ) ; if ( exact . containsKey ( value ) ) { exact . put ( value , exact . get ( value ) + 1 ) ; } else { exact . put ( value , 1L ) ; } } int binCount = exact . size ( ) + approx . size ( ) ; List < Float > pos = new ArrayList < > ( ) ; pos . addAll ( exact . keySet ( ) ) ; pos . addAll ( approx . keySet ( ) ) ; Collections . sort ( pos ) ; float [ ] positions = new float [ size ] ; long [ ] bins = new long [ size ] ; for ( int i = 0 ; i < pos . size ( ) ; ++ i ) { positions [ i ] = pos . get ( i ) ; } for ( int i = 0 ; i < pos . size ( ) ; ++ i ) { final float value = pos . get ( i ) ; if ( exact . containsKey ( value ) ) { bins [ i ] = exact . get ( value ) ; } else { bins [ i ] = approx . get ( value ) | APPROX_FLAG_BIT ; } } return new ApproximateHistogram ( binCount , positions , bins , min , max ) ; } } | Constructs an ApproximateHistogram object from the given compact byte - buffer representation |
18,975 | public static ApproximateHistogram fromBytes ( ByteBuffer buf ) { if ( buf . getShort ( buf . position ( ) ) < 0 ) { return fromBytesCompact ( buf ) ; } else { if ( buf . getInt ( buf . position ( ) + Integer . BYTES ) < 0 ) { return fromBytesSparse ( buf ) ; } else { return fromBytesDense ( buf ) ; } } } | Constructs an ApproximateHistogram object from the given byte - buffer representation |
18,976 | public double sum ( final float b ) { if ( b < min ) { return 0 ; } if ( b >= max ) { return count ; } int index = Arrays . binarySearch ( positions , 0 , binCount , b ) ; boolean exactMatch = index >= 0 ; index = exactMatch ? index : - ( index + 1 ) ; if ( ! exactMatch ) { index -- ; } final boolean outerLeft = index < 0 ; final boolean outerRight = index >= ( binCount - 1 ) ; final long m0 = outerLeft ? 0 : ( bins [ index ] & COUNT_BITS ) ; final long m1 = outerRight ? 0 : ( bins [ index + 1 ] & COUNT_BITS ) ; final double p0 = outerLeft ? min : positions [ index ] ; final double p1 = outerRight ? max : positions [ index + 1 ] ; final boolean exact0 = ( ! outerLeft && ( bins [ index ] & APPROX_FLAG_BIT ) == 0 ) ; final boolean exact1 = ( ! outerRight && ( bins [ index + 1 ] & APPROX_FLAG_BIT ) == 0 ) ; final double l = ( p1 == p0 ) ? 0 : ( b - p0 ) / ( p1 - p0 ) ; long tm0 = m0 ; long tm1 = m1 ; if ( exact0 ) { tm0 = 0 ; } if ( exact1 ) { tm1 = 0 ; } final double mb = tm0 + ( tm1 - tm0 ) * l ; double s = 0.5 * ( tm0 + mb ) * l ; for ( int i = 0 ; i < index ; ++ i ) { s += ( bins [ i ] & COUNT_BITS ) ; } if ( exact0 ) { return ( s + m0 ) ; } else { return ( s + 0.5 * m0 ) ; } } | Returns the approximate number of items less than or equal to b in the histogram |
18,977 | public Histogram toHistogram ( final float [ ] breaks ) { final double [ ] approximateBins = new double [ breaks . length - 1 ] ; double prev = sum ( breaks [ 0 ] ) ; for ( int i = 1 ; i < breaks . length ; ++ i ) { double s = sum ( breaks [ i ] ) ; approximateBins [ i - 1 ] = ( float ) ( s - prev ) ; prev = s ; } return new Histogram ( breaks , approximateBins ) ; } | Computes a visual representation of the approximate histogram with bins laid out according to the given breaks |
18,978 | public Histogram toHistogram ( int size ) { Preconditions . checkArgument ( size > 1 , "histogram size must be greater than 1" ) ; float [ ] breaks = new float [ size + 1 ] ; float delta = ( max - min ) / ( size - 1 ) ; breaks [ 0 ] = min - delta ; for ( int i = 1 ; i < breaks . length - 1 ; ++ i ) { breaks [ i ] = breaks [ i - 1 ] + delta ; } breaks [ breaks . length - 1 ] = max ; return toHistogram ( breaks ) ; } | Computes a visual representation of the approximate histogram with a given number of equal - sized bins |
18,979 | public Histogram toHistogram ( final float bucketSize , final float offset ) { final float minFloor = ( float ) Math . floor ( ( min ( ) - offset ) / bucketSize ) * bucketSize + offset ; final float lowerLimitFloor = ( float ) Math . floor ( ( lowerLimit - offset ) / bucketSize ) * bucketSize + offset ; final float firstBreak = Math . max ( minFloor , lowerLimitFloor ) ; final float maxCeil = ( float ) Math . ceil ( ( max ( ) - offset ) / bucketSize ) * bucketSize + offset ; final float upperLimitCeil = ( float ) Math . ceil ( ( upperLimit - offset ) / bucketSize ) * bucketSize + offset ; final float lastBreak = Math . min ( maxCeil , upperLimitCeil ) ; final float cutoff = 0.1f ; final ArrayList < Float > breaks = new ArrayList < Float > ( ) ; final float bottomBreak = minFloor - bucketSize ; if ( bottomBreak != firstBreak && ( sum ( firstBreak ) - sum ( bottomBreak ) > cutoff ) ) { breaks . add ( bottomBreak ) ; } float left = firstBreak ; boolean leftSet = false ; while ( left + bucketSize <= lastBreak + ( bucketSize / 10 ) ) { final float right = left + bucketSize ; if ( sum ( right ) - sum ( left ) > cutoff ) { if ( ! leftSet ) { breaks . add ( left ) ; } breaks . add ( right ) ; leftSet = true ; } else { leftSet = false ; } left = right ; } if ( breaks . get ( breaks . size ( ) - 1 ) != maxCeil && ( sum ( maxCeil ) - sum ( breaks . get ( breaks . size ( ) - 1 ) ) > cutoff ) ) { breaks . add ( maxCeil ) ; } return toHistogram ( Floats . toArray ( breaks ) ) ; } | Computes a visual representation given an initial breakpoint offset and a bucket size . |
18,980 | public Optional < Bucket > getBucket ( InputRow inputRow ) { final Optional < Interval > timeBucket = schema . getDataSchema ( ) . getGranularitySpec ( ) . bucketInterval ( DateTimes . utc ( inputRow . getTimestampFromEpoch ( ) ) ) ; if ( ! timeBucket . isPresent ( ) ) { return Optional . absent ( ) ; } final DateTime bucketStart = timeBucket . get ( ) . getStart ( ) ; final ShardSpec actualSpec = shardSpecLookups . get ( bucketStart . getMillis ( ) ) . getShardSpec ( rollupGran . bucketStart ( inputRow . getTimestamp ( ) ) . getMillis ( ) , inputRow ) ; final HadoopyShardSpec hadoopyShardSpec = hadoopShardSpecLookup . get ( bucketStart . getMillis ( ) ) . get ( actualSpec ) ; return Optional . of ( new Bucket ( hadoopyShardSpec . getShardNum ( ) , bucketStart , actualSpec . getPartitionNum ( ) ) ) ; } | Get the proper bucket for some input row . |
18,981 | public Path makeIntermediatePath ( ) { return new Path ( StringUtils . format ( "%s/%s/%s_%s" , getWorkingPath ( ) , schema . getDataSchema ( ) . getDataSource ( ) , StringUtils . removeChar ( schema . getTuningConfig ( ) . getVersion ( ) , ':' ) , schema . getUniqueId ( ) ) ) ; } | Make the intermediate path for this job run . |
18,982 | public LookupExtractor get ( ) { final Lock readLock = startStopSync . readLock ( ) ; try { readLock . lockInterruptibly ( ) ; } catch ( InterruptedException e ) { throw new RuntimeException ( e ) ; } try { if ( entry == null ) { throw new ISE ( "Factory [%s] not started" , extractorID ) ; } final CacheScheduler . CacheState cacheState = entry . getCacheState ( ) ; if ( cacheState instanceof CacheScheduler . NoCache ) { final String noCacheReason = ( ( CacheScheduler . NoCache ) cacheState ) . name ( ) ; throw new ISE ( "%s: %s, extractorID = %s" , entry , noCacheReason , extractorID ) ; } CacheScheduler . VersionedCache versionedCache = ( CacheScheduler . VersionedCache ) cacheState ; Map < String , String > map = versionedCache . getCache ( ) ; final byte [ ] v = StringUtils . toUtf8 ( versionedCache . getVersion ( ) ) ; final byte [ ] id = StringUtils . toUtf8 ( extractorID ) ; return new MapLookupExtractor ( map , isInjective ( ) ) { public byte [ ] getCacheKey ( ) { return ByteBuffer . allocate ( CLASS_CACHE_KEY . length + id . length + 1 + v . length + 1 + 1 ) . put ( CLASS_CACHE_KEY ) . put ( id ) . put ( ( byte ) 0xFF ) . put ( v ) . put ( ( byte ) 0xFF ) . put ( isOneToOne ( ) ? ( byte ) 1 : ( byte ) 0 ) . array ( ) ; } } ; } finally { readLock . unlock ( ) ; } } | Grab the latest snapshot from the CacheScheduler s entry |
18,983 | private static void addInputPath ( Job job , Iterable < String > pathStrings , Class < ? extends InputFormat > inputFormatClass ) { Configuration conf = job . getConfiguration ( ) ; StringBuilder inputFormats = new StringBuilder ( StringUtils . nullToEmptyNonDruidDataString ( conf . get ( MultipleInputs . DIR_FORMATS ) ) ) ; String [ ] paths = Iterables . toArray ( pathStrings , String . class ) ; for ( int i = 0 ; i < paths . length - 1 ; i ++ ) { if ( inputFormats . length ( ) > 0 ) { inputFormats . append ( ',' ) ; } inputFormats . append ( paths [ i ] ) . append ( ';' ) . append ( inputFormatClass . getName ( ) ) ; } if ( inputFormats . length ( ) > 0 ) { conf . set ( MultipleInputs . DIR_FORMATS , inputFormats . toString ( ) ) ; } MultipleInputs . addInputPath ( job , new Path ( paths [ paths . length - 1 ] ) , inputFormatClass ) ; } | copied from MultipleInputs . addInputPath with slight modifications |
18,984 | public static FileCopyResult retryCopy ( final ByteSource byteSource , final File outFile , final Predicate < Throwable > shouldRetry , final int maxAttempts ) { try { StreamUtils . retryCopy ( byteSource , Files . asByteSink ( outFile ) , shouldRetry , maxAttempts ) ; return new FileCopyResult ( outFile ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Copy input byte source to outFile . If outFile exists it is attempted to be deleted . |
18,985 | public ListenableFuture < TaskStatus > run ( final Task task ) { final RemoteTaskRunnerWorkItem completeTask , runningTask , pendingTask ; if ( ( pendingTask = pendingTasks . get ( task . getId ( ) ) ) != null ) { log . info ( "Assigned a task[%s] that is already pending!" , task . getId ( ) ) ; runPendingTasks ( ) ; return pendingTask . getResult ( ) ; } else if ( ( runningTask = runningTasks . get ( task . getId ( ) ) ) != null ) { ZkWorker zkWorker = findWorkerRunningTask ( task . getId ( ) ) ; if ( zkWorker == null ) { log . warn ( "Told to run task[%s], but no worker has started running it yet." , task . getId ( ) ) ; } else { log . info ( "Task[%s] already running on %s." , task . getId ( ) , zkWorker . getWorker ( ) . getHost ( ) ) ; TaskAnnouncement announcement = zkWorker . getRunningTasks ( ) . get ( task . getId ( ) ) ; if ( announcement . getTaskStatus ( ) . isComplete ( ) ) { taskComplete ( runningTask , zkWorker , announcement . getTaskStatus ( ) ) ; } } return runningTask . getResult ( ) ; } else if ( ( completeTask = completeTasks . get ( task . getId ( ) ) ) != null ) { return completeTask . getResult ( ) ; } else { return addPendingTask ( task ) . getResult ( ) ; } } | A task will be run only if there is no current knowledge in the RemoteTaskRunner of the task . |
18,986 | public void shutdown ( final String taskId , String reason ) { log . info ( "Shutdown [%s] because: [%s]" , taskId , reason ) ; if ( ! lifecycleLock . awaitStarted ( 1 , TimeUnit . SECONDS ) ) { log . info ( "This TaskRunner is stopped or not yet started. Ignoring shutdown command for task: %s" , taskId ) ; } else if ( pendingTasks . remove ( taskId ) != null ) { pendingTaskPayloads . remove ( taskId ) ; log . info ( "Removed task from pending queue: %s" , taskId ) ; } else if ( completeTasks . containsKey ( taskId ) ) { cleanup ( taskId ) ; } else { final ZkWorker zkWorker = findWorkerRunningTask ( taskId ) ; if ( zkWorker == null ) { log . info ( "Can't shutdown! No worker running task %s" , taskId ) ; return ; } URL url = null ; try { url = TaskRunnerUtils . makeWorkerURL ( zkWorker . getWorker ( ) , "/druid/worker/v1/task/%s/shutdown" , taskId ) ; final StatusResponseHolder response = httpClient . go ( new Request ( HttpMethod . POST , url ) , RESPONSE_HANDLER , shutdownTimeout ) . get ( ) ; log . info ( "Sent shutdown message to worker: %s, status %s, response: %s" , zkWorker . getWorker ( ) . getHost ( ) , response . getStatus ( ) , response . getContent ( ) ) ; if ( ! HttpResponseStatus . OK . equals ( response . getStatus ( ) ) ) { log . error ( "Shutdown failed for %s! Are you sure the task was running?" , taskId ) ; } } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new RE ( e , "Interrupted posting shutdown to [%s] for task [%s]" , url , taskId ) ; } catch ( Exception e ) { throw new RE ( e , "Error in handling post to [%s] for task [%s]" , zkWorker . getWorker ( ) . getHost ( ) , taskId ) ; } } } | Finds the worker running the task and forwards the shutdown signal to the worker . |
18,987 | private void runPendingTasks ( ) { runPendingTasksExec . submit ( new Callable < Void > ( ) { public Void call ( ) { try { List < RemoteTaskRunnerWorkItem > copy = Lists . newArrayList ( pendingTasks . values ( ) ) ; sortByInsertionTime ( copy ) ; for ( RemoteTaskRunnerWorkItem taskRunnerWorkItem : copy ) { String taskId = taskRunnerWorkItem . getTaskId ( ) ; if ( tryAssignTasks . putIfAbsent ( taskId , taskId ) == null ) { try { Task task = pendingTaskPayloads . get ( taskId ) ; if ( task != null && tryAssignTask ( task , taskRunnerWorkItem ) ) { pendingTaskPayloads . remove ( taskId ) ; } } catch ( Exception e ) { log . makeAlert ( e , "Exception while trying to assign task" ) . addData ( "taskId" , taskRunnerWorkItem . getTaskId ( ) ) . emit ( ) ; RemoteTaskRunnerWorkItem workItem = pendingTasks . remove ( taskId ) ; if ( workItem != null ) { taskComplete ( workItem , null , TaskStatus . failure ( taskId ) ) ; } } finally { tryAssignTasks . remove ( taskId ) ; } } } } catch ( Exception e ) { log . makeAlert ( e , "Exception in running pending tasks" ) . emit ( ) ; } return null ; } } ) ; } | This method uses a multi - threaded executor to extract all pending tasks and attempt to run them . Any tasks that are successfully assigned to a worker will be moved from pendingTasks to runningTasks . This method is thread - safe . This method should be run each time there is new worker capacity or if new tasks are assigned . |
18,988 | private void cleanup ( final String taskId ) { if ( ! lifecycleLock . awaitStarted ( 1 , TimeUnit . SECONDS ) ) { return ; } final RemoteTaskRunnerWorkItem removed = completeTasks . remove ( taskId ) ; final Worker worker = removed . getWorker ( ) ; if ( removed == null || worker == null ) { log . makeAlert ( "WTF?! Asked to cleanup nonexistent task" ) . addData ( "taskId" , taskId ) . emit ( ) ; } else { final String workerId = worker . getHost ( ) ; log . info ( "Cleaning up task[%s] on worker[%s]" , taskId , workerId ) ; final String statusPath = JOINER . join ( indexerZkConfig . getStatusPath ( ) , workerId , taskId ) ; try { cf . delete ( ) . guaranteed ( ) . forPath ( statusPath ) ; } catch ( KeeperException . NoNodeException e ) { log . info ( "Tried to delete status path[%s] that didn't exist! Must've gone away already?" , statusPath ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } } | Removes a task from the complete queue and clears out the ZK status path of the task . |
18,989 | private boolean tryAssignTask ( final Task task , final RemoteTaskRunnerWorkItem taskRunnerWorkItem ) throws Exception { Preconditions . checkNotNull ( task , "task" ) ; Preconditions . checkNotNull ( taskRunnerWorkItem , "taskRunnerWorkItem" ) ; Preconditions . checkArgument ( task . getId ( ) . equals ( taskRunnerWorkItem . getTaskId ( ) ) , "task id != workItem id" ) ; if ( runningTasks . containsKey ( task . getId ( ) ) || findWorkerRunningTask ( task . getId ( ) ) != null ) { log . info ( "Task[%s] already running." , task . getId ( ) ) ; return true ; } else { WorkerBehaviorConfig workerConfig = workerConfigRef . get ( ) ; WorkerSelectStrategy strategy ; if ( workerConfig == null || workerConfig . getSelectStrategy ( ) == null ) { strategy = WorkerBehaviorConfig . DEFAULT_STRATEGY ; log . debug ( "No worker selection strategy set. Using default of [%s]" , strategy . getClass ( ) . getSimpleName ( ) ) ; } else { strategy = workerConfig . getSelectStrategy ( ) ; } ZkWorker assignedWorker = null ; final ImmutableWorkerInfo immutableZkWorker ; try { synchronized ( workersWithUnacknowledgedTask ) { immutableZkWorker = strategy . findWorkerForTask ( config , ImmutableMap . copyOf ( Maps . transformEntries ( Maps . filterEntries ( zkWorkers , new Predicate < Map . Entry < String , ZkWorker > > ( ) { public boolean apply ( Map . Entry < String , ZkWorker > input ) { return ! lazyWorkers . containsKey ( input . getKey ( ) ) && ! workersWithUnacknowledgedTask . containsKey ( input . getKey ( ) ) && ! blackListedWorkers . contains ( input . getValue ( ) ) ; } } ) , ( String key , ZkWorker value ) -> value . toImmutable ( ) ) ) , task ) ; if ( immutableZkWorker != null && workersWithUnacknowledgedTask . putIfAbsent ( immutableZkWorker . getWorker ( ) . getHost ( ) , task . getId ( ) ) == null ) { assignedWorker = zkWorkers . get ( immutableZkWorker . getWorker ( ) . 
getHost ( ) ) ; } } if ( assignedWorker != null ) { return announceTask ( task , assignedWorker , taskRunnerWorkItem ) ; } else { log . debug ( "Unsuccessful task-assign attempt for task [%s] on workers [%s]. Workers to ack tasks are [%s]." , task . getId ( ) , zkWorkers . values ( ) , workersWithUnacknowledgedTask ) ; } return false ; } finally { if ( assignedWorker != null ) { workersWithUnacknowledgedTask . remove ( assignedWorker . getWorker ( ) . getHost ( ) ) ; runPendingTasks ( ) ; } } } } | Ensures no workers are already running a task before assigning the task to a worker . It is possible that a worker is running a task that the RTR has no knowledge of . This occurs when the RTR needs to bootstrap after a restart . |
18,990 | private boolean announceTask ( final Task task , final ZkWorker theZkWorker , final RemoteTaskRunnerWorkItem taskRunnerWorkItem ) throws Exception { Preconditions . checkArgument ( task . getId ( ) . equals ( taskRunnerWorkItem . getTaskId ( ) ) , "task id != workItem id" ) ; final String worker = theZkWorker . getWorker ( ) . getHost ( ) ; synchronized ( statusLock ) { if ( ! zkWorkers . containsKey ( worker ) || lazyWorkers . containsKey ( worker ) ) { log . info ( "Not assigning task to already removed worker[%s]" , worker ) ; return false ; } log . info ( "Coordinator asking Worker[%s] to add task[%s]" , worker , task . getId ( ) ) ; CuratorUtils . createIfNotExists ( cf , JOINER . join ( indexerZkConfig . getTasksPath ( ) , worker , task . getId ( ) ) , CreateMode . EPHEMERAL , jsonMapper . writeValueAsBytes ( task ) , config . getMaxZnodeBytes ( ) ) ; RemoteTaskRunnerWorkItem workItem = pendingTasks . remove ( task . getId ( ) ) ; if ( workItem == null ) { log . makeAlert ( "WTF?! Got a null work item from pending tasks?! How can this be?!" ) . addData ( "taskId" , task . getId ( ) ) . emit ( ) ; return false ; } RemoteTaskRunnerWorkItem newWorkItem = workItem . withWorker ( theZkWorker . getWorker ( ) , null ) ; runningTasks . put ( task . getId ( ) , newWorkItem ) ; log . info ( "Task %s switched from pending to running (on [%s])" , task . getId ( ) , newWorkItem . getWorker ( ) . getHost ( ) ) ; TaskRunnerUtils . notifyStatusChanged ( listeners , task . getId ( ) , TaskStatus . running ( task . getId ( ) ) ) ; Stopwatch timeoutStopwatch = Stopwatch . createStarted ( ) ; while ( ! isWorkerRunningTask ( theZkWorker , task . getId ( ) ) ) { final long waitMs = config . getTaskAssignmentTimeout ( ) . toStandardDuration ( ) . getMillis ( ) ; statusLock . wait ( waitMs ) ; long elapsed = timeoutStopwatch . elapsed ( TimeUnit . MILLISECONDS ) ; if ( elapsed >= waitMs ) { log . makeAlert ( "Task assignment timed out on worker [%s], never ran task [%s]! 
Timeout: (%s >= %s)!" , worker , task . getId ( ) , elapsed , config . getTaskAssignmentTimeout ( ) ) . emit ( ) ; taskComplete ( taskRunnerWorkItem , theZkWorker , TaskStatus . failure ( task . getId ( ) ) ) ; break ; } } return true ; } } | Creates a ZK entry under a specific path associated with a worker . The worker is responsible for removing the task ZK entry and creating a task status ZK entry . |
18,991 | private void updateWorker ( final Worker worker ) { final ZkWorker zkWorker = zkWorkers . get ( worker . getHost ( ) ) ; if ( zkWorker != null ) { log . info ( "Worker[%s] updated its announcement from[%s] to[%s]." , worker . getHost ( ) , zkWorker . getWorker ( ) , worker ) ; zkWorker . setWorker ( worker ) ; } else { log . warn ( "WTF, worker[%s] updated its announcement but we didn't have a ZkWorker for it. Ignoring." , worker . getHost ( ) ) ; } } | We allow workers to change their own capacities and versions . They cannot change their own hosts or ips without dropping themselves and re - announcing . |
18,992 | private void removeWorker ( final Worker worker ) { log . info ( "Kaboom! Worker[%s] removed!" , worker . getHost ( ) ) ; final ZkWorker zkWorker = zkWorkers . get ( worker . getHost ( ) ) ; if ( zkWorker != null ) { try { scheduleTasksCleanupForWorker ( worker . getHost ( ) , getAssignedTasks ( worker ) ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } finally { try { zkWorker . close ( ) ; } catch ( Exception e ) { log . error ( e , "Exception closing worker[%s]!" , worker . getHost ( ) ) ; } zkWorkers . remove ( worker . getHost ( ) ) ; checkBlackListedNodes ( ) ; } } lazyWorkers . remove ( worker . getHost ( ) ) ; } | When a ephemeral worker node disappears from ZK incomplete running tasks will be retried by the logic in the status listener . We still have to make sure there are no tasks assigned to the worker but not yet running . |
18,993 | private void scheduleTasksCleanupForWorker ( final String worker , final List < String > tasksToFail ) { cancelWorkerCleanup ( worker ) ; final ListenableScheduledFuture < ? > cleanupTask = cleanupExec . schedule ( new Runnable ( ) { public void run ( ) { log . info ( "Running scheduled cleanup for Worker[%s]" , worker ) ; try { for ( String assignedTask : tasksToFail ) { String taskPath = JOINER . join ( indexerZkConfig . getTasksPath ( ) , worker , assignedTask ) ; String statusPath = JOINER . join ( indexerZkConfig . getStatusPath ( ) , worker , assignedTask ) ; if ( cf . checkExists ( ) . forPath ( taskPath ) != null ) { cf . delete ( ) . guaranteed ( ) . forPath ( taskPath ) ; } if ( cf . checkExists ( ) . forPath ( statusPath ) != null ) { cf . delete ( ) . guaranteed ( ) . forPath ( statusPath ) ; } log . info ( "Failing task[%s]" , assignedTask ) ; RemoteTaskRunnerWorkItem taskRunnerWorkItem = runningTasks . remove ( assignedTask ) ; if ( taskRunnerWorkItem != null ) { taskRunnerWorkItem . setResult ( TaskStatus . failure ( assignedTask ) ) ; TaskRunnerUtils . notifyStatusChanged ( listeners , assignedTask , TaskStatus . failure ( assignedTask ) ) ; } else { log . warn ( "RemoteTaskRunner has no knowledge of task[%s]" , assignedTask ) ; } } String workerStatusPath = JOINER . join ( indexerZkConfig . getStatusPath ( ) , worker ) ; if ( cf . checkExists ( ) . forPath ( workerStatusPath ) != null ) { cf . delete ( ) . guaranteed ( ) . forPath ( JOINER . join ( indexerZkConfig . getStatusPath ( ) , worker ) ) ; } } catch ( Exception e ) { log . makeAlert ( "Exception while cleaning up worker[%s]" , worker ) . emit ( ) ; throw new RuntimeException ( e ) ; } } } , config . getTaskCleanupTimeout ( ) . toStandardDuration ( ) . getMillis ( ) , TimeUnit . MILLISECONDS ) ; removedWorkerCleanups . put ( worker , cleanupTask ) ; Futures . 
addCallback ( cleanupTask , new FutureCallback < Object > ( ) { public void onSuccess ( Object result ) { removedWorkerCleanups . remove ( worker , cleanupTask ) ; } public void onFailure ( Throwable t ) { removedWorkerCleanups . remove ( worker , cleanupTask ) ; } } ) ; } | Schedule a task that will at some point in the future clean up znodes and issue failures for tasksToFail if they are being run by worker . |
18,994 | private int persistHydrant ( FireHydrant indexToPersist , SegmentIdWithShardSpec identifier ) { synchronized ( indexToPersist ) { if ( indexToPersist . hasSwapped ( ) ) { log . info ( "Segment[%s], Hydrant[%s] already swapped. Ignoring request to persist." , identifier , indexToPersist ) ; return 0 ; } log . info ( "Segment[%s], persisting Hydrant[%s]" , identifier , indexToPersist ) ; try { int numRows = indexToPersist . getIndex ( ) . size ( ) ; final File persistedFile ; final File persistDir = createPersistDirIfNeeded ( identifier ) ; final IndexSpec indexSpec = tuningConfig . getIndexSpec ( ) ; persistedFile = indexMerger . persist ( indexToPersist . getIndex ( ) , identifier . getInterval ( ) , new File ( persistDir , String . valueOf ( indexToPersist . getCount ( ) ) ) , indexSpec , tuningConfig . getSegmentWriteOutMediumFactory ( ) ) ; indexToPersist . swapSegment ( new QueryableIndexSegment ( indexIO . loadIndex ( persistedFile ) , indexToPersist . getSegmentId ( ) ) ) ; return numRows ; } catch ( IOException e ) { log . makeAlert ( "dataSource[%s] -- incremental persist failed" , schema . getDataSource ( ) ) . addData ( "segment" , identifier . toString ( ) ) . addData ( "count" , indexToPersist . getCount ( ) ) . emit ( ) ; throw new RuntimeException ( e ) ; } } } | Persists the given hydrant and returns the number of rows persisted . Must only be called in the single - threaded persistExecutor . |
18,995 | public static DateTime getUniversalTimestamp ( final GroupByQuery query ) { final Granularity gran = query . getGranularity ( ) ; final String timestampStringFromContext = query . getContextValue ( CTX_KEY_FUDGE_TIMESTAMP , "" ) ; if ( ! timestampStringFromContext . isEmpty ( ) ) { return DateTimes . utc ( Long . parseLong ( timestampStringFromContext ) ) ; } else if ( Granularities . ALL . equals ( gran ) ) { final DateTime timeStart = query . getIntervals ( ) . get ( 0 ) . getStart ( ) ; return gran . getIterable ( new Interval ( timeStart , timeStart . plus ( 1 ) ) ) . iterator ( ) . next ( ) . getStart ( ) ; } else { return null ; } } | If query has a single universal timestamp return it . Otherwise return null . This is useful for keeping timestamps in sync across partial queries that may have different intervals . |
18,996 | public static List < PostAggregator > pruneDependentPostAgg ( List < PostAggregator > postAggregatorList , String postAggName ) { ArrayList < PostAggregator > rv = new ArrayList < > ( ) ; Set < String > deps = new HashSet < > ( ) ; deps . add ( postAggName ) ; for ( PostAggregator agg : Lists . reverse ( postAggregatorList ) ) { if ( deps . contains ( agg . getName ( ) ) ) { rv . add ( agg ) ; deps . remove ( agg . getName ( ) ) ; deps . addAll ( agg . getDependentFields ( ) ) ; } } Collections . reverse ( rv ) ; return rv ; } | returns the list of dependent postAggregators that should be calculated in order to calculate given postAgg |
18,997 | public DruidNodeDiscovery getForService ( String serviceName ) { return serviceDiscoveryMap . computeIfAbsent ( serviceName , service -> { Set < NodeType > nodeTypesToWatch = DruidNodeDiscoveryProvider . SERVICE_TO_NODE_TYPES . get ( service ) ; if ( nodeTypesToWatch == null ) { throw new IAE ( "Unknown service [%s]." , service ) ; } ServiceDruidNodeDiscovery serviceDiscovery = new ServiceDruidNodeDiscovery ( service , nodeTypesToWatch . size ( ) ) ; DruidNodeDiscovery . Listener filteringGatheringUpstreamListener = serviceDiscovery . filteringUpstreamListener ( ) ; for ( NodeType nodeType : nodeTypesToWatch ) { getForNodeType ( nodeType ) . registerListener ( filteringGatheringUpstreamListener ) ; } return serviceDiscovery ; } ) ; } | Get DruidNodeDiscovery instance to discover nodes that announce given service in its metadata . |
18,998 | private < T > Sequence < T > run ( final QueryPlus < T > queryPlus , final Map < String , Object > responseContext , final UnaryOperator < TimelineLookup < String , ServerSelector > > timelineConverter ) { return new SpecificQueryRunnable < > ( queryPlus , responseContext ) . run ( timelineConverter ) ; } | Run a query . The timelineConverter will be given the master timeline and can be used to return a different timeline if desired . This is used by getQueryRunnerForSegments . |
18,999 | private static int aggregateDimValue ( final int [ ] positions , final BufferAggregator [ ] theAggregators , final ByteBuffer resultsBuf , final int numBytesPerRecord , final int [ ] aggregatorOffsets , final int aggSize , final int aggExtra , final int dimIndex , int currentPosition ) { if ( SKIP_POSITION_VALUE == positions [ dimIndex ] ) { return currentPosition ; } if ( INIT_POSITION_VALUE == positions [ dimIndex ] ) { positions [ dimIndex ] = currentPosition * numBytesPerRecord ; currentPosition ++ ; final int pos = positions [ dimIndex ] ; for ( int j = 0 ; j < aggSize ; ++ j ) { theAggregators [ j ] . init ( resultsBuf , pos + aggregatorOffsets [ j ] ) ; } } final int position = positions [ dimIndex ] ; switch ( aggExtra ) { case 7 : theAggregators [ 6 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ 6 ] ) ; case 6 : theAggregators [ 5 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ 5 ] ) ; case 5 : theAggregators [ 4 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ 4 ] ) ; case 4 : theAggregators [ 3 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ 3 ] ) ; case 3 : theAggregators [ 2 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ 2 ] ) ; case 2 : theAggregators [ 1 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ 1 ] ) ; case 1 : theAggregators [ 0 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ 0 ] ) ; } for ( int j = aggExtra ; j < aggSize ; j += AGG_UNROLL_COUNT ) { theAggregators [ j ] . aggregate ( resultsBuf , position + aggregatorOffsets [ j ] ) ; theAggregators [ j + 1 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ j + 1 ] ) ; theAggregators [ j + 2 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ j + 2 ] ) ; theAggregators [ j + 3 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ j + 3 ] ) ; theAggregators [ j + 4 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ j + 4 ] ) ; theAggregators [ j + 5 ] . 
aggregate ( resultsBuf , position + aggregatorOffsets [ j + 5 ] ) ; theAggregators [ j + 6 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ j + 6 ] ) ; theAggregators [ j + 7 ] . aggregate ( resultsBuf , position + aggregatorOffsets [ j + 7 ] ) ; } return currentPosition ; } | Returns a new currentPosition incremented if a new position was initialized otherwise the same position as passed in the last argument . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.