idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
18,800 | private ListenableFuture < TaskStatus > attachCallbacks ( final Task task , final ListenableFuture < TaskStatus > statusFuture ) { final ServiceMetricEvent . Builder metricBuilder = new ServiceMetricEvent . Builder ( ) ; IndexTaskUtils . setTaskDimensions ( metricBuilder , task ) ; Futures . addCallback ( statusFuture , new FutureCallback < TaskStatus > ( ) { public void onSuccess ( final TaskStatus status ) { log . info ( "Received %s status for task: %s" , status . getStatusCode ( ) , status . getId ( ) ) ; handleStatus ( status ) ; } public void onFailure ( final Throwable t ) { log . makeAlert ( t , "Failed to run task" ) . addData ( "task" , task . getId ( ) ) . addData ( "type" , task . getType ( ) ) . addData ( "dataSource" , task . getDataSource ( ) ) . emit ( ) ; handleStatus ( TaskStatus . failure ( task . getId ( ) ) ) ; } private void handleStatus ( final TaskStatus status ) { try { if ( ! active ) { log . info ( "Abandoning task due to shutdown: %s" , task . getId ( ) ) ; return ; } notifyStatus ( task , status , "notified status change from task" ) ; if ( status . isComplete ( ) ) { IndexTaskUtils . setTaskStatusDimensions ( metricBuilder , status ) ; emitter . emit ( metricBuilder . build ( "task/run/time" , status . getDuration ( ) ) ) ; log . info ( "Task %s: %s (%d run duration)" , status . getStatusCode ( ) , task , status . getDuration ( ) ) ; if ( status . isSuccess ( ) ) { Counters . incrementAndGetLong ( totalSuccessfulTaskCount , task . getDataSource ( ) ) ; } else { Counters . incrementAndGetLong ( totalFailedTaskCount , task . getDataSource ( ) ) ; } } } catch ( Exception e ) { log . makeAlert ( e , "Failed to handle task status" ) . addData ( "task" , task . getId ( ) ) . addData ( "statusCode" , status . getStatusCode ( ) ) . emit ( ) ; } } } ) ; return statusFuture ; } | Attach success and failure handlers to a task status future such that when it completes we perform the appropriate updates . |
18,801 | private void syncFromStorage ( ) { giant . lock ( ) ; try { if ( active ) { final Map < String , Task > newTasks = toTaskIDMap ( taskStorage . getActiveTasks ( ) ) ; final int tasksSynced = newTasks . size ( ) ; final Map < String , Task > oldTasks = toTaskIDMap ( tasks ) ; Set < String > commonIds = Sets . newHashSet ( Sets . intersection ( newTasks . keySet ( ) , oldTasks . keySet ( ) ) ) ; for ( String taskID : commonIds ) { newTasks . remove ( taskID ) ; oldTasks . remove ( taskID ) ; } Collection < Task > addedTasks = newTasks . values ( ) ; Collection < Task > removedTasks = oldTasks . values ( ) ; for ( Task task : removedTasks ) { removeTaskInternal ( task ) ; } for ( Task task : addedTasks ) { addTaskInternal ( task ) ; } log . info ( "Synced %d tasks from storage (%d tasks added, %d tasks removed)." , tasksSynced , addedTasks . size ( ) , removedTasks . size ( ) ) ; managementMayBeNecessary . signalAll ( ) ; } else { log . info ( "Not active. Skipping storage sync." ) ; } } catch ( Exception e ) { log . warn ( e , "Failed to sync tasks from storage!" ) ; throw new RuntimeException ( e ) ; } finally { giant . unlock ( ) ; } } | Resync the contents of this task queue with our storage facility . Useful to make sure our in - memory state corresponds to the storage facility even if the latter is manually modified . |
18,802 | private void appendFill ( int length , int fillType ) { assert length > 0 ; assert lastWordIndex >= - 1 ; fillType &= ConciseSetUtils . SEQUENCE_BIT ; if ( length == 1 ) { appendLiteral ( fillType == 0 ? ConciseSetUtils . ALL_ZEROS_LITERAL : ConciseSetUtils . ALL_ONES_LITERAL ) ; return ; } if ( lastWordIndex < 0 ) { words [ lastWordIndex = 0 ] = fillType | ( length - 1 ) ; return ; } final int lastWord = words [ lastWordIndex ] ; if ( isLiteral ( lastWord ) ) { if ( fillType == 0 && lastWord == ConciseSetUtils . ALL_ZEROS_LITERAL ) { words [ lastWordIndex ] = length ; } else if ( fillType == ConciseSetUtils . SEQUENCE_BIT && lastWord == ConciseSetUtils . ALL_ONES_LITERAL ) { words [ lastWordIndex ] = ConciseSetUtils . SEQUENCE_BIT | length ; } else if ( ! simulateWAH ) { if ( fillType == 0 && containsOnlyOneBit ( getLiteralBits ( lastWord ) ) ) { words [ lastWordIndex ] = length | ( ( 1 + Integer . numberOfTrailingZeros ( lastWord ) ) << 25 ) ; } else if ( fillType == ConciseSetUtils . SEQUENCE_BIT && containsOnlyOneBit ( ~ lastWord ) ) { words [ lastWordIndex ] = ConciseSetUtils . SEQUENCE_BIT | length | ( ( 1 + Integer . numberOfTrailingZeros ( ~ lastWord ) ) << 25 ) ; } else { words [ ++ lastWordIndex ] = fillType | ( length - 1 ) ; } } else { words [ ++ lastWordIndex ] = fillType | ( length - 1 ) ; } } else { if ( ( lastWord & 0xC0000000 ) == fillType ) { words [ lastWordIndex ] += length ; } else { words [ ++ lastWordIndex ] = fillType | ( length - 1 ) ; } } } | Append a sequence word after the last word |
18,803 | private void trimZeros ( ) { int w ; do { w = words [ lastWordIndex ] ; if ( w == ConciseSetUtils . ALL_ZEROS_LITERAL ) { lastWordIndex -- ; } else if ( isZeroSequence ( w ) ) { if ( simulateWAH || isSequenceWithNoBits ( w ) ) { lastWordIndex -- ; } else { words [ lastWordIndex ] = getLiteral ( w ) ; return ; } } else { return ; } if ( lastWordIndex < 0 ) { reset ( ) ; return ; } } while ( true ) ; } | Removes trailing zeros |
18,804 | private void writeObject ( ObjectOutputStream s ) throws IOException { if ( words != null && lastWordIndex < words . length - 1 ) { words = Arrays . copyOf ( words , lastWordIndex + 1 ) ; } s . defaultWriteObject ( ) ; } | Save the state of the instance to a stream |
18,805 | private void readObject ( ObjectInputStream s ) throws IOException , ClassNotFoundException { s . defaultReadObject ( ) ; if ( words == null ) { reset ( ) ; return ; } lastWordIndex = words . length - 1 ; updateLast ( ) ; size = - 1 ; } | Reconstruct the instance from a stream |
18,806 | public static Aggregation translateAggregateCall ( final PlannerContext plannerContext , final DruidQuerySignature querySignature , final RexBuilder rexBuilder , final Project project , final List < Aggregation > existingAggregations , final String name , final AggregateCall call , final boolean finalizeAggregations ) { final DimFilter filter ; if ( call . filterArg >= 0 ) { if ( project == null ) { return null ; } final RexNode expression = project . getChildExps ( ) . get ( call . filterArg ) ; final DimFilter nonOptimizedFilter = Expressions . toFilter ( plannerContext , querySignature , expression ) ; if ( nonOptimizedFilter == null ) { return null ; } else { filter = Filtration . create ( nonOptimizedFilter ) . optimizeFilterOnly ( querySignature ) . getDimFilter ( ) ; } } else { filter = null ; } final SqlAggregator sqlAggregator = plannerContext . getOperatorTable ( ) . lookupAggregator ( call . getAggregation ( ) ) ; if ( sqlAggregator == null ) { return null ; } final List < Aggregation > existingAggregationsWithSameFilter = new ArrayList < > ( ) ; for ( Aggregation existingAggregation : existingAggregations ) { if ( filter == null ) { final boolean doesMatch = existingAggregation . getAggregatorFactories ( ) . stream ( ) . noneMatch ( factory -> factory instanceof FilteredAggregatorFactory ) ; if ( doesMatch ) { existingAggregationsWithSameFilter . add ( existingAggregation ) ; } } else { final boolean doesMatch = existingAggregation . getAggregatorFactories ( ) . stream ( ) . allMatch ( factory -> factory instanceof FilteredAggregatorFactory && ( ( FilteredAggregatorFactory ) factory ) . getFilter ( ) . equals ( filter ) ) ; if ( doesMatch ) { existingAggregationsWithSameFilter . add ( Aggregation . create ( existingAggregation . getVirtualColumns ( ) , existingAggregation . getAggregatorFactories ( ) . stream ( ) . map ( factory -> ( ( FilteredAggregatorFactory ) factory ) . getAggregator ( ) ) . collect ( Collectors . 
toList ( ) ) , existingAggregation . getPostAggregator ( ) ) ) ; } } } final Aggregation retVal = sqlAggregator . toDruidAggregation ( plannerContext , querySignature , rexBuilder , name , call , project , existingAggregationsWithSameFilter , finalizeAggregations ) ; if ( retVal == null ) { return null ; } else { if ( isUsingExistingAggregation ( retVal , existingAggregationsWithSameFilter ) ) { return retVal ; } else { return retVal . filter ( querySignature , filter ) ; } } } | Translate an AggregateCall to Druid equivalents . |
18,807 | private static boolean isUsingExistingAggregation ( final Aggregation aggregation , final List < Aggregation > existingAggregations ) { if ( ! aggregation . getAggregatorFactories ( ) . isEmpty ( ) ) { return false ; } final Set < String > existingAggregationNames = existingAggregations . stream ( ) . flatMap ( xs -> xs . getAggregatorFactories ( ) . stream ( ) ) . map ( AggregatorFactory :: getName ) . collect ( Collectors . toSet ( ) ) ; return existingAggregationNames . containsAll ( aggregation . getPostAggregator ( ) . getDependentFields ( ) ) ; } | Checks if aggregation is exclusively based on existing aggregations from existingAggregations . |
18,808 | private static JsonParserIterator < TaskStatusPlus > getTasks ( DruidLeaderClient indexingServiceClient , ObjectMapper jsonMapper , BytesAccumulatingResponseHandler responseHandler ) { Request request ; try { request = indexingServiceClient . makeRequest ( HttpMethod . GET , StringUtils . format ( "/druid/indexer/v1/tasks" ) , false ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } ListenableFuture < InputStream > future = indexingServiceClient . goAsync ( request , responseHandler ) ; final JavaType typeRef = jsonMapper . getTypeFactory ( ) . constructType ( new TypeReference < TaskStatusPlus > ( ) { } ) ; return new JsonParserIterator < > ( typeRef , future , request . getUrl ( ) . toString ( ) , null , request . getUrl ( ) . getHost ( ) , jsonMapper , responseHandler ) ; } | Note that overlord must be up to get tasks |
18,809 | private boolean announceHistoricalSegment ( final Handle handle , final DataSegment segment , final boolean used ) throws IOException { try { if ( segmentExists ( handle , segment ) ) { log . info ( "Found [%s] in DB, not updating DB" , segment . getId ( ) ) ; return false ; } handle . createStatement ( StringUtils . format ( "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, partitioned, version, used, payload) " + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)" , dbTables . getSegmentsTable ( ) , connector . getQuoteString ( ) ) ) . bind ( "id" , segment . getId ( ) . toString ( ) ) . bind ( "dataSource" , segment . getDataSource ( ) ) . bind ( "created_date" , DateTimes . nowUtc ( ) . toString ( ) ) . bind ( "start" , segment . getInterval ( ) . getStart ( ) . toString ( ) ) . bind ( "end" , segment . getInterval ( ) . getEnd ( ) . toString ( ) ) . bind ( "partitioned" , ( segment . getShardSpec ( ) instanceof NoneShardSpec ) ? false : true ) . bind ( "version" , segment . getVersion ( ) ) . bind ( "used" , used ) . bind ( "payload" , jsonMapper . writeValueAsBytes ( segment ) ) . execute ( ) ; log . info ( "Published segment [%s] to DB with used flag [%s]" , segment . getId ( ) , used ) ; } catch ( Exception e ) { log . error ( e , "Exception inserting segment [%s] with used flag [%s] into DB" , segment . getId ( ) , used ) ; throw e ; } return true ; } | Attempts to insert a single segment to the database . If the segment already exists will do nothing ; although this checking is imperfect and callers must be prepared to retry their entire transaction on exceptions . |
18,810 | public DataSourceMetadata getDataSourceMetadata ( final String dataSource ) { final byte [ ] bytes = connector . lookup ( dbTables . getDataSourceTable ( ) , "dataSource" , "commit_metadata_payload" , dataSource ) ; if ( bytes == null ) { return null ; } try { return jsonMapper . readValue ( bytes , DataSourceMetadata . class ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Read dataSource metadata . Returns null if there is no metadata . |
18,811 | private byte [ ] getDataSourceMetadataWithHandleAsBytes ( final Handle handle , final String dataSource ) { return connector . lookupWithHandle ( handle , dbTables . getDataSourceTable ( ) , "dataSource" , "commit_metadata_payload" , dataSource ) ; } | Read dataSource metadata as bytes from a specific handle . Returns null if there is no metadata . |
18,812 | public ObjectContainer < T > take ( ) throws InterruptedException { final ObjectContainer < T > ret = queue . take ( ) ; currentMemory . addAndGet ( - ret . getSize ( ) ) ; return ret ; } | blocks until at least one item is available to take |
18,813 | private void processRows ( IncrementalIndex < ? > index , BitmapFactory bitmapFactory , List < IncrementalIndex . DimensionDesc > dimensions ) { int rowNum = 0 ; for ( IncrementalIndexRow row : index . getFacts ( ) . persistIterable ( ) ) { final Object [ ] dims = row . getDims ( ) ; for ( IncrementalIndex . DimensionDesc dimension : dimensions ) { final int dimIndex = dimension . getIndex ( ) ; DimensionAccessor accessor = accessors . get ( dimension . getName ( ) ) ; if ( dimIndex >= dims . length || dims [ dimIndex ] == null ) { accessor . indexer . processRowValsToUnsortedEncodedKeyComponent ( null , true ) ; continue ; } final ColumnCapabilities capabilities = dimension . getCapabilities ( ) ; if ( capabilities . hasBitmapIndexes ( ) ) { final MutableBitmap [ ] bitmapIndexes = accessor . invertedIndexes ; final DimensionIndexer indexer = accessor . indexer ; indexer . fillBitmapsFromUnsortedEncodedKeyComponent ( dims [ dimIndex ] , rowNum , bitmapIndexes , bitmapFactory ) ; } } ++ rowNum ; } } | Sometimes it is hard to tell whether one dimension contains a null value or not . If one dimension has explicitly shown a null or empty value then yes it contains a null value . But if one dimension 's values are all non - null it is still too early to say this dimension does not contain a null value . Consider a two - row case : the first row had dimA = 1 and dimB = 2 , and the second row only had dimA = 3 . For dimB its value is 2 and it never showed a null or empty value . But when we combine these two rows dimB is null in row 2 . So we should iterate over all rows to determine whether one dimension contains a null value . |
18,814 | public static RealtimeTuningConfig makeDefaultTuningConfig ( final File basePersistDirectory ) { return new RealtimeTuningConfig ( defaultMaxRowsInMemory , 0L , defaultIntermediatePersistPeriod , defaultWindowPeriod , basePersistDirectory == null ? createNewBasePersistDirectory ( ) : basePersistDirectory , defaultVersioningPolicy , defaultRejectionPolicyFactory , defaultMaxPendingPersists , defaultShardSpec , defaultIndexSpec , true , 0 , 0 , defaultReportParseExceptions , defaultHandoffConditionTimeout , defaultAlertTimeout , null , defaultDedupColumn ) ; } | Might make sense for this to be a builder |
18,815 | public FireDepartmentMetrics merge ( FireDepartmentMetrics other ) { Preconditions . checkNotNull ( other , "Cannot merge a null FireDepartmentMetrics" ) ; FireDepartmentMetrics otherSnapshot = other . snapshot ( ) ; processedCount . addAndGet ( otherSnapshot . processed ( ) ) ; processedWithErrorsCount . addAndGet ( otherSnapshot . processedWithErrors ( ) ) ; thrownAwayCount . addAndGet ( otherSnapshot . thrownAway ( ) ) ; rowOutputCount . addAndGet ( otherSnapshot . rowOutput ( ) ) ; unparseableCount . addAndGet ( otherSnapshot . unparseable ( ) ) ; dedupCount . addAndGet ( otherSnapshot . dedup ( ) ) ; numPersists . addAndGet ( otherSnapshot . numPersists ( ) ) ; persistTimeMillis . addAndGet ( otherSnapshot . persistTimeMillis ( ) ) ; persistBackPressureMillis . addAndGet ( otherSnapshot . persistBackPressureMillis ( ) ) ; failedPersists . addAndGet ( otherSnapshot . failedPersists ( ) ) ; failedHandoffs . addAndGet ( otherSnapshot . failedHandoffs ( ) ) ; mergeTimeMillis . addAndGet ( otherSnapshot . mergeTimeMillis ( ) ) ; mergeCpuTime . addAndGet ( otherSnapshot . mergeCpuTime ( ) ) ; persistCpuTime . addAndGet ( otherSnapshot . persistCpuTime ( ) ) ; handOffCount . addAndGet ( otherSnapshot . handOffCount ( ) ) ; sinkCount . addAndGet ( otherSnapshot . sinkCount ( ) ) ; messageMaxTimestamp . set ( Math . max ( messageMaxTimestamp ( ) , otherSnapshot . messageMaxTimestamp ( ) ) ) ; messageGap . set ( Math . max ( messageGap ( ) , otherSnapshot . messageGap ( ) ) ) ; return this ; } | Merge another FireDepartmentMetrics into this one ; this will modify this object 's data . |
18,816 | public static TaskAnnouncement create ( Task task , TaskStatus status , TaskLocation location ) { return create ( task . getId ( ) , task . getType ( ) , task . getTaskResource ( ) , status , location , task . getDataSource ( ) ) ; } | nullable for backward compatibility |
18,817 | public ResourceHolder < ByteBuffer > getMergeBuffer ( ) { final ByteBuffer buffer = mergeBuffers . pop ( ) ; return new ResourceHolder < ByteBuffer > ( ) { public ByteBuffer get ( ) { return buffer ; } public void close ( ) { mergeBuffers . add ( buffer ) ; } } ; } | Get a merge buffer from the pre - acquired resources . |
18,818 | public static Supplier < ColumnarFloats > getFloatSupplier ( int totalSize , int sizePer , ByteBuffer fromBuffer , ByteOrder order , CompressionStrategy strategy ) { if ( strategy == CompressionStrategy . NONE ) { return new EntireLayoutColumnarFloatsSupplier ( totalSize , fromBuffer , order ) ; } else { return new BlockLayoutColumnarFloatsSupplier ( totalSize , sizePer , fromBuffer , order , strategy ) ; } } | Float currently does not support any encoding types and stores values as 4 byte float |
18,819 | public LookupExtractorFactoryMapContainer getLookup ( final String tier , final String lookupName ) { final Map < String , Map < String , LookupExtractorFactoryMapContainer > > prior = getKnownLookups ( ) ; if ( prior == null ) { LOG . warn ( "Requested tier [%s] lookupName [%s]. But no lookups exist!" , tier , lookupName ) ; return null ; } final Map < String , LookupExtractorFactoryMapContainer > tierLookups = prior . get ( tier ) ; if ( tierLookups == null ) { LOG . warn ( "Tier [%s] does not exist" , tier ) ; return null ; } return tierLookups . get ( lookupName ) ; } | Try to find a lookupName spec for the specified lookupName . |
18,820 | private long getAvgSizePerGranularity ( String datasource ) { return connector . retryWithHandle ( new HandleCallback < Long > ( ) { Set < Interval > intervals = new HashSet < > ( ) ; long totalSize = 0 ; public Long withHandle ( Handle handle ) { handle . createQuery ( StringUtils . format ( "SELECT start,%1$send%1$s,payload FROM %2$s WHERE used = true AND dataSource = :dataSource" , connector . getQuoteString ( ) , dbTables . get ( ) . getSegmentsTable ( ) ) ) . bind ( "dataSource" , datasource ) . map ( new ResultSetMapper < Object > ( ) { public Object map ( int index , ResultSet r , StatementContext ctx ) throws SQLException { try { intervals . add ( Intervals . utc ( DateTimes . of ( r . getString ( "start" ) ) . getMillis ( ) , DateTimes . of ( r . getString ( "end" ) ) . getMillis ( ) ) ) ; DataSegment segment = objectMapper . readValue ( r . getBytes ( "payload" ) , DataSegment . class ) ; totalSize += segment . getSize ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } return null ; } } ) . first ( ) ; return intervals . isEmpty ( ) ? 0L : totalSize / intervals . size ( ) ; } } ) ; } | calculate the average data size per segment granularity for a given datasource . |
18,821 | public Filtration optimize ( final DruidQuerySignature querySignature ) { return transform ( this , ImmutableList . of ( CombineAndSimplifyBounds . instance ( ) , MoveTimeFiltersToIntervals . instance ( ) , ConvertBoundsToSelectors . create ( querySignature ) , ConvertSelectorsToIns . create ( querySignature . getRowSignature ( ) ) , MoveMarkerFiltersToIntervals . instance ( ) , ValidateNoMarkerFiltersRemain . instance ( ) ) ) ; } | Optimize a Filtration for querying possibly pulling out intervals and simplifying the dimFilter in the process . |
18,822 | public Filtration optimizeFilterOnly ( final DruidQuerySignature querySignature ) { if ( ! intervals . equals ( ImmutableList . of ( eternity ( ) ) ) ) { throw new ISE ( "Cannot optimizeFilterOnly when intervals are set" ) ; } final Filtration transformed = transform ( this , ImmutableList . of ( CombineAndSimplifyBounds . instance ( ) , ConvertBoundsToSelectors . create ( querySignature ) , ConvertSelectorsToIns . create ( querySignature . getRowSignature ( ) ) ) ) ; if ( ! transformed . getIntervals ( ) . equals ( ImmutableList . of ( eternity ( ) ) ) ) { throw new ISE ( "WTF?! optimizeFilterOnly was about to return filtration with intervals?!" ) ; } return transformed ; } | Optimize a Filtration containing only a DimFilter avoiding pulling out intervals . |
18,823 | private static boolean postAggregatorDirectColumnIsOk ( final RowSignature aggregateRowSignature , final DruidExpression expression , final RexNode rexNode ) { if ( ! expression . isDirectColumnAccess ( ) ) { return false ; } final ExprType toExprType = Expressions . exprTypeForValueType ( aggregateRowSignature . getColumnType ( expression . getDirectColumn ( ) ) ) ; final ExprType fromExprType = Expressions . exprTypeForValueType ( Calcites . getValueTypeForSqlTypeName ( rexNode . getType ( ) . getSqlTypeName ( ) ) ) ; return toExprType . equals ( fromExprType ) ; } | Returns true if a post - aggregation expression can be realized as a direct field access . This is true if it s a direct column access that doesn t require an implicit cast . |
18,824 | public TimeseriesQuery toTimeseriesQuery ( ) { if ( grouping == null || grouping . getHavingFilter ( ) != null ) { return null ; } final Granularity queryGranularity ; final boolean descending ; int timeseriesLimit = 0 ; if ( grouping . getDimensions ( ) . isEmpty ( ) ) { queryGranularity = Granularities . ALL ; descending = false ; } else if ( grouping . getDimensions ( ) . size ( ) == 1 ) { final DimensionExpression dimensionExpression = Iterables . getOnlyElement ( grouping . getDimensions ( ) ) ; queryGranularity = Expressions . toQueryGranularity ( dimensionExpression . getDruidExpression ( ) , plannerContext . getExprMacroTable ( ) ) ; if ( queryGranularity == null ) { return null ; } if ( limitSpec != null ) { if ( limitSpec . isLimited ( ) ) { timeseriesLimit = limitSpec . getLimit ( ) ; } if ( limitSpec . getColumns ( ) . isEmpty ( ) ) { descending = false ; } else { final OrderByColumnSpec firstOrderBy = limitSpec . getColumns ( ) . get ( 0 ) ; if ( firstOrderBy . getDimension ( ) . equals ( dimensionExpression . getOutputName ( ) ) ) { descending = firstOrderBy . getDirection ( ) == OrderByColumnSpec . Direction . DESCENDING ; } else { return null ; } } } else { descending = false ; } } else { return null ; } final Filtration filtration = Filtration . create ( filter ) . optimize ( sourceQuerySignature ) ; final List < PostAggregator > postAggregators = new ArrayList < > ( grouping . getPostAggregators ( ) ) ; if ( sortProject != null ) { postAggregators . addAll ( sortProject . getPostAggregators ( ) ) ; } final Map < String , Object > theContext = new HashMap < > ( ) ; theContext . put ( "skipEmptyBuckets" , true ) ; theContext . putAll ( plannerContext . getQueryContext ( ) ) ; return new TimeseriesQuery ( dataSource , filtration . getQuerySegmentSpec ( ) , descending , getVirtualColumns ( false ) , filtration . getDimFilter ( ) , queryGranularity , grouping . 
getAggregatorFactories ( ) , postAggregators , timeseriesLimit , ImmutableSortedMap . copyOf ( theContext ) ) ; } | Return this query as a Timeseries query or null if this query is not compatible with Timeseries . |
18,825 | public TopNQuery toTopNQuery ( ) { final boolean topNOk = grouping != null && grouping . getDimensions ( ) . size ( ) == 1 && limitSpec != null && ( limitSpec . getColumns ( ) . size ( ) <= 1 && limitSpec . getLimit ( ) <= plannerContext . getPlannerConfig ( ) . getMaxTopNLimit ( ) ) && grouping . getHavingFilter ( ) == null ; if ( ! topNOk ) { return null ; } final DimensionSpec dimensionSpec = Iterables . getOnlyElement ( grouping . getDimensions ( ) ) . toDimensionSpec ( ) ; final OrderByColumnSpec limitColumn ; if ( limitSpec . getColumns ( ) . isEmpty ( ) ) { limitColumn = new OrderByColumnSpec ( dimensionSpec . getOutputName ( ) , OrderByColumnSpec . Direction . ASCENDING , Calcites . getStringComparatorForValueType ( dimensionSpec . getOutputType ( ) ) ) ; } else { limitColumn = Iterables . getOnlyElement ( limitSpec . getColumns ( ) ) ; } final TopNMetricSpec topNMetricSpec ; if ( limitColumn . getDimension ( ) . equals ( dimensionSpec . getOutputName ( ) ) ) { final DimensionTopNMetricSpec baseMetricSpec = new DimensionTopNMetricSpec ( null , limitColumn . getDimensionComparator ( ) ) ; topNMetricSpec = limitColumn . getDirection ( ) == OrderByColumnSpec . Direction . ASCENDING ? baseMetricSpec : new InvertedTopNMetricSpec ( baseMetricSpec ) ; } else if ( plannerContext . getPlannerConfig ( ) . isUseApproximateTopN ( ) ) { final NumericTopNMetricSpec baseMetricSpec = new NumericTopNMetricSpec ( limitColumn . getDimension ( ) ) ; topNMetricSpec = limitColumn . getDirection ( ) == OrderByColumnSpec . Direction . ASCENDING ? new InvertedTopNMetricSpec ( baseMetricSpec ) : baseMetricSpec ; } else { return null ; } final Filtration filtration = Filtration . create ( filter ) . optimize ( sourceQuerySignature ) ; final List < PostAggregator > postAggregators = new ArrayList < > ( grouping . getPostAggregators ( ) ) ; if ( sortProject != null ) { postAggregators . addAll ( sortProject . 
getPostAggregators ( ) ) ; } return new TopNQuery ( dataSource , getVirtualColumns ( true ) , dimensionSpec , topNMetricSpec , limitSpec . getLimit ( ) , filtration . getQuerySegmentSpec ( ) , filtration . getDimFilter ( ) , Granularities . ALL , grouping . getAggregatorFactories ( ) , postAggregators , ImmutableSortedMap . copyOf ( plannerContext . getQueryContext ( ) ) ) ; } | Return this query as a TopN query or null if this query is not compatible with TopN . |
18,826 | public GroupByQuery toGroupByQuery ( ) { if ( grouping == null ) { return null ; } final Filtration filtration = Filtration . create ( filter ) . optimize ( sourceQuerySignature ) ; final DimFilterHavingSpec havingSpec ; if ( grouping . getHavingFilter ( ) != null ) { havingSpec = new DimFilterHavingSpec ( Filtration . create ( grouping . getHavingFilter ( ) ) . optimizeFilterOnly ( sourceQuerySignature ) . getDimFilter ( ) , true ) ; } else { havingSpec = null ; } final List < PostAggregator > postAggregators = new ArrayList < > ( grouping . getPostAggregators ( ) ) ; if ( sortProject != null ) { postAggregators . addAll ( sortProject . getPostAggregators ( ) ) ; } return new GroupByQuery ( dataSource , filtration . getQuerySegmentSpec ( ) , getVirtualColumns ( true ) , filtration . getDimFilter ( ) , Granularities . ALL , grouping . getDimensionSpecs ( ) , grouping . getAggregatorFactories ( ) , postAggregators , havingSpec , limitSpec , null , ImmutableSortedMap . copyOf ( plannerContext . getQueryContext ( ) ) ) ; } | Return this query as a GroupBy query or null if this query is not compatible with GroupBy . |
18,827 | public ScanQuery toScanQuery ( ) { if ( grouping != null ) { return null ; } if ( limitSpec != null && ( limitSpec . getColumns ( ) . size ( ) > 1 || ( limitSpec . getColumns ( ) . size ( ) == 1 && ! Iterables . getOnlyElement ( limitSpec . getColumns ( ) ) . getDimension ( ) . equals ( ColumnHolder . TIME_COLUMN_NAME ) ) ) ) { return null ; } if ( outputRowSignature . getRowOrder ( ) . isEmpty ( ) ) { throw new ISE ( "WTF?! Attempting to convert to Scan query without any columns?" ) ; } final Filtration filtration = Filtration . create ( filter ) . optimize ( sourceQuerySignature ) ; final long scanLimit = limitSpec == null || limitSpec . getLimit ( ) == Integer . MAX_VALUE ? 0L : ( long ) limitSpec . getLimit ( ) ; ScanQuery . Order order ; if ( limitSpec == null || limitSpec . getColumns ( ) . size ( ) == 0 ) { order = ScanQuery . Order . NONE ; } else if ( limitSpec . getColumns ( ) . get ( 0 ) . getDirection ( ) == OrderByColumnSpec . Direction . ASCENDING ) { order = ScanQuery . Order . ASCENDING ; } else { order = ScanQuery . Order . DESCENDING ; } return new ScanQuery ( dataSource , filtration . getQuerySegmentSpec ( ) , getVirtualColumns ( true ) , ScanQuery . ResultFormat . RESULT_FORMAT_COMPACTED_LIST , 0 , scanLimit , order , filtration . getDimFilter ( ) , Ordering . natural ( ) . sortedCopy ( ImmutableSet . copyOf ( outputRowSignature . getRowOrder ( ) ) ) , false , ImmutableSortedMap . copyOf ( plannerContext . getQueryContext ( ) ) ) ; } | Return this query as a Scan query or null if this query is not compatible with Scan . |
18,828 | public Object finalizeComputation ( Object object ) { if ( shouldFinalize ) { SketchHolder holder = ( SketchHolder ) object ; if ( errorBoundsStdDev != null ) { return holder . getEstimateWithErrorBounds ( errorBoundsStdDev ) ; } else { return holder . getEstimate ( ) ; } } else { return object ; } } | Finalize the computation on sketch object and returns estimate from underlying sketch . |
18,829 | public static RexNode fromFieldAccess ( final RowSignature rowSignature , final Project project , final int fieldNumber ) { if ( project == null ) { return RexInputRef . of ( fieldNumber , rowSignature . getRelDataType ( new JavaTypeFactoryImpl ( ) ) ) ; } else { return project . getChildExps ( ) . get ( fieldNumber ) ; } } | Translate a field access possibly through a projection to an underlying Druid dataSource . |
18,830 | public static DimFilter toFilter ( final PlannerContext plannerContext , final DruidQuerySignature querySignature , final RexNode expression ) { final SqlKind kind = expression . getKind ( ) ; if ( kind == SqlKind . IS_TRUE || kind == SqlKind . IS_NOT_FALSE ) { return toFilter ( plannerContext , querySignature , Iterables . getOnlyElement ( ( ( RexCall ) expression ) . getOperands ( ) ) ) ; } else if ( kind == SqlKind . IS_FALSE || kind == SqlKind . IS_NOT_TRUE ) { return new NotDimFilter ( toFilter ( plannerContext , querySignature , Iterables . getOnlyElement ( ( ( RexCall ) expression ) . getOperands ( ) ) ) ) ; } else if ( kind == SqlKind . CAST && expression . getType ( ) . getSqlTypeName ( ) == SqlTypeName . BOOLEAN ) { return toFilter ( plannerContext , querySignature , Iterables . getOnlyElement ( ( ( RexCall ) expression ) . getOperands ( ) ) ) ; } else if ( kind == SqlKind . AND || kind == SqlKind . OR || kind == SqlKind . NOT ) { final List < DimFilter > filters = new ArrayList < > ( ) ; for ( final RexNode rexNode : ( ( RexCall ) expression ) . getOperands ( ) ) { final DimFilter nextFilter = toFilter ( plannerContext , querySignature , rexNode ) ; if ( nextFilter == null ) { return null ; } filters . add ( nextFilter ) ; } if ( kind == SqlKind . AND ) { return new AndDimFilter ( filters ) ; } else if ( kind == SqlKind . OR ) { return new OrDimFilter ( filters ) ; } else { assert kind == SqlKind . NOT ; return new NotDimFilter ( Iterables . getOnlyElement ( filters ) ) ; } } else { return toLeafFilter ( plannerContext , querySignature , expression ) ; } } | Translates condition to a Druid filter or returns null if we cannot translate the condition . |
18,831 | private static DimFilter toLeafFilter ( final PlannerContext plannerContext , final DruidQuerySignature querySignature , final RexNode rexNode ) { if ( rexNode . isAlwaysTrue ( ) ) { return Filtration . matchEverything ( ) ; } else if ( rexNode . isAlwaysFalse ( ) ) { return Filtration . matchNothing ( ) ; } final DimFilter simpleFilter = toSimpleLeafFilter ( plannerContext , querySignature , rexNode ) ; return simpleFilter != null ? simpleFilter : toExpressionLeafFilter ( plannerContext , querySignature . getRowSignature ( ) , rexNode ) ; } | Translates condition to a Druid filter assuming it does not contain any boolean expressions . Returns null if we cannot translate the condition . |
18,832 | private static DimFilter toExpressionLeafFilter ( final PlannerContext plannerContext , final RowSignature rowSignature , final RexNode rexNode ) { final DruidExpression druidExpression = toDruidExpression ( plannerContext , rowSignature , rexNode ) ; return druidExpression != null ? new ExpressionDimFilter ( druidExpression . getExpression ( ) , plannerContext . getExprMacroTable ( ) ) : null ; } | Translates to an expression type leaf filter . Used as a fallback if we can t use a simple leaf filter . |
18,833 | private JsonParserIterator < DataSegment > getMetadataSegments ( DruidLeaderClient coordinatorClient , ObjectMapper jsonMapper , BytesAccumulatingResponseHandler responseHandler , Set < String > watchedDataSources ) { String query = "/druid/coordinator/v1/metadata/segments" ; if ( watchedDataSources != null && ! watchedDataSources . isEmpty ( ) ) { log . debug ( "filtering datasources in published segments based on broker's watchedDataSources[%s]" , watchedDataSources ) ; final StringBuilder sb = new StringBuilder ( ) ; for ( String ds : watchedDataSources ) { sb . append ( "datasources=" ) . append ( ds ) . append ( "&" ) ; } sb . setLength ( sb . length ( ) - 1 ) ; query = "/druid/coordinator/v1/metadata/segments?" + sb ; } Request request ; try { request = coordinatorClient . makeRequest ( HttpMethod . GET , StringUtils . format ( query ) , false ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } ListenableFuture < InputStream > future = coordinatorClient . goAsync ( request , responseHandler ) ; final JavaType typeRef = jsonMapper . getTypeFactory ( ) . constructType ( new TypeReference < DataSegment > ( ) { } ) ; return new JsonParserIterator < > ( typeRef , future , request . getUrl ( ) . toString ( ) , null , request . getUrl ( ) . getHost ( ) , jsonMapper , responseHandler ) ; } | Note that coordinator must be up to get segments |
18,834 | public Object startJob ( ) { final Object metadata = appenderator . startJob ( ) ; if ( metadata != null ) { throw new ISE ( "Metadata should be null because BatchAppenderatorDriver never persists it" ) ; } return null ; } | This method always returns null because batch ingestion doesn t support restoring tasks on failures . |
18,835 | public ListenableFuture < SegmentsAndMetadata > publishAll ( final TransactionalSegmentPublisher publisher ) { final Map < String , SegmentsForSequence > snapshot ; synchronized ( segments ) { snapshot = ImmutableMap . copyOf ( segments ) ; } return publishInBackground ( new SegmentsAndMetadata ( snapshot . values ( ) . stream ( ) . flatMap ( SegmentsForSequence :: allSegmentStateStream ) . map ( segmentWithState -> Preconditions . checkNotNull ( segmentWithState . getDataSegment ( ) , "dataSegment for segmentId[%s]" , segmentWithState . getSegmentIdentifier ( ) ) ) . collect ( Collectors . toList ( ) ) , null ) , publisher ) ; } | Publish all segments . |
18,836 | public static void setupClasspath ( final Path distributedClassPath , final Path intermediateClassPath , final Job job ) throws IOException { String classpathProperty = System . getProperty ( "druid.hadoop.internal.classpath" ) ; if ( classpathProperty == null ) { classpathProperty = System . getProperty ( "java.class.path" ) ; } String [ ] jarFiles = classpathProperty . split ( File . pathSeparator ) ; final Configuration conf = job . getConfiguration ( ) ; final FileSystem fs = distributedClassPath . getFileSystem ( conf ) ; if ( fs instanceof LocalFileSystem ) { return ; } for ( String jarFilePath : jarFiles ) { final File jarFile = new File ( jarFilePath ) ; if ( jarFile . getName ( ) . endsWith ( ".jar" ) ) { try { RetryUtils . retry ( ( ) -> { if ( isSnapshot ( jarFile ) ) { addSnapshotJarToClassPath ( jarFile , intermediateClassPath , fs , job ) ; } else { addJarToClassPath ( jarFile , distributedClassPath , intermediateClassPath , fs , job ) ; } return true ; } , shouldRetryPredicate ( ) , NUM_RETRIES ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } } } | Uploads jar files to hdfs and configures the classpath . Snapshot jar files are uploaded to intermediateClasspath and not shared across multiple jobs . Non - Snapshot jar files are uploaded to a distributedClasspath and shared across multiple jobs . |
18,837 | @ Path ( "/{dataSourceName}/intervals/{interval}/serverview" ) @ Produces ( MediaType . APPLICATION_JSON ) @ ResourceFilters ( DatasourceResourceFilter . class ) public Response getSegmentDataSourceSpecificInterval ( @ PathParam ( "dataSourceName" ) String dataSourceName , @ PathParam ( "interval" ) String interval , @ QueryParam ( "partial" ) final boolean partial ) { TimelineLookup < String , SegmentLoadInfo > timeline = serverInventoryView . getTimeline ( new TableDataSource ( dataSourceName ) ) ; final Interval theInterval = Intervals . of ( interval . replace ( '_' , '/' ) ) ; if ( timeline == null ) { log . debug ( "No timeline found for datasource[%s]" , dataSourceName ) ; return Response . ok ( new ArrayList < ImmutableSegmentLoadInfo > ( ) ) . build ( ) ; } Iterable < TimelineObjectHolder < String , SegmentLoadInfo > > lookup = timeline . lookupWithIncompletePartitions ( theInterval ) ; FunctionalIterable < ImmutableSegmentLoadInfo > retval = FunctionalIterable . create ( lookup ) . transformCat ( ( TimelineObjectHolder < String , SegmentLoadInfo > input ) -> Iterables . transform ( input . getObject ( ) , ( PartitionChunk < SegmentLoadInfo > chunk ) -> chunk . getObject ( ) . toImmutableSegmentLoadInfo ( ) ) ) ; return Response . ok ( retval ) . build ( ) ; } | Provides serverView for a datasource and Interval which gives details about servers hosting segments for an interval Used by the realtime tasks to fetch a view of the interval they are interested in . |
18,838 | @ Path ( "/{dataSourceName}/handoffComplete" ) @ Produces ( MediaType . APPLICATION_JSON ) @ ResourceFilters ( DatasourceResourceFilter . class ) public Response isHandOffComplete ( @ PathParam ( "dataSourceName" ) String dataSourceName , @ QueryParam ( "interval" ) final String interval , @ QueryParam ( "partitionNumber" ) final int partitionNumber , @ QueryParam ( "version" ) final String version ) { try { final List < Rule > rules = databaseRuleManager . getRulesWithDefault ( dataSourceName ) ; final Interval theInterval = Intervals . of ( interval ) ; final SegmentDescriptor descriptor = new SegmentDescriptor ( theInterval , version , partitionNumber ) ; final DateTime now = DateTimes . nowUtc ( ) ; boolean dropped = true ; for ( Rule rule : rules ) { if ( rule . appliesTo ( theInterval , now ) ) { if ( rule instanceof LoadRule ) { dropped = false ; } break ; } } if ( dropped ) { return Response . ok ( true ) . build ( ) ; } TimelineLookup < String , SegmentLoadInfo > timeline = serverInventoryView . getTimeline ( new TableDataSource ( dataSourceName ) ) ; if ( timeline == null ) { log . debug ( "No timeline found for datasource[%s]" , dataSourceName ) ; return Response . ok ( false ) . build ( ) ; } Iterable < TimelineObjectHolder < String , SegmentLoadInfo > > lookup = timeline . lookupWithIncompletePartitions ( theInterval ) ; FunctionalIterable < ImmutableSegmentLoadInfo > loadInfoIterable = FunctionalIterable . create ( lookup ) . transformCat ( ( TimelineObjectHolder < String , SegmentLoadInfo > input ) -> Iterables . transform ( input . getObject ( ) , ( PartitionChunk < SegmentLoadInfo > chunk ) -> chunk . getObject ( ) . toImmutableSegmentLoadInfo ( ) ) ) ; if ( isSegmentLoaded ( loadInfoIterable , descriptor ) ) { return Response . ok ( true ) . build ( ) ; } return Response . ok ( false ) . build ( ) ; } catch ( Exception e ) { log . error ( e , "Error while handling hand off check request" ) ; return Response . serverError ( ) . 
entity ( ImmutableMap . of ( "error" , e . toString ( ) ) ) . build ( ) ; } } | Used by the realtime tasks to learn whether a segment is handed off or not . It returns true when the segment will never be handed off or is already handed off . Otherwise it returns false . |
18,839 | public static InputStream streamFile ( final File file , final long offset ) throws IOException { final RandomAccessFile raf = new RandomAccessFile ( file , "r" ) ; final long rafLength = raf . length ( ) ; if ( offset > 0 ) { raf . seek ( offset ) ; } else if ( offset < 0 && offset < rafLength ) { raf . seek ( Math . max ( 0 , rafLength + offset ) ) ; } return Channels . newInputStream ( raf . getChannel ( ) ) ; } | Open a stream to a file . |
18,840 | private MessageType getPartialReadSchema ( InitContext context ) { MessageType fullSchema = context . getFileSchema ( ) ; String name = fullSchema . getName ( ) ; HadoopDruidIndexerConfig config = HadoopDruidIndexerConfig . fromConfiguration ( context . getConfiguration ( ) ) ; ParseSpec parseSpec = config . getParser ( ) . getParseSpec ( ) ; if ( parseSpec instanceof ParquetParseSpec ) { if ( ( ( ParquetParseSpec ) parseSpec ) . getFlattenSpec ( ) != null ) { return fullSchema ; } } String tsField = parseSpec . getTimestampSpec ( ) . getTimestampColumn ( ) ; List < DimensionSchema > dimensionSchema = parseSpec . getDimensionsSpec ( ) . getDimensions ( ) ; Set < String > dimensions = new HashSet < > ( ) ; for ( DimensionSchema dim : dimensionSchema ) { dimensions . add ( dim . getName ( ) ) ; } Set < String > metricsFields = new HashSet < > ( ) ; for ( AggregatorFactory agg : config . getSchema ( ) . getDataSchema ( ) . getAggregators ( ) ) { metricsFields . addAll ( agg . requiredFields ( ) ) ; } List < Type > partialFields = new ArrayList < > ( ) ; for ( Type type : fullSchema . getFields ( ) ) { if ( tsField . equals ( type . getName ( ) ) || metricsFields . contains ( type . getName ( ) ) || dimensions . size ( ) > 0 && dimensions . contains ( type . getName ( ) ) || dimensions . size ( ) == 0 ) { partialFields . add ( type ) ; } } return new MessageType ( name , partialFields ) ; } | Select the columns from the parquet schema that are used in the schema of the ingestion job |
18,841 | private void fetchIfNeeded ( long remainingBytes ) { if ( ( fetchFutures . isEmpty ( ) || fetchFutures . peekLast ( ) . isDone ( ) ) && remainingBytes <= prefetchConfig . getPrefetchTriggerBytes ( ) ) { Future < Void > fetchFuture = fetchExecutor . submit ( ( ) -> { fetch ( ) ; return null ; } ) ; fetchFutures . add ( fetchFuture ) ; } } | Submit a fetch task if remainingBytes is smaller than prefetchTriggerBytes . |
18,842 | private boolean isRecordAlreadyRead ( final PartitionIdType recordPartition , final SequenceOffsetType recordSequenceNumber ) { final SequenceOffsetType lastReadOffset = lastReadOffsets . get ( recordPartition ) ; if ( lastReadOffset == null ) { return false ; } else { return createSequenceNumber ( recordSequenceNumber ) . compareTo ( createSequenceNumber ( lastReadOffset ) ) <= 0 ; } } | Returns true if the given record has already been read based on lastReadOffsets . |
18,843 | private boolean isMoreToReadBeforeReadingRecord ( final SequenceOffsetType recordSequenceNumber , final SequenceOffsetType endSequenceNumber ) { final int compareToEnd = createSequenceNumber ( recordSequenceNumber ) . compareTo ( createSequenceNumber ( endSequenceNumber ) ) ; return isEndOffsetExclusive ( ) ? compareToEnd < 0 : compareToEnd <= 0 ; } | Returns true if given that we want to start reading from recordSequenceNumber and end at endSequenceNumber there is more left to read . Used in pre - read checks to determine if there is anything left to read . |
18,844 | private boolean isMoreToReadAfterReadingRecord ( final SequenceOffsetType recordSequenceNumber , final SequenceOffsetType endSequenceNumber ) { final int compareNextToEnd = createSequenceNumber ( getNextStartOffset ( recordSequenceNumber ) ) . compareTo ( createSequenceNumber ( endSequenceNumber ) ) ; return compareNextToEnd < 0 ; } | Returns true if given that recordSequenceNumber has already been read and we want to end at endSequenceNumber there is more left to read . Used in post - read checks to determine if there is anything left to read . |
18,845 | private Access authorizationCheck ( final HttpServletRequest req , Action action ) { return IndexTaskUtils . datasourceAuthorizationCheck ( req , action , task . getDataSource ( ) , authorizerMapper ) ; } | Authorizes action to be performed on this task s datasource |
18,846 | @ Path ( "/pause" ) @ Produces ( MediaType . APPLICATION_JSON ) public Response pauseHTTP ( final HttpServletRequest req ) throws InterruptedException { authorizationCheck ( req , Action . WRITE ) ; return pause ( ) ; } | Signals the ingestion loop to pause . |
18,847 | public static Granularity mergeGranularities ( List < Granularity > toMerge ) { if ( toMerge == null || toMerge . size ( ) == 0 ) { return null ; } Granularity result = toMerge . get ( 0 ) ; for ( int i = 1 ; i < toMerge . size ( ) ; i ++ ) { if ( ! Objects . equals ( result , toMerge . get ( i ) ) ) { return null ; } } return result ; } | simple merge strategy on query granularity that checks if all are equal or else returns null . this can be improved in future but is good enough for most use - cases . |
18,848 | public final Interval bucket ( DateTime t ) { DateTime start = bucketStart ( t ) ; return new Interval ( start , increment ( start ) ) ; } | Return a granularity - sized Interval containing a particular DateTime . |
18,849 | final Integer [ ] getDateValues ( String filePath , Formatter formatter ) { Pattern pattern = defaultPathPattern ; switch ( formatter ) { case DEFAULT : case LOWER_DEFAULT : break ; case HIVE : pattern = hivePathPattern ; break ; default : throw new IAE ( "Format %s not supported" , formatter ) ; } Matcher matcher = pattern . matcher ( filePath ) ; Integer [ ] vals = new Integer [ 7 ] ; if ( matcher . matches ( ) ) { for ( int i = 1 ; i <= matcher . groupCount ( ) ; i ++ ) { vals [ i ] = ( matcher . group ( i ) != null ) ? Integer . parseInt ( matcher . group ( i ) ) : null ; } } return vals ; } | Used by the toDate implementations . |
18,850 | public static < T extends Comparable < T > > RangeSet < T > unionRanges ( final Iterable < Range < T > > ranges ) { RangeSet < T > rangeSet = null ; for ( Range < T > range : ranges ) { if ( rangeSet == null ) { rangeSet = TreeRangeSet . create ( ) ; } rangeSet . add ( range ) ; } return rangeSet ; } | Unions a set of ranges or returns null if the set is empty . |
18,851 | public static < T extends Comparable < T > > RangeSet < T > unionRangeSets ( final Iterable < RangeSet < T > > rangeSets ) { final RangeSet < T > rangeSet = TreeRangeSet . create ( ) ; for ( RangeSet < T > set : rangeSets ) { rangeSet . addAll ( set ) ; } return rangeSet ; } | Unions a set of rangeSets or returns null if the set is empty . |
18,852 | public static < T extends Comparable < T > > RangeSet < T > intersectRangeSets ( final Iterable < RangeSet < T > > rangeSets ) { RangeSet < T > rangeSet = null ; for ( final RangeSet < T > set : rangeSets ) { if ( rangeSet == null ) { rangeSet = TreeRangeSet . create ( ) ; rangeSet . addAll ( set ) ; } else { rangeSet . removeAll ( set . complement ( ) ) ; } } return rangeSet ; } | Intersects a set of rangeSets or returns null if the set is empty . |
18,853 | public String getHostAndPort ( ) { if ( enablePlaintextPort ) { if ( plaintextPort < 0 ) { return HostAndPort . fromString ( host ) . toString ( ) ; } else { return HostAndPort . fromParts ( host , plaintextPort ) . toString ( ) ; } } return null ; } | Returns host and port together as something that can be used as part of a URI . |
18,854 | @ GuardedBy ( "tasks" ) private void saveRunningTasks ( ) { final File restoreFile = getRestoreFile ( ) ; final List < String > theTasks = new ArrayList < > ( ) ; for ( ForkingTaskRunnerWorkItem forkingTaskRunnerWorkItem : tasks . values ( ) ) { theTasks . add ( forkingTaskRunnerWorkItem . getTaskId ( ) ) ; } try { Files . createParentDirs ( restoreFile ) ; jsonMapper . writeValue ( restoreFile , new TaskRestoreInfo ( theTasks ) ) ; } catch ( Exception e ) { log . warn ( e , "Failed to save tasks to restore file[%s]. Skipping this save." , restoreFile ) ; } } | occur while saving . |
18,855 | public GenericRecord parse ( ByteBuffer bytes ) { if ( bytes . remaining ( ) < 5 ) { throw new ParseException ( "record must have at least 5 bytes carrying version and schemaId" ) ; } byte version = bytes . get ( ) ; if ( version != V1 ) { throw new ParseException ( "found record of arbitrary version [%s]" , version ) ; } int schemaId = bytes . getInt ( ) ; Schema schemaObj = schemaObjs . get ( schemaId ) ; if ( schemaObj == null ) { throw new ParseException ( "Failed to find schema for id [%s]" , schemaId ) ; } DatumReader < GenericRecord > reader = new GenericDatumReader < > ( schemaObj ) ; try ( ByteBufferInputStream inputStream = new ByteBufferInputStream ( Collections . singletonList ( bytes ) ) ) { return reader . read ( null , DecoderFactory . get ( ) . binaryDecoder ( inputStream , null ) ) ; } catch ( EOFException eof ) { throw new ParseException ( eof , "Avro's unnecessary EOFException, detail: [%s]" , "https://issues.apache.org/jira/browse/AVRO-813" ) ; } catch ( Exception e ) { throw new ParseException ( e , "Fail to decode avro message with schemaId [%s]." , schemaId ) ; } } | remaining bytes would have avro data |
18,856 | public static long copyAndClose ( InputStream is , OutputStream os ) throws IOException { try { final long retval = ByteStreams . copy ( is , os ) ; os . flush ( ) ; return retval ; } finally { is . close ( ) ; os . close ( ) ; } } | Copy from is to os and close the streams regardless of the result . |
18,857 | private static String truncateErrorMsg ( String errorMsg ) { if ( errorMsg != null && errorMsg . length ( ) > MAX_ERROR_MSG_LENGTH ) { return errorMsg . substring ( 0 , MAX_ERROR_MSG_LENGTH ) + "..." ; } else { return errorMsg ; } } | The full error message will be available via a TaskReport . |
18,858 | private void selfCheckingMove ( String s3Bucket , String targetS3Bucket , String s3Path , String targetS3Path , String copyMsg ) throws IOException , SegmentLoadingException { if ( s3Bucket . equals ( targetS3Bucket ) && s3Path . equals ( targetS3Path ) ) { log . info ( "No need to move file[s3://%s/%s] onto itself" , s3Bucket , s3Path ) ; return ; } if ( s3Client . doesObjectExist ( s3Bucket , s3Path ) ) { final ListObjectsV2Result listResult = s3Client . listObjectsV2 ( new ListObjectsV2Request ( ) . withBucketName ( s3Bucket ) . withPrefix ( s3Path ) . withMaxKeys ( 1 ) ) ; if ( listResult . getObjectSummaries ( ) . size ( ) == 0 ) { throw new ISE ( "Unable to list object [s3://%s/%s]" , s3Bucket , s3Path ) ; } final S3ObjectSummary objectSummary = listResult . getObjectSummaries ( ) . get ( 0 ) ; if ( objectSummary . getStorageClass ( ) != null && StorageClass . fromValue ( StringUtils . toUpperCase ( objectSummary . getStorageClass ( ) ) ) . equals ( StorageClass . Glacier ) ) { throw new AmazonServiceException ( StringUtils . format ( "Cannot move file[s3://%s/%s] of storage class glacier, skipping." , s3Bucket , s3Path ) ) ; } else { log . info ( "Moving file %s" , copyMsg ) ; final CopyObjectRequest copyRequest = new CopyObjectRequest ( s3Bucket , s3Path , targetS3Bucket , targetS3Path ) ; if ( ! config . getDisableAcl ( ) ) { copyRequest . setAccessControlList ( S3Utils . grantFullControlToBucketOwner ( s3Client , targetS3Bucket ) ) ; } s3Client . copyObject ( copyRequest ) ; if ( ! s3Client . doesObjectExist ( targetS3Bucket , targetS3Path ) ) { throw new IOE ( "After copy was reported as successful the file doesn't exist in the target location [%s]" , copyMsg ) ; } deleteWithRetriesSilent ( s3Bucket , s3Path ) ; log . debug ( "Finished moving file %s" , copyMsg ) ; } } else { if ( s3Client . doesObjectExist ( targetS3Bucket , targetS3Path ) ) { log . 
info ( "Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]" , s3Bucket , s3Path , targetS3Bucket , targetS3Path ) ; } else { throw new SegmentLoadingException ( "Unable to move file %s, not present in either source or target location" , copyMsg ) ; } } } | Copies an object and after that checks that the object is present at the target location via a separate API call . If it is not an exception is thrown and the object is not deleted at the old location . This paranoid check is added after it was observed that S3 may report a successful move and the object is not found at the target location .
18,859 | protected long download ( T object , File outFile ) throws IOException { openObjectFunction . open ( object , outFile ) ; return outFile . length ( ) ; } | Downloads the entire resultset object into a file . This avoids maintaining a persistent connection to the database . The retry is performed at the query execution layer . |
18,860 | public void setUri ( URI uri ) throws URISyntaxException , NoSuchAlgorithmException , KeyManagementException { super . setUri ( uri ) ; } | we are only overriding this to help Jackson not be confused about the two setURI methods |
18,861 | public static File [ ] getHadoopDependencyFilesToLoad ( List < String > hadoopDependencyCoordinates , ExtensionsConfig extensionsConfig ) { final File rootHadoopDependenciesDir = new File ( extensionsConfig . getHadoopDependenciesDir ( ) ) ; if ( rootHadoopDependenciesDir . exists ( ) && ! rootHadoopDependenciesDir . isDirectory ( ) ) { throw new ISE ( "Root Hadoop dependencies directory [%s] is not a directory!?" , rootHadoopDependenciesDir ) ; } final File [ ] hadoopDependenciesToLoad = new File [ hadoopDependencyCoordinates . size ( ) ] ; int i = 0 ; for ( final String coordinate : hadoopDependencyCoordinates ) { final DefaultArtifact artifact = new DefaultArtifact ( coordinate ) ; final File hadoopDependencyDir = new File ( rootHadoopDependenciesDir , artifact . getArtifactId ( ) ) ; final File versionDir = new File ( hadoopDependencyDir , artifact . getVersion ( ) ) ; if ( ! hadoopDependencyDir . isDirectory ( ) || ! versionDir . isDirectory ( ) ) { throw new ISE ( "Hadoop dependency [%s] didn't exist!?" , versionDir . getAbsolutePath ( ) ) ; } hadoopDependenciesToLoad [ i ++ ] = versionDir ; } return hadoopDependenciesToLoad ; } | Find all the hadoop dependencies that should be loaded by druid |
18,862 | private static String computeKeyHash ( String memcachedPrefix , NamedKey key ) { return memcachedPrefix + ":" + DigestUtils . sha1Hex ( key . namespace ) + ":" + DigestUtils . sha1Hex ( key . key ) ; } | length of separators |
18,863 | static String tryExtractMostProbableDataSource ( String segmentId ) { Matcher dateTimeMatcher = DateTimes . COMMON_DATE_TIME_PATTERN . matcher ( segmentId ) ; while ( true ) { if ( ! dateTimeMatcher . find ( ) ) { return null ; } int dataSourceEnd = dateTimeMatcher . start ( ) - 1 ; if ( segmentId . charAt ( dataSourceEnd ) != DELIMITER ) { continue ; } return segmentId . substring ( 0 , dataSourceEnd ) ; } } | Heuristically tries to extract the most probable data source from a String segment id representation or returns null on failure . |
18,864 | public static SegmentId dummy ( String dataSource , int partitionNum ) { return of ( dataSource , Intervals . ETERNITY , "dummy_version" , partitionNum ) ; } | Creates a dummy SegmentId with the given data source and partition number . This method is useful in benchmark and test code . |
18,865 | static List < Interval > filterSkipIntervals ( Interval totalInterval , List < Interval > skipIntervals ) { final List < Interval > filteredIntervals = new ArrayList < > ( skipIntervals . size ( ) + 1 ) ; DateTime remainingStart = totalInterval . getStart ( ) ; DateTime remainingEnd = totalInterval . getEnd ( ) ; for ( Interval skipInterval : skipIntervals ) { if ( skipInterval . getStart ( ) . isBefore ( remainingStart ) && skipInterval . getEnd ( ) . isAfter ( remainingStart ) ) { remainingStart = skipInterval . getEnd ( ) ; } else if ( skipInterval . getStart ( ) . isBefore ( remainingEnd ) && skipInterval . getEnd ( ) . isAfter ( remainingEnd ) ) { remainingEnd = skipInterval . getStart ( ) ; } else if ( ! remainingStart . isAfter ( skipInterval . getStart ( ) ) && ! remainingEnd . isBefore ( skipInterval . getEnd ( ) ) ) { filteredIntervals . add ( new Interval ( remainingStart , skipInterval . getStart ( ) ) ) ; remainingStart = skipInterval . getEnd ( ) ; } else { log . warn ( "skipInterval[%s] is not contained in remainingInterval[%s]" , skipInterval , new Interval ( remainingStart , remainingEnd ) ) ; } } if ( ! remainingStart . equals ( remainingEnd ) ) { filteredIntervals . add ( new Interval ( remainingStart , remainingEnd ) ) ; } return filteredIntervals ; } | Returns a list of intervals which are contained by totalInterval but don t overlap with skipIntervals .
18,866 | private static String mergePaths ( String path1 , String path2 ) { return path1 + ( path1 . endsWith ( Path . SEPARATOR ) ? "" : Path . SEPARATOR ) + path2 ; } | some hadoop version Path . mergePaths does not exist |
18,867 | private static List < Object > convertRepeatedFieldToList ( Group g , int fieldIndex , boolean binaryAsString ) { Type t = g . getType ( ) . getFields ( ) . get ( fieldIndex ) ; assert t . getRepetition ( ) . equals ( Type . Repetition . REPEATED ) ; int repeated = g . getFieldRepetitionCount ( fieldIndex ) ; List < Object > vals = new ArrayList < > ( ) ; for ( int i = 0 ; i < repeated ; i ++ ) { if ( t . isPrimitive ( ) ) { vals . add ( convertPrimitiveField ( g , fieldIndex , i , binaryAsString ) ) ; } else { vals . add ( g . getGroup ( fieldIndex , i ) ) ; } } return vals ; } | convert a repeated field into a list of primitives or groups |
18,868 | private static boolean isLogicalListType ( Type listType ) { return ! listType . isPrimitive ( ) && listType . getOriginalType ( ) != null && listType . getOriginalType ( ) . equals ( OriginalType . LIST ) && listType . asGroupType ( ) . getFieldCount ( ) == 1 && listType . asGroupType ( ) . getFields ( ) . get ( 0 ) . isRepetition ( Type . Repetition . REPEATED ) ; } | check if a parquet type is a valid list type |
18,869 | private static boolean isLogicalMapType ( Type groupType ) { OriginalType ot = groupType . getOriginalType ( ) ; if ( groupType . isPrimitive ( ) || ot == null || groupType . isRepetition ( Type . Repetition . REPEATED ) ) { return false ; } if ( groupType . getOriginalType ( ) . equals ( OriginalType . MAP ) || groupType . getOriginalType ( ) . equals ( OriginalType . MAP_KEY_VALUE ) ) { GroupType myMapType = groupType . asGroupType ( ) ; if ( myMapType . getFieldCount ( ) != 1 || myMapType . getFields ( ) . get ( 0 ) . isPrimitive ( ) ) { return false ; } GroupType mapItemType = myMapType . getFields ( ) . get ( 0 ) . asGroupType ( ) ; return mapItemType . isRepetition ( Type . Repetition . REPEATED ) && mapItemType . getFieldCount ( ) == 2 && mapItemType . getFields ( ) . get ( 0 ) . getName ( ) . equalsIgnoreCase ( "key" ) && mapItemType . getFields ( ) . get ( 0 ) . isPrimitive ( ) && mapItemType . getFields ( ) . get ( 1 ) . getName ( ) . equalsIgnoreCase ( "value" ) ; } return false ; } | check if a parquet type is a valid map type |
18,870 | @ SuppressWarnings ( "unchecked" ) public Object finalizeComputation ( Object object ) { return af . finalizeComputation ( ( T ) object ) ; } | Not implemented . Throws UnsupportedOperationException . |
18,871 | private static Pair < List < DimensionSpec > , List < DimensionSpec > > partitionDimensionList ( StorageAdapter adapter , List < DimensionSpec > dimensions ) { final List < DimensionSpec > bitmapDims = new ArrayList < > ( ) ; final List < DimensionSpec > nonBitmapDims = new ArrayList < > ( ) ; final List < DimensionSpec > dimsToSearch = getDimsToSearch ( adapter . getAvailableDimensions ( ) , dimensions ) ; for ( DimensionSpec spec : dimsToSearch ) { ColumnCapabilities capabilities = adapter . getColumnCapabilities ( spec . getDimension ( ) ) ; if ( capabilities == null ) { continue ; } if ( capabilities . hasBitmapIndexes ( ) ) { bitmapDims . add ( spec ) ; } else { nonBitmapDims . add ( spec ) ; } } return new Pair < > ( bitmapDims , nonBitmapDims ) ; } | Split the given dimensions list into bitmap - supporting dimensions and non - bitmap supporting ones . Note that the returned lists are free to modify . |
18,872 | public static < T > ConditionalMultibind < T > create ( Properties properties , Binder binder , Class < T > type , Class < ? extends Annotation > annotationType ) { return new ConditionalMultibind < T > ( properties , Multibinder . newSetBinder ( binder , type , annotationType ) ) ; } | Create a ConditionalMultibind that resolves items to be added to the set at binding time . |
18,873 | private boolean hasEnoughLag ( Interval target , Interval maxInterval ) { return minDataLagMs <= ( maxInterval . getStartMillis ( ) - target . getStartMillis ( ) ) ; } | check whether the start millis of target interval is more than minDataLagMs lagging behind maxInterval s . minDataLag is required to prevent repeatedly building data because of delayed data .
18,874 | public HistogramVisual asVisual ( ) { float [ ] visualCounts = new float [ bins . length - 2 ] ; for ( int i = 0 ; i < visualCounts . length ; ++ i ) { visualCounts [ i ] = ( float ) bins [ i + 1 ] ; } return new HistogramVisual ( breaks , visualCounts , new float [ ] { min , max } ) ; } | Returns a visual representation of a histogram object . Initially returns an array of just the min . and max . values but can also support the addition of quantiles . |
18,875 | @ SuppressWarnings ( "unchecked" ) public < T > Sequence < T > runSimple ( final Query < T > query , final AuthenticationResult authenticationResult , final String remoteAddress ) { initialize ( query ) ; final Sequence < T > results ; try { final Access access = authorize ( authenticationResult ) ; if ( ! access . isAllowed ( ) ) { throw new ISE ( "Unauthorized" ) ; } final QueryLifecycle . QueryResponse queryResponse = execute ( ) ; results = queryResponse . getResults ( ) ; } catch ( Throwable e ) { emitLogsAndMetrics ( e , remoteAddress , - 1 ) ; throw e ; } return Sequences . wrap ( results , new SequenceWrapper ( ) { public void after ( final boolean isDone , final Throwable thrown ) { emitLogsAndMetrics ( thrown , remoteAddress , - 1 ) ; } } ) ; } | For callers where simplicity is desired over flexibility . This method does it all in one call . If the request is unauthorized an IllegalStateException will be thrown . Logs and metrics are emitted when the Sequence is either fully iterated or throws an exception . |
18,876 | @ SuppressWarnings ( "unchecked" ) public void initialize ( final Query baseQuery ) { transition ( State . NEW , State . INITIALIZED ) ; String queryId = baseQuery . getId ( ) ; if ( Strings . isNullOrEmpty ( queryId ) ) { queryId = UUID . randomUUID ( ) . toString ( ) ; } this . baseQuery = baseQuery . withId ( queryId ) ; this . toolChest = warehouse . getToolChest ( baseQuery ) ; } | Initializes this object to execute a specific query . Does not actually execute the query . |
18,877 | public synchronized void addChangeRequests ( List < T > requests ) { for ( T request : requests ) { changes . add ( new Holder < > ( request , getLastCounter ( ) . inc ( ) ) ) ; } singleThreadedExecutor . execute ( resolveWaitingFuturesRunnable ) ; } | Add batch of segment changes update . |
18,878 | public synchronized ListenableFuture < ChangeRequestsSnapshot < T > > getRequestsSince ( final Counter counter ) { final CustomSettableFuture < T > future = new CustomSettableFuture < > ( waitingFutures ) ; if ( counter . counter < 0 ) { future . setException ( new IAE ( "counter[%s] must be >= 0" , counter ) ) ; return future ; } Counter lastCounter = getLastCounter ( ) ; if ( counter . counter == lastCounter . counter ) { if ( ! counter . matches ( lastCounter ) ) { ChangeRequestsSnapshot < T > reset = ChangeRequestsSnapshot . fail ( StringUtils . format ( "counter[%s] failed to match with [%s]" , counter , lastCounter ) ) ; future . set ( reset ) ; } else { synchronized ( waitingFutures ) { waitingFutures . put ( future , counter ) ; } } } else { try { future . set ( getRequestsSinceWithoutWait ( counter ) ) ; } catch ( Exception ex ) { future . setException ( ex ) ; } } return future ; } | Returns a Future that on completion returns list of segment updates and associated counter . If there are no update since given counter then Future completion waits till an updates is provided . |
18,879 | public static void trySkipCache ( int fd , long offset , long len ) { if ( ! initialized || ! fadvisePossible || fd < 0 ) { return ; } try { posix_fadvise ( fd , offset , len , POSIX_FADV_DONTNEED ) ; } catch ( UnsupportedOperationException uoe ) { log . warn ( uoe , "posix_fadvise is not supported" ) ; fadvisePossible = false ; } catch ( UnsatisfiedLinkError ule ) { log . warn ( ule , "Unsatisfied Link error: posix_fadvise failed on file descriptor [%d], offset [%d]" , fd , offset ) ; fadvisePossible = false ; } catch ( Exception e ) { log . warn ( e , "Unknown exception: posix_fadvise failed on file descriptor [%d], offset [%d]" , fd , offset ) ; } } | Remove pages from the file system page cache when they won't be accessed again
18,880 | private static void trySyncFileRange ( int fd , long offset , long nbytes , int flags ) { if ( ! initialized || ! syncFileRangePossible || fd < 0 ) { return ; } try { int ret_code = sync_file_range ( fd , offset , nbytes , flags ) ; if ( ret_code != 0 ) { log . warn ( "failed on syncing fd [%d], offset [%d], bytes [%d], ret_code [%d], errno [%d]" , fd , offset , nbytes , ret_code , Native . getLastError ( ) ) ; return ; } } catch ( UnsupportedOperationException uoe ) { log . warn ( uoe , "sync_file_range is not supported" ) ; syncFileRangePossible = false ; } catch ( UnsatisfiedLinkError nle ) { log . warn ( nle , "sync_file_range failed on fd [%d], offset [%d], bytes [%d]" , fd , offset , nbytes ) ; syncFileRangePossible = false ; } catch ( Exception e ) { log . warn ( e , "Unknown exception: sync_file_range failed on fd [%d], offset [%d], bytes [%d]" , fd , offset , nbytes ) ; syncFileRangePossible = false ; } } | Sync part of an open file to the file system . |
18,881 | public void add ( double value ) { readWriteLock . writeLock ( ) . lock ( ) ; try { if ( value < lowerLimit ) { outlierHandler . handleOutlierAdd ( false ) ; return ; } else if ( value >= upperLimit ) { outlierHandler . handleOutlierAdd ( true ) ; return ; } count += 1 ; if ( value > max ) { max = value ; } if ( value < min ) { min = value ; } double valueRelativeToRange = value - lowerLimit ; int targetBucket = ( int ) ( valueRelativeToRange / bucketSize ) ; if ( targetBucket >= histogram . length ) { targetBucket = histogram . length - 1 ; } histogram [ targetBucket ] += 1 ; } finally { readWriteLock . writeLock ( ) . unlock ( ) ; } } | Add a value to the histogram using the outlierHandler to account for outliers . |
18,882 | public void combineHistogram ( FixedBucketsHistogram otherHistogram ) { if ( otherHistogram == null ) { return ; } readWriteLock . writeLock ( ) . lock ( ) ; otherHistogram . getReadWriteLock ( ) . readLock ( ) . lock ( ) ; try { missingValueCount += otherHistogram . getMissingValueCount ( ) ; if ( bucketSize == otherHistogram . getBucketSize ( ) && lowerLimit == otherHistogram . getLowerLimit ( ) && upperLimit == otherHistogram . getUpperLimit ( ) ) { combineHistogramSameBuckets ( otherHistogram ) ; } else { combineHistogramDifferentBuckets ( otherHistogram ) ; } } finally { readWriteLock . writeLock ( ) . unlock ( ) ; otherHistogram . getReadWriteLock ( ) . readLock ( ) . unlock ( ) ; } } | Merge another histogram into this one . Only the state of this histogram is updated . |
18,883 | private void combineHistogramSameBuckets ( FixedBucketsHistogram otherHistogram ) { long [ ] otherHistogramArray = otherHistogram . getHistogram ( ) ; for ( int i = 0 ; i < numBuckets ; i ++ ) { histogram [ i ] += otherHistogramArray [ i ] ; } count += otherHistogram . getCount ( ) ; max = Math . max ( max , otherHistogram . getMax ( ) ) ; min = Math . min ( min , otherHistogram . getMin ( ) ) ; outlierHandler . handleOutliersForCombineSameBuckets ( otherHistogram ) ; } | Merge another histogram that has the same range and same buckets . |
18,884 | private void combineHistogramDifferentBuckets ( FixedBucketsHistogram otherHistogram ) { if ( otherHistogram . getLowerLimit ( ) >= upperLimit ) { outlierHandler . handleOutliersCombineDifferentBucketsAllUpper ( otherHistogram ) ; } else if ( otherHistogram . getUpperLimit ( ) <= lowerLimit ) { outlierHandler . handleOutliersCombineDifferentBucketsAllLower ( otherHistogram ) ; } else { simpleInterpolateMerge ( otherHistogram ) ; } } | Merge another histogram that has different buckets from mine . |
18,885 | private double getCumulativeCount ( double cutoff , boolean fromStart ) { int cutoffBucket = ( int ) ( ( cutoff - lowerLimit ) / bucketSize ) ; double count = 0 ; if ( fromStart ) { for ( int i = 0 ; i <= cutoffBucket ; i ++ ) { if ( i == cutoffBucket ) { double bucketStart = i * bucketSize + lowerLimit ; double partialCount = ( ( cutoff - bucketStart ) / bucketSize ) * histogram [ i ] ; count += partialCount ; } else { count += histogram [ i ] ; } } } else { for ( int i = cutoffBucket ; i < histogram . length ; i ++ ) { if ( i == cutoffBucket ) { double bucketEnd = ( ( i + 1 ) * bucketSize ) + lowerLimit ; double partialCount = ( ( bucketEnd - cutoff ) / bucketSize ) * histogram [ i ] ; count += partialCount ; } else { count += histogram [ i ] ; } } } return count ; } | Get a sum of bucket counts from either the start of a histogram's range or its end up to a specified cutoff value .
18,886 | private void writeByteBufferCommonFields ( ByteBuffer buf ) { buf . putDouble ( lowerLimit ) ; buf . putDouble ( upperLimit ) ; buf . putInt ( numBuckets ) ; buf . put ( ( byte ) outlierHandlingMode . ordinal ( ) ) ; buf . putLong ( count ) ; buf . putLong ( lowerOutlierCount ) ; buf . putLong ( upperOutlierCount ) ; buf . putLong ( missingValueCount ) ; buf . putDouble ( max ) ; buf . putDouble ( min ) ; } | Serializes histogram fields that are common to both the full and sparse encoding modes . |
18,887 | public byte [ ] toBytesFull ( boolean withHeader ) { int size = getFullStorageSize ( numBuckets ) ; if ( withHeader ) { size += SERDE_HEADER_SIZE ; } ByteBuffer buf = ByteBuffer . allocate ( size ) ; writeByteBufferFull ( buf , withHeader ) ; return buf . array ( ) ; } | Serialize the histogram in full encoding mode . |
18,888 | private void writeByteBufferFull ( ByteBuffer buf , boolean withHeader ) { if ( withHeader ) { writeByteBufferSerdeHeader ( buf , FULL_ENCODING_MODE ) ; } writeByteBufferCommonFields ( buf ) ; buf . asLongBuffer ( ) . put ( histogram ) ; buf . position ( buf . position ( ) + Long . BYTES * histogram . length ) ; } | Helper method for toBytesFull |
18,889 | public byte [ ] toBytesSparse ( int nonEmptyBuckets ) { int size = SERDE_HEADER_SIZE + getSparseStorageSize ( nonEmptyBuckets ) ; ByteBuffer buf = ByteBuffer . allocate ( size ) ; writeByteBufferSparse ( buf , nonEmptyBuckets ) ; return buf . array ( ) ; } | Serialize the histogram in sparse encoding mode . |
18,890 | public void writeByteBufferSparse ( ByteBuffer buf , int nonEmptyBuckets ) { writeByteBufferSerdeHeader ( buf , SPARSE_ENCODING_MODE ) ; writeByteBufferCommonFields ( buf ) ; buf . putInt ( nonEmptyBuckets ) ; int bucketsWritten = 0 ; for ( int i = 0 ; i < numBuckets ; i ++ ) { if ( histogram [ i ] > 0 ) { buf . putInt ( i ) ; buf . putLong ( histogram [ i ] ) ; bucketsWritten += 1 ; } if ( bucketsWritten == nonEmptyBuckets ) { break ; } } } | Helper method for toBytesSparse |
18,891 | public static FixedBucketsHistogram fromBytes ( byte [ ] bytes ) { ByteBuffer buf = ByteBuffer . wrap ( bytes ) ; return fromByteBuffer ( buf ) ; } | General deserialization method for FixedBucketsHistogram . |
18,892 | public static FixedBucketsHistogram fromByteBuffer ( ByteBuffer buf ) { byte serializationVersion = buf . get ( ) ; Preconditions . checkArgument ( serializationVersion == SERIALIZATION_VERSION , StringUtils . format ( "Only serialization version %s is supported." , SERIALIZATION_VERSION ) ) ; byte mode = buf . get ( ) ; if ( mode == FULL_ENCODING_MODE ) { return fromByteBufferFullNoSerdeHeader ( buf ) ; } else if ( mode == SPARSE_ENCODING_MODE ) { return fromBytesSparse ( buf ) ; } else { throw new ISE ( "Invalid histogram serde mode: %s" , mode ) ; } } | Deserialization helper method |
18,893 | protected static FixedBucketsHistogram fromByteBufferFullNoSerdeHeader ( ByteBuffer buf ) { double lowerLimit = buf . getDouble ( ) ; double upperLimit = buf . getDouble ( ) ; int numBuckets = buf . getInt ( ) ; OutlierHandlingMode outlierHandlingMode = OutlierHandlingMode . values ( ) [ buf . get ( ) ] ; long count = buf . getLong ( ) ; long lowerOutlierCount = buf . getLong ( ) ; long upperOutlierCount = buf . getLong ( ) ; long missingValueCount = buf . getLong ( ) ; double max = buf . getDouble ( ) ; double min = buf . getDouble ( ) ; long histogram [ ] = new long [ numBuckets ] ; buf . asLongBuffer ( ) . get ( histogram ) ; buf . position ( buf . position ( ) + Long . BYTES * histogram . length ) ; return new FixedBucketsHistogram ( lowerLimit , upperLimit , numBuckets , outlierHandlingMode , histogram , count , max , min , lowerOutlierCount , upperOutlierCount , missingValueCount ) ; } | Helper method for deserializing histograms with full encoding mode . Assumes that the serialization header is not present or has already been read . |
18,894 | private static FixedBucketsHistogram fromBytesSparse ( ByteBuffer buf ) { double lowerLimit = buf . getDouble ( ) ; double upperLimit = buf . getDouble ( ) ; int numBuckets = buf . getInt ( ) ; OutlierHandlingMode outlierHandlingMode = OutlierHandlingMode . values ( ) [ buf . get ( ) ] ; long count = buf . getLong ( ) ; long lowerOutlierCount = buf . getLong ( ) ; long upperOutlierCount = buf . getLong ( ) ; long missingValueCount = buf . getLong ( ) ; double max = buf . getDouble ( ) ; double min = buf . getDouble ( ) ; int nonEmptyBuckets = buf . getInt ( ) ; long histogram [ ] = new long [ numBuckets ] ; for ( int i = 0 ; i < nonEmptyBuckets ; i ++ ) { int bucket = buf . getInt ( ) ; long bucketCount = buf . getLong ( ) ; histogram [ bucket ] = bucketCount ; } return new FixedBucketsHistogram ( lowerLimit , upperLimit , numBuckets , outlierHandlingMode , histogram , count , max , min , lowerOutlierCount , upperOutlierCount , missingValueCount ) ; } | Helper method for deserializing histograms with sparse encoding mode . Assumes that the serialization header is not present or has already been read . |
18,895 | public static int getSparseStorageSize ( int nonEmptyBuckets ) { return COMMON_FIELDS_SIZE + Integer . BYTES + ( Integer . BYTES + Long . BYTES ) * nonEmptyBuckets ; } | Compute the size in bytes of a sparse - encoding serialized histogram without the serialization header |
18,896 | public Map < String , Long > getDataSourceSizes ( ) { return dataSources . entrySet ( ) . stream ( ) . collect ( Collectors . toMap ( Entry :: getKey , entry -> entry . getValue ( ) . getTotalSegmentSize ( ) ) ) ; } | Returns a map of dataSource to the total byte size of segments managed by this segmentManager . This method should be used carefully because the returned map might be different from the actual data source states . |
18,897 | public Map < String , Long > getDataSourceCounts ( ) { return dataSources . entrySet ( ) . stream ( ) . collect ( Collectors . toMap ( Entry :: getKey , entry -> entry . getValue ( ) . getNumSegments ( ) ) ) ; } | Returns a map of dataSource to the number of segments managed by this segmentManager . This method should be used carefully because the returned map might be different from the actual data source states .
18,898 | public boolean loadSegment ( final DataSegment segment ) throws SegmentLoadingException { final Segment adapter = getAdapter ( segment ) ; final SettableSupplier < Boolean > resultSupplier = new SettableSupplier < > ( ) ; dataSources . compute ( segment . getDataSource ( ) , ( k , v ) -> { final DataSourceState dataSourceState = v == null ? new DataSourceState ( ) : v ; final VersionedIntervalTimeline < String , ReferenceCountingSegment > loadedIntervals = dataSourceState . getTimeline ( ) ; final PartitionHolder < ReferenceCountingSegment > entry = loadedIntervals . findEntry ( segment . getInterval ( ) , segment . getVersion ( ) ) ; if ( ( entry != null ) && ( entry . getChunk ( segment . getShardSpec ( ) . getPartitionNum ( ) ) != null ) ) { log . warn ( "Told to load an adapter for segment[%s] that already exists" , segment . getId ( ) ) ; resultSupplier . set ( false ) ; } else { loadedIntervals . add ( segment . getInterval ( ) , segment . getVersion ( ) , segment . getShardSpec ( ) . createChunk ( new ReferenceCountingSegment ( adapter ) ) ) ; dataSourceState . addSegment ( segment ) ; resultSupplier . set ( true ) ; } return dataSourceState ; } ) ; return resultSupplier . get ( ) ; } | Load a single segment . |
18,899 | private void loadSegment ( DataSegment segment , DataSegmentChangeCallback callback ) throws SegmentLoadingException { final boolean loaded ; try { loaded = segmentManager . loadSegment ( segment ) ; } catch ( Exception e ) { removeSegment ( segment , callback , false ) ; throw new SegmentLoadingException ( e , "Exception loading segment[%s]" , segment . getId ( ) ) ; } if ( loaded ) { File segmentInfoCacheFile = new File ( config . getInfoDir ( ) , segment . getId ( ) . toString ( ) ) ; if ( ! segmentInfoCacheFile . exists ( ) ) { try { jsonMapper . writeValue ( segmentInfoCacheFile , segment ) ; } catch ( IOException e ) { removeSegment ( segment , callback , false ) ; throw new SegmentLoadingException ( e , "Failed to write to disk segment info cache file[%s]" , segmentInfoCacheFile ) ; } } } } | Load a single segment . If the segment is loaded successfully this function simply returns . Otherwise it will throw a SegmentLoadingException |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.