idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
18,700
/** If this channel becomes inactive, fail all buffered writes with UNAVAILABLE. */
public void channelInactive(ChannelHandlerContext ctx) {
  String description =
      "Connection closed while performing protocol negotiation for " + ctx.pipeline().names();
  failWrites(Status.UNAVAILABLE.withDescription(description).asRuntimeException());
}
If this channel becomes inactive then notify all buffered writes that we failed .
18,701
/**
 * If we are still performing protocol negotiation, propagate the failure to all buffered
 * writes before delegating the close to the next handler.
 */
public void close(ChannelHandlerContext ctx, ChannelPromise future) throws Exception {
  String description =
      "Connection closing while performing protocol negotiation for " + ctx.pipeline().names();
  failWrites(Status.UNAVAILABLE.withDescription(description).asRuntimeException());
  super.close(ctx, future);
}
If we are still performing protocol negotiation then this will propagate failures to all buffered writes .
18,702
/**
 * Propagates the given failure to all buffered writes, failing each pending promise and
 * releasing its message. Only the first failure cause is recorded; duplicates are logged.
 */
@SuppressWarnings("FutureReturnValueIgnored")
private void failWrites(Throwable cause) {
  if (failCause == null) {
    failCause = cause;
  } else {
    logger.log(Level.FINE, "Ignoring duplicate failure", cause);
  }
  // Drain the queue, failing every pending write and releasing its buffer.
  for (ChannelWrite pending = bufferedWrites.poll();
      pending != null;
      pending = bufferedWrites.poll()) {
    pending.promise.setFailure(cause);
    ReferenceCountUtil.release(pending.msg);
  }
}
Propagate failures to all buffered writes .
18,703
/**
 * Checks for updates to the server's mutable services and updates the index if any changes
 * are detected. A change is any addition or removal in the set of file descriptors attached
 * to the mutable services, or a change in the service names.
 *
 * @return the current (possibly rebuilt) reflection index
 */
private ServerReflectionIndex updateIndexIfNecessary() {
  synchronized (lock) {
    // Lazily build the index on first use.
    if (serverReflectionIndex == null) {
      serverReflectionIndex =
          new ServerReflectionIndex(server.getImmutableServices(), server.getMutableServices());
      return serverReflectionIndex;
    }
    // Collect the file descriptors and service names currently exposed by mutable services.
    Set<FileDescriptor> serverFileDescriptors = new HashSet<>();
    Set<String> serverServiceNames = new HashSet<>();
    List<ServerServiceDefinition> serverMutableServices = server.getMutableServices();
    for (ServerServiceDefinition mutableService : serverMutableServices) {
      io.grpc.ServiceDescriptor serviceDescriptor = mutableService.getServiceDescriptor();
      // Only services that expose proto file descriptors participate in reflection.
      if (serviceDescriptor.getSchemaDescriptor() instanceof ProtoFileDescriptorSupplier) {
        String serviceName = serviceDescriptor.getName();
        FileDescriptor fileDescriptor =
            ((ProtoFileDescriptorSupplier) serviceDescriptor.getSchemaDescriptor())
                .getFileDescriptor();
        serverFileDescriptors.add(fileDescriptor);
        serverServiceNames.add(serviceName);
      }
    }
    // Rebuild only when the descriptors or names differ from what the cached index holds.
    FileDescriptorIndex mutableServicesIndex = serverReflectionIndex.getMutableServicesIndex();
    if (!mutableServicesIndex.getServiceFileDescriptors().equals(serverFileDescriptors)
        || !mutableServicesIndex.getServiceNames().equals(serverServiceNames)) {
      serverReflectionIndex =
          new ServerReflectionIndex(server.getImmutableServices(), serverMutableServices);
    }
    return serverReflectionIndex;
  }
}
Checks for updates to the server's mutable services and updates the index if any changes are detected. A change is any addition or removal in the set of file descriptors attached to the mutable services, or a change in the service names.
18,704
/**
 * Adds a listener that will be notified when the context becomes cancelled. If the context
 * is already cancelled, the listener is delivered immediately. The parent listener is
 * registered with the cancellable ancestor only when the listener list is first created.
 */
public void addListener(final CancellationListener cancellationListener, final Executor executor) {
  checkNotNull(cancellationListener, "cancellationListener");
  checkNotNull(executor, "executor");
  if (!canBeCancelled()) {
    return;
  }
  ExecutableListener executableListener = new ExecutableListener(executor, cancellationListener);
  synchronized (this) {
    if (isCancelled()) {
      executableListener.deliver();
      return;
    }
    if (listeners == null) {
      // First listener: create the list and hook ourselves up to the ancestor.
      listeners = new ArrayList<>();
      listeners.add(executableListener);
      if (cancellableAncestor != null) {
        cancellableAncestor.addListener(parentListener, DirectExecutor.INSTANCE);
      }
    } else {
      listeners.add(executableListener);
    }
  }
}
Add a listener that will be notified when the context becomes cancelled .
18,705
/**
 * Notifies all listeners that this context has been cancelled and immediately releases any
 * reference to them so that they may be garbage collected. Ordinary listeners are delivered
 * before ParentListeners, preserving the original two-pass order.
 */
void notifyAndClearListeners() {
  if (!canBeCancelled()) {
    return;
  }
  ArrayList<ExecutableListener> pending;
  synchronized (this) {
    if (listeners == null) {
      return;
    }
    pending = listeners;
    listeners = null;
  }
  // Pass 1: everything except ParentListeners.
  for (ExecutableListener executable : pending) {
    if (!(executable.listener instanceof ParentListener)) {
      executable.deliver();
    }
  }
  // Pass 2: ParentListeners last.
  for (ExecutableListener executable : pending) {
    if (executable.listener instanceof ParentListener) {
      executable.deliver();
    }
  }
  if (cancellableAncestor != null) {
    cancellableAncestor.removeListener(parentListener);
  }
}
Notify all listeners that this context has been cancelled and immediately release any reference to them so that they may be garbage collected .
18,706
/**
 * If the ancestry chain length hits the warning threshold, logs an error together with a
 * stack trace of the offending call site.
 */
private static void validateGeneration(int generation) {
  if (generation != CONTEXT_DEPTH_WARN_THRESH) {
    return;
  }
  String message =
      "Context ancestry chain length is abnormally long. "
          + "This suggests an error in application code. "
          + "Length exceeded: " + CONTEXT_DEPTH_WARN_THRESH;
  // new Exception() captures the current stack trace for the log record.
  log.log(Level.SEVERE, message, new Exception());
}
If the ancestry chain length is unreasonably long then print an error to the log and record the stack trace .
18,707
/**
 * Upgrades the given Socket to an SSLSocket: applies the ConnectionSpec, negotiates an
 * application protocol, and verifies the peer's hostname.
 *
 * @param hostnameVerifier verifier to use; defaults to OkHostnameVerifier when null
 * @throws SSLPeerUnverifiedException if hostname verification fails
 * @throws IOException on socket creation/negotiation failure
 */
public static SSLSocket upgrade(
    SSLSocketFactory sslSocketFactory,
    HostnameVerifier hostnameVerifier,
    Socket socket,
    String host,
    int port,
    ConnectionSpec spec) throws IOException {
  Preconditions.checkNotNull(sslSocketFactory, "sslSocketFactory");
  Preconditions.checkNotNull(socket, "socket");
  Preconditions.checkNotNull(spec, "spec");
  // autoClose=true: closing the SSLSocket also closes the underlying socket.
  SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket(socket, host, port, true);
  spec.apply(sslSocket, false);
  // TLS extension support decides whether we advertise our protocol list during negotiation.
  String negotiatedProtocol = OkHttpProtocolNegotiator.get().negotiate(
      sslSocket, host, spec.supportsTlsExtensions() ? TLS_PROTOCOLS : null);
  Preconditions.checkState(
      TLS_PROTOCOLS.contains(Protocol.get(negotiatedProtocol)),
      "Only " + TLS_PROTOCOLS + " are supported, but negotiated protocol is %s",
      negotiatedProtocol);
  if (hostnameVerifier == null) {
    hostnameVerifier = OkHostnameVerifier.INSTANCE;
  }
  // canonicalizeHost strips the brackets from IPv6 literals before verification.
  if (!hostnameVerifier.verify(canonicalizeHost(host), sslSocket.getSession())) {
    throw new SSLPeerUnverifiedException("Cannot verify hostname: " + host);
  }
  return sslSocket;
}
Upgrades the given Socket to be an SSLSocket.
18,708
/**
 * Converts a host from URI form to X.509 form by stripping the square brackets that wrap an
 * IPv6 literal such as "[::1]". Other hosts are returned unchanged.
 */
static String canonicalizeHost(String host) {
  final boolean bracketedIpv6 = host.startsWith("[") && host.endsWith("]");
  return bracketedIpv6 ? host.substring(1, host.length() - 1) : host;
}
Converts a host from URI to X509 format .
18,709
/**
 * Returns {@code base} right-padded with {@code pad} to a length of {@code len} characters.
 * If {@code base} is longer than {@code len}, the return value is shortened to {@code len}
 * characters.
 *
 * <p>Fix: the original looped forever when {@code pad} was the empty string (the pad cursor
 * advanced by {@code pad.length() == 0}). An empty pad now returns {@code base} unchanged,
 * since no padding is possible.
 *
 * @param base string to pad (must be non-null)
 * @param len  desired length; negative yields null, zero yields ""
 * @param pad  padding string, applied cyclically
 */
public static String rpad(String base, Integer len, String pad) {
  if (len < 0) {
    return null;
  } else if (len == 0) {
    return "";
  }
  if (base.length() >= len) {
    // Shorten to exactly len characters.
    return base.substring(0, len);
  }
  if (pad == null || pad.isEmpty()) {
    // Nothing to pad with; avoid the original infinite loop.
    return base;
  }
  final StringBuilder padded = new StringBuilder(len).append(base);
  while (padded.length() < len) {
    // Append the pad, truncated so we never exceed len.
    padded.append(pad, 0, Math.min(pad.length(), len - padded.length()));
  }
  return padded.toString();
}
Returns the string right-padded with the string pad to a length of len characters. If base is longer than len, the return value is shortened to len characters.
18,710
/**
 * Reads through the source line by line, parsing each line into key/value pairs with the
 * parser and adding them to {@code map}. Returns counts of lines read and entries added.
 */
public PopulateResult populate(final ByteSource source, final Map<K, V> map) throws IOException {
  return source.asCharSource(StandardCharsets.UTF_8).readLines(
      new LineProcessor<PopulateResult>() {
        private int lineCount = 0;
        private int entryCount = 0;

        @Override
        public boolean processLine(String line) {
          // Guard against counter overflow before processing another line.
          if (lineCount == Integer.MAX_VALUE) {
            throw new ISE("Cannot read more than %,d lines", Integer.MAX_VALUE);
          }
          final Map<K, V> parsed = parser.parseToMap(line);
          map.putAll(parsed);
          lineCount++;
          entryCount += parsed.size();
          return true;
        }

        @Override
        public PopulateResult getResult() {
          return new PopulateResult(lineCount, entryCount);
        }
      });
}
Read through the source line by line and populate map with the data returned from the parser
18,711
/**
 * Closes both resources via try-with-resources, which guarantees the second close still runs
 * if the first throws, and attaches any secondary exception as suppressed. Resources are
 * closed in reverse declaration order, so {@code first} is closed before {@code second}.
 */
public static void closeBoth(Closeable first, Closeable second) throws IOException {
  try (Closeable unusedOuter = second;
       Closeable unusedInner = first) {
    // Intentionally empty: the work happens when the try block exits and closes both.
  }
}
Call this method to close two resources instead of hand-writing nested try/finally blocks; it closes both resources even if one close throws.
18,712
/**
 * Maps time t into the last period (of length periodMillis, aligned to originMillis) ending
 * within dataInterval, returning the millisecond offset to add to t to get the mapped time.
 */
protected long computeOffset(final long t, final DateTimeZone tz) {
  // Candidate start of the last period inside dataInterval.
  long start = dataInterval.getEndMillis() - periodMillis;
  // Distance from start back to the nearest origin-aligned period boundary.
  long startOffset = start % periodMillis - originMillis % periodMillis;
  if (startOffset < 0) {
    startOffset += periodMillis;
  }
  start -= startOffset;
  // Position of t within its own origin-aligned period.
  long tOffset = t % periodMillis - originMillis % periodMillis;
  if (tOffset < 0) {
    tOffset += periodMillis;
  }
  // Absolute mapped time = period start + t's in-period position.
  tOffset += start;
  // Correct for any timezone-offset difference (e.g. across a DST change)
  // between t and the mapped time.
  return tOffset - t - (tz.getOffset(tOffset) - tz.getOffset(t));
}
Map time t into the last period ending within dataInterval
18,713
/**
 * Returns an Iterable over every segment in every data source stored on this server.
 * Iteration order is unspecified; a fresh stream is built on each call to iterator().
 */
public Iterable<DataSegment> iterateAllSegments() {
  return () -> dataSources
      .values()
      .stream()
      .flatMap(ds -> ds.getSegments().stream())
      .iterator();
}
Returns an iterable to go over all segments in all data sources stored on this DruidServer . The order in which segments are iterated is unspecified .
18,714
/**
 * Wraps a formatted error message in a single-entry map keyed by "error", so that it
 * serializes to valid JSON in a response body.
 */
public static Map<String, String> jsonize(String msgFormat, Object... args) {
  final String message = StringUtils.nonStrictFormat(msgFormat, args);
  return ImmutableMap.of("error", message);
}
Converts String errorMsg into a Map so that it produces valid json on serialization into response .
18,715
/**
 * Returns an array with the content between the buffer's position and limit. When the backing
 * array is exactly that window, the array itself is returned without copying. The buffer's
 * position and limit are never modified.
 */
private static byte[] toByteArray(final ByteBuffer buffer) {
  final boolean backingArrayIsExactWindow =
      buffer.hasArray()
          && buffer.arrayOffset() == 0
          && buffer.position() == 0
          && buffer.array().length == buffer.limit();
  if (backingArrayIsExactWindow) {
    return buffer.array();
  }
  // Read through a duplicate so the caller's position/limit stay untouched.
  final byte[] copy = new byte[buffer.remaining()];
  buffer.duplicate().get(copy);
  return copy;
}
Returns an array with the content between the position and limit of buffer . This may be the buffer s backing array itself . Does not modify position or limit of the buffer .
18,716
/**
 * Averages the cost from CostBalancerStrategy over the server's segment count, then weights
 * it by the server's disk usage ratio so hosts converge to the same percent utilization.
 * Infinite costs are passed through unchanged.
 */
protected double computeCost(final DataSegment proposalSegment, final ServerHolder server, final boolean includeCurrentServer) {
  final double rawCost = super.computeCost(proposalSegment, server, includeCurrentServer);
  if (rawCost == Double.POSITIVE_INFINITY) {
    return rawCost;
  }
  // Normalize by segment count; an empty server divides by 1.
  final int segmentCount = server.getServer().getLazyAllSegments().size();
  final int divisor = segmentCount > 0 ? segmentCount : 1;
  final double usageRatio = (double) server.getSizeUsed() / (double) server.getServer().getMaxSize();
  return (rawCost / divisor) * usageRatio;
}
Averages the cost obtained from CostBalancerStrategy . Also the costs are weighted according to their usage ratios . This ensures that all the hosts will have the same % disk utilization .
18,717
/**
 * Performs a Kerberos SPNEGO challenge against the given server and returns the
 * base64-encoded initial security-context token.
 *
 * <p>This method always needs to be called within a doAs block so that the client's TGT
 * credentials can be read from the Subject.
 *
 * <p>Fix: the original only disposed the GSSContext on the happy path; if
 * {@code initSecContext} threw, native GSS resources leaked. Disposal now happens in a
 * {@code finally} block.
 *
 * @throws AuthenticationException if the GSS handshake cannot be initiated
 */
public static String kerberosChallenge(String server) throws AuthenticationException {
  kerberosLock.lock();
  try {
    Oid mechOid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
    GSSManager manager = GSSManager.getInstance();
    // GSS name of the HTTP service principal on the target host.
    GSSName serverName = manager.createName("HTTP@" + server, GSSName.NT_HOSTBASED_SERVICE);
    GSSContext gssContext = manager.createContext(
        serverName.canonicalize(mechOid), mechOid, null, GSSContext.DEFAULT_LIFETIME);
    try {
      gssContext.requestMutualAuth(true);
      gssContext.requestCredDeleg(true);
      byte[] inToken = new byte[0];
      byte[] outToken = gssContext.initSecContext(inToken, 0, inToken.length);
      return new String(StringUtils.encodeBase64(outToken), StandardCharsets.US_ASCII);
    } finally {
      // Release native GSS resources even when initSecContext fails.
      gssContext.dispose();
    }
  } catch (GSSException | IllegalAccessException | NoSuchFieldException | ClassNotFoundException e) {
    throw new AuthenticationException(e);
  } finally {
    kerberosLock.unlock();
  }
}
This method always needs to be called within a doAs block so that the client's TGT credentials can be read from the Subject.
18,718
/**
 * Creates a DruidQueryRel representing a full scan of the given table, using the scan's
 * cluster with the Convention.NONE trait set and a fresh PartialDruidQuery.
 */
public static DruidQueryRel fullScan(final LogicalTableScan scanRel, final RelOptTable table, final DruidTable druidTable, final QueryMaker queryMaker) {
  return new DruidQueryRel(
      scanRel.getCluster(),
      scanRel.getCluster().traitSetOf(Convention.NONE),
      table,
      druidTable,
      queryMaker,
      PartialDruidQuery.create(scanRel)
  );
}
Create a DruidQueryRel representing a full scan .
18,719
/**
 * Creates a wrapper object around an HLL sketch contained within a buffer. The position and
 * limit of the buffer may be changed; duplicate the buffer first if that is undesirable.
 * The buffer size pattern selects the on-disk format version.
 */
public static HyperLogLogCollector makeCollector(ByteBuffer buffer) {
  final int remaining = buffer.remaining();
  // Version-zero sketches are either a multiple of 3 bytes or exactly 1027 bytes.
  final boolean isVersionZero = remaining % 3 == 0 || remaining == 1027;
  return isVersionZero
         ? new VersionZeroHyperLogLogCollector(buffer)
         : new VersionOneHyperLogLogCollector(buffer);
}
Create a wrapper object around an HLL sketch contained within a buffer . The position and limit of the buffer may be changed ; if you do not want this to happen you can duplicate the buffer before passing it in .
18,720
/**
 * Merges {@code byteToAdd} (two 4-bit registers, shifted down by {@code offsetDiff}) into the
 * byte at {@code position} in {@code storageBuffer}, keeping the maximum of each nibble.
 *
 * @return the number of registers (0, 1, or 2) that changed from zero to non-zero
 */
private static short mergeAndStoreByteRegister(
    final ByteBuffer storageBuffer,
    final int position,
    final int offsetDiff,
    final byte byteToAdd) {
  if (byteToAdd == 0) {
    return 0;
  }
  final byte currVal = storageBuffer.get(position);
  // Split the current byte into its two 4-bit registers.
  final int upperNibble = currVal & 0xf0;
  final int lowerNibble = currVal & 0x0f;
  // Adjust the incoming registers down by the offset difference between the sketches.
  final int otherUpper = (byteToAdd & 0xf0) - (offsetDiff << bitsPerBucket);
  final int otherLower = (byteToAdd & 0x0f) - offsetDiff;
  // Keep the larger of each register.
  final int newUpper = Math.max(upperNibble, otherUpper);
  final int newLower = Math.max(lowerNibble, otherLower);
  storageBuffer.put(position, (byte) ((newUpper | newLower) & 0xff));
  short numNoLongerZero = 0;
  if (upperNibble == 0 && newUpper > 0) {
    ++numNoLongerZero;
  }
  if (lowerNibble == 0 && newLower > 0) {
    ++numNoLongerZero;
  }
  return numNoLongerZero;
}
Returns the number of registers that are no longer zero after the value was added
18,721
/**
 * Authorizes the given action on a task's datasource, throwing ForbiddenException when the
 * caller is not allowed.
 *
 * @return the (allowed) Access result
 */
public static Access datasourceAuthorizationCheck(final HttpServletRequest req, Action action, String datasource, AuthorizerMapper authorizerMapper) {
  final Resource resource = new Resource(datasource, ResourceType.DATASOURCE);
  final Access access =
      AuthorizationUtils.authorizeResourceAction(req, new ResourceAction(resource, action), authorizerMapper);
  if (!access.isAllowed()) {
    throw new ForbiddenException(access.toString());
  }
  return access;
}
Authorizes an action to be performed on a task's datasource.
18,722
/**
 * Snapshots the current lookup state plus pending load/drop work. Both in-flight and queued
 * notices are folded into the toLoad/toDrop sets; updateToLoadAndDrop is expected to keep
 * the two sets disjoint.
 */
LookupsState<LookupExtractorFactoryContainer> getAllLookupsState() {
  Preconditions.checkState(lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS));
  final LookupUpdateState state = stateRef.get();
  final Map<String, LookupExtractorFactoryContainer> toLoad = new HashMap<>();
  final Set<String> toDrop = new HashSet<>();
  updateToLoadAndDrop(state.noticesBeingHandled, toLoad, toDrop);
  updateToLoadAndDrop(state.pendingNotices, toLoad, toDrop);
  return new LookupsState<>(state.lookupMap, toLoad, toDrop);
}
Note that this should ensure that toLoad and toDrop are disjoint .
18,723
/**
 * Gets the lookup list, preferring the coordinator when sync-on-startup is enabled and
 * falling back to the local snapshot when the coordinator is unavailable or sync is disabled.
 */
private List<LookupBean> getLookupsList() {
  if (!lookupConfig.getEnableLookupSyncOnStartup()) {
    return getLookupListFromSnapshot();
  }
  List<LookupBean> lookups =
      getLookupListFromCoordinator(lookupListeningAnnouncerConfig.getLookupTier());
  if (lookups == null) {
    LOG.info("Coordinator is unavailable. Loading saved snapshot instead");
    lookups = getLookupListFromSnapshot();
  }
  return lookups;
}
Gets the lookup list from coordinator or from snapshot .
18,724
/**
 * Fetches the lookup list for a tier from the coordinator, retrying with a configurable
 * delay between attempts. Returns null if the coordinator is unavailable or any error occurs.
 */
private List<LookupBean> getLookupListFromCoordinator(String tier) {
  try {
    final MutableBoolean firstAttempt = new MutableBoolean(true);
    final Map<String, LookupExtractorFactoryContainer> lookupMap = RetryUtils.retry(
        () -> {
          // Sleep only between retries, never before the first attempt.
          if (firstAttempt.isTrue()) {
            firstAttempt.setValue(false);
          } else if (lookupConfig.getCoordinatorRetryDelay() > 0) {
            Thread.sleep(lookupConfig.getCoordinatorRetryDelay());
          }
          return tryGetLookupListFromCoordinator(tier);
        },
        e -> true,
        lookupConfig.getCoordinatorFetchRetries()
    );
    if (lookupMap == null) {
      return null;
    }
    final List<LookupBean> beans = new ArrayList<>();
    lookupMap.forEach((name, container) -> beans.add(new LookupBean(name, null, container)));
    return beans;
  } catch (Exception e) {
    LOG.error(e, "Error while trying to get lookup list from coordinator for tier[%s]", tier);
    return null;
  }
}
Returns a list of lookups from the coordinator if the coordinator is available. If it's not available, returns null.
18,725
/**
 * Returns the lookup list from the local snapshot when a LookupSnapshotTaker is configured;
 * otherwise returns null.
 */
private List<LookupBean> getLookupListFromSnapshot() {
  if (lookupSnapshotTaker == null) {
    return null;
  }
  return lookupSnapshotTaker.pullExistingSnapshot(lookupListeningAnnouncerConfig.getLookupTier());
}
Returns a list of lookups from the snapshot if the LookupSnapshotTaker is configured. If it's not available, returns null.
18,726
/**
 * Factory method for DoublesColumn: picks the null-aware variant only when the bitmap marks
 * at least one null row.
 */
public static DoublesColumn create(ColumnarDoubles column, ImmutableBitmap nullValueBitmap) {
  return nullValueBitmap.isEmpty()
         ? new DoublesColumn(column)
         : new DoublesColumnWithNulls(column, nullValueBitmap);
}
Factory method to create DoublesColumn .
18,727
/**
 * Returns the workers that are not assigned to any affinity pool at all, as an immutable
 * snapshot of the filtered map.
 */
private static ImmutableMap<String, ImmutableWorkerInfo> getNonAffinityWorkers(final AffinityConfig affinityConfig, final Map<String, ImmutableWorkerInfo> workerMap) {
  return ImmutableMap.copyOf(
      Maps.filterKeys(workerMap, host -> !affinityConfig.getAffinityWorkers().contains(host))
  );
}
Return workers not assigned to any affinity pool at all .
18,728
/**
 * Translates a task priority into a thread-factory friendly thread priority, centered on
 * {@link Thread#NORM_PRIORITY} and clamped to the valid
 * [{@link Thread#MIN_PRIORITY}, {@link Thread#MAX_PRIORITY}] range.
 *
 * <p>Fix: the original clamped on {@code taskPriority > Thread.MAX_PRIORITY}, so inputs in
 * (MAX_PRIORITY - NORM_PRIORITY, MAX_PRIORITY] (e.g. 6..10) produced invalid thread
 * priorities above MAX_PRIORITY (e.g. 11). The clamp is now applied before the addition in a
 * way that also avoids integer overflow.
 *
 * @param taskPriority task priority; 0 means "use the default" and yields null
 * @return a valid thread priority, or null when taskPriority is 0
 */
public static Integer getThreadPriorityFromTaskPriority(final int taskPriority) {
  if (taskPriority == 0) {
    return null;
  }
  // Clamp before adding so taskPriority near Integer.MAX_VALUE cannot overflow.
  if (taskPriority > Thread.MAX_PRIORITY - Thread.NORM_PRIORITY) {
    return Thread.MAX_PRIORITY;
  }
  final int finalPriority = taskPriority + Thread.NORM_PRIORITY;
  if (finalPriority < Thread.MIN_PRIORITY) {
    return Thread.MIN_PRIORITY;
  }
  return finalPriority;
}
Return the thread - factory friendly priorities from the task priority
18,729
/**
 * Stops task monitoring and asks the indexing service to kill every still-running subtask,
 * updating the running/killed counters as entries are removed.
 */
public void stop() {
  synchronized (startStopLock) {
    running = false;
    taskStatusChecker.shutdownNow();
    if (numRunningTasks > 0) {
      final Iterator<MonitorEntry> entries = runningTasks.values().iterator();
      while (entries.hasNext()) {
        final MonitorEntry entry = entries.next();
        entries.remove();
        final String subtaskId = entry.runningTask.getId();
        log.info("Request to kill subtask[%s]", subtaskId);
        indexingServiceClient.killTask(subtaskId);
        numRunningTasks--;
        numKilledTasks++;
      }
      // The counter should have reached zero; anything else indicates bookkeeping drift.
      if (numRunningTasks > 0) {
        log.warn(
            "Inconsistent state: numRunningTasks[%d] is still not zero after trying to kill all running tasks.",
            numRunningTasks
        );
      }
    }
    log.info("Stopped taskMonitor");
  }
}
Stop task monitoring and kill all running tasks .
18,730
/**
 * Creates a CombiningIterable around a MergeIterable such that equivalent elements (per the
 * comparator) are collapsed, keeping the first non-null occurrence.
 */
@SuppressWarnings("unchecked")
public static <InType> CombiningIterable<InType> createSplatted(Iterable<? extends Iterable<InType>> in, Comparator<InType> comparator) {
  // Keep the first element of each run of equivalent values.
  final BinaryFn<InType, InType, InType> keepFirst = (lhs, rhs) -> lhs == null ? rhs : lhs;
  return create(
      new MergeIterable<>(comparator, (Iterable<Iterable<InType>>) in),
      comparator,
      keepFirst
  );
}
Creates a CombiningIterable around a MergeIterable such that equivalent elements are thrown away
18,731
/**
 * Returns a lazy collection over all segments in all data sources stored on this server;
 * the stream is rebuilt per traversal and the order of segments is unspecified.
 */
public Collection<DataSegment> getLazyAllSegments() {
  return CollectionUtils.createLazyCollectionFromStream(
      () -> dataSources.values().stream().flatMap(ds -> ds.getSegments().stream()),
      totalSegments
  );
}
Returns a lazy collection with all segments in all data sources stored on this ImmutableDruidServer . The order of segments in this collection is unspecified .
18,732
/**
 * Gets a connection or throws NoSuchConnectionException if it does not exist. Also refreshes
 * the idle-timeout timer by scheduling a fresh close task.
 */
private DruidConnection getDruidConnection(final String connectionId) {
  final DruidConnection connection = connections.get(connectionId);
  if (connection == null) {
    throw new NoSuchConnectionException(connectionId);
  }
  final long idleTimeoutMillis =
      new Interval(DateTimes.nowUtc(), config.getConnectionIdleTimeout()).toDurationMillis();
  return connection.sync(
      exec.schedule(
          () -> {
            log.debug("Connection[%s] timed out.", connectionId);
            closeConnection(new ConnectionHandle(connectionId));
          },
          idleTimeoutMillis,
          TimeUnit.MILLISECONDS
      )
  );
}
Gets a connection, or throws an exception if it doesn't exist. Also refreshes the timeout timer.
18,733
/**
 * Returns debugging information for printing; not intended for any other purpose. Durations
 * are reported in whole seconds, or sentinel strings when a sync has never happened.
 */
public Map<String, Object> getDebugInfo() {
  final long now = System.currentTimeMillis();
  final Object notSyncedFor =
      lastSyncTime == 0 ? "Never Synced" : (now - lastSyncTime) / 1000;
  final Object notSuccessfullySyncedFor =
      lastSuccessfulSyncTime == 0 ? "Never Successfully Synced" : (now - lastSuccessfulSyncTime) / 1000;
  return ImmutableMap.of(
      "notSyncedForSecs", notSyncedFor,
      "notSuccessfullySyncedFor", notSuccessfullySyncedFor,
      "consecutiveFailedAttemptCount", consecutiveFailedAttemptCount,
      "syncScheduled", startStopLock.isStarted()
  );
}
This method returns debugging information for printing; it must not be used for any other purpose.
18,734
/**
 * Registers a monitor for new tasks. When new tasks appear, the worker node announces a
 * status to indicate it has started the task; when the task is complete, the worker node
 * updates the status.
 *
 * <p>Fix: removed the redundant second {@code started = true} assignment inside the try
 * block — the flag is already set right after the precondition check, so the duplicate had
 * no effect.
 *
 * @throws Exception if the run listener or path cache fail to start
 */
public void start() throws Exception {
  super.start();
  synchronized (lifecycleLock) {
    Preconditions.checkState(!started, "already started");
    started = true;
    try {
      // Clean up leftovers from a previous incarnation before watching for new work.
      cleanupStaleAnnouncements();
      registerRunListener();
      pathChildrenCache.start();
      log.info("Started WorkerTaskMonitor.");
    } catch (InterruptedException e) {
      throw e;
    } catch (Exception e) {
      log.makeAlert(e, "Exception starting WorkerTaskMonitor").emit();
      throw e;
    }
  }
}
Register a monitor for new tasks . When new tasks appear the worker node announces a status to indicate it has started the task . When the task is complete the worker node updates the status .
18,735
/**
 * Builds a CaffeineCache from the given config with stats recording enabled, optional
 * expire-after-access, a byte-weight cap (defaulting to a fraction of the heap), and the
 * supplied executor. Used in testing.
 */
public static CaffeineCache create(final CaffeineCacheConfig config, final Executor executor) {
  Caffeine<Object, Object> builder = Caffeine.newBuilder().recordStats();
  if (config.getExpireAfter() >= 0) {
    builder.expireAfterAccess(config.getExpireAfter(), TimeUnit.MILLISECONDS);
  }
  // Explicit size wins; otherwise cap at min(MAX_DEFAULT_BYTES, 10% of max heap).
  final long maxWeightBytes = config.getSizeInBytes() >= 0
      ? config.getSizeInBytes()
      : Math.min(MAX_DEFAULT_BYTES, JvmUtils.getRuntimeInfo().getMaxHeapSizeBytes() / 10);
  builder.maximumWeight(maxWeightBytes);
  builder
      .weigher(
          (NamedKey key, byte[] value) ->
              value.length + key.key.length + key.namespace.length() * Character.BYTES + FIXED_COST)
      .executor(executor);
  return new CaffeineCache(builder.build(), config);
}
Used in testing
18,736
/**
 * Evicts all entries in the given namespace when evict-on-close is enabled. This is
 * intentionally racy with concurrent puts: any values missed here will be evicted later
 * anyway, so no extra synchronization is needed.
 */
public void close(String namespace) {
  if (!config.isEvictOnClose()) {
    return;
  }
  cache.asMap().keySet().removeIf(key -> key.namespace.equals(namespace));
}
This is completely racy with put . Any values missed should be evicted later anyways . So no worries .
18,737
/**
 * Checks a single resource-action using the authorization fields from the request; a
 * convenience wrapper around the batch check.
 */
public static Access authorizeResourceAction(final HttpServletRequest request, final ResourceAction resourceAction, final AuthorizerMapper authorizerMapper) {
  return authorizeAllResourceActions(
      request,
      Collections.singletonList(resourceAction),
      authorizerMapper
  );
}
Check a resource - action using the authorization fields from the request .
18,738
/**
 * Returns the authentication information previously attached to the request, failing fast if
 * no authenticator populated it.
 */
public static AuthenticationResult authenticationResultFromRequest(final HttpServletRequest request) {
  final Object attribute = request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT);
  if (attribute == null) {
    throw new ISE("Null authentication result");
  }
  return (AuthenticationResult) attribute;
}
Returns the authentication information for a request .
18,739
/**
 * Checks a list of resource-actions to be performed by the identity represented by
 * authenticationResult, short-circuiting on the first denial. Duplicate resource-actions are
 * checked only once.
 */
public static Access authorizeAllResourceActions(final AuthenticationResult authenticationResult, final Iterable<ResourceAction> resourceActions, final AuthorizerMapper authorizerMapper) {
  final Authorizer authorizer = authorizerMapper.getAuthorizer(authenticationResult.getAuthorizerName());
  if (authorizer == null) {
    throw new ISE("No authorizer found with name: [%s].", authenticationResult.getAuthorizerName());
  }
  final Set<ResourceAction> seen = new HashSet<>();
  for (ResourceAction resourceAction : resourceActions) {
    // add() returns false for a duplicate we already allowed; skip re-checking it.
    if (!seen.add(resourceAction)) {
      continue;
    }
    final Access access =
        authorizer.authorize(authenticationResult, resourceAction.getResource(), resourceAction.getAction());
    if (!access.isAllowed()) {
      return access;
    }
  }
  return Access.OK;
}
Check a list of resource - actions to be performed by the identity represented by authenticationResult .
18,740
/**
 * Checks a list of resource-actions to be performed as a result of an HTTP request.
 * Unsecured paths pass immediately; a request may be checked at most once, and the outcome
 * is recorded as a request attribute.
 */
public static Access authorizeAllResourceActions(final HttpServletRequest request, final Iterable<ResourceAction> resourceActions, final AuthorizerMapper authorizerMapper) {
  if (request.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH) != null) {
    return Access.OK;
  }
  if (request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED) != null) {
    throw new ISE("Request already had authorization check.");
  }
  final Access access = authorizeAllResourceActions(
      authenticationResultFromRequest(request),
      resourceActions,
      authorizerMapper
  );
  request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, access.isAllowed());
  return access;
}
Check a list of resource - actions to be performed as a result of an HTTP request .
18,741
/**
 * Given a map of resource lists, filters each list by applying the resource-action generator
 * to each item and keeping only the authorized ones. Null lists are skipped and keys whose
 * filtered list is empty are omitted. Marks the request as authorization-checked.
 */
public static <KeyType, ResType> Map<KeyType, List<ResType>> filterAuthorizedResources(final HttpServletRequest request, final Map<KeyType, List<ResType>> unfilteredResources, final Function<? super ResType, Iterable<ResourceAction>> resourceActionGenerator, final AuthorizerMapper authorizerMapper) {
  if (request.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH) != null) {
    return unfilteredResources;
  }
  if (request.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED) != null) {
    throw new ISE("Request already had authorization check.");
  }
  final AuthenticationResult authenticationResult =
      AuthorizationUtils.authenticationResultFromRequest(request);
  final Map<KeyType, List<ResType>> filtered = new HashMap<>();
  for (Map.Entry<KeyType, List<ResType>> entry : unfilteredResources.entrySet()) {
    final List<ResType> candidates = entry.getValue();
    if (candidates == null) {
      continue;
    }
    final List<ResType> allowed = Lists.newArrayList(
        AuthorizationUtils.filterAuthorizedResources(
            authenticationResult, candidates, resourceActionGenerator, authorizerMapper));
    if (allowed.size() > 0) {
      filtered.put(entry.getKey(), allowed);
    }
  }
  request.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true);
  return filtered;
}
Given a map of resource lists filter each resources list by applying the resource action generator to each item in each resource list .
18,742
/**
 * Returns the common TimestampSpec when every non-null spec in the list is equal to the
 * first; otherwise returns null. Also returns null for a null or empty input list.
 */
public static TimestampSpec mergeTimestampSpec(List<TimestampSpec> toMerge) {
  if (toMerge == null || toMerge.size() == 0) {
    return null;
  }
  final TimestampSpec first = toMerge.get(0);
  for (int i = 1; i < toMerge.size(); i++) {
    final TimestampSpec other = toMerge.get(i);
    if (other == null) {
      continue;
    }
    if (!Objects.equals(first, other)) {
      return null;
    }
  }
  return first;
}
Returns the common TimestampSpec when all non-null specs agree; otherwise returns null. This can be improved in the future but is good enough for most use-cases.
18,743
/**
 * Factory method for LongsColumn: picks the null-aware variant only when the bitmap marks at
 * least one null row.
 */
public static LongsColumn create(ColumnarLongs column, ImmutableBitmap nullValueBitmap) {
  return nullValueBitmap.isEmpty()
         ? new LongsColumn(column)
         : new LongsColumnWithNulls(column, nullValueBitmap);
}
Factory method to create LongsColumn .
18,744
/**
 * Creates the znode if it does not already exist. If it does exist, this does nothing — in
 * particular, the existing znode may have a different payload or create mode. A concurrent
 * creation between the existence check and the create is tolerated.
 */
public static void createIfNotExists(CuratorFramework curatorFramework, String path, CreateMode mode, byte[] rawBytes, int maxZnodeBytes) throws Exception {
  verifySize(path, rawBytes, maxZnodeBytes);
  if (curatorFramework.checkExists().forPath(path) != null) {
    return;
  }
  try {
    curatorFramework.create().creatingParentsIfNeeded().withMode(mode).forPath(path, rawBytes);
  } catch (KeeperException.NodeExistsException e) {
    // Lost the race with another creator; the node exists, which is all we need.
    log.info("Skipping create path[%s], since it already exists.", path);
  }
}
Create znode if it does not already exist . If it does already exist this does nothing . In particular the existing znode may have a different payload or create mode .
18,745
/**
 * Unannounces a given sink and removes all local references to it. It is important that this
 * is only called from the single-threaded mergeExecutor, since otherwise chaos may ensue if
 * merged segments are deleted while being created.
 *
 * @param truncatedTime sink key (truncated timestamp) to abandon
 * @param sink the sink to unannounce and remove
 */
protected void abandonSegment(final long truncatedTime, final Sink sink) {
  if (sinks.containsKey(truncatedTime)) {
    try {
      // Stop advertising the segment before tearing down local state.
      segmentAnnouncer.unannounceSegment(sink.getSegment());
      removeSegment(sink, computePersistDir(schema, sink.getInterval()));
      log.info("Removing sinkKey %d for segment %s", truncatedTime, sink.getSegment().getId());
      sinks.remove(truncatedTime);
      metrics.setSinkCount(sinks.size());
      sinkTimeline.remove(
          sink.getInterval(), sink.getVersion(), new SingleElementPartitionChunk<>(sink));
      // Release per-hydrant cache entries and drop each hydrant's segment reference.
      for (FireHydrant hydrant : sink) {
        cache.close(SinkQuerySegmentWalker.makeHydrantCacheIdentifier(hydrant));
        hydrant.swapSegment(null);
      }
      // Wake up any thread waiting on handoff so it can observe the removal.
      synchronized (handoffCondition) {
        handoffCondition.notifyAll();
      }
    } catch (Exception e) {
      log.makeAlert(e, "Unable to abandon old segment for dataSource[%s]", schema.getDataSource())
          .addData("interval", sink.getInterval())
          .emit();
    }
  }
}
Unannounces a given sink and removes all local references to it . It is important that this is only called from the single - threaded mergeExecutor since otherwise chaos may ensue if merged segments are deleted while being created .
18,746
/**
 * Persists the given hydrant to disk and returns the number of rows persisted. Synchronizes
 * on the hydrant so a concurrent swap cannot race with the persist; a hydrant that has
 * already swapped is skipped and 0 is returned.
 */
protected int persistHydrant(FireHydrant indexToPersist, DataSchema schema, Interval interval, Map<String, Object> metadataElems) {
  synchronized (indexToPersist) {
    // Someone else already persisted and swapped this hydrant; nothing to do.
    if (indexToPersist.hasSwapped()) {
      log.info(
          "DataSource[%s], Interval[%s], Hydrant[%s] already swapped. Ignoring request to persist.",
          schema.getDataSource(), interval, indexToPersist);
      return 0;
    }
    log.info(
        "DataSource[%s], Interval[%s], Metadata [%s] persisting Hydrant[%s]",
        schema.getDataSource(), interval, metadataElems, indexToPersist);
    try {
      int numRows = indexToPersist.getIndex().size();
      final IndexSpec indexSpec = config.getIndexSpec();
      // Attach the provided metadata to the in-memory index before writing it out.
      indexToPersist.getIndex().getMetadata().putAll(metadataElems);
      final File persistedFile = indexMerger.persist(
          indexToPersist.getIndex(),
          interval,
          new File(computePersistDir(schema, interval), String.valueOf(indexToPersist.getCount())),
          indexSpec,
          config.getSegmentWriteOutMediumFactory());
      // Swap the in-memory index for the freshly persisted, queryable segment.
      indexToPersist.swapSegment(
          new QueryableIndexSegment(indexIO.loadIndex(persistedFile), indexToPersist.getSegmentId()));
      return numRows;
    } catch (IOException e) {
      log.makeAlert("dataSource[%s] -- incremental persist failed", schema.getDataSource())
          .addData("interval", interval)
          .addData("count", indexToPersist.getCount())
          .emit();
      throw new RuntimeException(e);
    }
  }
}
Persists the given hydrant and returns the number of rows persisted
18,747
private ServerHolder assignPrimary ( final DruidCoordinatorRuntimeParams params , final DataSegment segment ) { ServerHolder topCandidate = null ; for ( final Object2IntMap . Entry < String > entry : targetReplicants . object2IntEntrySet ( ) ) { final int targetReplicantsInTier = entry . getIntValue ( ) ; if ( targetReplicantsInTier <= 0 ) { continue ; } final String tier = entry . getKey ( ) ; String noAvailability = StringUtils . format ( "No available [%s] servers or node capacity to assign primary segment[%s]! Expected Replicants[%d]" , tier , segment . getId ( ) , targetReplicantsInTier ) ; final List < ServerHolder > holders = getFilteredHolders ( tier , params . getDruidCluster ( ) , createLoadQueueSizeLimitingPredicate ( params ) ) ; if ( holders . isEmpty ( ) ) { log . warn ( noAvailability ) ; continue ; } final ServerHolder candidate = params . getBalancerStrategy ( ) . findNewSegmentHomeReplicator ( segment , holders ) ; if ( candidate == null ) { log . warn ( noAvailability ) ; } else { strategyCache . put ( tier , candidate ) ; if ( topCandidate == null || candidate . getServer ( ) . getPriority ( ) > topCandidate . getServer ( ) . getPriority ( ) ) { topCandidate = candidate ; } } } if ( topCandidate != null ) { strategyCache . remove ( topCandidate . getServer ( ) . getTier ( ) ) ; log . info ( "Assigning 'primary' for segment [%s] to server [%s] in tier [%s]" , segment . getId ( ) , topCandidate . getServer ( ) . getName ( ) , topCandidate . getServer ( ) . getTier ( ) ) ; topCandidate . getPeon ( ) . loadSegment ( segment , null ) ; } return topCandidate ; }
Iterates through each tier and finds the respective segment homes ; from the found segment homes , selects the one with the highest priority to be the holder for the primary replica .
18,748
private boolean loadingInProgress ( final DruidCluster druidCluster ) { for ( final Object2IntMap . Entry < String > entry : targetReplicants . object2IntEntrySet ( ) ) { final String tier = entry . getKey ( ) ; if ( druidCluster . hasTier ( tier ) && entry . getIntValue ( ) > currentReplicants . getOrDefault ( tier , 0 ) ) { return true ; } } return false ; }
Returns true if at least one tier in target replica assignment exists in cluster but does not have enough replicas .
18,749
private boolean keyEquals ( ByteBuffer curKeyBuffer , ByteBuffer buffer , int bufferOffset ) { int i = 0 ; for ( ; i + Long . BYTES <= keySize ; i += Long . BYTES ) { if ( curKeyBuffer . getLong ( i ) != buffer . getLong ( bufferOffset + i ) ) { return false ; } } if ( i + Integer . BYTES <= keySize ) { if ( curKeyBuffer . getInt ( i ) != buffer . getInt ( bufferOffset + i ) ) { return false ; } i += Integer . BYTES ; } for ( ; i < keySize ; i ++ ) { if ( curKeyBuffer . get ( i ) != buffer . get ( bufferOffset + i ) ) { return false ; } } return true ; }
Checks whether the two keys contained in the given buffers are the same .
18,750
private void initNewSlot ( ByteBuffer newKey ) { increaseWriteIndex ( ) ; final int recordOffset = recordSize * curWriteIndex ; buffer . position ( recordOffset ) ; buffer . put ( newKey ) ; for ( int i = 0 ; i < aggregators . length ; i ++ ) { aggregators [ i ] . init ( buffer , recordOffset + aggregatorOffsets [ i ] ) ; } }
Initialize a new slot for a new grouping key . This may be potentially blocked if the array is full until at least one slot becomes available .
18,751
public CloseableIterator < Entry < KeyType > > iterator ( ) { if ( ! initialized ) { throw new ISE ( "Grouper should be initialized first" ) ; } return new CloseableIterator < Entry < KeyType > > ( ) { { increaseReadIndexTo ( 0 ) ; } public boolean hasNext ( ) { return ! finished || remaining ( ) > 0 ; } private int remaining ( ) { if ( curWriteIndex >= nextReadIndex ) { return curWriteIndex - nextReadIndex ; } else { return ( maxNumSlots - nextReadIndex ) + curWriteIndex ; } } public Entry < KeyType > next ( ) { if ( ! hasNext ( ) ) { throw new NoSuchElementException ( ) ; } final int recordOffset = recordSize * nextReadIndex ; final KeyType key = keySerde . fromByteBuffer ( buffer , recordOffset ) ; final Object [ ] values = new Object [ aggregators . length ] ; for ( int i = 0 ; i < aggregators . length ; i ++ ) { values [ i ] = aggregators [ i ] . get ( buffer , recordOffset + aggregatorOffsets [ i ] ) ; } final int targetIndex = nextReadIndex == maxNumSlots - 1 ? 0 : nextReadIndex + 1 ; increaseReadIndexTo ( targetIndex ) ; return new Entry < > ( key , values ) ; } private void increaseReadIndexTo ( int target ) { final long startAtNs = System . nanoTime ( ) ; final long queryTimeoutAtNs = getQueryTimeoutAtNs ( startAtNs ) ; final long spinTimeoutAtNs = startAtNs + SPIN_FOR_TIMEOUT_THRESHOLD_NS ; long timeoutNs = queryTimeoutAtNs - startAtNs ; long spinTimeoutNs = SPIN_FOR_TIMEOUT_THRESHOLD_NS ; while ( ( curWriteIndex == - 1 || target == curWriteIndex ) && ! finished && ! Thread . currentThread ( ) . isInterrupted ( ) ) { if ( timeoutNs <= 0L ) { throw new RuntimeException ( new TimeoutException ( ) ) ; } if ( spinTimeoutNs <= 0L ) { Thread . yield ( ) ; } long now = System . nanoTime ( ) ; timeoutNs = queryTimeoutAtNs - now ; spinTimeoutNs = spinTimeoutAtNs - now ; } nextReadIndex = target ; } public void close ( ) { } } ; }
Return a sorted iterator . This method can be called safely while writing and the iterating thread and the writing thread can be different . The result iterator always returns sorted results . This method should be called only one time per grouper .
18,752
public InputRow transform ( final InputRow row ) { if ( row == null ) { return null ; } final InputRow transformedRow ; if ( transforms . isEmpty ( ) ) { transformedRow = row ; } else { transformedRow = new TransformedInputRow ( row , transforms ) ; } if ( valueMatcher != null ) { rowSupplierForValueMatcher . set ( transformedRow ) ; if ( ! valueMatcher . matches ( ) ) { return null ; } } return transformedRow ; }
Transforms an input row or returns null if the row should be filtered out .
18,753
public ListenableFuture < SegmentsAndMetadata > publish ( final TransactionalSegmentPublisher publisher , final Committer committer , final Collection < String > sequenceNames ) { final List < SegmentIdWithShardSpec > theSegments = getSegmentWithStates ( sequenceNames ) . map ( SegmentWithState :: getSegmentIdentifier ) . collect ( Collectors . toList ( ) ) ; final ListenableFuture < SegmentsAndMetadata > publishFuture = ListenableFutures . transformAsync ( pushInBackground ( wrapCommitter ( committer ) , theSegments , true ) , sam -> publishInBackground ( sam , publisher ) ) ; return Futures . transform ( publishFuture , ( Function < ? super SegmentsAndMetadata , ? extends SegmentsAndMetadata > ) sam -> { synchronized ( segments ) { sequenceNames . forEach ( segments :: remove ) ; } return sam ; } ) ; }
Execute a task in background to publish all segments corresponding to the given sequence names . The task internally pushes the segments to the deep storage first and then publishes the metadata to the metadata storage .
18,754
public static Collection < DimensionSpec > extractionsToRewrite ( GroupByQuery query ) { return Collections2 . filter ( query . getDimensions ( ) , new Predicate < DimensionSpec > ( ) { public boolean apply ( DimensionSpec input ) { return input . getExtractionFn ( ) != null && ExtractionFn . ExtractionType . ONE_TO_ONE . equals ( input . getExtractionFn ( ) . getExtractionType ( ) ) ; } } ) ; }
This function checks the query for dimensions which can be optimized by applying the dimension extraction as the final step of the query instead of on every event .
18,755
public static < ColumnSelectorStrategyClass extends ColumnSelectorStrategy > ColumnSelectorPlus < ColumnSelectorStrategyClass > [ ] createColumnSelectorPluses ( ColumnSelectorStrategyFactory < ColumnSelectorStrategyClass > strategyFactory , List < DimensionSpec > dimensionSpecs , ColumnSelectorFactory columnSelectorFactory ) { int dimCount = dimensionSpecs . size ( ) ; @ SuppressWarnings ( "unchecked" ) ColumnSelectorPlus < ColumnSelectorStrategyClass > [ ] dims = new ColumnSelectorPlus [ dimCount ] ; for ( int i = 0 ; i < dimCount ; i ++ ) { final DimensionSpec dimSpec = dimensionSpecs . get ( i ) ; final String dimName = dimSpec . getDimension ( ) ; final ColumnValueSelector selector = getColumnValueSelectorFromDimensionSpec ( dimSpec , columnSelectorFactory ) ; ColumnSelectorStrategyClass strategy = makeStrategy ( strategyFactory , dimSpec , columnSelectorFactory . getColumnCapabilities ( dimSpec . getDimension ( ) ) , selector ) ; final ColumnSelectorPlus < ColumnSelectorStrategyClass > selectorPlus = new ColumnSelectorPlus < > ( dimName , dimSpec . getOutputName ( ) , strategy , selector ) ; dims [ i ] = selectorPlus ; } return dims ; }
Creates an array of ColumnSelectorPlus objects selectors that handle type - specific operations within query processing engines using a strategy factory provided by the query engine . One ColumnSelectorPlus will be created for each column specified in dimensionSpecs .
18,756
public static Long getExactLongFromDecimalString ( String decimalStr ) { final Long val = GuavaUtils . tryParseLong ( decimalStr ) ; if ( val != null ) { return val ; } BigDecimal convertedBD ; try { convertedBD = new BigDecimal ( decimalStr ) ; } catch ( NumberFormatException nfe ) { return null ; } try { return convertedBD . longValueExact ( ) ; } catch ( ArithmeticException ae ) { return null ; } }
Convert a string representing a decimal value to a long .
18,757
public static Pair < Grouper < RowBasedKey > , Accumulator < AggregateResult , Row > > createGrouperAccumulatorPair ( final GroupByQuery query , final boolean isInputRaw , final Map < String , ValueType > rawInputRowSignature , final GroupByQueryConfig config , final Supplier < ByteBuffer > bufferSupplier , final LimitedTemporaryStorage temporaryStorage , final ObjectMapper spillMapper , final AggregatorFactory [ ] aggregatorFactories , final int mergeBufferSize ) { return createGrouperAccumulatorPair ( query , isInputRaw , rawInputRowSignature , config , bufferSupplier , null , SINGLE_THREAD_CONCURRENCY_HINT , temporaryStorage , spillMapper , aggregatorFactories , null , UNKNOWN_THREAD_PRIORITY , false , UNKNOWN_TIMEOUT , mergeBufferSize ) ; }
Create a single - threaded grouper and accumulator .
18,758
public static Expr binaryOp ( BinaryOpExprBase binary , Expr left , Expr right ) { try { return binary . getClass ( ) . getDeclaredConstructor ( String . class , Expr . class , Expr . class ) . newInstance ( binary . op , left , right ) ; } catch ( Exception e ) { log . warn ( e , "failed to rewrite expression " + binary ) ; return binary ; } }
Rewrites the given binary operator expression with the supplied left and right operands by reflectively invoking its constructor ; if construction fails , the original expression is returned unchanged .
18,759
public MetadataStorageTablesConfig getMetadataStorageTablesConfig ( ) { return new MetadataStorageTablesConfig ( null , null , null , segmentTable , null , null , null , null , null , null , null ) ; }
Returns a MetadataStorageTablesConfig that specifies only the segment table ; the remaining table names are expected to be supplied by the code using this .
18,760
public void awaitNextIncrements ( long nextIncrements ) throws InterruptedException { if ( nextIncrements <= 0 ) { throw new IllegalArgumentException ( "nextIncrements is not positive: " + nextIncrements ) ; } if ( nextIncrements > MAX_COUNT / 4 ) { throw new UnsupportedOperationException ( "Couldn't wait for so many increments: " + nextIncrements ) ; } awaitCount ( ( sync . getCount ( ) + nextIncrements ) & MAX_COUNT ) ; }
Somewhat loosely defined wait for next N increments because the starting point is not defined from the Java Memory Model perspective .
18,761
private void downloadExtension ( Artifact versionedArtifact , File toLocation ) { final CollectRequest collectRequest = new CollectRequest ( ) ; collectRequest . setRoot ( new Dependency ( versionedArtifact , JavaScopes . RUNTIME ) ) ; final DependencyRequest dependencyRequest = new DependencyRequest ( collectRequest , DependencyFilterUtils . andFilter ( DependencyFilterUtils . classpathFilter ( JavaScopes . RUNTIME ) , new DependencyFilter ( ) { public boolean accept ( DependencyNode node , List < DependencyNode > parents ) { String scope = node . getDependency ( ) . getScope ( ) ; if ( scope != null ) { scope = StringUtils . toLowerCase ( scope ) ; if ( "provided" . equals ( scope ) ) { return false ; } if ( "test" . equals ( scope ) ) { return false ; } if ( "system" . equals ( scope ) ) { return false ; } } if ( accept ( node . getArtifact ( ) ) ) { return false ; } for ( DependencyNode parent : parents ) { if ( accept ( parent . getArtifact ( ) ) ) { return false ; } } return true ; } private boolean accept ( final Artifact artifact ) { return exclusions . contains ( artifact . getGroupId ( ) ) ; } } ) ) ; try { log . info ( "Start downloading extension [%s]" , versionedArtifact ) ; final List < Artifact > artifacts = aether . resolveArtifacts ( dependencyRequest ) ; for ( Artifact artifact : artifacts ) { if ( ! exclusions . contains ( artifact . getGroupId ( ) ) ) { log . info ( "Adding file [%s] at [%s]" , artifact . getFile ( ) . getName ( ) , toLocation . getAbsolutePath ( ) ) ; FileUtils . copyFileToDirectory ( artifact . getFile ( ) , toLocation ) ; } else { log . debug ( "Skipped Artifact[%s]" , artifact ) ; } } } catch ( Exception e ) { log . error ( e , "Unable to resolve artifacts for [%s]." , dependencyRequest ) ; throw new RuntimeException ( e ) ; } log . info ( "Finish downloading extension [%s]" , versionedArtifact ) ; }
Download the extension given its maven coordinate
18,762
private void createExtensionDirectory ( String coordinate , File atLocation ) { if ( atLocation . isDirectory ( ) ) { log . info ( "Directory [%s] already exists, skipping creating a directory" , atLocation . getAbsolutePath ( ) ) ; return ; } if ( ! atLocation . mkdir ( ) ) { throw new ISE ( "Unable to create directory at [%s] for coordinate [%s]" , atLocation . getAbsolutePath ( ) , coordinate ) ; } }
Create the extension directory for a specific maven coordinate . The name of this directory should be the artifactId in the coordinate
18,763
public void relocate ( final int oldPosition , final int newPosition , final ByteBuffer oldBuf , final ByteBuffer newBuf ) { HllSketch sketch = sketchCache . get ( oldBuf ) . get ( oldPosition ) ; final WritableMemory oldMem = getMemory ( oldBuf ) . writableRegion ( oldPosition , size ) ; if ( sketch . isSameResource ( oldMem ) ) { final WritableMemory newMem = getMemory ( newBuf ) . writableRegion ( newPosition , size ) ; sketch = HllSketch . writableWrap ( newMem ) ; } putSketchIntoCache ( newBuf , newPosition , sketch ) ; }
In very rare cases sketches can exceed the given memory request and move to on - heap memory . We need to identify such sketches and reuse the same objects as opposed to wrapping new memory regions .
18,764
private void mergeWithSmoosher ( ) throws IOException { List < File > fileToProcess = new ArrayList < > ( completedFiles ) ; completedFiles = new ArrayList < > ( ) ; for ( File file : fileToProcess ) { add ( file ) ; if ( ! file . delete ( ) ) { LOG . warn ( "Unable to delete file [%s]" , file ) ; } } }
Merges temporary files created by delegated SmooshedWriters on to the main smoosh file .
18,765
private static TableMacro getView ( final SchemaPlus schemaPlus , final String functionName ) { final Collection < org . apache . calcite . schema . Function > functions = schemaPlus . getFunctions ( functionName ) ; for ( org . apache . calcite . schema . Function function : functions ) { if ( function . getParameters ( ) . isEmpty ( ) && function instanceof TableMacro ) { return ( TableMacro ) function ; } } return null ; }
Return a view macro that may or may not be defined in a certain schema . If it's not defined , returns null .
18,766
private boolean hasMessagesPending ( ) { for ( Map . Entry < MessageQueue , ConcurrentSkipListSet < MessageExt > > entry : messageQueueTreeSetMap . entrySet ( ) ) { if ( ! entry . getValue ( ) . isEmpty ( ) ) { return true ; } } return false ; }
Check if there are locally pending messages to consume .
18,767
public static long zip ( File directory , OutputStream out ) throws IOException { if ( ! directory . isDirectory ( ) ) { throw new IOE ( "directory[%s] is not a directory" , directory ) ; } final ZipOutputStream zipOut = new ZipOutputStream ( out ) ; long totalSize = 0 ; for ( File file : directory . listFiles ( ) ) { log . info ( "Adding file[%s] with size[%,d]. Total size so far[%,d]" , file , file . length ( ) , totalSize ) ; if ( file . length ( ) > Integer . MAX_VALUE ) { zipOut . finish ( ) ; throw new IOE ( "file[%s] too large [%,d]" , file , file . length ( ) ) ; } zipOut . putNextEntry ( new ZipEntry ( file . getName ( ) ) ) ; totalSize += Files . asByteSource ( file ) . copyTo ( zipOut ) ; } zipOut . closeEntry ( ) ; zipOut . flush ( ) ; zipOut . finish ( ) ; return totalSize ; }
Zips the contents of the input directory to the output stream . Sub directories are skipped
18,768
public static FileUtils . FileCopyResult unzip ( final ByteSource byteSource , final File outDir , boolean cacheLocally ) throws IOException { return unzip ( byteSource , outDir , FileUtils . IS_EXCEPTION , cacheLocally ) ; }
Unzip the byteSource to the output directory . If cacheLocally is true the byteSource is cached to local disk before unzipping . This may cause more predictable behavior than trying to unzip a large file directly off a network stream for example .
18,769
public static FileUtils . FileCopyResult gunzip ( final File pulledFile , File outFile ) { return gunzip ( Files . asByteSource ( pulledFile ) , outFile ) ; }
gunzip the file to the output file .
18,770
public static long gunzip ( InputStream in , OutputStream out ) throws IOException { try ( GZIPInputStream gzipInputStream = gzipInputStream ( in ) ) { final long result = ByteStreams . copy ( gzipInputStream , out ) ; out . flush ( ) ; return result ; } finally { out . close ( ) ; } }
gunzip from the source stream to the destination stream .
18,771
public static FileUtils . FileCopyResult gunzip ( final ByteSource in , final File outFile , Predicate < Throwable > shouldRetry ) { return FileUtils . retryCopy ( new ByteSource ( ) { public InputStream openStream ( ) throws IOException { return gzipInputStream ( in . openStream ( ) ) ; } } , outFile , shouldRetry , DEFAULT_RETRY_COUNT ) ; }
Gunzips the given byte source to a local file , retrying on failures that match the given predicate .
18,772
public static FileUtils . FileCopyResult gunzip ( final ByteSource in , File outFile ) { return gunzip ( in , outFile , FileUtils . IS_EXCEPTION ) ; }
Gunzip from the input stream to the output file
18,773
public static long gzip ( InputStream inputStream , OutputStream out ) throws IOException { try ( GZIPOutputStream outputStream = new GZIPOutputStream ( out ) ) { final long result = ByteStreams . copy ( inputStream , outputStream ) ; out . flush ( ) ; return result ; } finally { inputStream . close ( ) ; } }
Copy inputStream to out while wrapping out in a GZIPOutputStream Closes both input and output
18,774
public static FileUtils . FileCopyResult gzip ( final File inFile , final File outFile , Predicate < Throwable > shouldRetry ) { gzip ( Files . asByteSource ( inFile ) , Files . asByteSink ( outFile ) , shouldRetry ) ; return new FileUtils . FileCopyResult ( outFile ) ; }
Gzips the input file to the output
18,775
public static FileUtils . FileCopyResult gzip ( final File inFile , final File outFile ) { return gzip ( inFile , outFile , FileUtils . IS_EXCEPTION ) ; }
GZip compress the contents of inFile into outFile
18,776
public static InputStream decompress ( final InputStream in , final String fileName ) throws IOException { if ( fileName . endsWith ( GZ_SUFFIX ) ) { return gzipInputStream ( in ) ; } else if ( fileName . endsWith ( BZ2_SUFFIX ) ) { return new BZip2CompressorInputStream ( in , true ) ; } else if ( fileName . endsWith ( XZ_SUFFIX ) ) { return new XZCompressorInputStream ( in , true ) ; } else if ( fileName . endsWith ( SNAPPY_SUFFIX ) ) { return new FramedSnappyCompressorInputStream ( in ) ; } else if ( fileName . endsWith ( ZSTD_SUFFIX ) ) { return new ZstdCompressorInputStream ( in ) ; } else if ( fileName . endsWith ( ZIP_SUFFIX ) ) { final ZipInputStream zipIn = new ZipInputStream ( in , StandardCharsets . UTF_8 ) ; try { final ZipEntry nextEntry = zipIn . getNextEntry ( ) ; if ( nextEntry == null ) { zipIn . close ( ) ; return new ByteArrayInputStream ( new byte [ 0 ] ) ; } return zipIn ; } catch ( IOException e ) { try { zipIn . close ( ) ; } catch ( IOException e2 ) { e . addSuppressed ( e2 ) ; } throw e ; } } else { return in ; } }
Decompress an input stream from a file based on the filename .
18,777
private static CpuAcctMetric parse ( final List < String > lines ) { final int ncpus = lines . size ( ) - 1 ; final long [ ] usrTime = new long [ ncpus ] ; final long [ ] sysTime = new long [ ncpus ] ; for ( int i = 1 ; i < lines . size ( ) ; i ++ ) { final String [ ] splits = lines . get ( i ) . split ( CgroupUtil . SPACE_MATCH , 3 ) ; if ( splits . length != 3 ) { throw new RE ( "Error parsing [%s]" , lines . get ( i ) ) ; } final int cpuNum = Integer . parseInt ( splits [ 0 ] ) ; usrTime [ cpuNum ] = Long . parseLong ( splits [ 1 ] ) ; sysTime [ cpuNum ] = Long . parseLong ( splits [ 2 ] ) ; } return new CpuAcctMetric ( usrTime , sysTime ) ; }
Private because it requires a specific format and can't take a generic list of strings
18,778
public CpuAcctMetric snapshot ( ) { final File cpuacct ; try { cpuacct = new File ( cgroupDiscoverer . discover ( CGROUP ) . toFile ( ) , CGROUP_ACCT_FILE ) ; } catch ( RuntimeException re ) { LOG . error ( re , "Unable to fetch snapshot" ) ; return new CpuAcctMetric ( new long [ 0 ] , new long [ 0 ] ) ; } try { return parse ( Files . readAllLines ( cpuacct . toPath ( ) , StandardCharsets . UTF_8 ) ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } }
Take a snapshot of the existing data .
18,779
@ Path ( "/isLeader" ) @ Produces ( MediaType . APPLICATION_JSON ) public Response isLeader ( ) { final boolean leading = coordinator . isLeader ( ) ; final Map < String , Boolean > response = ImmutableMap . of ( "leader" , leading ) ; if ( leading ) { return Response . ok ( response ) . build ( ) ; } else { return Response . status ( Response . Status . NOT_FOUND ) . entity ( response ) . build ( ) ; } }
This is an unsecured endpoint defined as such in UNSECURED_PATHS in CoordinatorJettyServerInitializer
18,780
@ Path ( "/mode" ) @ Produces ( MediaType . APPLICATION_JSON ) public Response getMode ( final HttpServletRequest req ) { IndexTaskUtils . datasourceAuthorizationCheck ( req , Action . READ , getDataSource ( ) , authorizerMapper ) ; return Response . ok ( isParallelMode ( ) ? "parallel" : "sequential" ) . build ( ) ; }
External APIs to get running status
18,781
private void serverInventoryInitialized ( ) { long start = System . currentTimeMillis ( ) ; long serverSyncWaitTimeout = config . getServerTimeout ( ) + 2 * ChangeRequestHttpSyncer . HTTP_TIMEOUT_EXTRA_MS ; List < DruidServerHolder > uninitializedServers = new ArrayList < > ( ) ; for ( DruidServerHolder server : servers . values ( ) ) { if ( ! server . isSyncedSuccessfullyAtleastOnce ( ) ) { uninitializedServers . add ( server ) ; } } while ( ! uninitializedServers . isEmpty ( ) && ( ( System . currentTimeMillis ( ) - start ) < serverSyncWaitTimeout ) ) { try { Thread . sleep ( 5000 ) ; } catch ( InterruptedException ex ) { throw new RE ( ex , "Interrupted while waiting for queryable server initial successful sync." ) ; } log . info ( "Checking whether all servers have been synced at least once yet...." ) ; Iterator < DruidServerHolder > iter = uninitializedServers . iterator ( ) ; while ( iter . hasNext ( ) ) { if ( iter . next ( ) . isSyncedSuccessfullyAtleastOnce ( ) ) { iter . remove ( ) ; } } } if ( uninitializedServers . isEmpty ( ) ) { log . info ( "All servers have been synced successfully at least once." ) ; } else { for ( DruidServerHolder server : uninitializedServers ) { log . warn ( "Server[%s] might not yet be synced successfully. We will continue to retry that in the background." , server . druidServer . getName ( ) ) ; } } log . info ( "Calling SegmentCallback.segmentViewInitialized() for all callbacks." ) ; runSegmentCallbacks ( new Function < SegmentCallback , CallbackAction > ( ) { public CallbackAction apply ( SegmentCallback input ) { return input . segmentViewInitialized ( ) ; } } ) ; }
Waits ( up to a timeout ) for every known server to complete at least one successful sync , then invokes segmentViewInitialized on all registered segment callbacks .
18,782
public static RelDataType createSqlType ( final RelDataTypeFactory typeFactory , final SqlTypeName typeName ) { return createSqlTypeWithNullability ( typeFactory , typeName , false ) ; }
Like RelDataTypeFactory . createSqlType but creates types that align best with how Druid represents them .
18,783
public static RelDataType createSqlTypeWithNullability ( final RelDataTypeFactory typeFactory , final SqlTypeName typeName , final boolean nullable ) { final RelDataType dataType ; switch ( typeName ) { case TIMESTAMP : dataType = typeFactory . createSqlType ( typeName , 3 ) ; break ; case CHAR : case VARCHAR : dataType = typeFactory . createTypeWithCharsetAndCollation ( typeFactory . createSqlType ( typeName ) , Calcites . defaultCharset ( ) , SqlCollation . IMPLICIT ) ; break ; default : dataType = typeFactory . createSqlType ( typeName ) ; } return typeFactory . createTypeWithNullability ( dataType , nullable ) ; }
Like RelDataTypeFactory . createSqlTypeWithNullability but creates types that align best with how Druid represents them .
18,784
public static long jodaToCalciteTimestamp ( final DateTime dateTime , final DateTimeZone timeZone ) { return dateTime . withZone ( timeZone ) . withZoneRetainFields ( DateTimeZone . UTC ) . getMillis ( ) ; }
Calcite expects TIMESTAMP types to be an instant that has the expected local time fields if printed as UTC .
18,785
public static int jodaToCalciteDate ( final DateTime dateTime , final DateTimeZone timeZone ) { final DateTime date = dateTime . withZone ( timeZone ) . dayOfMonth ( ) . roundFloorCopy ( ) ; return Days . daysBetween ( DateTimes . EPOCH , date . withZoneRetainFields ( DateTimeZone . UTC ) ) . getDays ( ) ; }
Calcite expects DATE types to be number of days from the epoch to the UTC date matching the local time fields .
18,786
public static TimestampString jodaToCalciteTimestampString ( final DateTime dateTime , final DateTimeZone timeZone ) { String timestampString = TRAILING_ZEROS . matcher ( CALCITE_TIMESTAMP_PRINTER . print ( dateTime . withZone ( timeZone ) ) ) . replaceAll ( "" ) ; return new TimestampString ( timestampString ) ; }
Calcite expects TIMESTAMP literals to be represented by TimestampStrings in the local time zone .
18,787
public static TimeString jodaToCalciteTimeString ( final DateTime dateTime , final DateTimeZone timeZone ) { String timeString = TRAILING_ZEROS . matcher ( CALCITE_TIME_PRINTER . print ( dateTime . withZone ( timeZone ) ) ) . replaceAll ( "" ) ; return new TimeString ( timeString ) ; }
Calcite expects TIME literals to be represented by TimeStrings in the local time zone .
18,788
public static DateString jodaToCalciteDateString ( final DateTime dateTime , final DateTimeZone timeZone ) { return new DateString ( CALCITE_DATE_PRINTER . print ( dateTime . withZone ( timeZone ) ) ) ; }
Calcite expects DATE literals to be represented by DateStrings in the local time zone .
18,789
public static FloatsColumn create ( ColumnarFloats column , ImmutableBitmap nullValueBitmap ) { if ( nullValueBitmap . isEmpty ( ) ) { return new FloatsColumn ( column ) ; } else { return new FloatsColumnWithNulls ( column , nullValueBitmap ) ; } }
Factory method to create FloatsColumn .
18,790
private Supplier < DruidLongPredicate > getLongPredicateSupplier ( ) { return new Supplier < DruidLongPredicate > ( ) { private final Object initLock = new Object ( ) ; private DruidLongPredicate predicate ; private void initLongValues ( ) { if ( predicate != null ) { return ; } synchronized ( initLock ) { if ( predicate != null ) { return ; } LongArrayList longs = new LongArrayList ( values . size ( ) ) ; for ( String value : values ) { final Long longValue = DimensionHandlerUtils . getExactLongFromDecimalString ( value ) ; if ( longValue != null ) { longs . add ( longValue ) ; } } if ( longs . size ( ) > NUMERIC_HASHING_THRESHOLD ) { final LongOpenHashSet longHashSet = new LongOpenHashSet ( longs ) ; predicate = input -> longHashSet . contains ( input ) ; } else { final long [ ] longArray = longs . toLongArray ( ) ; Arrays . sort ( longArray ) ; predicate = input -> Arrays . binarySearch ( longArray , input ) >= 0 ; } } } public DruidLongPredicate get ( ) { initLongValues ( ) ; return predicate ; } } ; }
This supplier must be thread - safe since this DimFilter will be accessed in the query runners .
18,791
public static void scheduleWithFixedDelay ( final ScheduledExecutorService exec , final Duration initialDelay , final Duration delay , final Runnable runnable ) { scheduleWithFixedDelay ( exec , initialDelay , delay , new Callable < Signal > ( ) { public Signal call ( ) { runnable . run ( ) ; return Signal . REPEAT ; } } ) ; }
Run runnable repeatedly with the given delay between calls after the given initial delay . Exceptions are caught and logged as errors .
18,792
public static void scheduleWithFixedDelay ( ScheduledExecutorService exec , Duration delay , Callable < Signal > callable ) { scheduleWithFixedDelay ( exec , delay , delay , callable ) ; }
Run callable repeatedly with the given delay between calls after the given initial delay until it returns Signal . STOP . Exceptions are caught and logged as errors .
18,793
public static void scheduleAtFixedRate ( final ScheduledExecutorService exec , final Duration initialDelay , final Duration period , final Runnable runnable ) { scheduleAtFixedRate ( exec , initialDelay , period , new Callable < Signal > ( ) { public Signal call ( ) { runnable . run ( ) ; return Signal . REPEAT ; } } ) ; }
Run runnable once every period after the given initial delay . Exceptions are caught and logged as errors .
18,794
public static String concatenateForRewrite ( final String base , final String encodedPath , final String encodedQueryString ) { final StringBuilder url = new StringBuilder ( base ) . append ( encodedPath ) ; if ( encodedQueryString != null ) { url . append ( "?" ) . append ( encodedQueryString ) ; } return url . toString ( ) ; }
Concatenate URI parts in a way that is useful for proxy servlets .
18,795
public void stop ( ) { giant . lock ( ) ; try { tasks . clear ( ) ; taskFutures . clear ( ) ; active = false ; managerExec . shutdownNow ( ) ; storageSyncExec . shutdownNow ( ) ; managementMayBeNecessary . signalAll ( ) ; } finally { giant . unlock ( ) ; } }
Shuts down the queue .
18,796
// Main task-runner management loop; meant to run until stop() flips `active` to false.
// Per iteration, under the giant lock:
//   1. Snapshot the runner's known tasks as a map of taskId -> result future.
//   2. For each queued task without a tracked future: reuse the runner's existing future if
//      present; otherwise call task.isReady() (failing the task if isReady throws), submit ready
//      tasks to the runner, and attach completion callbacks. Tasks already tracked but still
//      pending are re-submitted to the runner.
//   3. Ask the runner to shut down any task it knows about that is no longer in our queue.
//   4. Wait (bounded by MANAGEMENT_WAIT_TIMEOUT_NANOS) for a signal that management may be needed.
// Before the loop: sleeps for the configured start delay, then restores the runner's state.
private void manage ( ) throws InterruptedException { log . info ( "Beginning management in %s." , config . getStartDelay ( ) ) ; Thread . sleep ( config . getStartDelay ( ) . getMillis ( ) ) ; taskRunner . restore ( ) ; while ( active ) { giant . lock ( ) ; try { final Map < String , ListenableFuture < TaskStatus > > runnerTaskFutures = new HashMap < > ( ) ; for ( final TaskRunnerWorkItem workItem : taskRunner . getKnownTasks ( ) ) { runnerTaskFutures . put ( workItem . getTaskId ( ) , workItem . getResult ( ) ) ; } for ( final Task task : ImmutableList . copyOf ( tasks ) ) { if ( ! taskFutures . containsKey ( task . getId ( ) ) ) { final ListenableFuture < TaskStatus > runnerTaskFuture ; if ( runnerTaskFutures . containsKey ( task . getId ( ) ) ) { runnerTaskFuture = runnerTaskFutures . get ( task . getId ( ) ) ; } else { final boolean taskIsReady ; try { taskIsReady = task . isReady ( taskActionClientFactory . create ( task ) ) ; } catch ( Exception e ) { log . warn ( e , "Exception thrown during isReady for task: %s" , task . getId ( ) ) ; notifyStatus ( task , TaskStatus . failure ( task . getId ( ) ) , "failed because of exception[%s]" , e . getClass ( ) ) ; continue ; } if ( taskIsReady ) { log . info ( "Asking taskRunner to run: %s" , task . getId ( ) ) ; runnerTaskFuture = taskRunner . run ( task ) ; } else { continue ; } } taskFutures . put ( task . getId ( ) , attachCallbacks ( task , runnerTaskFuture ) ) ; } else if ( isTaskPending ( task ) ) { taskRunner . run ( task ) ; } } final Set < String > tasksToKill = Sets . difference ( runnerTaskFutures . keySet ( ) , ImmutableSet . copyOf ( Lists . transform ( tasks , new Function < Task , Object > ( ) { public String apply ( Task task ) { return task . getId ( ) ; } } ) ) ) ; if ( ! tasksToKill . isEmpty ( ) ) { log . info ( "Asking taskRunner to clean up %,d tasks." , tasksToKill . size ( ) ) ; for ( final String taskId : tasksToKill ) { try { taskRunner . 
shutdown ( taskId , "task is not in runnerTaskFutures[%s]" , runnerTaskFutures . keySet ( ) ) ; } catch ( Exception e ) { log . warn ( e , "TaskRunner failed to clean up task: %s" , taskId ) ; } } } managementMayBeNecessary . awaitNanos ( MANAGEMENT_WAIT_TIMEOUT_NANOS ) ; } finally { giant . unlock ( ) ; } } }
Main task runner management loop. Meant to run forever, or at least until we're stopped.
18,797
/**
 * Adds some work to the queue and the underlying task storage facility with a generic
 * "running" status.
 *
 * @param task task to add; must not be null
 * @return true when the task was accepted
 * @throws EntryExistsException if a task with the same id is already in storage
 * @throws IllegalStateException if the queue is not active or is already at max size
 */
public boolean add(final Task task) throws EntryExistsException
{
  // Validate before dereferencing: the original checked null only after calling task.getId(),
  // which would have produced a bare NPE instead of the intended precondition failure.
  Preconditions.checkNotNull(task, "task");
  if (taskStorage.getTask(task.getId()).isPresent()) {
    // Fixed grammar of the error message ("is already exists" -> "already exists").
    throw new EntryExistsException(StringUtils.format("Task %s already exists", task.getId()));
  }
  giant.lock();
  try {
    Preconditions.checkState(active, "Queue is not active!");
    Preconditions.checkState(
        tasks.size() < config.getMaxSize(),
        "Too many tasks (max = %,d)",
        config.getMaxSize()
    );
    // Persist first so a restart can restore the task, then track it in-memory and wake manage().
    taskStorage.insert(task, TaskStatus.running(task.getId()));
    addTaskInternal(task);
    managementMayBeNecessary.signalAll();
    return true;
  }
  finally {
    giant.unlock();
  }
}
Adds some work to the queue and the underlying task storage facility with a generic running status .
18,798
/**
 * Shuts down a task if it has not yet finished: finds the queued task with the given id and
 * notifies the queue of a failure status for it, which triggers runner cleanup and persistence.
 *
 * @param taskId       id of the task to shut down; must not be null
 * @param reasonFormat format string describing why the task is being shut down
 * @param args         format arguments for reasonFormat
 */
public void shutdown(final String taskId, String reasonFormat, Object... args)
{
  giant.lock();
  try {
    Preconditions.checkNotNull(taskId, "taskId");
    for (final Task candidate : tasks) {
      if (!candidate.getId().equals(taskId)) {
        continue;
      }
      // Only the first match is notified; task ids are expected to be unique in the queue.
      notifyStatus(candidate, TaskStatus.failure(taskId), reasonFormat, args);
      break;
    }
  }
  finally {
    giant.unlock();
  }
}
Shuts down a task if it has not yet finished .
18,799
// Notifies this queue that a task has an updated status. Under the giant lock:
//   1. Validates inputs, that the queue is active, and that task/status ids match.
//   2. Best-effort asks the runner to shut the task down (failures only logged).
//   3. Removes the task from the in-memory list by id (scans from the tail, breaks after the
//      first hit — so `removed` can only be 0 or 1 and the `removed > 1` alert branch is
//      effectively unreachable as written; NOTE(review): the break may be unintentional, confirm).
//   4. Drops the tracked future, then persists the new status — but only if the previously
//      stored status is still runnable; completed tasks ignore further updates (alert emitted).
//   5. Signals manage() that the queue changed.
private void notifyStatus ( final Task task , final TaskStatus taskStatus , String reasonFormat , Object ... args ) { giant . lock ( ) ; try { Preconditions . checkNotNull ( task , "task" ) ; Preconditions . checkNotNull ( taskStatus , "status" ) ; Preconditions . checkState ( active , "Queue is not active!" ) ; Preconditions . checkArgument ( task . getId ( ) . equals ( taskStatus . getId ( ) ) , "Mismatching task ids[%s/%s]" , task . getId ( ) , taskStatus . getId ( ) ) ; try { taskRunner . shutdown ( task . getId ( ) , reasonFormat , args ) ; } catch ( Exception e ) { log . warn ( e , "TaskRunner failed to cleanup task after completion: %s" , task . getId ( ) ) ; } int removed = 0 ; for ( int i = tasks . size ( ) - 1 ; i >= 0 ; i -- ) { if ( tasks . get ( i ) . getId ( ) . equals ( task . getId ( ) ) ) { removed ++ ; removeTaskInternal ( tasks . get ( i ) ) ; break ; } } if ( removed == 0 ) { log . warn ( "Unknown task completed: %s" , task . getId ( ) ) ; } else if ( removed > 1 ) { log . makeAlert ( "Removed multiple copies of task" ) . addData ( "count" , removed ) . addData ( "task" , task . getId ( ) ) . emit ( ) ; } taskFutures . remove ( task . getId ( ) ) ; if ( removed > 0 ) { try { final Optional < TaskStatus > previousStatus = taskStorage . getStatus ( task . getId ( ) ) ; if ( ! previousStatus . isPresent ( ) || ! previousStatus . get ( ) . isRunnable ( ) ) { log . makeAlert ( "Ignoring notification for already-complete task" ) . addData ( "task" , task . getId ( ) ) . emit ( ) ; } else { taskStorage . setStatus ( taskStatus ) ; log . info ( "Task done: %s" , task ) ; managementMayBeNecessary . signalAll ( ) ; } } catch ( Exception e ) { log . makeAlert ( e , "Failed to persist status for task" ) . addData ( "task" , task . getId ( ) ) . addData ( "statusCode" , taskStatus . getStatusCode ( ) ) . emit ( ) ; } } } finally { giant . unlock ( ) ; } }
Notify this queue that some task has an updated status . If this update is valid the status will be persisted in the task storage facility . If the status is a completed status the task will be unlocked and no further updates will be accepted .