idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
23,000
synchronized void refresh ( RollingSegmentHandle source ) { Preconditions . checkArgument ( source . getSegmentName ( ) . equals ( this . getSegmentName ( ) ) , "SegmentName mismatch." ) ; if ( this . readOnly == source . readOnly ) { this . headerHandle = source . headerHandle ; } this . segmentChunks = new ArrayList < > ( source . chunks ( ) ) ; setHeaderLength ( source . getHeaderLength ( ) ) ; if ( source . isSealed ( ) ) { markSealed ( ) ; } if ( source . isDeleted ( ) ) { markDeleted ( ) ; } }
Updates the contents of this handle with information from the given one .
23,001
synchronized void markSealed ( ) { if ( ! this . sealed ) { this . sealed = true ; this . segmentChunks = Collections . unmodifiableList ( this . segmentChunks ) ; this . activeChunkHandle = null ; } }
Records the fact that the Segment represented by this Handle has been sealed .
23,002
synchronized void addChunk ( SegmentChunk segmentChunk , SegmentHandle activeChunkHandle ) { Preconditions . checkState ( ! this . sealed , "Cannot add SegmentChunks for a Sealed Handle." ) ; if ( this . segmentChunks . size ( ) > 0 ) { long expectedOffset = this . segmentChunks . get ( this . segmentChunks . size ( ) - 1 ) . getLastOffset ( ) ; Preconditions . checkArgument ( segmentChunk . getStartOffset ( ) == expectedOffset , "Invalid SegmentChunk StartOffset. Expected %s, given %s." , expectedOffset , segmentChunk . getStartOffset ( ) ) ; } Preconditions . checkNotNull ( activeChunkHandle , "activeChunkHandle" ) ; Preconditions . checkArgument ( ! activeChunkHandle . isReadOnly ( ) , "Active SegmentChunk handle cannot be readonly." ) ; Preconditions . checkArgument ( activeChunkHandle . getSegmentName ( ) . equals ( segmentChunk . getName ( ) ) , "Active SegmentChunk handle must be for the last SegmentChunk." ) ; this . activeChunkHandle = activeChunkHandle ; this . segmentChunks . add ( segmentChunk ) ; }
Adds a new SegmentChunk .
23,003
synchronized void addChunks ( List < SegmentChunk > segmentChunks ) { Preconditions . checkState ( ! this . sealed , "Cannot add SegmentChunks for a Sealed Handle." ) ; long expectedOffset = 0 ; if ( this . segmentChunks . size ( ) > 0 ) { expectedOffset = this . segmentChunks . get ( this . segmentChunks . size ( ) - 1 ) . getLastOffset ( ) ; } else if ( segmentChunks . size ( ) > 0 ) { expectedOffset = segmentChunks . get ( 0 ) . getStartOffset ( ) ; } for ( SegmentChunk s : segmentChunks ) { Preconditions . checkArgument ( s . getStartOffset ( ) == expectedOffset , "Invalid SegmentChunk StartOffset. Expected %s, given %s." , expectedOffset , s . getStartOffset ( ) ) ; expectedOffset += s . getLength ( ) ; } this . segmentChunks . addAll ( segmentChunks ) ; this . activeChunkHandle = null ; }
Adds multiple SegmentChunks .
23,004
synchronized void setActiveChunkHandle ( SegmentHandle handle ) { Preconditions . checkArgument ( handle == null || ! handle . isReadOnly ( ) , "Active SegmentChunk handle cannot be readonly." ) ; SegmentChunk last = lastChunk ( ) ; Preconditions . checkState ( last != null , "Cannot set an Active SegmentChunk handle when there are no SegmentChunks." ) ; Preconditions . checkArgument ( handle == null || handle . getSegmentName ( ) . equals ( last . getName ( ) ) , "Active SegmentChunk handle must be for the last SegmentChunk." ) ; this . activeChunkHandle = handle ; }
Sets the Active SegmentChunk handle .
23,005
< T > CompletableFuture < T > executeIfEmpty ( DirectSegmentAccess segment , Supplier < CompletableFuture < T > > action , TimeoutTimer timer ) { return this . recoveryTracker . waitIfNeeded ( segment , ( ) -> this . conditionalUpdateProcessor . addWithFilter ( conditionKey -> conditionKey . getKey ( ) == segment . getSegmentId ( ) , ( ) -> isTableSegmentEmpty ( segment , timer ) . thenCompose ( isEmpty -> { if ( isEmpty ) { return action . get ( ) ; } else { return Futures . failedFuture ( new TableSegmentNotEmptyException ( segment . getInfo ( ) . getName ( ) ) ) ; } } ) ) ) ; }
Executes the given action only if the Segment is empty .
23,006
CompletableFuture < Map < UUID , Long > > getBucketOffsets ( DirectSegmentAccess segment , Collection < UUID > hashes , TimeoutTimer timer ) { Exceptions . checkNotClosed ( this . closed . get ( ) , this ) ; if ( hashes . isEmpty ( ) ) { return CompletableFuture . completedFuture ( Collections . emptyMap ( ) ) ; } val result = new HashMap < UUID , Long > ( ) ; val toLookup = new ArrayList < UUID > ( ) ; for ( UUID hash : hashes ) { if ( result . containsKey ( hash ) ) { continue ; } val existingValue = this . cache . get ( segment . getSegmentId ( ) , hash ) ; if ( existingValue == null ) { result . put ( hash , TableKey . NOT_EXISTS ) ; toLookup . add ( hash ) ; } else if ( ! existingValue . isRemoval ( ) ) { result . put ( hash , existingValue . getSegmentOffset ( ) ) ; } else { long backpointerOffset = this . cache . getBackpointer ( segment . getSegmentId ( ) , existingValue . getSegmentOffset ( ) ) ; if ( backpointerOffset < 0 ) { result . put ( hash , TableKey . NOT_EXISTS ) ; toLookup . add ( hash ) ; } else { result . put ( hash , existingValue . getSegmentOffset ( ) ) ; } } } if ( toLookup . isEmpty ( ) ) { return CompletableFuture . completedFuture ( result ) ; } else { return this . recoveryTracker . waitIfNeeded ( segment , ( ) -> getBucketOffsetFromSegment ( segment , result , toLookup , timer ) ) ; } }
Finds the Last Bucket Offsets for the given KeyHashes .
23,007
CompletableFuture < List < Long > > update ( DirectSegmentAccess segment , TableKeyBatch batch , Supplier < CompletableFuture < Long > > persist , TimeoutTimer timer ) { Exceptions . checkNotClosed ( this . closed . get ( ) , this ) ; if ( batch . isConditional ( ) ) { val keys = batch . getVersionedItems ( ) . stream ( ) . map ( item -> Maps . immutableEntry ( segment . getSegmentId ( ) , item . getHash ( ) ) ) . collect ( Collectors . toList ( ) ) ; return this . conditionalUpdateProcessor . add ( keys , ( ) -> validateConditionalUpdate ( segment , batch , timer ) . thenComposeAsync ( v -> persist . get ( ) , this . executor ) . thenApplyAsync ( batchOffset -> updateCache ( segment , batch , batchOffset ) , this . executor ) ) ; } else { return persist . get ( ) . thenApplyAsync ( batchOffset -> updateCache ( segment , batch , batchOffset ) , this . executor ) ; } }
Performs a Batch Update or Removal .
23,008
private CompletableFuture < Void > validateConditionalUpdate ( DirectSegmentAccess segment , TableKeyBatch batch , TimeoutTimer timer ) { Exceptions . checkNotClosed ( this . closed . get ( ) , this ) ; List < UUID > hashes = batch . getVersionedItems ( ) . stream ( ) . map ( TableKeyBatch . Item :: getHash ) . collect ( Collectors . toList ( ) ) ; CompletableFuture < Void > result = getBucketOffsets ( segment , hashes , timer ) . thenAccept ( offsets -> validateConditionalUpdate ( batch . getVersionedItems ( ) , offsets , segment . getInfo ( ) . getName ( ) ) ) ; return Futures . exceptionallyCompose ( result , ex -> { ex = Exceptions . unwrap ( ex ) ; if ( ex instanceof BadKeyVersionException ) { return validateConditionalUpdateFailures ( segment , ( ( BadKeyVersionException ) ex ) . getExpectedVersions ( ) , timer ) ; } return Futures . failedFuture ( ex ) ; } ) ; }
Validates all the conditional updates specified in the given TableKeyBatch .
23,009
@ SneakyThrows ( ConditionalTableUpdateException . class ) private void validateConditionalUpdate ( List < TableKeyBatch . Item > items , Map < UUID , Long > bucketOffsets , String segmentName ) { val badKeyVersions = new HashMap < TableKey , Long > ( ) ; for ( val item : items ) { TableKey key = item . getKey ( ) ; Long bucketOffset = bucketOffsets . get ( item . getHash ( ) ) ; assert key . hasVersion ( ) : "validateConditionalUpdate for TableKey with no compare version" ; if ( bucketOffset == TableKey . NOT_EXISTS ) { if ( key . getVersion ( ) != TableKey . NOT_EXISTS ) { throw new KeyNotExistsException ( segmentName , key . getKey ( ) ) ; } } else if ( bucketOffset != key . getVersion ( ) ) { badKeyVersions . put ( key , bucketOffset ) ; } } if ( ! badKeyVersions . isEmpty ( ) ) { throw new BadKeyVersionException ( segmentName , badKeyVersions ) ; } }
Validates a list of UpdateBatchItems against their actual Table Bucket offsets .
23,010
< T extends WriterSegmentProcessor > long getHighestCommittedSequenceNumber ( Iterable < T > processors ) { long lowestUncommittedSeqNo = Long . MAX_VALUE ; for ( WriterSegmentProcessor a : processors ) { if ( ! a . isClosed ( ) ) { long firstSeqNo = a . getLowestUncommittedSequenceNumber ( ) ; if ( firstSeqNo >= 0 ) { lowestUncommittedSeqNo = Math . min ( lowestUncommittedSeqNo , firstSeqNo - 1 ) ; } } } lowestUncommittedSeqNo = Math . min ( lowestUncommittedSeqNo , this . state . getLastReadSequenceNumber ( ) ) ; return lowestUncommittedSeqNo ; }
Determines the largest Sequence Number that can be safely truncated from the Writer s Data Source . All operations up to and including the one for this Sequence Number have been successfully committed to External Storage .
23,011
private void checkAssignment ( ) { long traceId = LoggerHelpers . traceEnter ( log , "checkAssignment" ) ; try { Exceptions . checkNotClosed ( closed . get ( ) , this ) ; Set < Integer > desiredList = getDesiredContainerList ( ) ; if ( desiredList != null ) { Collection < Integer > runningContainers = new HashSet < > ( this . handles . keySet ( ) ) ; Collection < Integer > containersPendingTasks = new HashSet < > ( this . pendingTasks ) ; Collection < Integer > containersToBeStarted = CollectionHelpers . filterOut ( desiredList , runningContainers ) ; containersToBeStarted = CollectionHelpers . filterOut ( containersToBeStarted , containersPendingTasks ) ; Collection < Integer > containersToBeStopped = CollectionHelpers . filterOut ( runningContainers , desiredList ) ; containersToBeStopped = CollectionHelpers . filterOut ( containersToBeStopped , containersPendingTasks ) ; log . info ( "Container Changes: Desired = {}, Current = {}, PendingTasks = {}, ToStart = {}, ToStop = {}." , desiredList , runningContainers , containersPendingTasks , containersToBeStarted , containersToBeStopped ) ; containersToBeStarted . forEach ( this :: startContainer ) ; containersToBeStopped . forEach ( this :: stopContainer ) ; } else { log . warn ( "No segment container assignments found" ) ; } } catch ( Throwable e ) { log . warn ( "Failed to monitor the segmentcontainer assignment: " , e ) ; } finally { LoggerHelpers . traceLeave ( log , "checkAssignment" , traceId ) ; } }
The container assignment monitor . This method will fetch the current owned containers for this host and ensures that the local containers state reflects this .
23,012
private CompletableFuture < Void > stopContainer ( int containerId ) { log . info ( "Stopping Container {}." , containerId ) ; ContainerHandle handle = handles . get ( containerId ) ; if ( handle == null ) { log . warn ( "Container {} handle is null, container is pending start or already unregistered." , containerId ) ; return null ; } else { this . pendingTasks . add ( containerId ) ; try { return registry . stopContainer ( handle , CLOSE_TIMEOUT_PER_CONTAINER ) . whenComplete ( ( aVoid , throwable ) -> { if ( throwable != null ) { log . warn ( "Stopping container {} failed: {}" , containerId , throwable ) ; } try { unregisterHandle ( containerId ) ; } finally { this . pendingTasks . remove ( containerId ) ; } } ) ; } catch ( Throwable e ) { this . pendingTasks . remove ( containerId ) ; throw e ; } } }
Stop the container given its id .
23,013
public static < T > Property < T > named ( String name , T defaultValue ) { return new Property < > ( name , defaultValue ) ; }
Creates a new instance of the Property class with the given default value .
23,014
public void start ( ) throws Exception { if ( isInProcZK ) { zkUrl = "localhost:" + zkPort ; startLocalZK ( ) ; } else { URI zkUri = new URI ( "temp://" + zkUrl ) ; zkHost = zkUri . getHost ( ) ; zkPort = zkUri . getPort ( ) ; } if ( isInProcHDFS ) { startLocalHDFS ( ) ; hdfsUrl = String . format ( "hdfs://localhost:%d/" , localHdfs . getNameNodePort ( ) ) ; } cleanUpZK ( ) ; if ( isInProcController ) { startLocalControllers ( ) ; } if ( isInProcSegmentStore ) { nodeServiceStarter = new ServiceStarter [ segmentStoreCount ] ; startLocalSegmentStores ( ) ; } }
Kicks off the cluster creation . right now it can be done only once in lifetime of a process .
23,015
private void startLocalSegmentStore ( int segmentStoreId ) throws Exception { Properties authProps = new Properties ( ) ; authProps . setProperty ( "pravega.client.auth.method" , "Default" ) ; authProps . setProperty ( "pravega.client.auth.userName" , "arvind" ) ; authProps . setProperty ( "pravega.client.auth.password" , "1111_aaaa" ) ; ServiceBuilderConfig . Builder configBuilder = ServiceBuilderConfig . builder ( ) . include ( System . getProperties ( ) ) . include ( authProps ) . include ( ServiceConfig . builder ( ) . with ( ServiceConfig . CONTAINER_COUNT , containerCount ) . with ( ServiceConfig . THREAD_POOL_SIZE , THREADPOOL_SIZE ) . with ( ServiceConfig . ZK_URL , "localhost:" + zkPort ) . with ( ServiceConfig . SECURE_ZK , this . secureZK ) . with ( ServiceConfig . ZK_TRUSTSTORE_LOCATION , jksTrustFile ) . with ( ServiceConfig . ZK_TRUST_STORE_PASSWORD_PATH , keyPasswordFile ) . with ( ServiceConfig . LISTENING_PORT , this . segmentStorePorts [ segmentStoreId ] ) . with ( ServiceConfig . CLUSTER_NAME , this . clusterName ) . with ( ServiceConfig . ENABLE_TLS , this . enableTls ) . with ( ServiceConfig . KEY_FILE , this . keyFile ) . with ( ServiceConfig . CERT_FILE , this . certFile ) . with ( ServiceConfig . CACHE_POLICY_MAX_TIME , 60 ) . with ( ServiceConfig . CACHE_POLICY_MAX_SIZE , 128 * 1024 * 1024L ) . with ( ServiceConfig . DATALOG_IMPLEMENTATION , isInMemStorage ? ServiceConfig . DataLogType . INMEMORY : ServiceConfig . DataLogType . BOOKKEEPER ) . with ( ServiceConfig . STORAGE_IMPLEMENTATION , isInMemStorage ? ServiceConfig . StorageType . INMEMORY : ServiceConfig . StorageType . FILESYSTEM ) ) . include ( DurableLogConfig . builder ( ) . with ( DurableLogConfig . CHECKPOINT_COMMIT_COUNT , 100 ) . with ( DurableLogConfig . CHECKPOINT_MIN_COMMIT_COUNT , 100 ) . with ( DurableLogConfig . CHECKPOINT_TOTAL_COMMIT_LENGTH , 100 * 1024 * 1024L ) ) . include ( AutoScalerConfig . builder ( ) . with ( AutoScalerConfig . CONTROLLER_URI , ( this . enableTls ? "tls" : "tcp" ) + "://localhost:" + controllerPorts [ 0 ] ) . with ( AutoScalerConfig . TOKEN_SIGNING_KEY , "secret" ) . with ( AutoScalerConfig . AUTH_ENABLED , this . enableAuth ) . with ( AutoScalerConfig . TLS_ENABLED , this . enableTls ) . with ( AutoScalerConfig . TLS_CERT_FILE , this . certFile ) . with ( AutoScalerConfig . VALIDATE_HOSTNAME , false ) ) . include ( MetricsConfig . builder ( ) . with ( MetricsConfig . ENABLE_STATISTICS , enableMetrics ) ) ; nodeServiceStarter [ segmentStoreId ] = new ServiceStarter ( configBuilder . build ( ) ) ; nodeServiceStarter [ segmentStoreId ] . start ( ) ; }
Starts a SegmentStore with the given id . This is re - entrant . Eventually this will allow starting and stopping of individual SegmentStore instances . This is not possible right now .
23,016
int commit ( long upToTransactionId ) { long traceId = LoggerHelpers . traceEnterWithContext ( log , this . traceObjectId , "commit" , upToTransactionId ) ; List < Long > commits = new ArrayList < > ( ) ; while ( ! this . transactions . isEmpty ( ) && this . transactions . peekFirst ( ) . getTransactionId ( ) <= upToTransactionId ) { ContainerMetadataUpdateTransaction txn = this . transactions . removeFirst ( ) ; txn . seal ( ) ; txn . commit ( this . metadata ) ; commits . add ( txn . getTransactionId ( ) ) ; } if ( commits . size ( ) > 0 && ! this . transactions . isEmpty ( ) ) { this . transactions . peekFirst ( ) . rebase ( this . metadata ) ; } LoggerHelpers . traceLeave ( log , this . traceObjectId , "commit" , traceId , commits ) ; return commits . size ( ) ; }
Commits all outstanding changes to the base Container Metadata up to and including the one for the given UpdateTransaction .
23,017
void rollback ( long fromTransactionId ) { long traceId = LoggerHelpers . traceEnterWithContext ( log , this . traceObjectId , "rollback" , fromTransactionId ) ; List < Long > rolledBack = new ArrayList < > ( ) ; while ( ! this . transactions . isEmpty ( ) && this . transactions . peekLast ( ) . getTransactionId ( ) >= fromTransactionId ) { ContainerMetadataUpdateTransaction txn = this . transactions . removeLast ( ) ; txn . seal ( ) ; rolledBack . add ( txn . getTransactionId ( ) ) ; } LoggerHelpers . traceLeave ( log , this . traceObjectId , "rollback" , traceId , rolledBack ) ; }
Discards any outstanding changes starting at the given UpdateTransaction forward .
23,018
public CompletableFuture < Void > process ( Operation operation ) { CompletableFuture < Void > result = new CompletableFuture < > ( ) ; if ( ! isRunning ( ) ) { result . completeExceptionally ( new IllegalContainerStateException ( "OperationProcessor is not running." ) ) ; } else { log . debug ( "{}: process {}." , this . traceObjectId , operation ) ; try { this . operationQueue . add ( new CompletableOperation ( operation , result ) ) ; } catch ( Throwable e ) { if ( Exceptions . mustRethrow ( e ) ) { throw e ; } result . completeExceptionally ( e ) ; } } return result ; }
Processes the given Operation . This method returns when the given Operation has been added to the internal queue .
23,019
private CompletableFuture < Void > throttle ( ) { val delay = new AtomicReference < ThrottlerCalculator . DelayResult > ( this . throttlerCalculator . getThrottlingDelay ( ) ) ; if ( ! delay . get ( ) . isMaximum ( ) ) { return throttleOnce ( delay . get ( ) . getDurationMillis ( ) , delay . get ( ) . isMaximum ( ) ) ; } else { return Futures . loop ( ( ) -> ! delay . get ( ) . isMaximum ( ) , ( ) -> throttleOnce ( delay . get ( ) . getDurationMillis ( ) , delay . get ( ) . isMaximum ( ) ) . thenRun ( ( ) -> delay . set ( this . throttlerCalculator . getThrottlingDelay ( ) ) ) , this . executor ) ; } }
region Queue Processing
23,020
private void closeQueue ( Throwable causingException ) { Collection < CompletableOperation > remainingOperations = this . operationQueue . close ( ) ; if ( remainingOperations != null && remainingOperations . size ( ) > 0 ) { Throwable failException = causingException != null ? causingException : new CancellationException ( ) ; cancelIncompleteOperations ( remainingOperations , failException ) ; } this . commitQueue . cancelPendingTake ( ) ; }
Closes the Operation Queue and fails all Operations in it with the given exception .
23,021
private void cancelIncompleteOperations ( Iterable < CompletableOperation > operations , Throwable failException ) { assert failException != null : "no exception to set" ; int cancelCount = 0 ; for ( CompletableOperation o : operations ) { if ( ! o . isDone ( ) ) { this . state . failOperation ( o , failException ) ; cancelCount ++ ; } } log . warn ( "{}: Cancelling {} operations with exception: {}." , this . traceObjectId , cancelCount , failException . toString ( ) ) ; }
Cancels those Operations in the given list that have not yet completed with the given exception .
23,022
protected CuratorFramework createZKClient ( ) { val serviceConfig = getServiceConfig ( ) ; CuratorFramework zkClient = CuratorFrameworkFactory . builder ( ) . connectString ( serviceConfig . getZkURL ( ) ) . namespace ( "pravega/" + serviceConfig . getClusterName ( ) ) . retryPolicy ( new ExponentialBackoffRetry ( serviceConfig . getZkRetrySleepMs ( ) , serviceConfig . getZkRetryCount ( ) ) ) . sessionTimeoutMs ( serviceConfig . getZkSessionTimeoutMs ( ) ) . build ( ) ; zkClient . start ( ) ; return zkClient ; }
Creates a new instance of the CuratorFramework class using configuration from the shared AdminCommandState .
23,023
public void takeLeadership ( CuratorFramework client ) throws Exception { log . info ( "Obtained leadership to monitor the Host to Segment Container Mapping" ) ; hostsChange . release ( ) ; pravegaServiceCluster = new ClusterZKImpl ( client , ClusterType . HOST ) ; pravegaServiceCluster . addListener ( ( type , host ) -> { switch ( type ) { case HOST_ADDED : case HOST_REMOVED : log . info ( "Received event: {} for host: {}. Wake up leader for rebalancing" , type , host ) ; hostsChange . release ( ) ; break ; case ERROR : log . info ( "Received error event when monitoring the pravega host cluster, ignoring..." ) ; break ; } } ) ; while ( true ) { try { if ( suspended . get ( ) ) { log . info ( "Monitor is suspended, waiting for notification to resume" ) ; suspendMonitor . acquire ( ) ; log . info ( "Resuming monitor" ) ; } hostsChange . acquire ( ) ; log . info ( "Received rebalance event" ) ; waitForRebalance ( ) ; hostsChange . drainPermits ( ) ; triggerRebalance ( ) ; } catch ( InterruptedException e ) { log . warn ( "Leadership interrupted, releasing monitor thread" ) ; pravegaServiceCluster . close ( ) ; throw e ; } catch ( Exception e ) { if ( ! suspended . get ( ) ) { log . warn ( "Failed to perform rebalancing, relinquishing leadership" ) ; pravegaServiceCluster . close ( ) ; throw e ; } } } }
This function is called when the current instance is made the leader . The leadership is relinquished when this function exits .
23,024
private void performNextWrite ( ) { Append append = getNextAppend ( ) ; if ( append == null ) { return ; } long traceId = LoggerHelpers . traceEnter ( log , "storeAppend" , append ) ; Timer timer = new Timer ( ) ; storeAppend ( append ) . whenComplete ( ( v , e ) -> { handleAppendResult ( append , e , timer ) ; LoggerHelpers . traceLeave ( log , "storeAppend" , traceId , v , e ) ; } ) . whenComplete ( ( v , e ) -> append . getData ( ) . release ( ) ) ; }
If there isn t already an append outstanding against the store write a new one . Appends are opportunistically batched here . i . e . If many are waiting they are combined into a single append and that is written .
23,025
private void pauseOrResumeReading ( ) { int bytesWaiting ; synchronized ( lock ) { bytesWaiting = waitingAppends . values ( ) . stream ( ) . mapToInt ( a -> a . getData ( ) . readableBytes ( ) ) . sum ( ) ; } if ( bytesWaiting > HIGH_WATER_MARK ) { log . debug ( "Pausing writing from connection {}" , connection ) ; connection . pauseReading ( ) ; } if ( bytesWaiting < LOW_WATER_MARK ) { log . trace ( "Resuming writing from connection {}" , connection ) ; connection . resumeReading ( ) ; } }
If there is too much data waiting throttle the producer by stopping consumption from the socket . If there is room for more data we resume consuming from the socket .
23,026
public void append ( Append append ) { log . trace ( "Processing append received from client {}" , append ) ; UUID id = append . getWriterId ( ) ; synchronized ( lock ) { Long lastEventNumber = latestEventNumbers . get ( Pair . of ( append . getSegment ( ) , id ) ) ; Preconditions . checkState ( lastEventNumber != null , "Data from unexpected connection: %s." , id ) ; Preconditions . checkState ( append . getEventNumber ( ) >= lastEventNumber , "Event was already appended." ) ; waitingAppends . put ( id , append ) ; } pauseOrResumeReading ( ) ; performNextWrite ( ) ; }
Append data to the store . Because ordering dictates that there only be one outstanding append from a given connection this is implemented by adding the append to a queue .
23,027
public String get ( Property < String > property ) throws ConfigurationException { return tryGet ( property , s -> s ) ; }
Gets the value of a String property .
23,028
public < T extends Enum < T > > T getEnum ( Property < T > property , Class < T > enumClass ) throws ConfigurationException { return tryGet ( property , s -> Enum . valueOf ( enumClass , s ) ) ; }
Gets the value of an Enumeration property .
23,029
public ClientConnection createFlow ( final Flow flow , final ReplyProcessor rp ) { Exceptions . checkNotClosed ( closed . get ( ) , this ) ; Preconditions . checkState ( ! disableFlow . get ( ) , "Ensure flows are enabled." ) ; log . info ( "Creating Flow {} for endpoint {}. The current Channel is {}." , flow . getFlowId ( ) , connectionName , channel . get ( ) ) ; if ( flowIdReplyProcessorMap . put ( flow . getFlowId ( ) , rp ) != null ) { throw new IllegalArgumentException ( "Multiple flows cannot be created with the same Flow id " + flow . getFlowId ( ) ) ; } return new ClientConnectionImpl ( connectionName , flow . getFlowId ( ) , batchSizeTracker , this ) ; }
Create a flow on existing connection .
23,030
public void closeFlow ( ClientConnection clientConnection ) { final ClientConnectionImpl clientConnectionImpl = ( ClientConnectionImpl ) clientConnection ; int flow = clientConnectionImpl . getFlowId ( ) ; log . info ( "Closing Flow {} for endpoint {}" , flow , clientConnectionImpl . getConnectionName ( ) ) ; flowIdReplyProcessorMap . remove ( flow ) ; }
Close a flow . This is invoked when the ClientConnection is closed .
23,031
void completeWhenRegistered ( final CompletableFuture < Void > future ) { Preconditions . checkNotNull ( future , "future" ) ; registeredFutureLatch . register ( future ) ; }
This function completes the input future when the channel is registered .
23,032
synchronized PageWrapper getRootPage ( ) { return this . pageByOffset . values ( ) . stream ( ) . filter ( page -> page . getParent ( ) == null ) . findFirst ( ) . orElse ( null ) ; }
Gets a pointer to the Root Page .
23,033
void initializeReader ( long initialAllocationDelay ) { boolean alreadyAdded = sync . updateState ( ( state , updates ) -> { if ( state . getSegments ( readerId ) == null ) { log . debug ( "Adding reader {} to reader grop. CurrentState is: {}" , readerId , state ) ; updates . add ( new AddReader ( readerId ) ) ; return false ; } else { return true ; } } ) ; if ( alreadyAdded ) { throw new IllegalStateException ( "The requested reader: " + readerId + " cannot be added to the group because it is already in the group. Perhaps close() was not called?" ) ; } long randomDelay = ( long ) ( Math . random ( ) * Math . min ( initialAllocationDelay , sync . getState ( ) . getConfig ( ) . getGroupRefreshTimeMillis ( ) ) ) ; acquireTimer . reset ( Duration . ofMillis ( initialAllocationDelay + randomDelay ) ) ; }
Add this reader to the reader group so that it is able to acquire segments
23,034
static void readerShutdown ( String readerId , Position lastPosition , StateSynchronizer < ReaderGroupState > sync ) { sync . updateState ( ( state , updates ) -> { Set < Segment > segments = state . getSegments ( readerId ) ; if ( segments == null ) { return ; } log . debug ( "Removing reader {} from reader grop. CurrentState is: {}" , readerId , state ) ; updates . add ( new RemoveReader ( readerId , lastPosition == null ? Collections . emptyMap ( ) : lastPosition . asImpl ( ) . getOwnedSegmentsWithOffsets ( ) ) ) ; } ) ; }
Shuts down a reader releasing all of its segments . The reader should cease all operations .
23,035
boolean handleEndOfSegment ( Segment segmentCompleted ) throws ReaderNotInReaderGroupException { final Map < Segment , List < Long > > segmentToPredecessor ; if ( sync . getState ( ) . getEndSegments ( ) . containsKey ( segmentCompleted ) ) { segmentToPredecessor = Collections . emptyMap ( ) ; } else { val successors = getAndHandleExceptions ( controller . getSuccessors ( segmentCompleted ) , RuntimeException :: new ) ; segmentToPredecessor = successors . getSegmentToPredecessor ( ) ; } AtomicBoolean reinitRequired = new AtomicBoolean ( false ) ; boolean result = sync . updateState ( ( state , updates ) -> { if ( ! state . isReaderOnline ( readerId ) ) { reinitRequired . set ( true ) ; } else { log . debug ( "Marking segment {} as completed in reader group. CurrentState is: {}" , segmentCompleted , state ) ; reinitRequired . set ( false ) ; if ( state . getCheckpointForReader ( readerId ) == null ) { updates . add ( new SegmentCompleted ( readerId , segmentCompleted , segmentToPredecessor ) ) ; return true ; } } return false ; } ) ; if ( reinitRequired . get ( ) ) { throw new ReaderNotInReaderGroupException ( readerId ) ; } acquireTimer . zero ( ) ; return result ; }
Handles a segment being completed by calling the controller to gather all successors to the completed segment . To ensure consistent checkpoints a segment cannot be released while a checkpoint for the reader is pending so it may or may not succeed .
23,036
Segment findSegmentToReleaseIfRequired ( ) { fetchUpdatesIfNeeded ( ) ; Segment segment = null ; synchronized ( decisionLock ) { if ( ! releaseTimer . hasRemaining ( ) && sync . getState ( ) . getCheckpointForReader ( readerId ) == null && doesReaderOwnTooManySegments ( sync . getState ( ) ) ) { segment = findSegmentToRelease ( ) ; if ( segment != null ) { releaseTimer . reset ( UPDATE_WINDOW ) ; acquireTimer . reset ( UPDATE_WINDOW ) ; } } } return segment ; }
If a segment should be released because the distribution of segments is imbalanced and this reader has not done so in a while this returns the segment that should be released .
23,037
private boolean doesReaderOwnTooManySegments ( ReaderGroupState state ) { Map < String , Double > sizesOfAssignemnts = state . getRelativeSizes ( ) ; Set < Segment > assignedSegments = state . getSegments ( readerId ) ; if ( sizesOfAssignemnts . isEmpty ( ) || assignedSegments == null || assignedSegments . size ( ) <= 1 ) { return false ; } double min = sizesOfAssignemnts . values ( ) . stream ( ) . min ( Double :: compareTo ) . get ( ) ; return sizesOfAssignemnts . get ( readerId ) > min + Math . max ( 1 , state . getNumberOfUnassignedSegments ( ) ) ; }
Returns true if this reader owns multiple segments and has more than a full segment more than the reader with the least assigned to it .
23,038
private Segment findSegmentToRelease ( ) { Set < Segment > segments = sync . getState ( ) . getSegments ( readerId ) ; return segments . stream ( ) . max ( ( s1 , s2 ) -> Double . compare ( hashHelper . hashToRange ( s1 . getScopedName ( ) ) , hashHelper . hashToRange ( s2 . getScopedName ( ) ) ) ) . orElse ( null ) ; }
Given a set of segments returns one to release . The one returned is arbitrary .
23,039
long getEndOffsetForSegment ( Segment segment ) { return Optional . ofNullable ( sync . getState ( ) . getEndSegments ( ) . get ( segment ) ) . orElse ( Long . MAX_VALUE ) ; }
Fetch the configured end offset for a configured segment . If end offset is not configured return Long . MAX_VALUE .
23,040
boolean releaseSegment ( Segment segment , long lastOffset , long timeLag ) throws ReaderNotInReaderGroupException { sync . updateState ( ( state , updates ) -> { Set < Segment > segments = state . getSegments ( readerId ) ; if ( segments != null && segments . contains ( segment ) && state . getCheckpointForReader ( readerId ) == null && doesReaderOwnTooManySegments ( state ) ) { updates . add ( new ReleaseSegment ( readerId , segment , lastOffset ) ) ; updates . add ( new UpdateDistanceToTail ( readerId , timeLag ) ) ; } } ) ; ReaderGroupState state = sync . getState ( ) ; releaseTimer . reset ( calculateReleaseTime ( readerId , state ) ) ; acquireTimer . reset ( calculateAcquireTime ( readerId , state ) ) ; if ( ! state . isReaderOnline ( readerId ) ) { throw new ReaderNotInReaderGroupException ( readerId ) ; } return ! state . getSegments ( readerId ) . contains ( segment ) ; }
Releases a segment to another reader . This reader should no longer read from the segment .
23,041
Map < Segment , Long > acquireNewSegmentsIfNeeded ( long timeLag ) throws ReaderNotInReaderGroupException { fetchUpdatesIfNeeded ( ) ; if ( shouldAcquireSegment ( ) ) { return acquireSegment ( timeLag ) ; } else { return Collections . emptyMap ( ) ; } }
If there are unassigned segments and this host has not acquired one in a while acquires them .
23,042
public static CompletableFuture < Void > startAsync ( Service service , Executor executor ) { Preconditions . checkState ( service . state ( ) == Service . State . NEW , "Service expected to be %s but was %s." , Service . State . NEW , service . state ( ) ) ; Preconditions . checkNotNull ( executor , "executor" ) ; CompletableFuture < Void > result = new CompletableFuture < > ( ) ; service . addListener ( new StartupListener ( result ) , executor ) ; service . startAsync ( ) ; return result ; }
Asynchronously starts a Service and returns a CompletableFuture that will indicate when it is running .
23,043
public static CompletableFuture < Void > stopAsync ( Service service , Executor executor ) { CompletableFuture < Void > result = new CompletableFuture < > ( ) ; onStop ( service , ( ) -> result . complete ( null ) , result :: completeExceptionally , executor ) ; service . stopAsync ( ) ; return result ; }
Asynchronously stops a Service and returns a CompletableFuture that will indicate when it is stopped .
23,044
public static void onStop ( Service service , Runnable terminatedCallback , Consumer < Throwable > failureCallback , Executor executor ) { ShutdownListener listener = new ShutdownListener ( terminatedCallback , failureCallback ) ; service . addListener ( listener , executor ) ; Service . State state = service . state ( ) ; if ( state == Service . State . FAILED ) { listener . failed ( Service . State . FAILED , service . failureCause ( ) ) ; } else if ( state == Service . State . TERMINATED ) { listener . terminated ( Service . State . TERMINATED ) ; } }
Attaches the given callbacks which will be invoked when the given Service enters a TERMINATED or FAILED state . The callbacks are optional and may be invoked synchronously if the Service is already in one of these states .
23,045
public static boolean isTerminating ( Service . State state ) { return state == Service . State . STOPPING || state == Service . State . TERMINATED || state == Service . State . FAILED ; }
Determines whether the given Service . State indicates the Service is either in the process of Stopping or already Terminated or Failed .
23,046
public void createTransaction ( String scope , String streamName , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( CREATE_TRANSACTION ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( CREATE_TRANSACTION , 1 , streamTags ( scope , streamName ) ) ; createTransactionLatency . reportSuccessValue ( latency . toMillis ( ) ) ; }
This method increments the global and Stream - related counters of created Transactions and reports the latency of the operation .
23,047
public void createTransactionFailed ( String scope , String streamName ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( CREATE_TRANSACTION_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( CREATE_TRANSACTION_FAILED , 1 , streamTags ( scope , streamName ) ) ; }
This method increments the global and Stream - related counters of failed Transaction create operations .
23,048
public void commitTransaction ( String scope , String streamName , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( COMMIT_TRANSACTION ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( COMMIT_TRANSACTION , 1 , streamTags ( scope , streamName ) ) ; commitTransactionLatency . reportSuccessValue ( latency . toMillis ( ) ) ; }
This method increments the global and Stream - related counters of committed Transactions and reports the latency of the operation .
23,049
public void commitTransactionFailed ( String scope , String streamName , String txnId ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( COMMIT_TRANSACTION_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( COMMIT_TRANSACTION_FAILED , 1 , streamTags ( scope , streamName ) ) ; DYNAMIC_LOGGER . incCounterValue ( COMMIT_TRANSACTION_FAILED , 1 , transactionTags ( scope , streamName , txnId ) ) ; }
This method increments the global Stream - related and Transaction - related counters of failed commit operations .
23,050
public void abortTransaction ( String scope , String streamName , Duration latency ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( ABORT_TRANSACTION ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( ABORT_TRANSACTION , 1 , streamTags ( scope , streamName ) ) ; abortTransactionLatency . reportSuccessValue ( latency . toMillis ( ) ) ; }
This method increments the global and Stream - related counters of aborted Transactions and reports the latency of the operation .
23,051
public void abortTransactionFailed ( String scope , String streamName , String txnId ) { DYNAMIC_LOGGER . incCounterValue ( globalMetricName ( ABORT_TRANSACTION_FAILED ) , 1 ) ; DYNAMIC_LOGGER . incCounterValue ( ABORT_TRANSACTION_FAILED , 1 , streamTags ( scope , streamName ) ) ; DYNAMIC_LOGGER . incCounterValue ( ABORT_TRANSACTION_FAILED , 1 , transactionTags ( scope , streamName , txnId ) ) ; }
This method increments the global Stream - related and Transaction - related counters of failed abort operations .
23,052
public static void reportOpenTransactions ( String scope , String streamName , int ongoingTransactions ) { DYNAMIC_LOGGER . reportGaugeValue ( OPEN_TRANSACTIONS , ongoingTransactions , streamTags ( scope , streamName ) ) ; }
This method reports the current number of open Transactions for a Stream .
23,053
public Principal authenticate ( List < String > authHeader ) throws AuthException { if ( isAuthEnabled ( ) ) { String credentials = parseCredentials ( authHeader ) ; return pravegaAuthManager . authenticate ( credentials ) ; } return null ; }
Authenticates the subject represented by the specified HTTP Authorization Header value .
23,054
public int getNumberOfSegments ( ) { return assignedSegments . values ( ) . stream ( ) . mapToInt ( Map :: size ) . sum ( ) + unassignedSegments . size ( ) ; }
Returns the number of segments currently being read from and that are unassigned within the reader group .
23,055
public boolean isEndOfData ( ) { return futureSegments . isEmpty ( ) && unassignedSegments . isEmpty ( ) && assignedSegments . values ( ) . stream ( ) . allMatch ( Map :: isEmpty ) ; }
This functions returns true if the readers part of reader group for sealed streams have completely read the data .
23,056
protected void executionComplete ( Throwable exception ) { Collection < QueueItem > toFail = null ; Throwable failEx = null ; synchronized ( this . stateLock ) { this . activeCount -- ; if ( exception != null && ! this . closed ) { failEx = new ProcessingException ( "A previous item failed to commit. Cannot process new items." , exception ) ; toFail = new ArrayList < > ( this . pendingItems ) ; this . pendingItems . clear ( ) ; this . closed = true ; } if ( this . emptyNotifier != null && this . activeCount == 0 && this . pendingItems . isEmpty ( ) ) { this . emptyNotifier . release ( ) ; this . emptyNotifier = null ; } } if ( toFail != null ) { for ( QueueItem q : toFail ) { q . result . completeExceptionally ( failEx ) ; } return ; } synchronized ( this . processingLock ) { while ( true ) { QueueItem toProcess ; synchronized ( this . stateLock ) { if ( hasCapacity ( ) && ! this . pendingItems . isEmpty ( ) ) { toProcess = this . pendingItems . pollFirst ( ) ; this . activeCount ++ ; } else { break ; } } Futures . completeAfter ( ( ) -> processInternal ( toProcess . data ) , toProcess . result ) ; } } }
Callback that is invoked when an item has completed execution .
23,057
protected < T > CompletableFuture < T > invoke ( String streamSegmentName , Function < SegmentContainer , CompletableFuture < T > > toInvoke , String methodName , Object ... logArgs ) { long traceId = LoggerHelpers . traceEnter ( log , methodName , logArgs ) ; SegmentContainer container ; try { int containerId = this . segmentToContainerMapper . getContainerId ( streamSegmentName ) ; container = this . segmentContainerRegistry . getContainer ( containerId ) ; } catch ( ContainerNotFoundException ex ) { return Futures . failedFuture ( ex ) ; } CompletableFuture < T > resultFuture = toInvoke . apply ( container ) ; if ( log . isTraceEnabled ( ) ) { resultFuture . thenAccept ( r -> LoggerHelpers . traceLeave ( log , methodName , traceId , r ) ) ; } return resultFuture ; }
Executes the given Function on the SegmentContainer that the given Segment maps to .
23,058
CompletableFuture < Void > createSegment ( String segmentName , Collection < AttributeUpdate > attributes , Duration timeout ) { long traceId = LoggerHelpers . traceEnterWithContext ( log , traceObjectId , "createSegment" , segmentName ) ; long segmentId = this . connector . containerMetadata . getStreamSegmentId ( segmentName , true ) ; if ( isValidSegmentId ( segmentId ) ) { return Futures . failedFuture ( new StreamSegmentExistsException ( segmentName ) ) ; } ArrayView segmentInfo = SegmentInfo . serialize ( SegmentInfo . newSegment ( segmentName , attributes ) ) ; CompletableFuture < Void > result = createSegment ( segmentName , segmentInfo , new TimeoutTimer ( timeout ) ) ; if ( log . isTraceEnabled ( ) ) { result . thenAccept ( v -> LoggerHelpers . traceLeave ( log , traceObjectId , "createSegment" , traceId , segmentName ) ) ; } return result ; }
Creates a new Segment with given name .
23,059
CompletableFuture < SegmentProperties > getSegmentInfo ( String segmentName , Duration timeout ) { long streamSegmentId = this . connector . containerMetadata . getStreamSegmentId ( segmentName , true ) ; CompletableFuture < SegmentProperties > result ; if ( isValidSegmentId ( streamSegmentId ) ) { SegmentMetadata sm = this . connector . containerMetadata . getStreamSegmentMetadata ( streamSegmentId ) ; if ( sm . isDeleted ( ) || sm . isMerged ( ) ) { result = Futures . failedFuture ( new StreamSegmentNotExistsException ( segmentName ) ) ; } else { result = CompletableFuture . completedFuture ( sm . getSnapshot ( ) ) ; } } else { QueuedCallback < SegmentProperties > queuedCallback = checkConcurrentAssignment ( segmentName , id -> CompletableFuture . completedFuture ( this . connector . containerMetadata . getStreamSegmentMetadata ( id ) . getSnapshot ( ) ) ) ; if ( queuedCallback != null ) { result = queuedCallback . result ; } else { result = getSegmentInfoInternal ( segmentName , timeout ) . thenApply ( rawData -> SegmentInfo . deserialize ( rawData ) . getProperties ( ) ) ; } } return result ; }
Gets information about a Segment . If the Segment is active it returns this information directly from the in - memory Metadata . If the Segment is not active it fetches the information from Storage and returns it without activating the segment in the Metadata or otherwise touching the DurableLog .
23,060
CompletableFuture < Void > updateSegmentInfo ( SegmentMetadata segmentMetadata , Duration timeout ) { if ( segmentMetadata . isMerged ( ) ) { return Futures . failedFuture ( new StreamSegmentMergedException ( segmentMetadata . getName ( ) ) ) ; } else if ( segmentMetadata . isDeleted ( ) ) { return Futures . failedFuture ( new StreamSegmentNotExistsException ( segmentMetadata . getName ( ) ) ) ; } ArrayView toWrite = SegmentInfo . serialize ( new SegmentInfo ( segmentMetadata . getId ( ) , segmentMetadata . getSnapshot ( ) ) ) ; return updateSegmentInfo ( segmentMetadata . getName ( ) , toWrite , timeout ) ; }
Updates information about a Segment .
23,061
private void assignSegmentId ( String segmentName , Duration timeout ) { TimeoutTimer timer = new TimeoutTimer ( timeout ) ; Futures . exceptionListener ( getSegmentInfoInternal ( segmentName , timer . getRemaining ( ) ) . thenComposeAsync ( si -> submitAssignmentWithRetry ( SegmentInfo . deserialize ( si ) , timer . getRemaining ( ) ) , this . executor ) , ex -> failAssignment ( segmentName , ex ) ) ; }
Attempts to map a Segment to an Id by first trying to retrieve an existing id and should that not exist assign a new one .
23,062
private CompletableFuture < Long > submitAssignmentWithRetry ( SegmentInfo segmentInfo , Duration timeout ) { return retryWithCleanup ( ( ) -> submitAssignment ( segmentInfo , false , timeout ) ) ; }
Same as submitAssignment but retries exactly once in case TooManyActiveSegmentsException was encountered in which case it forces a metadata cleanup before retrying . If the second attempt also fails there will be no more retry and the Exception from the second failure will be the one that this call fails with too .
23,063
private long completeAssignment ( String streamSegmentName , long streamSegmentId ) { assert streamSegmentId != ContainerMetadata . NO_STREAM_SEGMENT_ID : "no valid streamSegmentId given" ; finishPendingRequests ( streamSegmentName , PendingRequest :: complete , streamSegmentId ) ; return streamSegmentId ; }
Completes the assignment for the given StreamSegmentName by completing the waiting CompletableFuture .
23,064
private void failAssignment ( String streamSegmentName , Throwable reason ) { finishPendingRequests ( streamSegmentName , PendingRequest :: completeExceptionally , reason ) ; }
Fails the assignment for the given StreamSegment Id with the given reason .
23,065
private < T > QueuedCallback < T > checkConcurrentAssignment ( String segmentName , Function < Long , CompletableFuture < T > > thenCompose ) { QueuedCallback < T > queuedCallback = null ; synchronized ( this . pendingRequests ) { PendingRequest pendingRequest = this . pendingRequests . getOrDefault ( segmentName , null ) ; if ( pendingRequest != null ) { queuedCallback = new QueuedCallback < > ( thenCompose ) ; pendingRequest . callbacks . add ( queuedCallback ) ; } } return queuedCallback ; }
Attempts to piggyback a task on any existing concurrent assignment if any such assignment exists .
23,066
private < T > CompletableFuture < T > retryWithCleanup ( Supplier < CompletableFuture < T > > toTry ) { CompletableFuture < T > result = new CompletableFuture < > ( ) ; toTry . get ( ) . thenAccept ( result :: complete ) . exceptionally ( ex -> { try { if ( Exceptions . unwrap ( ex ) instanceof TooManyActiveSegmentsException ) { log . debug ( "{}: Forcing metadata cleanup due to capacity exceeded ({})." , this . traceObjectId , Exceptions . unwrap ( ex ) . getMessage ( ) ) ; CompletableFuture < T > f = this . connector . getMetadataCleanup ( ) . get ( ) . thenComposeAsync ( v -> toTry . get ( ) , this . executor ) ; f . thenAccept ( result :: complete ) ; Futures . exceptionListener ( f , result :: completeExceptionally ) ; } else { result . completeExceptionally ( ex ) ; } } catch ( Throwable t ) { result . completeExceptionally ( t ) ; throw t ; } return null ; } ) ; return result ; }
Retries Future from the given supplier exactly once if encountering TooManyActiveSegmentsException .
23,067
public void createScope ( final CreateScopeRequest createScopeRequest , final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "createScope" ) ; try { NameUtils . validateUserScopeName ( createScopeRequest . getScopeName ( ) ) ; } catch ( IllegalArgumentException | NullPointerException e ) { log . warn ( "Create scope failed due to invalid scope name {}" , createScopeRequest . getScopeName ( ) ) ; asyncResponse . resume ( Response . status ( Status . BAD_REQUEST ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "createScope" , traceId ) ; return ; } try { restAuthHelper . authenticateAuthorize ( getAuthorizationHeader ( ) , AuthResourceRepresentation . ofScopes ( ) , READ_UPDATE ) ; } catch ( AuthException e ) { log . warn ( "Create scope for {} failed due to authentication failure {}." , createScopeRequest . getScopeName ( ) , e ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "createScope" , traceId ) ; return ; } controllerService . createScope ( createScopeRequest . getScopeName ( ) ) . thenApply ( scopeStatus -> { if ( scopeStatus . getStatus ( ) == CreateScopeStatus . Status . SUCCESS ) { log . info ( "Successfully created new scope: {}" , createScopeRequest . getScopeName ( ) ) ; return Response . status ( Status . CREATED ) . entity ( new ScopeProperty ( ) . scopeName ( createScopeRequest . getScopeName ( ) ) ) . build ( ) ; } else if ( scopeStatus . getStatus ( ) == CreateScopeStatus . Status . SCOPE_EXISTS ) { log . warn ( "Scope name: {} already exists" , createScopeRequest . getScopeName ( ) ) ; return Response . status ( Status . CONFLICT ) . build ( ) ; } else { log . warn ( "Failed to create scope: {}" , createScopeRequest . getScopeName ( ) ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . exceptionally ( exception -> { log . warn ( "createScope for scope: {} failed, exception: {}" , createScopeRequest . getScopeName ( ) , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "createScope" , traceId ) ) ; }
Implementation of createScope REST API .
23,068
public void deleteScope ( final String scopeName , final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "deleteScope" ) ; try { restAuthHelper . authenticateAuthorize ( getAuthorizationHeader ( ) , AuthResourceRepresentation . ofScopes ( ) , READ_UPDATE ) ; } catch ( AuthException e ) { log . warn ( "Delete scope for {} failed due to authentication failure." , scopeName ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "createStream" , traceId ) ; return ; } controllerService . deleteScope ( scopeName ) . thenApply ( scopeStatus -> { if ( scopeStatus . getStatus ( ) == DeleteScopeStatus . Status . SUCCESS ) { log . info ( "Successfully deleted scope: {}" , scopeName ) ; return Response . status ( Status . NO_CONTENT ) . build ( ) ; } else if ( scopeStatus . getStatus ( ) == DeleteScopeStatus . Status . SCOPE_NOT_FOUND ) { log . warn ( "Scope: {} not found" , scopeName ) ; return Response . status ( Status . NOT_FOUND ) . build ( ) ; } else if ( scopeStatus . getStatus ( ) == DeleteScopeStatus . Status . SCOPE_NOT_EMPTY ) { log . warn ( "Cannot delete scope: {} with non-empty streams" , scopeName ) ; return Response . status ( Status . PRECONDITION_FAILED ) . build ( ) ; } else { log . warn ( "deleteScope for {} failed" , scopeName ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . exceptionally ( exception -> { log . warn ( "deleteScope for {} failed with exception: {}" , scopeName , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "deleteScope" , traceId ) ) ; }
Implementation of deleteScope REST API .
23,069
public void deleteStream ( final String scopeName , final String streamName , final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "deleteStream" ) ; try { restAuthHelper . authenticateAuthorize ( getAuthorizationHeader ( ) , AuthResourceRepresentation . ofStreamInScope ( scopeName , streamName ) , READ_UPDATE ) ; } catch ( AuthException e ) { log . warn ( "Delete stream for {} failed due to authentication failure." , streamName ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "deleteStream" , traceId ) ; return ; } controllerService . deleteStream ( scopeName , streamName ) . thenApply ( deleteStreamStatus -> { if ( deleteStreamStatus . getStatus ( ) == DeleteStreamStatus . Status . SUCCESS ) { log . info ( "Successfully deleted stream: {}" , streamName ) ; return Response . status ( Status . NO_CONTENT ) . build ( ) ; } else if ( deleteStreamStatus . getStatus ( ) == DeleteStreamStatus . Status . STREAM_NOT_FOUND ) { log . warn ( "Scope: {}, Stream {} not found" , scopeName , streamName ) ; return Response . status ( Status . NOT_FOUND ) . build ( ) ; } else if ( deleteStreamStatus . getStatus ( ) == DeleteStreamStatus . Status . STREAM_NOT_SEALED ) { log . warn ( "Cannot delete unsealed stream: {}" , streamName ) ; return Response . status ( Status . PRECONDITION_FAILED ) . build ( ) ; } else { log . warn ( "deleteStream for {} failed" , streamName ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . exceptionally ( exception -> { log . warn ( "deleteStream for {} failed with exception: {}" , streamName , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "deleteStream" , traceId ) ) ; }
Implementation of deleteStream REST API .
23,070
public void getScope ( final String scopeName , final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "getScope" ) ; try { restAuthHelper . authenticateAuthorize ( getAuthorizationHeader ( ) , AuthResourceRepresentation . ofScope ( scopeName ) , READ ) ; } catch ( AuthException e ) { log . warn ( "Get scope for {} failed due to authentication failure." , scopeName ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "getScope" , traceId ) ; return ; } controllerService . getScope ( scopeName ) . thenApply ( scope -> { return Response . status ( Status . OK ) . entity ( new ScopeProperty ( ) . scopeName ( scope ) ) . build ( ) ; } ) . exceptionally ( exception -> { if ( exception . getCause ( ) instanceof StoreException . DataNotFoundException ) { log . warn ( "Scope: {} not found" , scopeName ) ; return Response . status ( Status . NOT_FOUND ) . build ( ) ; } else { log . warn ( "getScope for {} failed with exception: {}" , scopeName , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "getScope" , traceId ) ) ; }
Implementation of getScope REST API .
23,071
public void listScopes ( final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "listScopes" ) ; final Principal principal ; final List < String > authHeader = getAuthorizationHeader ( ) ; try { principal = restAuthHelper . authenticate ( authHeader ) ; restAuthHelper . authorize ( authHeader , AuthResourceRepresentation . ofScopes ( ) , principal , READ ) ; } catch ( AuthException e ) { log . warn ( "Get scopes failed due to authentication failure." , e ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "listScopes" , traceId ) ; return ; } controllerService . listScopes ( ) . thenApply ( scopesList -> { ScopesList scopes = new ScopesList ( ) ; scopesList . forEach ( scope -> { try { if ( restAuthHelper . isAuthorized ( authHeader , AuthResourceRepresentation . ofScope ( scope ) , principal , READ ) ) { scopes . addScopesItem ( new ScopeProperty ( ) . scopeName ( scope ) ) ; } } catch ( AuthException e ) { log . warn ( e . getMessage ( ) , e ) ; } } ) ; return Response . status ( Status . OK ) . entity ( scopes ) . build ( ) ; } ) . exceptionally ( exception -> { log . warn ( "listScopes failed with exception: " , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } ) . thenApply ( response -> { asyncResponse . resume ( response ) ; LoggerHelpers . traceLeave ( log , "listScopes" , traceId ) ; return response ; } ) ; }
Implementation of listScopes REST API .
23,072
public void listStreams ( final String scopeName , final String showInternalStreams , final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "listStreams" ) ; final Principal principal ; final List < String > authHeader = getAuthorizationHeader ( ) ; try { principal = restAuthHelper . authenticate ( authHeader ) ; restAuthHelper . authorize ( authHeader , AuthResourceRepresentation . ofStreamsInScope ( scopeName ) , principal , READ ) ; } catch ( AuthException e ) { log . warn ( "List streams for {} failed due to authentication failure." , scopeName ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "listStreams" , traceId ) ; return ; } boolean showOnlyInternalStreams = showInternalStreams != null && showInternalStreams . equals ( "true" ) ; controllerService . listStreamsInScope ( scopeName ) . thenApply ( streamsList -> { StreamsList streams = new StreamsList ( ) ; streamsList . forEach ( ( stream , config ) -> { try { if ( restAuthHelper . isAuthorized ( authHeader , AuthResourceRepresentation . ofStreamInScope ( scopeName , stream ) , principal , READ ) ) { if ( ! showOnlyInternalStreams ^ stream . startsWith ( INTERNAL_NAME_PREFIX ) ) { streams . addStreamsItem ( ModelHelper . encodeStreamResponse ( scopeName , stream , config ) ) ; } } } catch ( AuthException e ) { log . warn ( e . getMessage ( ) , e ) ; } } ) ; log . info ( "Successfully fetched streams for scope: {}" , scopeName ) ; return Response . status ( Status . OK ) . entity ( streams ) . build ( ) ; } ) . exceptionally ( exception -> { if ( exception . getCause ( ) instanceof StoreException . DataNotFoundException || exception instanceof StoreException . DataNotFoundException ) { log . warn ( "Scope name: {} not found" , scopeName ) ; return Response . status ( Status . NOT_FOUND ) . build ( ) ; } else { log . warn ( "listStreams for {} failed with exception: {}" , scopeName , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "listStreams" , traceId ) ) ; }
Implementation of listStreams REST API .
23,073
public void updateStream ( final String scopeName , final String streamName , final UpdateStreamRequest updateStreamRequest , final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "updateStream" ) ; try { restAuthHelper . authenticateAuthorize ( getAuthorizationHeader ( ) , AuthResourceRepresentation . ofStreamInScope ( scopeName , streamName ) , READ_UPDATE ) ; } catch ( AuthException e ) { log . warn ( "Update stream for {} failed due to authentication failure." , scopeName + "/" + streamName ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "Update stream" , traceId ) ; return ; } StreamConfiguration streamConfiguration = ModelHelper . getUpdateStreamConfig ( updateStreamRequest ) ; controllerService . updateStream ( scopeName , streamName , streamConfiguration ) . thenApply ( streamStatus -> { if ( streamStatus . getStatus ( ) == UpdateStreamStatus . Status . SUCCESS ) { log . info ( "Successfully updated stream config for: {}/{}" , scopeName , streamName ) ; return Response . status ( Status . OK ) . entity ( ModelHelper . encodeStreamResponse ( scopeName , streamName , streamConfiguration ) ) . build ( ) ; } else if ( streamStatus . getStatus ( ) == UpdateStreamStatus . Status . STREAM_NOT_FOUND || streamStatus . getStatus ( ) == UpdateStreamStatus . Status . SCOPE_NOT_FOUND ) { log . warn ( "Stream: {}/{} not found" , scopeName , streamName ) ; return Response . status ( Status . NOT_FOUND ) . build ( ) ; } else { log . warn ( "updateStream failed for {}/{}" , scopeName , streamName ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . exceptionally ( exception -> { log . warn ( "updateStream for {}/{} failed with exception: {}" , scopeName , streamName , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "updateStream" , traceId ) ) ; }
Implementation of updateStream REST API .
23,074
public void updateStreamState ( final String scopeName , final String streamName , final StreamState updateStreamStateRequest , SecurityContext securityContext , AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "updateStreamState" ) ; try { restAuthHelper . authenticateAuthorize ( getAuthorizationHeader ( ) , AuthResourceRepresentation . ofStreamInScope ( scopeName , streamName ) , READ_UPDATE ) ; } catch ( AuthException e ) { log . warn ( "Update stream for {} failed due to authentication failure." , scopeName + "/" + streamName ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "Update stream" , traceId ) ; return ; } if ( updateStreamStateRequest . getStreamState ( ) != StreamState . StreamStateEnum . SEALED ) { log . warn ( "Received invalid stream state: {} from client for stream {}/{}" , updateStreamStateRequest . getStreamState ( ) , scopeName , streamName ) ; asyncResponse . resume ( Response . status ( Status . BAD_REQUEST ) . build ( ) ) ; return ; } controllerService . sealStream ( scopeName , streamName ) . thenApply ( updateStreamStatus -> { if ( updateStreamStatus . getStatus ( ) == UpdateStreamStatus . Status . SUCCESS ) { log . info ( "Successfully sealed stream: {}" , streamName ) ; return Response . status ( Status . OK ) . entity ( updateStreamStateRequest ) . build ( ) ; } else if ( updateStreamStatus . getStatus ( ) == UpdateStreamStatus . Status . SCOPE_NOT_FOUND || updateStreamStatus . getStatus ( ) == UpdateStreamStatus . Status . STREAM_NOT_FOUND ) { log . warn ( "Scope: {} or Stream {} not found" , scopeName , streamName ) ; return Response . status ( Status . NOT_FOUND ) . build ( ) ; } else { log . warn ( "updateStreamState for {} failed" , streamName ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . exceptionally ( exception -> { log . warn ( "updateStreamState for {} failed with exception: {}" , streamName , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "updateStreamState" , traceId ) ) ; }
Implementation of updateStreamState REST API .
23,075
public void getScalingEvents ( final String scopeName , final String streamName , final Long from , final Long to , final SecurityContext securityContext , final AsyncResponse asyncResponse ) { long traceId = LoggerHelpers . traceEnter ( log , "getScalingEvents" ) ; try { restAuthHelper . authenticateAuthorize ( getAuthorizationHeader ( ) , AuthResourceRepresentation . ofStreamInScope ( scopeName , streamName ) , READ ) ; } catch ( AuthException e ) { log . warn ( "Get scaling events for {} failed due to authentication failure." , scopeName + "/" + streamName ) ; asyncResponse . resume ( Response . status ( Status . fromStatusCode ( e . getResponseCode ( ) ) ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "Get scaling events" , traceId ) ; return ; } if ( from < 0 || to < 0 || from > to ) { log . warn ( "Received invalid request from client for scopeName/streamName: {}/{} " , scopeName , streamName ) ; asyncResponse . resume ( Response . status ( Status . BAD_REQUEST ) . build ( ) ) ; LoggerHelpers . traceLeave ( log , "getScalingEvents" , traceId ) ; return ; } controllerService . getScaleRecords ( scopeName , streamName , from , to ) . thenApply ( listScaleMetadata -> { Iterator < ScaleMetadata > metadataIterator = listScaleMetadata . iterator ( ) ; List < ScaleMetadata > finalScaleMetadataList = new ArrayList < ScaleMetadata > ( ) ; ScaleMetadata referenceEvent = null ; while ( metadataIterator . hasNext ( ) ) { ScaleMetadata scaleMetadata = metadataIterator . next ( ) ; if ( scaleMetadata . getTimestamp ( ) >= from && scaleMetadata . getTimestamp ( ) <= to ) { finalScaleMetadataList . add ( scaleMetadata ) ; } else if ( ( scaleMetadata . getTimestamp ( ) < from ) && ! ( referenceEvent != null && referenceEvent . getTimestamp ( ) > scaleMetadata . getTimestamp ( ) ) ) { referenceEvent = scaleMetadata ; } } if ( referenceEvent != null ) { finalScaleMetadataList . add ( 0 , referenceEvent ) ; } log . info ( "Successfully fetched required scaling events for scope: {}, stream: {}" , scopeName , streamName ) ; return Response . status ( Status . OK ) . entity ( finalScaleMetadataList ) . build ( ) ; } ) . exceptionally ( exception -> { if ( exception . getCause ( ) instanceof StoreException . DataNotFoundException || exception instanceof StoreException . DataNotFoundException ) { log . warn ( "Stream/Scope name: {}/{} not found" , scopeName , streamName ) ; return Response . status ( Status . NOT_FOUND ) . build ( ) ; } else { log . warn ( "getScalingEvents for scopeName/streamName: {}/{} failed with exception " , scopeName , streamName , exception ) ; return Response . status ( Status . INTERNAL_SERVER_ERROR ) . build ( ) ; } } ) . thenApply ( asyncResponse :: resume ) . thenAccept ( x -> LoggerHelpers . traceLeave ( log , "getScalingEvents" , traceId ) ) ; }
Implementation of getScalingEvents REST API .
23,076
private void initialize ( ByteArraySegment footer , long footerOffset , long indexLength ) { if ( footer . getLength ( ) != FOOTER_LENGTH ) { throw new IllegalDataFormatException ( String . format ( "Wrong footer length. Expected %s, actual %s." , FOOTER_LENGTH , footer . getLength ( ) ) ) ; } long rootPageOffset = getRootPageOffset ( footer ) ; int rootPageLength = getRootPageLength ( footer ) ; if ( rootPageOffset + rootPageLength > footerOffset ) { throw new IllegalDataFormatException ( String . format ( "Wrong footer information. RootPage Offset (%s) + Length (%s) exceeds Footer Offset (%s)." , rootPageOffset , rootPageLength , footerOffset ) ) ; } setState ( indexLength , rootPageOffset , rootPageLength ) ; }
Initializes the BTreeIndex using information from the given footer .
23,077
private CompletableFuture < UpdateablePageCollection > applyUpdates ( Iterator < PageEntry > updates , TimeoutTimer timer ) { UpdateablePageCollection pageCollection = new UpdateablePageCollection ( this . state . get ( ) . length ) ; AtomicReference < PageWrapper > lastPage = new AtomicReference < > ( null ) ; val lastPageUpdates = new ArrayList < PageEntry > ( ) ; return Futures . loop ( updates :: hasNext , ( ) -> { PageEntry next = updates . next ( ) ; return locatePage ( next . getKey ( ) , pageCollection , timer ) . thenAccept ( page -> { PageWrapper last = lastPage . get ( ) ; if ( page != last ) { if ( last != null ) { last . getPage ( ) . update ( lastPageUpdates ) ; } lastPage . set ( page ) ; lastPageUpdates . clear ( ) ; } lastPageUpdates . add ( next ) ; } ) ; } , this . executor ) . thenApplyAsync ( v -> { if ( lastPage . get ( ) != null ) { lastPage . get ( ) . getPage ( ) . update ( lastPageUpdates ) ; } return pageCollection ; } , this . executor ) ; }
Executes the given updates on the index . Loads up any necessary BTreePage instances in memory but does not persist the changes to the external data source nor does it reassign offsets to the modified pages perform splits etc .
23,078
private CompletableFuture < ? > loadSmallestOffsetPage ( PageCollection pageCollection , TimeoutTimer timer ) { if ( pageCollection . getCount ( ) <= 1 ) { return CompletableFuture . completedFuture ( null ) ; } long minOffset = calculateMinOffset ( pageCollection . getRootPage ( ) ) ; return locatePage ( page -> getPagePointer ( minOffset , page ) , page -> ! page . isIndexPage ( ) || page . getOffset ( ) == minOffset , pageCollection , timer ) ; }
Loads the BTreePage with the smallest offset from the DataSource . The purpose of this is for incremental compaction . The page with the smallest offset will be moved to the end of the index which allows the external data source to perform any truncation necessary in order to free up space .
23,079
private void processSplitPage ( List < BTreePage > splitResult , PageModificationContext context ) { PageWrapper originalPage = context . getPageWrapper ( ) ; for ( int i = 0 ; i < splitResult . size ( ) ; i ++ ) { val page = splitResult . get ( i ) ; ByteArraySegment newPageKey ; long newOffset ; long minOffset ; PageWrapper processedPage ; if ( i == 0 ) { originalPage . setPage ( page ) ; newPageKey = originalPage . getPageKey ( ) ; context . getPageCollection ( ) . complete ( originalPage ) ; processedPage = originalPage ; } else { newPageKey = page . getKeyAt ( 0 ) ; processedPage = PageWrapper . wrapNew ( page , originalPage . getParent ( ) , new PagePointer ( newPageKey , PagePointer . NO_OFFSET , page . getLength ( ) ) ) ; context . getPageCollection ( ) . insert ( processedPage ) ; context . getPageCollection ( ) . complete ( processedPage ) ; } newOffset = processedPage . getOffset ( ) ; minOffset = calculateMinOffset ( processedPage ) ; processedPage . setMinOffset ( minOffset ) ; context . updatePagePointer ( new PagePointer ( newPageKey , newOffset , page . getLength ( ) , minOffset ) ) ; } }
Processes a Page Split result . The first split page will replace the existing page while the remaining pages will need to be inserted as children into the parent .
23,080
private long calculateMinOffset ( PageWrapper pageWrapper ) { long min = pageWrapper . getOffset ( ) ; if ( ! pageWrapper . isIndexPage ( ) ) { return min ; } BTreePage page = pageWrapper . getPage ( ) ; int count = page . getCount ( ) ; for ( int pos = 0 ; pos < count ; pos ++ ) { long ppMinOffset = deserializePointerMinOffset ( page . getValueAt ( pos ) ) ; min = Math . min ( min , ppMinOffset ) ; } return min ; }
Calculates the Minimum Page Offset for this PageWrapper . The Minimum Page Offset is the smallest of this PageWrapper s offset and the MinOffsets of all this PageWrapper s direct children .
23,081
private CompletableFuture < PageWrapper > locatePage ( ByteArraySegment key , PageCollection pageCollection , TimeoutTimer timer ) { Preconditions . checkArgument ( key . getLength ( ) == this . leafPageConfig . getKeyLength ( ) , "Invalid key length." ) ; Preconditions . checkArgument ( pageCollection . getIndexLength ( ) <= this . state . get ( ) . length , "Unexpected PageCollection.IndexLength." ) ; if ( this . state . get ( ) . rootPageOffset == PagePointer . NO_OFFSET && pageCollection . getCount ( ) == 0 ) { return CompletableFuture . completedFuture ( pageCollection . insert ( PageWrapper . wrapNew ( createEmptyLeafPage ( ) , null , null ) ) ) ; } return locatePage ( page -> getPagePointer ( key , page ) , page -> ! page . isIndexPage ( ) , pageCollection , timer ) ; }
Locates the Leaf Page that contains or should contain the given Key .
23,082
private CompletableFuture < PageWrapper > locatePage ( Function < BTreePage , PagePointer > getChildPointer , Predicate < PageWrapper > found , PageCollection pageCollection , TimeoutTimer timer ) { AtomicReference < PagePointer > pagePointer = new AtomicReference < > ( new PagePointer ( null , this . state . get ( ) . rootPageOffset , this . state . get ( ) . rootPageLength ) ) ; CompletableFuture < PageWrapper > result = new CompletableFuture < > ( ) ; AtomicReference < PageWrapper > parentPage = new AtomicReference < > ( null ) ; Futures . loop ( ( ) -> ! result . isDone ( ) , ( ) -> fetchPage ( pagePointer . get ( ) , parentPage . get ( ) , pageCollection , timer . getRemaining ( ) ) . thenAccept ( page -> { if ( found . test ( page ) ) { result . complete ( page ) ; } else { PagePointer childPointer = getChildPointer . apply ( page . getPage ( ) ) ; pagePointer . set ( childPointer ) ; parentPage . set ( page ) ; } } ) , this . executor ) . exceptionally ( ex -> { result . completeExceptionally ( ex ) ; return null ; } ) ; return result ; }
Locates the BTreePage according to the given criteria .
23,083
private CompletableFuture < PageWrapper > fetchPage ( PagePointer pagePointer , PageWrapper parentPage , PageCollection pageCollection , Duration timeout ) { PageWrapper fromCache = pageCollection . get ( pagePointer . getOffset ( ) ) ; if ( fromCache != null ) { return CompletableFuture . completedFuture ( fromCache ) ; } return readPage ( pagePointer . getOffset ( ) , pagePointer . getLength ( ) , timeout ) . thenApply ( data -> { if ( data . getLength ( ) != pagePointer . getLength ( ) ) { throw new IllegalDataFormatException ( String . format ( "Requested page of length %s from offset %s, got a page of length %s." , pagePointer . getLength ( ) , pagePointer . getOffset ( ) , data . getLength ( ) ) ) ; } val pageConfig = BTreePage . isIndexPage ( data ) ? this . indexPageConfig : this . leafPageConfig ; return pageCollection . insert ( PageWrapper . wrapExisting ( new BTreePage ( pageConfig , data ) , parentPage , pagePointer ) ) ; } ) ; }
Loads up a single Page .
23,084
private CompletableFuture < ByteArraySegment > readPage ( long offset , int length , Duration timeout ) { return this . read . apply ( offset , length , timeout ) ; }
Reads the contents of a single page from the external data source .
23,085
private CompletableFuture < Long > writePages ( UpdateablePageCollection pageCollection , Duration timeout ) { IndexState state = this . state . get ( ) ; Preconditions . checkState ( state != null , "Cannot write without fetching the state first." ) ; val pages = new ArrayList < Map . Entry < Long , ByteArraySegment > > ( ) ; val oldOffsets = new ArrayList < Long > ( ) ; long offset = state . length ; PageWrapper lastPage = null ; for ( PageWrapper p : pageCollection . getPagesSortedByOffset ( ) ) { if ( offset >= 0 ) { Preconditions . checkArgument ( p . getOffset ( ) == offset , "Expecting Page offset %s, found %s." , offset , p . getOffset ( ) ) ; } pages . add ( new AbstractMap . SimpleImmutableEntry < > ( offset , p . getPage ( ) . getContents ( ) ) ) ; if ( p . getPointer ( ) != null && p . getPointer ( ) . getOffset ( ) >= 0 ) { oldOffsets . add ( p . getPointer ( ) . getOffset ( ) ) ; } offset = p . getOffset ( ) + p . getPage ( ) . getLength ( ) ; lastPage = p ; } Preconditions . checkArgument ( lastPage != null && lastPage . getParent ( ) == null , "Last page to be written is not the root page" ) ; Preconditions . checkArgument ( pageCollection . getIndexLength ( ) == offset , "IndexLength mismatch." ) ; pages . add ( new AbstractMap . SimpleImmutableEntry < > ( offset , getFooter ( lastPage . getOffset ( ) , lastPage . getPage ( ) . getLength ( ) ) ) ) ; long oldFooterOffset = getFooterOffset ( state . length ) ; if ( oldFooterOffset >= 0 ) { oldOffsets . add ( oldFooterOffset ) ; } pageCollection . collectRemovedPageOffsets ( oldOffsets ) ; long rootOffset = lastPage . getOffset ( ) ; int rootLength = lastPage . getPage ( ) . getContents ( ) . getLength ( ) ; long rootMinOffset = lastPage . getMinOffset ( ) ; assert rootMinOffset >= 0 : "root.MinOffset not set" ; return this . write . apply ( pages , oldOffsets , rootMinOffset , timeout ) . thenApply ( indexLength -> setState ( indexLength , rootOffset , rootLength ) . length ) ; }
Writes the contents of all the BTreePages in the given PageCollection to the external data source .
23,086
private void formatHeaderAndFooter ( int itemCount , int id ) { this . header . set ( VERSION_OFFSET , CURRENT_VERSION ) ; this . header . set ( FLAGS_OFFSET , getFlags ( this . config . isIndexPage ? FLAG_INDEX_PAGE : FLAG_NONE ) ) ; setHeaderId ( id ) ; setCount ( itemCount ) ; setFooterId ( id ) ; }
Formats the Header and Footer of this BTreePage with the given information .
23,087
ByteArraySegment getValueAt ( int pos ) { Preconditions . checkElementIndex ( pos , getCount ( ) , "pos must be non-negative and smaller than the number of items." ) ; return this . data . subSegment ( pos * this . config . entryLength + this . config . keyLength , this . config . valueLength ) ; }
Gets a ByteArraySegment representing the value at the given Position .
23,088
ByteArraySegment getKeyAt ( int pos ) { Preconditions . checkElementIndex ( pos , getCount ( ) , "pos must be non-negative and smaller than the number of items." ) ; return this . data . subSegment ( pos * this . config . entryLength , this . config . keyLength ) ; }
Gets the Key at the given Position .
23,089
void setFirstKey ( ByteArraySegment newKey ) { Preconditions . checkState ( getCount ( ) > 0 , "BTreePage is empty. Cannot set first key." ) ; Preconditions . checkArgument ( newKey . getLength ( ) == this . config . getKeyLength ( ) , "Incorrect key length." ) ; Preconditions . checkArgument ( KEY_COMPARATOR . compare ( newKey , getKeyAt ( 0 ) ) <= 0 , "Replacement first Key must be smaller than or equal to the existing first key." ) ; this . data . copyFrom ( newKey , 0 , newKey . getLength ( ) ) ; }
Updates the first PageEntry s key to the given value .
23,090
List < PageEntry > getEntries ( int firstIndex , int lastIndex ) { Preconditions . checkArgument ( firstIndex <= lastIndex , "firstIndex must be smaller than or equal to lastIndex." ) ; ArrayList < PageEntry > result = new ArrayList < > ( ) ; for ( int i = firstIndex ; i <= lastIndex ; i ++ ) { result . add ( getEntryAt ( i ) ) ; } return result ; }
Gets all the Page Entries between the two indices .
23,091
private BTreePage applyInsertsAndRemovals ( ChangeInfo ci ) { int newCount = getCount ( ) + ci . insertCount - ci . deleteCount ; val newPage = new BTreePage ( this . config , new ByteArraySegment ( new byte [ DATA_OFFSET + newCount * this . config . entryLength + FOOTER_LENGTH ] ) , false ) ; newPage . formatHeaderAndFooter ( newCount , getHeaderId ( ) ) ; int readIndex = 0 ; int writeIndex = 0 ; for ( val e : ci . changes ) { int entryIndex = e . getKey ( ) * this . config . entryLength ; if ( entryIndex > readIndex ) { int length = entryIndex - readIndex ; assert length % this . config . entryLength == 0 ; newPage . data . copyFrom ( this . data , readIndex , writeIndex , length ) ; writeIndex += length ; } PageEntry entryContents = e . getValue ( ) ; readIndex = entryIndex ; if ( entryContents != null ) { newPage . setEntryAtIndex ( writeIndex , entryContents ) ; writeIndex += this . config . entryLength ; } else { readIndex += this . config . getEntryLength ( ) ; } } if ( readIndex < this . data . getLength ( ) ) { int length = this . data . getLength ( ) - readIndex ; newPage . data . copyFrom ( this . data , readIndex , writeIndex , length ) ; } return newPage ; }
Inserts the new PageEntry instances at the given offsets .
23,092
private void setValueAtPosition ( int pos , ByteArraySegment value ) { Preconditions . checkElementIndex ( pos , getCount ( ) , "pos must be non-negative and smaller than the number of items." ) ; Preconditions . checkArgument ( value . getLength ( ) == this . config . valueLength , "Given value has incorrect length." ) ; this . data . copyFrom ( value , pos * this . config . entryLength + this . config . keyLength , value . getLength ( ) ) ; }
Sets the Value at the given position .
23,093
public CompletableFuture < Void > add ( Operation operation , Duration timeout ) { ensureRunning ( ) ; return this . operationProcessor . process ( operation ) ; }
region OperationLog Implementation
23,094
public void setStreamSegmentId ( long value ) { Preconditions . checkState ( this . streamSegmentId == ContainerMetadata . NO_STREAM_SEGMENT_ID , "StreamSegmentId has already been assigned for this operation." ) ; Preconditions . checkArgument ( value != ContainerMetadata . NO_STREAM_SEGMENT_ID , "Invalid StreamSegmentId" ) ; this . streamSegmentId = value ; }
Sets the StreamSegmentId for this operation .
23,095
public boolean authenticateAndAuthorize ( String resource , String credentials , AuthHandler . Permissions level ) throws AuthenticationException { Preconditions . checkNotNull ( credentials , "credentials" ) ; boolean retVal = false ; try { String [ ] parts = extractMethodAndToken ( credentials ) ; String method = parts [ 0 ] ; String token = parts [ 1 ] ; AuthHandler handler = getHandler ( method ) ; Preconditions . checkNotNull ( handler , "Can not find handler." ) ; Principal principal ; if ( ( principal = handler . authenticate ( token ) ) == null ) { throw new AuthenticationException ( "Authentication failure" ) ; } retVal = handler . authorize ( resource , principal ) . ordinal ( ) >= level . ordinal ( ) ; } catch ( AuthException e ) { throw new AuthenticationException ( "Authentication failure" ) ; } return retVal ; }
API to authenticate and authorize access to a given resource .
23,096
public void registerInterceptors ( ServerBuilder < ? > builder ) { try { if ( serverConfig . isAuthorizationEnabled ( ) ) { ServiceLoader < AuthHandler > loader = ServiceLoader . load ( AuthHandler . class ) ; for ( AuthHandler handler : loader ) { try { handler . initialize ( serverConfig ) ; synchronized ( this ) { if ( handlerMap . putIfAbsent ( handler . getHandlerName ( ) , handler ) != null ) { log . warn ( "Handler with name {} already exists. Not replacing it with the latest handler" ) ; continue ; } } builder . intercept ( new PravegaInterceptor ( handler ) ) ; } catch ( Exception e ) { log . warn ( "Exception while initializing auth handler {}" , handler , e ) ; } } } } catch ( Throwable e ) { log . warn ( "Exception while loading the auth handlers" , e ) ; } }
Loads the custom implementations of the AuthHandler interface dynamically . Registers the interceptors with grpc . Stores the implementation in a local map for routing the REST auth request .
23,097
public double addNewSample ( double newSample ) { final double sample = calculateLog ( newSample ) ; return Double . longBitsToDouble ( valueEncodedAsLong . updateAndGet ( value -> { return Double . doubleToRawLongBits ( sample * newSampleWeight + ( 1.0 - newSampleWeight ) * Double . longBitsToDouble ( value ) ) ; } ) ) ; }
Adds a new sample to the moving average and returns the updated value .
23,098
synchronized QueueStats getStatistics ( ) { int size = this . writes . size ( ) ; double fillRatio = calculateFillRatio ( this . totalLength , size ) ; int processingTime = this . lastDurationMillis ; if ( processingTime == 0 && size > 0 ) { processingTime = ( int ) ( ( this . timeSupplier . get ( ) - this . writes . peekFirst ( ) . getQueueAddedTimestamp ( ) ) / AbstractTimer . NANOS_TO_MILLIS ) ; } return new QueueStats ( size , fillRatio , processingTime ) ; }
Gets a snapshot of the queue internals .
23,099
synchronized void add ( Write write ) { Exceptions . checkNotClosed ( this . closed , this ) ; this . writes . addLast ( write ) ; this . totalLength += write . data . getLength ( ) ; write . setQueueAddedTimestamp ( this . timeSupplier . get ( ) ) ; }
Adds a new Write to the end of the queue .