idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
23,200
/**
 * Gets the name of the SegmentChunk for the given Segment and starting offset.
 *
 * @param segmentName the name of the owning Segment; must not already be a SegmentChunk name.
 * @param offset      the starting offset of the chunk within the Segment.
 * @return the composed SegmentChunk name.
 */
public static String getSegmentChunkName(String segmentName, long offset) {
    // Guard against double-suffixing an already-composed chunk name.
    Preconditions.checkArgument(!segmentName.contains(OFFSET_SUFFIX), "segmentName is already a SegmentChunk name");
    return String.format("%s%s%d", segmentName, OFFSET_SUFFIX, offset);
}
Gets the name of the SegmentChunk for the given Segment and Offset .
23,201
/**
 * Gets the name of the Segment used to store the Container's Segment Metadata.
 * There is exactly one such Segment per container.
 *
 * @param containerId the id of the container; must be non-negative.
 * @return the metadata segment name for the given container.
 */
public static String getMetadataSegmentName(int containerId) {
    Preconditions.checkArgument(containerId >= 0, "containerId must be a non-negative number.");
    final String name = String.format(METADATA_SEGMENT_NAME_FORMAT, containerId);
    return name;
}
Gets the name of the Segment that is used to store the Container s Segment Metadata . There is one such Segment per container .
23,202
/**
 * Composes a 64-bit segment id from a segment number and an epoch:
 * the epoch occupies the most-significant 32 bits and the segment number
 * the least-significant 32 bits.
 *
 * @param segmentNumber the segment number (non-negative).
 * @param epoch         the creation epoch (non-negative).
 * @return the composed 64-bit segment id.
 */
public static long computeSegmentId(int segmentNumber, int epoch) {
    Preconditions.checkArgument(segmentNumber >= 0);
    Preconditions.checkArgument(epoch >= 0);
    long high = ((long) epoch) << 32;
    long low = segmentNumber & 0xFFFFFFFFL; // mask avoids sign extension of the int
    return high | low;
}
Method to compute 64 bit segment id which takes segment number and epoch and composes it as msb = epoch lsb = segmentNumber . Primary id identifies the segment container mapping and primary + secondary uniquely identifies a segment within a stream .
23,203
/**
 * Composes and returns the scoped stream name for the given scope and stream.
 *
 * @param scope      the scope name (may be absent depending on caller conventions).
 * @param streamName the stream name.
 * @return the scoped stream name as a String.
 */
public static String getScopedStreamName(String scope, String streamName) {
    StringBuffer scopedName = getScopedStreamNameInternal(scope, streamName);
    return scopedName.toString();
}
Compose and return scoped stream name .
23,204
/**
 * Generates the fully qualified StreamSegment name from scope, stream and segment id.
 * The result has the form: scopedStreamName + "/" + segmentNumber + EPOCH_DELIMITER + epoch.
 *
 * @param scope      the scope of the stream.
 * @param streamName the name of the stream.
 * @param segmentId  the 64-bit segment id (epoch in the high 32 bits, number in the low 32).
 * @return the fully qualified StreamSegment name.
 */
public static String getQualifiedStreamSegmentName(String scope, String streamName, long segmentId) {
    StringBuffer sb = getScopedStreamNameInternal(scope, streamName);
    sb.append('/')
      .append(getSegmentNumber(segmentId))
      .append(EPOCH_DELIMITER)
      .append(getEpoch(segmentId));
    return sb.toString();
}
Method to generate Fully Qualified StreamSegmentName using scope stream and segment id .
23,205
/**
 * Extracts the parts of a stream segment name. The tokens are returned in order:
 * scope (only if present), stream name, and segment id (as a decimal string).
 * For transaction segments, the parent segment name is parsed and the transaction
 * id is ignored. Works both with and without a scope prefix.
 *
 * @param qualifiedName the fully qualified segment name to parse; must not be null.
 * @return a list containing [scope,] streamName, segmentId.
 */
public static List<String> extractSegmentTokens(String qualifiedName) {
    Preconditions.checkNotNull(qualifiedName);
    // Transaction segments wrap a parent segment name; parse the parent instead.
    String originalSegmentName = isTransactionSegment(qualifiedName) ? getParentStreamSegmentName(qualifiedName) : qualifiedName;
    List<String> retVal = new LinkedList<>();
    String[] tokens = originalSegmentName.split("[/]");
    // Two tokens => no scope ("stream/segment"); otherwise "scope/stream/segment".
    int segmentIdIndex = tokens.length == 2 ? 1 : 2;
    long segmentId;
    if (tokens[segmentIdIndex].contains(EPOCH_DELIMITER)) {
        // Segment token is "segmentNumber<EPOCH_DELIMITER>epoch"; recompose the 64-bit id.
        String[] segmentIdTokens = tokens[segmentIdIndex].split(EPOCH_DELIMITER);
        segmentId = computeSegmentId(Integer.parseInt(segmentIdTokens[0]), Integer.parseInt(segmentIdTokens[1]));
    } else {
        // No epoch delimiter present: treat the epoch as 0.
        segmentId = computeSegmentId(Integer.parseInt(tokens[segmentIdIndex]), 0);
    }
    retVal.add(tokens[0]);
    if (tokens.length == 3) {
        retVal.add(tokens[1]);
    }
    retVal.add(Long.toString(segmentId));
    return retVal;
}
Method to extract the different parts of a stream segment name. The tokens are extracted in the following order: scope, stream name, and segment id. If it is a transactional segment, the transaction id is ignored. This function works even when the scope is not set.
23,206
/**
 * Extracts the tokens that were used to compose a fully qualified table segment name
 * (as built by getQualifiedTableName). The first returned token is the scope; the
 * remainder are the table-name components.
 *
 * @param qualifiedName the fully qualified table segment name; must not be null.
 * @return the extracted tokens, in order.
 */
public static List<String> extractTableSegmentTokens(String qualifiedName) {
    Preconditions.checkNotNull(qualifiedName);
    String[] parts = qualifiedName.split("[/]");
    Preconditions.checkArgument(parts.length > 2);
    // The second token must be the TABLES marker for a valid table segment name.
    Preconditions.checkArgument(parts[1].equals(TABLES));
    List<String> result = new LinkedList<>();
    result.add(parts[0]);
    int i = 2;
    while (i < parts.length) {
        result.add(parts[i]);
        i++;
    }
    return result;
}
Method to extract tokens that were used to compose fully qualified table segment name using method getQualifiedTableName .
23,207
/**
 * Determines whether the given segment name is a table segment name generated via
 * getQualifiedTableName (i.e. its second path token is the TABLES marker).
 *
 * @param qualifiedName the fully qualified segment name to check; must not be null
 *                      and must contain more than two path tokens.
 * @return true if the name denotes a table segment, false otherwise.
 */
public static boolean isTableSegment(String qualifiedName) {
    Preconditions.checkNotNull(qualifiedName);
    String[] parts = qualifiedName.split("[/]");
    Preconditions.checkArgument(parts.length > 2);
    return TABLES.equals(parts[1]);
}
Method to check if given segment name is a table name generated using getQualifiedTableName .
23,208
/**
 * Appends a new item at the end of the list, but only if its Sequence Number is
 * strictly higher than that of the current last element.
 *
 * @param item the item to append.
 * @return true if the item was appended; false if its Sequence Number was not
 *         higher than the current tail's.
 */
public boolean add(T item) {
    ListNode<T> node = new ListNode<>(item);
    synchronized (this.lock) {
        ListNode<T> last = this.tail;
        // Reject out-of-order items (only checked against an existing tail).
        if (last != null && item.getSequenceNumber() <= last.item.getSequenceNumber()) {
            return false;
        }
        if (last == null) {
            this.head = node;
        } else {
            last.next = node;
        }
        this.tail = node;
    }
    return true;
}
Adds a new item at the end of the list but only if the given item has a Sequence Number higher than the last element in the list .
23,209
/**
 * Truncates items from the beginning of the list, up to and including the element
 * with the given Sequence Number.
 *
 * @param upToSequenceNumber the highest Sequence Number to remove (inclusive).
 * @return the number of items removed.
 */
public int truncate(long upToSequenceNumber) {
    int count = 0;
    synchronized (this.lock) {
        // Pop head nodes while they fall at or below the truncation point;
        // trim() unlinks the node and yields the next one.
        while (this.head != null && this.head.item.getSequenceNumber() <= upToSequenceNumber) {
            this.head = trim(this.head);
            count++;
        }
        // If everything was removed, the tail reference must be cleared too.
        if (this.head == null) {
            this.tail = null;
        }
    }
    return count;
}
Truncates items from the beginning of the list up to and including the element with the given Sequence Number .
23,210
/**
 * Reads up to {@code count} items, starting with the first one whose Sequence Number
 * is strictly higher than the given one.
 *
 * @param afterSequenceNumber items with Sequence Numbers at or below this value are skipped.
 * @param count               the maximum number of items the returned iterator yields.
 * @return an iterator positioned at the first qualifying node.
 */
public Iterator<T> read(long afterSequenceNumber, int count) {
    ListNode<T> firstNode;
    synchronized (this.lock) {
        firstNode = this.head;
    }
    // Seek past items at or below afterSequenceNumber. The lock is re-acquired per
    // step rather than held for the whole traversal, so writers are not blocked
    // while we walk the list.
    while (firstNode != null && firstNode.item.getSequenceNumber() <= afterSequenceNumber) {
        synchronized (this.lock) {
            firstNode = firstNode.next;
        }
    }
    return new NodeIterator<>(firstNode, count, this.lock);
}
Reads a number of items starting with the first one that has a Sequence Number higher than the given one .
23,211
/**
 * Handles a ReadResult for a ReadSegment request. If cached entries can be returned
 * without blocking, only those are sent. Otherwise the data is requested and a
 * callback returns it when available. If the Segment was truncated beyond the
 * requested offset, a SegmentIsTruncated reply is sent instead.
 */
private void handleReadResult(ReadSegment request, ReadResult result) {
    String segment = request.getSegment();
    ArrayList<ReadResultEntryContents> cachedEntries = new ArrayList<>();
    // Drain contiguous cached entries; the first non-cached entry (if any) is returned.
    ReadResultEntry nonCachedEntry = collectCachedEntries(request.getOffset(), result, cachedEntries);
    final String operation = "readSegment";
    boolean truncated = nonCachedEntry != null && nonCachedEntry.getType() == Truncated;
    boolean endOfSegment = nonCachedEntry != null && nonCachedEntry.getType() == EndOfStreamSegment;
    boolean atTail = nonCachedEntry != null && nonCachedEntry.getType() == Future;
    if (!cachedEntries.isEmpty() || endOfSegment) {
        // Fast path: reply immediately with whatever is already in the cache.
        ByteBuffer data = copyData(cachedEntries);
        SegmentRead reply = new SegmentRead(segment, request.getOffset(), atTail, endOfSegment, data, request.getRequestId());
        connection.send(reply);
        this.statsRecorder.read(segment, reply.getData().array().length);
    } else if (truncated) {
        // The requested offset precedes the segment's start: report the new start offset.
        segmentStore.getStreamSegmentInfo(segment, TIMEOUT)
                .thenAccept(info -> connection.send(new SegmentIsTruncated(request.getRequestId(), segment, info.getStartOffset(), EMPTY_STACK_TRACE, nonCachedEntry.getStreamSegmentOffset())))
                .exceptionally(e -> handleException(request.getRequestId(), segment, nonCachedEntry.getStreamSegmentOffset(), operation, wrapCancellationException(e)));
    } else {
        Preconditions.checkState(nonCachedEntry != null, "No ReadResultEntries returned from read!?");
        // Slow path: fetch the content asynchronously and reply when it arrives.
        nonCachedEntry.requestContent(TIMEOUT);
        nonCachedEntry.getContent().thenAccept(contents -> {
            ByteBuffer data = copyData(Collections.singletonList(contents));
            SegmentRead reply = new SegmentRead(segment, nonCachedEntry.getStreamSegmentOffset(), false, endOfSegment, data, request.getRequestId());
            connection.send(reply);
            this.statsRecorder.read(segment, reply.getData().array().length);
        }).exceptionally(e -> {
            // Truncation detected while waiting for content gets its own reply type.
            if (Exceptions.unwrap(e) instanceof StreamSegmentTruncatedException) {
                final String clientReplyStackTrace = replyWithStackTraceOnError ? e.getMessage() : EMPTY_STACK_TRACE;
                connection.send(new SegmentIsTruncated(request.getRequestId(), segment, nonCachedEntry.getStreamSegmentOffset(), clientReplyStackTrace, nonCachedEntry.getStreamSegmentOffset()));
            } else {
                handleException(request.getRequestId(), segment, nonCachedEntry.getStreamSegmentOffset(), operation, wrapCancellationException(e));
            }
            return null;
        }).exceptionally(e -> handleException(request.getRequestId(), segment, nonCachedEntry.getStreamSegmentOffset(), operation, wrapCancellationException(e)));
    }
}
Handles a readResult . If there are cached entries that can be returned without blocking only these are returned . Otherwise the call will request the data and setup a callback to return the data when it is available . If no data is available but it was detected that the Segment had been truncated beyond the current offset an appropriate message is sent back over the connection .
23,212
/**
 * Reads all cached entries from the ReadResult and appends their contents to the
 * given list. Stops at the first non-cached entry and returns it.
 *
 * @param initialOffset the offset where reading started; used to verify contiguity.
 * @param readResult    the ReadResult to drain.
 * @param cachedEntries receives the contents of each cached entry, in order.
 * @return the first non-cached entry encountered, or null if the result was exhausted.
 */
private ReadResultEntry collectCachedEntries(long initialOffset, ReadResult readResult, ArrayList<ReadResultEntryContents> cachedEntries) {
    long expectedOffset = initialOffset;
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (entry.getType() == Cache) {
            // Cached entries must line up exactly; a gap indicates a broken read.
            Preconditions.checkState(entry.getStreamSegmentOffset() == expectedOffset, "Data returned from read was not contiguous.");
            // getNow is safe here: a Cache-type entry's content is already complete.
            ReadResultEntryContents content = entry.getContent().getNow(null);
            expectedOffset += content.getLength();
            cachedEntries.add(content);
        } else {
            return entry;
        }
    }
    return null;
}
Reads all of the cached entries from the ReadResult and puts their content into the cachedEntries list. Upon encountering a non-cached entry, it stops iterating and returns it.
23,213
/**
 * Copies all of the provided contents into a single ByteBuffer and returns it.
 * Declared IOExceptions from StreamHelpers.readAll are rethrown unchecked via
 * {@code @SneakyThrows}.
 *
 * @param contents the entry contents to concatenate.
 * @return a ByteBuffer exactly sized to hold all contents, fully populated.
 */
@SneakyThrows(IOException.class)
private ByteBuffer copyData(List<ReadResultEntryContents> contents) {
    int totalSize = contents.stream().mapToInt(ReadResultEntryContents::getLength).sum();
    ByteBuffer data = ByteBuffer.allocate(totalSize);
    int bytesCopied = 0;
    for (ReadResultEntryContents content : contents) {
        // Copy directly into the buffer's backing array at the running offset.
        int copied = StreamHelpers.readAll(content.getData(), data.array(), bytesCopied, totalSize - bytesCopied);
        Preconditions.checkState(copied == content.getLength(), "Read fewer bytes than available.");
        bytesCopied += copied;
    }
    return data;
}
Copy all of the contents provided into a byteBuffer and return it .
23,214
/**
 * Starts the service: initializes metrics, the ZooKeeper client and the service
 * builder, creates the segment/table stores and auto-scale monitoring, then starts
 * the connection listener. Must not be called after close.
 *
 * @throws Exception if any initialization step fails.
 */
public void start() throws Exception {
    Exceptions.checkNotClosed(this.closed, this);
    log.info("Initializing metrics provider ...");
    MetricsProvider.initialize(builderConfig.getConfig(MetricsConfig::builder));
    statsProvider = MetricsProvider.getMetricsProvider();
    statsProvider.start();
    log.info("Initializing ZooKeeper Client ...");
    this.zkClient = createZKClient();
    log.info("Initializing Service Builder ...");
    this.serviceBuilder.initialize();
    log.info("Creating StreamSegmentService ...");
    StreamSegmentStore service = this.serviceBuilder.createStreamSegmentService();
    log.info("Creating TableStoreService ...");
    TableStore tableStoreService = this.serviceBuilder.createTableStoreService();
    log.info("Creating Segment Stats recorder ...");
    autoScaleMonitor = new AutoScaleMonitor(service, builderConfig.getConfig(AutoScalerConfig::builder));
    TokenVerifierImpl tokenVerifier = new TokenVerifierImpl(builderConfig.getConfig(AutoScalerConfig::builder));
    log.info(serviceConfig.toString());
    log.info(builderConfig.getConfig(AutoScalerConfig::builder).toString());
    // The listener wires together all the services created above.
    this.listener = new PravegaConnectionListener(this.serviceConfig.isEnableTls(), this.serviceConfig.getListeningIPAddress(), this.serviceConfig.getListeningPort(), service, tableStoreService, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(), tokenVerifier, this.serviceConfig.getCertFile(), this.serviceConfig.getKeyFile(), this.serviceConfig.isReplyWithStackTraceOnError());
    this.listener.startListening();
    log.info("PravegaConnectionListener started successfully.");
    log.info("StreamSegmentService started.");
}
region Service Operation
23,215
/**
 * Blocks until another thread calls release(), the timeout expires, or the
 * calling thread is interrupted. Returns immediately if already released.
 *
 * @param timeoutMillis maximum time to wait, in milliseconds.
 * @throws InterruptedException if the calling thread is interrupted while waiting.
 * @throws TimeoutException     if the timeout expires before the latch is released.
 */
public void await(long timeoutMillis) throws InterruptedException, TimeoutException {
    // Fast path: already released, no need to touch the semaphore.
    if (released.get()) {
        return;
    }
    boolean acquired = impl.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS);
    if (!acquired) {
        throw new TimeoutException("Timeout expired prior to latch becoming available.");
    }
}
Block until another thread calls release or the thread is interrupted .
23,216
/**
 * Allows all waiting threads to proceed and all future await() calls to return
 * without blocking.
 */
public void release() {
    // The CAS ensures only one caller performs the transition to released.
    if (released.compareAndSet(false, true)) {
        synchronized (releasingLock) {
            // Re-check under the lock: a concurrent reset() may have flipped the flag
            // back to false, in which case the permits must not be handed out.
            if (released.get()) {
                impl.release(Integer.MAX_VALUE);
            }
        }
    }
}
Allow all waiting threads to go through and all future threads to proceed without blocking .
23,217
/**
 * Resets the latch to an un-released state, so subsequent await() calls block again.
 */
public void reset() {
    // Mirror image of release(): CAS the flag back, then drain under the lock.
    if (released.compareAndSet(true, false)) {
        synchronized (releasingLock) {
            // Re-check under the lock: a concurrent release() may have flipped the
            // flag back to true, in which case the permits must be left in place.
            if (!released.get()) {
                impl.drainPermits();
            }
        }
    }
}
Resets the latch to an un-released state.
23,218
/**
 * Closes this instance, releasing the writer lock (best effort) and marking the
 * object closed. Idempotent.
 */
public void close() {
    if (!this.closed) {
        try {
            this.entries.releaseLock(this.clientId);
        } catch (DataLogWriterNotPrimaryException ex) {
            // Intentionally ignored: if we are no longer the primary writer, the lock
            // is already held by someone else and there is nothing left to release.
        }
        this.closed = true;
    }
}
region DurableDataLog Implementation
23,219
/**
 * If the last call completed a checkpoint, updates the reader group state to record
 * that completion and releases segments. Then inspects the group state for a new
 * checkpoint: silent checkpoints are processed inline, regular checkpoints are
 * remembered and returned to the caller; with no checkpoint pending, segments may
 * be acquired.
 *
 * @return the name of the checkpoint the caller must emit, or null if none.
 * @throws ReaderNotInReaderGroupException if this reader is no longer in the group.
 */
@GuardedBy("readers")
private String updateGroupStateIfNeeded() throws ReaderNotInReaderGroupException {
    // Finish the checkpoint that was handed out on the previous call, if any.
    if (atCheckpoint != null) {
        groupState.checkpoint(atCheckpoint, getPosition());
        releaseSegmentsIfNeeded();
    }
    String checkpoint = groupState.getCheckpoint();
    if (checkpoint != null) {
        log.info("{} at checkpoint {}", this, checkpoint);
        if (groupState.isCheckpointSilent(checkpoint)) {
            // Silent checkpoints are recorded immediately and never surfaced to the caller.
            groupState.checkpoint(checkpoint, getPosition());
            if (atCheckpoint != null) {
                releaseSegmentsIfNeeded();
                atCheckpoint = null;
            }
            return null;
        } else {
            // Remember the checkpoint so the next call completes it (see top of method).
            atCheckpoint = checkpoint;
            return atCheckpoint;
        }
    } else {
        atCheckpoint = null;
        acquireSegmentsIfNeeded();
        return null;
    }
}
If the last call was a checkpoint updates the reader group state to indicate it has completed and releases segments .
23,220
/**
 * Releases sealed segments and, if the group state requires it, hands one owned
 * segment back to the group. Must only be invoked immediately after a checkpoint.
 *
 * @throws ReaderNotInReaderGroupException if this reader is no longer in the group.
 */
@GuardedBy("readers")
private void releaseSegmentsIfNeeded() throws ReaderNotInReaderGroupException {
    releaseSealedSegments();
    Segment segment = groupState.findSegmentToReleaseIfRequired();
    if (segment != null) {
        log.info("{} releasing segment {}", this, segment);
        // Find the local reader that owns the segment the group wants back.
        EventSegmentReader reader = readers.stream().filter(r -> r.getSegmentId().equals(segment)).findAny().orElse(null);
        if (reader != null) {
            // Only drop the reader if the group actually accepted the release.
            if (groupState.releaseSegment(segment, reader.getOffset(), getLag())) {
                readers.remove(reader);
                reader.close();
            }
        }
    }
}
Releases segments . This must not be invoked except immediately after a checkpoint .
23,221
/**
 * Releases sealed segments in order, stopping at the first one the group state
 * declines to release (e.g. because a checkpoint is pending for this reader).
 *
 * @throws ReaderNotInReaderGroupException if this reader is no longer in the group.
 */
private void releaseSealedSegments() throws ReaderNotInReaderGroupException {
    Iterator<Segment> it = sealedSegments.iterator();
    while (it.hasNext()) {
        Segment sealed = it.next();
        if (!groupState.handleEndOfSegment(sealed)) {
            // The group did not accept the end-of-segment; stop and retry later.
            break;
        }
        it.remove();
    }
}
Releases all sealed segments unless there is a checkpoint pending for this reader .
23,222
/**
 * Fetches the stream configuration from the configuration node.
 *
 * @return a future completing with the stream's configuration.
 */
public CompletableFuture<StreamConfiguration> getConfiguration() {
    return getConfigurationData(false)
            .thenApply(data -> data.getObject().getStreamConfiguration());
}
Fetch configuration at configurationPath .
23,223
/**
 * Fetches segment metadata from the epoch in which the segment was created
 * (encoded in the high 32 bits of the segment id).
 *
 * @param segmentId the 64-bit segment id.
 * @return a future with the segment's record; fails with DATA_NOT_FOUND if the
 *         segment is not present in its creation epoch.
 */
public CompletableFuture<StreamSegmentRecord> getSegment(final long segmentId) {
    int epoch = StreamSegmentNameUtils.getEpoch(segmentId);
    return getEpochRecord(epoch).thenApply(epochRecord ->
            epochRecord.getSegments().stream()
                       .filter(x -> x.segmentId() == segmentId)
                       .findAny()
                       .orElseThrow(() -> StoreException.create(StoreException.Type.DATA_NOT_FOUND, "segment not found in epoch")));
}
Fetches Segment metadata from the epoch in which segment was created .
23,224
/**
 * Counts how many reference segments overlap more than one target segment,
 * i.e. the number of splits (or merges, depending on argument order).
 *
 * @param referenceSegmentsList the segments being checked.
 * @param targetSegmentsList    the segments overlapped against.
 * @return the number of reference segments with multiple overlapping targets.
 */
private long findSegmentSplitsMerges(List<StreamSegmentRecord> referenceSegmentsList, List<StreamSegmentRecord> targetSegmentsList) {
    return referenceSegmentsList.stream()
            .filter(ref -> {
                long overlapCount = targetSegmentsList.stream().filter(target -> target.overlaps(ref)).count();
                return overlapCount > 1;
            })
            .count();
}
Method to calculate number of splits and merges .
23,225
/**
 * Attempts to start a new scale workflow by computing an epoch transition and
 * storing it in the metadata store. May be invoked concurrently by manual scale
 * and auto-scale processing; conflicting concurrent attempts are rejected.
 *
 * @param segmentsToSeal the segments to be sealed by this scale.
 * @param newRanges      the key ranges of the replacement segments.
 * @param scaleTimestamp the time of the scale request.
 * @param existing       a previously fetched epoch-transition record, or null to fetch fresh.
 * @return a future with the versioned epoch-transition record that was accepted.
 */
public CompletableFuture<VersionedMetadata<EpochTransitionRecord>> submitScale(final List<Long> segmentsToSeal, final List<Map.Entry<Double, Double>> newRanges, final long scaleTimestamp, final VersionedMetadata<EpochTransitionRecord> existing) {
    return verifyNotSealed().thenCompose(v -> {
        // Reuse the caller-supplied record if present; otherwise read the current one.
        if (existing == null) {
            return getEpochTransition();
        } else {
            return CompletableFuture.completedFuture(existing);
        }
    }).thenCompose(record -> getActiveEpochRecord(true).thenCompose(currentEpoch -> {
        if (!record.getObject().equals(EpochTransitionRecord.EMPTY)) {
            // A transition is already in flight: accept only if it matches this request.
            if (!RecordHelper.verifyRecordMatchesInput(segmentsToSeal, newRanges, false, record.getObject())) {
                log.debug("scale conflict, another scale operation is ongoing");
                throw new EpochTransitionOperationExceptions.ConflictException();
            }
            return CompletableFuture.completedFuture(record);
        } else {
            if (!RecordHelper.canScaleFor(segmentsToSeal, currentEpoch)) {
                // Precondition failed: reset the node (bumping its version) before failing.
                return updateEpochTransitionNode(new VersionedMetadata<>(EpochTransitionRecord.EMPTY, record.getVersion())).thenApply(x -> {
                    log.warn("scale precondition failed {}", segmentsToSeal);
                    throw new EpochTransitionOperationExceptions.PreConditionFailureException();
                });
            }
            if (!RecordHelper.validateInputRange(segmentsToSeal, newRanges, currentEpoch)) {
                log.error("scale input invalid {} {}", segmentsToSeal, newRanges);
                throw new EpochTransitionOperationExceptions.InputInvalidException();
            }
            // Compute and persist the new transition; the version acts as a CAS guard.
            EpochTransitionRecord epochTransition = RecordHelper.computeEpochTransition(currentEpoch, segmentsToSeal, newRanges, scaleTimestamp);
            return updateEpochTransitionNode(new VersionedMetadata<>(epochTransition, record.getVersion())).thenApply(version -> {
                log.info("scale for stream {}/{} accepted. Segments to seal = {}", scope, name, epochTransition.getSegmentsToSeal());
                return new VersionedMetadata<>(epochTransition, version);
            });
        }
    }));
}
This method attempts to start a new scale workflow. It first computes the epoch transition and stores it in the metadata store. This method can be called by a manual scale or during the processing of an auto-scale event, which means there could be concurrent calls to this method.
23,226
/**
 * Authenticates the request, executes the given call, and relays the result (or
 * error) to the supplied gRPC StreamObserver, bridging CompletableFuture results
 * to the Observer pattern.
 *
 * @param authenticator  supplies the delegation token; throwing indicates auth failure.
 * @param call           the controller call to execute with the delegation token.
 * @param streamObserver receives the result via onNext/onCompleted or onError.
 * @param requestTag     request tracking tag, untracked once the call completes.
 */
private <T> void authenticateExecuteAndProcessResults(Supplier<String> authenticator, Function<String, CompletableFuture<T>> call, final StreamObserver<T> streamObserver, RequestTag requestTag) {
    try {
        String delegationToken = authenticator.get();
        CompletableFuture<T> result = call.apply(delegationToken);
        result.whenComplete((value, ex) -> {
            log.debug("result = {}", value);
            logAndUntrackRequestTag(requestTag);
            if (ex != null) {
                Throwable cause = Exceptions.unwrap(ex);
                log.error("Controller api failed with error: ", ex);
                // Only expose the server-side stack trace when explicitly configured.
                String errorDescription = replyWithStackTraceOnError ? "controllerStackTrace=" + Throwables.getStackTraceAsString(ex) : cause.getMessage();
                streamObserver.onError(Status.INTERNAL.withCause(cause).withDescription(errorDescription).asRuntimeException());
            } else if (value != null) {
                streamObserver.onNext(value);
                streamObserver.onCompleted();
            }
        });
    } catch (Exception e) {
        // Fix: log the exception itself so the authentication failure cause is not lost.
        log.error("Controller api failed with authenticator error", e);
        logAndUntrackRequestTag(requestTag);
        streamObserver.onError(Status.UNAUTHENTICATED.withDescription("Authentication failed").asRuntimeException());
    }
}
Convert responses from CompletableFuture to gRPC s Observer pattern .
23,227
/**
 * Parses a fully scoped segment name and creates the corresponding Segment.
 * Transaction segment names are resolved to their parent segment first.
 *
 * @param qualifiedName the fully scoped segment name.
 * @return the parsed Segment (scope may be null when the name carries none).
 */
public static Segment fromScopedName(String qualifiedName) {
    if (StreamSegmentNameUtils.isTransactionSegment(qualifiedName)) {
        // Recurse on the parent name; the transaction id is irrelevant here.
        return fromScopedName(StreamSegmentNameUtils.getParentStreamSegmentName(qualifiedName));
    }
    List<String> tokens = StreamSegmentNameUtils.extractSegmentTokens(qualifiedName);
    // Two tokens => [stream, segmentId]; three => [scope, stream, segmentId].
    boolean hasScope = tokens.size() != 2;
    String scope = hasScope ? tokens.get(0) : null;
    String streamName = hasScope ? tokens.get(1) : tokens.get(0);
    long segmentId = Long.parseLong(tokens.get(hasScope ? 2 : 1));
    return new Segment(scope, streamName, segmentId);
}
Parses fully scoped name and creates the segment .
23,228
/**
 * Gets a resource from the pool. Returns an idle resource immediately if one is
 * available; otherwise registers a waiting request to be completed when a resource
 * is returned or created, and opportunistically asks for a new resource to be
 * created outside the lock.
 *
 * @return a future completing with a CloseableResource wrapping the pooled resource.
 */
public CompletableFuture<CloseableResource<T>> getResource() {
    CompletableFuture<CloseableResource<T>> future;
    boolean tryCreateNewResource = false;
    synchronized (lock) {
        T t = idleResources.poll();
        if (t != null) {
            // Idle resource available: hand it out immediately.
            future = CompletableFuture.completedFuture(new CloseableResource<>(t, this));
        } else {
            // Nothing idle: queue a waiter and request creation after releasing the lock.
            future = new CompletableFuture<>();
            WaitingRequest<T> request = new WaitingRequest<>(future);
            waitQueue.add(request);
            tryCreateNewResource = true;
        }
    }
    // Creation is attempted outside the lock to avoid holding it during expensive work.
    if (tryCreateNewResource) {
        tryCreateNewResource();
    }
    return future;
}
Method to get a resource initialized with supplied arg . This method attempts to find an existing available resource . If not found it submits a new waiting request for whenever a resource becomes available . A resource could become available because such a resource was returned to the pool or a new resource was created . It also opportunistically submits a request to create a new resource if required .
23,229
/**
 * Returns a resource to the pool. Invalid resources are handled separately; valid
 * ones are handed to the next waiter if any, parked as idle if capacity allows, or
 * destroyed otherwise (always when the pool is shutting down).
 *
 * @param t       the resource being returned.
 * @param isValid whether the resource is still usable.
 */
private void returnResource(T t, boolean isValid) {
    if (!isValid) {
        handleInvalid(t);
    } else {
        WaitingRequest<T> waiting;
        boolean toDestroy = false;
        synchronized (lock) {
            waiting = waitQueue.poll();
            if (waiting == null) {
                if (!isRunning) {
                    // Pool is shutting down: never re-idle, always destroy.
                    resourceCount--;
                    toDestroy = true;
                } else {
                    if (idleResources.size() < maxIdle) {
                        idleResources.offer(t);
                    } else {
                        // Idle capacity exceeded: shrink the pool.
                        resourceCount--;
                        toDestroy = true;
                    }
                }
            }
        }
        // Complete the waiter and destroy outside the lock to avoid running
        // callbacks / destroyers while holding it.
        if (waiting != null) {
            waiting.future.complete(new CloseableResource<>(t, this));
        }
        if (toDestroy) {
            if (listener != null) {
                listener.notify(Event.Destroyed);
            }
            tDestroyer.accept(t);
        }
    }
}
Method to return a resource back to the pool. Callers are expected to call close on the CloseableResource, which in turn calls the returnResource method to return the resource to the pool so that it can be reused.
23,230
/**
 * Shuts down the pool: marks it not running and drains the idle queue, routing each
 * idle resource through returnResource so it gets destroyed (isRunning is false).
 * Note that getResource invoked after shutdown will still return a resource.
 */
public void shutdown() {
    T t;
    synchronized (lock) {
        isRunning = false;
        t = idleResources.poll();
    }
    // Drain one resource at a time, re-taking the lock between iterations so
    // returnResource is never called while the lock is held.
    while (t != null) {
        returnResource(t, true);
        synchronized (lock) {
            t = idleResources.poll();
        }
    }
}
Shuts down the resource manager: all returned resources are closed and not put back into the idle queue of resources. It is important to note that even after shutdown is initiated, if getResource is invoked it will still return a resource.
23,231
/**
 * Executes the given Callable and returns its result, translating any exception
 * it throws into a StreamSegmentException via throwException.
 *
 * @param segmentName the segment the operation applies to (used for error context).
 * @param operation   the operation to execute.
 * @return the operation's result.
 * @throws StreamSegmentException if the operation fails.
 */
private <R> R execute(String segmentName, Callable<R> operation) throws StreamSegmentException {
    Exceptions.checkNotClosed(this.closed.get(), this);
    try {
        return operation.call();
    } catch (Exception ex) {
        // throwException translates and rethrows; the return satisfies the compiler.
        return throwException(segmentName, ex);
    }
}
Executes the given Callable and returns its result while translating any Exceptions bubbling out of it into StreamSegmentExceptions .
23,232
/**
 * Registers the given client with this CacheManager and seeds it with the current
 * generation values. Registering an already-registered client is a no-op.
 *
 * @param client the client to register; must not be null.
 */
public void register(Client client) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    Preconditions.checkNotNull(client, "client");
    synchronized (this.clients) {
        boolean alreadyRegistered = this.clients.contains(client);
        if (!alreadyRegistered) {
            this.clients.add(client);
            // New clients immediately learn the current/oldest generations.
            client.updateGenerations(this.currentGeneration.get(), this.oldestGeneration.get());
        }
    }
    log.info("{} Registered {}.", TRACE_OBJECT_ID, client);
}
Registers the given client to this CacheManager .
23,233
/**
 * Unregisters the given client from this CacheManager. A no-op once this manager
 * is closed.
 *
 * @param client the client to unregister; must not be null.
 */
public void unregister(Client client) {
    // After close, unregistration is pointless; bail out quietly.
    if (this.closed.get()) {
        return;
    }
    Preconditions.checkNotNull(client, "client");
    synchronized (this.clients) {
        this.clients.remove(client);
    }
    log.info("{} Unregistered {}.", TRACE_OBJECT_ID, client);
}
Unregisters the given client from this CacheManager .
23,234
/**
 * Writes a single byte to the current DataFrame. If the frame is full, the current
 * entry is ended, the frame flushed, and a fresh frame/record started, then the
 * write is retried exactly once.
 *
 * @param b the byte to write (low 8 bits used).
 * @throws IOException            if no progress can be made even after a new frame.
 * @throws IllegalStateException  if no record has been started.
 */
public void write(int b) throws IOException {
    Exceptions.checkNotClosed(this.closed, this);
    Preconditions.checkState(this.currentFrame != null, "No current frame exists. Most likely no record is started.");
    int attemptCount = 0;
    int totalBytesWritten = 0;
    // At most two attempts: the initial append, and one retry on a brand-new frame.
    while (totalBytesWritten == 0 && attemptCount < 2) {
        totalBytesWritten += this.currentFrame.append((byte) b);
        if (totalBytesWritten == 0) {
            // Frame is full: close the partial entry, ship the frame, start over.
            this.currentFrame.endEntry(false);
            flush();
            createNewFrame();
            startNewRecordInCurrentFrame(false);
        }
        attemptCount++;
    }
    // A fresh frame that still cannot take one byte means we cannot make progress.
    if (totalBytesWritten == 0) {
        throw new SerializationException("Unable to make progress in serializing to DataFrame.");
    }
}
region OutputStream Implementation
23,235
/**
 * Gets the byte at the given index within this segment.
 *
 * @param index the index, relative to the segment's start; must be within bounds.
 * @return the byte value at that index.
 */
public byte get(int index) {
    Preconditions.checkElementIndex(index, this.length, "index");
    int pos = this.startOffset + index;
    return this.array[pos];
}
region ArrayView Implementation
23,236
/**
 * Sets the byte at the given index within this segment.
 *
 * @param index the index, relative to the segment's start; must be within bounds.
 * @param value the value to store.
 * @throws IllegalStateException if this segment is read-only.
 */
public void set(int index, byte value) {
    Preconditions.checkState(!this.readOnly, "Cannot modify a read-only ByteArraySegment.");
    Preconditions.checkElementIndex(index, this.length, "index");
    int pos = this.startOffset + index;
    this.array[pos] = value;
}
Sets the value at the specified index .
23,237
/**
 * Copies the first {@code length} bytes of the given ByteArraySegment into this
 * segment, starting at {@code targetOffset}.
 *
 * @param source       the segment to copy from.
 * @param targetOffset the offset within this segment to copy to.
 * @param length       the number of bytes to copy; must not exceed source's length.
 * @throws IllegalStateException if this segment is read-only.
 */
public void copyFrom(ByteArraySegment source, int targetOffset, int length) {
    Preconditions.checkState(!this.readOnly, "Cannot modify a read-only ByteArraySegment.");
    // Validate the target range within this segment...
    Exceptions.checkArrayRange(targetOffset, length, this.length, "index", "values.length");
    // ...and that the source has at least `length` bytes (hence the +1 on the bound).
    Preconditions.checkElementIndex(length, source.getLength() + 1, "length");
    System.arraycopy(source.array, source.startOffset, this.array, targetOffset + this.startOffset, length);
}
Copies a specified number of bytes from the given ByteArraySegment into this ByteArraySegment .
23,238
/**
 * Returns a new ByteArraySegment that is a sub-segment of this one, wrapping the
 * same underlying byte array.
 *
 * @param offset   the starting offset of the sub-segment, relative to this segment.
 * @param length   the length of the sub-segment.
 * @param readOnly whether the sub-segment should be read-only; a read-only parent
 *                 always yields a read-only child.
 * @return the new sub-segment.
 */
public ByteArraySegment subSegment(int offset, int length, boolean readOnly) {
    Exceptions.checkArrayRange(offset, length, this.length, "offset", "length");
    // Read-only-ness is inherited: a child can never be more writable than its parent.
    boolean childReadOnly = this.readOnly || readOnly;
    return new ByteArraySegment(this.array, this.startOffset + offset, length, childReadOnly);
}
Returns a new ByteArraySegment that is a sub - segment of this ByteArraySegment . The new ByteArraySegment wraps the same underlying byte array that this ByteArraySegment does .
23,239
/**
 * Appends a LogItem to the DataFrameBuilder. If serialization fails, whatever was
 * written to the current DataFrame is discarded and state is rolled back. Note: if
 * the item spans multiple DataFrames, content already committed to earlier frames
 * cannot be discarded here and must be dealt with on read.
 *
 * @param logItem the item to append; its Sequence Number must exceed the last serialized one.
 * @throws IOException           if serialization to the output stream fails.
 * @throws ObjectClosedException if this builder is (or becomes) closed.
 */
void append(T logItem) throws IOException {
    Exceptions.checkNotClosed(this.closed.get(), this);
    long seqNo = logItem.getSequenceNumber();
    // Sequence numbers must be strictly increasing across appends.
    Exceptions.checkArgument(this.lastSerializedSequenceNumber < seqNo, "logItem", "Invalid sequence number. Expected: greater than %d, given: %d.", this.lastSerializedSequenceNumber, seqNo);
    // Remember the previous value so we can roll back on failure.
    long previousLastStartedSequenceNumber = this.lastStartedSequenceNumber;
    try {
        this.outputStream.startNewRecord();
        this.lastStartedSequenceNumber = seqNo;
        this.serializer.serialize(this.outputStream, logItem);
        this.outputStream.endRecord();
        // Only mark as serialized once the record has been fully ended.
        this.lastSerializedSequenceNumber = seqNo;
    } catch (Exception ex) {
        if (this.closed.get()) {
            // Closed while serializing: surface that instead of the raw failure.
            throw new ObjectClosedException(this, ex);
        } else if (ex instanceof ObjectClosedException) {
            close();
        } else {
            // Ordinary failure: discard the partial record and restore state.
            this.outputStream.discardRecord();
            this.lastStartedSequenceNumber = previousLastStartedSequenceNumber;
        }
        throw ex;
    }
}
Appends a LogItem to the DataFrameBuilder . If any exceptions happened during serialization whatever contents was written to the DataFrame will be discarded . Note that if a LogItem spans multiple DataFrames in case of failure the content serialized to already committed DataFrames will not be discarded . That case will have to be dealt with upon reading DataFrames from the DataFrameLog .
23,240
/**
 * Initializes the event writers for the commit and abort streams and marks this
 * instance ready. Should be called immediately after constructing the
 * StreamTransactionMetadataTasks object. Idempotent: already-completed writer
 * futures are left untouched.
 *
 * @param clientFactory the factory used to create the event writers.
 * @param config        provides the commit/abort stream names.
 */
public void initializeStreamWriters(final EventStreamClientFactory clientFactory, final ControllerEventProcessorConfig config) {
    if (!commitWriterFuture.isDone()) {
        commitWriterFuture.complete(clientFactory.createEventWriter(config.getCommitStreamName(), ControllerEventProcessors.COMMIT_EVENT_SERIALIZER, EventWriterConfig.builder().build()));
    }
    if (!abortWriterFuture.isDone()) {
        abortWriterFuture.complete(clientFactory.createEventWriter(config.getAbortStreamName(), ControllerEventProcessors.ABORT_EVENT_SERIALIZER, EventWriterConfig.builder().build()));
    }
    this.setReady();
}
Initializes stream writers for commit and abort streams . This method should be called immediately after creating StreamTransactionMetadataTasks object .
23,241
/**
 * Creates a transaction on the given stream.
 *
 * @param scope      the stream's scope.
 * @param stream     the stream name.
 * @param lease      the transaction lease, in milliseconds.
 * @param contextOpt optional operation context; a fresh one is created if null.
 * @return a future with the transaction data and the list of active segments.
 */
public CompletableFuture<Pair<VersionedTransactionData, List<StreamSegmentRecord>>> createTxn(final String scope, final String stream, final long lease, final OperationContext contextOpt) {
    final OperationContext ctx = getNonNullOperationContext(scope, stream, contextOpt);
    return createTxnBody(scope, stream, lease, ctx);
}
Create transaction .
23,242
/**
 * Transaction heartbeat: extends the transaction's timeout by the given lease.
 *
 * @param scope      the stream's scope.
 * @param stream     the stream name.
 * @param txId       the transaction id.
 * @param lease      the number of milliseconds to extend the timeout by.
 * @param contextOpt optional operation context; a fresh one is created if null.
 * @return a future with the ping status.
 */
public CompletableFuture<PingTxnStatus> pingTxn(final String scope, final String stream, final UUID txId, final long lease, final OperationContext contextOpt) {
    final OperationContext ctx = getNonNullOperationContext(scope, stream, contextOpt);
    return pingTxnBody(scope, stream, txId, lease, ctx);
}
Transaction heartbeat that increases transaction timeout by lease number of milliseconds .
23,243
/**
 * Aborts the given transaction, retrying on retryable failures (up to 3 attempts).
 *
 * @param scope      the stream's scope.
 * @param stream     the stream name.
 * @param txId       the transaction id.
 * @param version    the expected metadata version for the seal operation.
 * @param contextOpt optional operation context; a fresh one is created if null.
 * @return a future with the resulting transaction status.
 */
public CompletableFuture<TxnStatus> abortTxn(final String scope, final String stream, final UUID txId, final Version version, final OperationContext contextOpt) {
    final OperationContext ctx = getNonNullOperationContext(scope, stream, contextOpt);
    // commit=false seals the transaction for abort.
    return withRetriesAsync(() -> sealTxnBody(hostId, scope, stream, false, txId, version, ctx), RETRYABLE_PREDICATE, 3, executor);
}
Abort transaction .
23,244
/**
 * Commits the given transaction, retrying (up to 3 attempts) on retryable failures.
 *
 * @param scope      scope name.
 * @param stream     stream name.
 * @param txId       transaction id.
 * @param contextOpt optional operation context; a fresh one is created when {@code null}.
 * @return a future with the resulting transaction status.
 */
public CompletableFuture<TxnStatus> commitTxn(final String scope, final String stream, final UUID txId,
                                              final OperationContext contextOpt) {
    final OperationContext operationContext = getNonNullOperationContext(scope, stream, contextOpt);
    // commit=true; version is null because commit does not do a compare-and-set on the txn record.
    return withRetriesAsync(() -> sealTxnBody(hostId, scope, stream, true, txId, null, operationContext),
            RETRYABLE_PREDICATE, 3, executor);
}
Commit transaction .
23,245
/**
 * Creates a transaction on the specified stream: validates the lease, generates a txn id,
 * records the txn in the host index and the store, resolves the epoch's segments, notifies the
 * segment stores, registers the txn with the timeout service, and finally returns the versioned
 * txn data together with generalized segment records (segment ids rebased onto the txn id).
 * The whole sequence is retried (up to 5 times) on WriteConflict/DataNotFound store exceptions.
 * NOTE(review): the timeout-service registration happens in whenCompleteAsync even when the
 * notification step failed — presumably so the txn still times out and gets aborted; confirm.
 */
CompletableFuture < Pair < VersionedTransactionData , List < StreamSegmentRecord > > > createTxnBody ( final String scope , final String stream , final long lease , final OperationContext ctx ) { CompletableFuture < Void > validate = validate ( lease ) ; long maxExecutionPeriod = Math . min ( MAX_EXECUTION_TIME_MULTIPLIER * lease , Duration . ofDays ( 1 ) . toMillis ( ) ) ; return validate . thenCompose ( validated -> RetryHelper . withRetriesAsync ( ( ) -> streamMetadataStore . generateTransactionId ( scope , stream , ctx , executor ) . thenCompose ( txnId -> { CompletableFuture < Void > addIndex = addTxnToIndex ( scope , stream , txnId ) ; CompletableFuture < VersionedTransactionData > txnFuture = createTxnInStore ( scope , stream , lease , ctx , maxExecutionPeriod , txnId , addIndex ) ; CompletableFuture < List < StreamSegmentRecord > > segmentsFuture = txnFuture . thenComposeAsync ( txnData -> streamMetadataStore . getSegmentsInEpoch ( scope , stream , txnData . getEpoch ( ) , ctx , executor ) , executor ) ; CompletableFuture < Void > notify = segmentsFuture . thenComposeAsync ( activeSegments -> notifyTxnCreation ( scope , stream , activeSegments , txnId ) , executor ) . whenComplete ( ( v , e ) -> log . trace ( "Txn={}, notified segments stores" , txnId ) ) ; return notify . whenCompleteAsync ( ( result , ex ) -> { addTxnToTimeoutService ( scope , stream , lease , maxExecutionPeriod , txnId , txnFuture ) ; } , executor ) . thenApplyAsync ( v -> { List < StreamSegmentRecord > segments = segmentsFuture . join ( ) . stream ( ) . map ( x -> { long generalizedSegmentId = RecordHelper . generalizedSegmentId ( x . segmentId ( ) , txnId ) ; int epoch = StreamSegmentNameUtils . getEpoch ( generalizedSegmentId ) ; int segmentNumber = StreamSegmentNameUtils . getSegmentNumber ( generalizedSegmentId ) ; return StreamSegmentRecord . builder ( ) . creationEpoch ( epoch ) . segmentNumber ( segmentNumber ) . creationTime ( x . getCreationTime ( ) ) . keyStart ( x . getKeyStart ( ) ) . keyEnd ( x . getKeyEnd ( ) ) . build ( ) ; } ) . collect ( Collectors . toList ( ) ) ; return new ImmutablePair < > ( txnFuture . join ( ) , segments ) ; } , executor ) ; } ) , e -> { Throwable unwrap = Exceptions . unwrap ( e ) ; return unwrap instanceof StoreException . WriteConflictException || unwrap instanceof StoreException . DataNotFoundException ; } , 5 , executor ) ) ; }
Creates txn on the specified stream .
23,246
/**
 * Pings a transaction, updating its timeout to current time + lease.
 * Returns DISCONNECTED immediately when the timeout service is not running.
 *
 * @param scope  scope name.
 * @param stream stream name.
 * @param txnId  transaction id.
 * @param lease  new lease, in milliseconds.
 * @param ctx    operation context.
 * @return a future with the ping status.
 */
CompletableFuture<PingTxnStatus> pingTxnBody(final String scope, final String stream, final UUID txnId,
                                             final long lease, final OperationContext ctx) {
    if (timeoutService.isRunning()) {
        log.debug("Txn={}, updating txn node in store and extending lease", txnId);
        return fenceTxnUpdateLease(scope, stream, txnId, lease, ctx);
    }
    return CompletableFuture.completedFuture(createStatus(Status.DISCONNECTED));
}
Ping a txn thereby updating its timeout to current time + lease .
23,247
/**
 * Rolls the segment over to a new chunk: seals the active chunk, then creates a new one.
 * If chunk creation races with another writer (StreamSegmentExistsException), the handle is
 * refreshed from storage; when the refresh shows a new chunk appeared, the concurrent rollover
 * is accepted and the exception is swallowed, otherwise the original exception is rethrown.
 * Preconditions: handle has a header, is writable, and the segment is not sealed.
 */
private void rollover ( RollingSegmentHandle handle ) throws StreamSegmentException { Preconditions . checkArgument ( handle . getHeaderHandle ( ) != null , "Cannot rollover a Segment with no header." ) ; Preconditions . checkArgument ( ! handle . isReadOnly ( ) , "Cannot rollover using a read-only handle." ) ; Preconditions . checkArgument ( ! handle . isSealed ( ) , "Cannot rollover a Sealed Segment." ) ; log . debug ( "Rolling over '{}'." , handle ) ; sealActiveChunk ( handle ) ; try { createChunk ( handle ) ; } catch ( StreamSegmentExistsException ex ) { int chunkCount = handle . chunks ( ) . size ( ) ; handle . refresh ( openHandle ( handle . getSegmentName ( ) , false ) ) ; if ( chunkCount == handle . chunks ( ) . size ( ) ) { throw ex ; } else { log . warn ( "Aborted rollover due to concurrent rollover detected ('{}')." , handle ) ; } } }
region SegmentChunk Operations
23,248
/**
 * Creates the header segment for the given rolling segment, serializes the handle state into
 * it, and refreshes the passed-in handle to point at the newly created header.
 * Precondition: the handle does not already have a header.
 *
 * @throws StreamSegmentException if the base storage operations fail.
 */
private void createHeader ( RollingSegmentHandle handle ) throws StreamSegmentException { Preconditions . checkArgument ( handle . getHeaderHandle ( ) == null , "handle already has a header." ) ; String headerName = StreamSegmentNameUtils . getHeaderSegmentName ( handle . getSegmentName ( ) ) ; this . baseStorage . create ( headerName ) ; val headerHandle = this . baseStorage . openWrite ( headerName ) ; val newHandle = new RollingSegmentHandle ( headerHandle , handle . getRollingPolicy ( ) , handle . chunks ( ) ) ; serializeHandle ( newHandle ) ; handle . refresh ( newHandle ) ; }
region Header Operations
23,249
/**
 * Collects the WriterSegmentProcessors contributed by every registered extension for the
 * given segment.
 *
 * @param segmentMetadata metadata of the segment the processors will operate on.
 * @return an immutable list aggregating all extensions' processors.
 */
private Collection<WriterSegmentProcessor> createWriterProcessors(UpdateableSegmentMetadata segmentMetadata) {
    ImmutableList.Builder<WriterSegmentProcessor> processors = ImmutableList.builder();
    this.extensions.values()
            .forEach(extension -> processors.addAll(extension.createWriterSegmentProcessors(segmentMetadata)));
    return processors.build();
}
Creates WriterSegmentProcessors for the given Segment Metadata from all registered Extensions .
23,250
/**
 * Stops the container by stopping the metadata cleaner, writer and durable log in parallel,
 * then reports the outcome: notifyStopped() only when every component stopped cleanly AND the
 * given cause is null; otherwise notifyFailed() with the component failure (the given cause is
 * attached as suppressed when distinct) or with the given cause itself.
 *
 * @param cause optional external failure that triggered the stop; may be null.
 */
private void doStop ( Throwable cause ) { long traceId = LoggerHelpers . traceEnterWithContext ( log , traceObjectId , "doStop" ) ; log . info ( "{}: Stopping." , this . traceObjectId ) ; CompletableFuture . allOf ( Services . stopAsync ( this . metadataCleaner , this . executor ) , Services . stopAsync ( this . writer , this . executor ) , Services . stopAsync ( this . durableLog , this . executor ) ) . whenCompleteAsync ( ( r , ex ) -> { Throwable failureCause = getFailureCause ( this . durableLog , this . writer , this . metadataCleaner ) ; if ( failureCause == null ) { failureCause = cause ; } else if ( cause != null && failureCause != cause ) { failureCause . addSuppressed ( cause ) ; } if ( failureCause == null ) { log . info ( "{}: Stopped." , this . traceObjectId ) ; LoggerHelpers . traceLeave ( log , traceObjectId , "doStop" , traceId ) ; notifyStopped ( ) ; } else { log . warn ( "{}: Failed due to component failure." , this . traceObjectId ) ; LoggerHelpers . traceLeave ( log , traceObjectId , "doStop" , traceId ) ; notifyFailed ( failureCause ) ; } } , this . executor ) . exceptionally ( ex -> { notifyFailed ( ex ) ; return null ; } ) ; }
Stops the StreamSegmentContainer by stopping all components, waiting for them to stop, and then reports either a normal shutdown or a failure. A normal shutdown is reported only if all components shut down normally and cause is null; otherwise the container reports either the exception of the failed component or the given cause.
23,251
/**
 * Returns a snapshot of the properties of all segments currently registered in this
 * container's metadata. Segments whose metadata disappears mid-iteration are skipped.
 *
 * @return a list of segment property snapshots.
 */
public Collection<SegmentProperties> getActiveSegments() {
    ensureRunning();
    logRequest("getActiveSegments");
    return this.metadata.getAllStreamSegmentIds().stream()
            .map(segmentId -> this.metadata.getStreamSegmentMetadata(segmentId))
            .filter(segmentMetadata -> segmentMetadata != null)
            .map(segmentMetadata -> segmentMetadata.getSnapshot())
            .collect(Collectors.toList());
}
region SegmentContainer Implementation
23,252
/**
 * Attempts to seal a segment that may already be sealed. A no-op for already-sealed segments,
 * and a StreamSegmentSealedException raised by the durable log is treated as success.
 *
 * @param metadata segment to seal.
 * @param timeout  operation timeout.
 * @return a future completing when the segment is (or already was) sealed.
 */
private CompletableFuture<Void> trySealStreamSegment(SegmentMetadata metadata, Duration timeout) {
    if (!metadata.isSealed()) {
        return Futures.exceptionallyExpecting(
                this.durableLog.add(new StreamSegmentSealOperation(metadata.getId()), timeout),
                ex -> ex instanceof StreamSegmentSealedException,
                null);
    }
    return CompletableFuture.completedFuture(null);
}
Attempts to seal a Segment that may already be sealed .
23,253
/**
 * Adds the given attribute-updating operation to the durable log, with exactly one retry when
 * the rejection is a BadAttributeUpdateException caused by a previous value missing from the
 * in-memory attribute cache: in that case the missing (non-core) attributes are fetched and
 * cached via getAndCacheAttributes, then the add is attempted once more. Any other failure is
 * propagated unchanged.
 *
 * @param operation the operation carrying attribute updates.
 * @param timer     remaining-time tracker for the overall call.
 */
private < T extends Operation & AttributeUpdaterOperation > CompletableFuture < Void > processAttributeUpdaterOperation ( T operation , TimeoutTimer timer ) { Collection < AttributeUpdate > updates = operation . getAttributeUpdates ( ) ; if ( updates == null || updates . isEmpty ( ) ) { return this . durableLog . add ( operation , timer . getRemaining ( ) ) ; } return Futures . exceptionallyCompose ( this . durableLog . add ( operation , timer . getRemaining ( ) ) , ex -> { ex = Exceptions . unwrap ( ex ) ; if ( ex instanceof BadAttributeUpdateException && ( ( BadAttributeUpdateException ) ex ) . isPreviousValueMissing ( ) ) { SegmentMetadata segmentMetadata = this . metadata . getStreamSegmentMetadata ( operation . getStreamSegmentId ( ) ) ; Collection < UUID > attributeIds = updates . stream ( ) . map ( AttributeUpdate :: getAttributeId ) . filter ( id -> ! Attributes . isCoreAttribute ( id ) ) . collect ( Collectors . toList ( ) ) ; if ( ! attributeIds . isEmpty ( ) ) { return getAndCacheAttributes ( segmentMetadata , attributeIds , true , timer ) . thenComposeAsync ( attributes -> { return this . durableLog . add ( operation , timer . getRemaining ( ) ) ; } , this . executor ) ; } } return Futures . failedFuture ( ex ) ; } ) ; }
Processes the given AttributeUpdateOperation with exactly one retry in case it was rejected because of an attribute update failure due to the attribute value missing from the in - memory cache .
23,254
/**
 * Fetches StreamInfo for the given stream by delegating to a StreamManagerImpl and mapping the
 * result into the batch-client StreamInfo type (scope, name, tail and head stream cuts).
 * NOTE(review): the StreamManagerImpl created here is never closed — presumably harmless
 * because it only wraps the shared controller/connectionFactory, but verify it holds no
 * resources of its own.
 *
 * @param stream the stream to describe; must not be null.
 */
public CompletableFuture < io . pravega . client . batch . StreamInfo > getStreamInfo ( final Stream stream ) { Preconditions . checkNotNull ( stream , "stream" ) ; StreamManagerImpl streamManager = new StreamManagerImpl ( this . controller , this . connectionFactory ) ; return streamManager . getStreamInfo ( stream ) . thenApply ( info -> new io . pravega . client . batch . StreamInfo ( info . getScope ( ) , info . getStreamName ( ) , info . getTailStreamCut ( ) , info . getHeadStreamCut ( ) ) ) ; }
Used to fetch the StreamInfo of a given stream . This should be removed in time .
23,255
/**
 * Closes this collection. The first call drains and returns all pending entries; subsequent
 * calls return an empty list.
 *
 * @return the entries that were pending at close time.
 */
public List<FutureReadResultEntry> close() {
    synchronized (this.reads) {
        if (this.closed) {
            return Collections.emptyList();
        }
        this.closed = true;
        List<FutureReadResultEntry> pending = new ArrayList<>(this.reads);
        this.reads.clear();
        return pending;
    }
}
Closes this instance of the FutureReadResultEntryCollection class .
23,256
/**
 * Registers a new pending read entry. Fails if this collection has been closed.
 *
 * @param newEntry the entry to register.
 */
public void add(FutureReadResultEntry newEntry) {
    synchronized (this.reads) {
        // Reject additions after close; checkNotClosed throws when this.closed is set.
        Exceptions.checkNotClosed(this.closed, this);
        this.reads.add(newEntry);
    }
}
Adds a new Result Entry .
23,257
/**
 * Removes and returns all entries whose starting offset is at or before the given offset.
 *
 * @param maxOffset inclusive upper bound on the starting offset of returned entries.
 * @return the removed entries, in queue order.
 */
Collection<FutureReadResultEntry> poll(long maxOffset) {
    List<FutureReadResultEntry> polled = new ArrayList<>();
    synchronized (this.reads) {
        Exceptions.checkNotClosed(this.closed, this);
        FutureReadResultEntry head = this.reads.peek();
        while (head != null && head.getStreamSegmentOffset() <= maxOffset) {
            polled.add(this.reads.poll());
            head = this.reads.peek();
        }
    }
    return polled;
}
Finds the Result Entries that have a starting offset before the given offset, removes them from the collection, and returns them.
23,258
/**
 * Issues a new async read request for this segment if and only if: the segment end/truncation
 * has not been observed, there is room in the buffer past the configured end offset
 * (updatedReadLength > 0), and no request is already outstanding. The read starts at the
 * current offset plus the data already buffered.
 */
private void issueRequestIfNeeded ( ) { int updatedReadLength = computeReadLength ( offset + buffer . dataAvailable ( ) ) ; if ( ! receivedEndOfSegment && ! receivedTruncated && updatedReadLength > 0 && outstandingRequest == null ) { log . trace ( "Issuing read request for segment {} of {} bytes" , getSegmentId ( ) , updatedReadLength ) ; outstandingRequest = asyncInput . read ( offset + buffer . dataAvailable ( ) , updatedReadLength ) ; } }
Issues a read request if there is enough room for another request, we are not already waiting on one, and we have not yet read up to the configured endOffset.
23,259
/**
 * Computes how many bytes the next read request should ask for, based on the current fetch
 * offset and the configured end offset.
 *
 * @param currentFetchOffset offset up to which events have already been fetched; must not
 *                           exceed the configured end offset.
 * @return the read length: at least {@code minReadLength} (or whatever buffer capacity
 *         remains, if larger), capped by the bytes remaining before {@code endOffset} unless
 *         the end offset is unbounded.
 */
private int computeReadLength(long currentFetchOffset) {
    // Fixed duplicated word ("up to to") in the original precondition message.
    Preconditions.checkState(endOffset >= currentFetchOffset,
            "Current offset up to which events are fetched should be less than the configured end offset");
    int currentReadLength = Math.max(minReadLength, buffer.capacityAvailable());
    if (UNBOUNDED_END_OFFSET == endOffset) {
        // No configured end offset: read as much as the buffer allows.
        return currentReadLength;
    }
    long numberOfBytesRemaining = endOffset - currentFetchOffset;
    return Math.toIntExact(Math.min(currentReadLength, numberOfBytesRemaining));
}
Compute the read length based on the current fetch offset and the configured end offset .
23,260
/**
 * Establishes a new Netty connection to the given Pravega node. The returned future completes
 * only when BOTH the TCP connect succeeds (channel added to the channel group) AND the flow
 * handler reports channel registration; either failure surfaces as a ConnectionFailedException.
 *
 * @param location endpoint and port of the target node.
 * @param handler  flow handler wired into the channel pipeline.
 */
private CompletableFuture < Void > establishConnection ( PravegaNodeUri location , FlowHandler handler ) { final Bootstrap b = getNettyBootstrap ( ) . handler ( getChannelInitializer ( location , handler ) ) ; final CompletableFuture < Void > connectionComplete = new CompletableFuture < > ( ) ; try { b . connect ( location . getEndpoint ( ) , location . getPort ( ) ) . addListener ( ( ChannelFutureListener ) future -> { if ( future . isSuccess ( ) ) { Channel ch = future . channel ( ) ; log . debug ( "Connect operation completed for channel:{}, local address:{}, remote address:{}" , ch . id ( ) , ch . localAddress ( ) , ch . remoteAddress ( ) ) ; channelGroup . add ( ch ) ; connectionComplete . complete ( null ) ; } else { connectionComplete . completeExceptionally ( new ConnectionFailedException ( future . cause ( ) ) ) ; } } ) ; } catch ( Exception e ) { connectionComplete . completeExceptionally ( new ConnectionFailedException ( e ) ) ; } final CompletableFuture < Void > channelRegisteredFuture = new CompletableFuture < > ( ) ; handler . completeWhenRegistered ( channelRegisteredFuture ) ; return CompletableFuture . allOf ( connectionComplete , channelRegisteredFuture ) ; }
Establish a new connection to the Pravega Node .
23,261
/**
 * Records the addition of an element of the given size to the given generation: grows the
 * total size and increments that generation's element count.
 *
 * @param size       element size; non-negative.
 * @param generation target generation; non-negative.
 */
synchronized void add(long size, int generation) {
    Preconditions.checkArgument(size >= 0, "size must be a non-negative number");
    Preconditions.checkArgument(generation >= 0, "generation must be a non-negative number");
    this.totalSize += size;
    // merge(k, 1, sum) == getOrDefault(k, 0) + 1 followed by put.
    this.generations.merge(generation, 1, Integer::sum);
}
Records the addition of an element of the given size to the given generation .
23,262
/**
 * Records the removal of an element of the given size from the given generation. The total
 * size is clamped at zero so over-removal cannot drive it negative.
 *
 * @param size       element size; non-negative.
 * @param generation generation the element is removed from.
 */
synchronized void remove(long size, int generation) {
    Preconditions.checkArgument(size >= 0, "size must be a non-negative number");
    this.totalSize = Math.max(0, this.totalSize - size);
    removeFromGeneration(generation);
}
Records the removal of an element of the given size from the given generation .
23,263
/**
 * Builds a CacheManager.CacheStatus from this summary: total size plus the oldest and newest
 * generations present. With no generations recorded, both bounds collapse to 0 (the
 * min(newest, oldest) guard handles the empty case, where oldest stays at MAX_VALUE).
 */
synchronized CacheManager.CacheStatus toCacheStatus() {
    int oldest = Integer.MAX_VALUE;
    int newest = 0;
    for (int generation : this.generations.keySet()) {
        if (generation < oldest) {
            oldest = generation;
        }
        if (generation > newest) {
            newest = generation;
        }
    }
    return new CacheManager.CacheStatus(this.totalSize, Math.min(newest, oldest), newest);
}
Generates a CacheManager . CacheStatus object with the information in this ReadIndexSummary object .
23,264
/**
 * Completes the operation with failure: marks it done and, when a failure handler is set,
 * invokes it safely (handler exceptions are logged, not propagated).
 *
 * @param ex the failure cause passed to the handler.
 */
public void fail(Throwable ex) {
    this.done = true;
    if (this.failureHandler == null) {
        return;
    }
    Callbacks.invokeSafely(this.failureHandler, ex,
            cex -> log.error("Fail Callback invocation failure.", cex));
}
Completes the operation with failure .
23,265
/**
 * Wraps a page that already exists in the tree (isNew = false).
 *
 * @param page    the page to wrap.
 * @param parent  wrapper of the parent page.
 * @param pointer pointer locating the page.
 * @return a new PageWrapper marked as existing.
 */
static PageWrapper wrapExisting(BTreePage page, PageWrapper parent, PagePointer pointer) {
    final boolean isNewPage = false;
    return new PageWrapper(page, parent, pointer, isNewPage);
}
Creates a new instance of the PageWrapper class for an existing Page .
23,266
/**
 * Wraps a freshly created page (isNew = true).
 *
 * @param page    the page to wrap.
 * @param parent  wrapper of the parent page.
 * @param pointer pointer locating the page.
 * @return a new PageWrapper marked as new.
 */
static PageWrapper wrapNew(BTreePage page, PageWrapper parent, PagePointer pointer) {
    final boolean isNewPage = true;
    return new PageWrapper(page, parent, pointer, isNewPage);
}
Creates a new instance of the PageWrapper class for a new Page .
23,267
/**
 * Updates the offset of the wrapped BTreePage. Reassignment is only permitted while the
 * current offset still matches the pointer's recorded offset (or when there is no pointer);
 * once the offset has diverged from the pointer, any further assignment throws.
 *
 * @param value the new offset.
 * @throws IllegalStateException if the offset was already assigned a different value.
 */
void setOffset ( long value ) { if ( this . pointer != null && this . offset . get ( ) != this . pointer . getOffset ( ) ) { throw new IllegalStateException ( "Cannot assign offset more than once." ) ; } this . offset . set ( value ) ; }
Updates the offset of the wrapped BTreePage .
23,268
/**
 * Rebases this update transaction onto the given base metadata (same container, same recovery
 * mode), refreshing the derived limits and resetting the next sequence number.
 *
 * @param baseMetadata the new base; must match this transaction's container id and mode.
 */
void rebase(ContainerMetadata baseMetadata) {
    Preconditions.checkArgument(this.containerId == baseMetadata.getContainerId(), "ContainerId mismatch");
    Preconditions.checkArgument(this.isRecoveryMode() == baseMetadata.isRecoveryMode(), "isRecoveryMode mismatch");
    this.baseMetadata = baseMetadata;
    this.maximumActiveSegmentCount = baseMetadata.getMaximumActiveSegmentCount();
    this.baseNewSegmentCount = getNewSegmentCount(baseMetadata);
    resetNewSequenceNumber();
}
Rebases this UpdateTransaction to the given ContainerMetadata .
23,269
/**
 * Commits all pending changes to the target container metadata (same container id and mode).
 * In recovery mode, a processed checkpoint first resets the target, and the pending sequence
 * number is installed. Then each per-segment update transaction is applied to the target's
 * segment metadata (falling back to this transaction's newly created segments when the target
 * does not know the segment yet), new segments are copied over, new truncation points are
 * registered, and finally this transaction is cleared.
 *
 * @param target the container metadata to apply changes to.
 */
void commit ( UpdateableContainerMetadata target ) { Preconditions . checkArgument ( target . getContainerId ( ) == this . containerId , "ContainerId mismatch" ) ; Preconditions . checkArgument ( target . isRecoveryMode ( ) == this . isRecoveryMode ( ) , "isRecoveryMode mismatch" ) ; if ( target . isRecoveryMode ( ) ) { if ( this . processedCheckpoint ) { target . reset ( ) ; } assert this . newSequenceNumber >= ContainerMetadata . INITIAL_OPERATION_SEQUENCE_NUMBER : "Invalid Sequence Number " + this . newSequenceNumber ; target . setOperationSequenceNumber ( this . newSequenceNumber ) ; } this . segmentUpdates . values ( ) . forEach ( txn -> { UpdateableSegmentMetadata targetSegmentMetadata = target . getStreamSegmentMetadata ( txn . getId ( ) ) ; if ( targetSegmentMetadata == null ) { targetSegmentMetadata = this . newSegments . get ( txn . getId ( ) ) ; } txn . apply ( targetSegmentMetadata ) ; } ) ; copySegmentMetadata ( this . newSegments . values ( ) , target ) ; this . newTruncationPoints . forEach ( target :: setValidTruncationPoint ) ; clear ( ) ; }
Commits all pending changes to the given target Container Metadata .
23,270
/**
 * Pre-processes an operation against this update transaction, dispatching on concrete type.
 * Segment operations are validated against the (non-deleted) segment's pending state; merges
 * validate both source and target; checkpoint and map operations are routed to the metadata
 * handlers. Throws StreamSegmentNotExistsException for deleted segments and rejects calls on a
 * sealed transaction.
 *
 * @param operation the operation to pre-process.
 * @throws ContainerException     if container-level validation fails.
 * @throws StreamSegmentException if segment-level validation fails.
 */
void preProcessOperation ( Operation operation ) throws ContainerException , StreamSegmentException { checkNotSealed ( ) ; if ( operation instanceof SegmentOperation ) { val segmentMetadata = getSegmentUpdateTransaction ( ( ( SegmentOperation ) operation ) . getStreamSegmentId ( ) ) ; if ( segmentMetadata . isDeleted ( ) ) { throw new StreamSegmentNotExistsException ( segmentMetadata . getName ( ) ) ; } if ( operation instanceof StreamSegmentAppendOperation ) { segmentMetadata . preProcessOperation ( ( StreamSegmentAppendOperation ) operation ) ; } else if ( operation instanceof StreamSegmentSealOperation ) { segmentMetadata . preProcessOperation ( ( StreamSegmentSealOperation ) operation ) ; } else if ( operation instanceof MergeSegmentOperation ) { MergeSegmentOperation mbe = ( MergeSegmentOperation ) operation ; SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction ( mbe . getSourceSegmentId ( ) ) ; sourceMetadata . preProcessAsSourceSegment ( mbe ) ; segmentMetadata . preProcessAsTargetSegment ( mbe , sourceMetadata ) ; } else if ( operation instanceof UpdateAttributesOperation ) { segmentMetadata . preProcessOperation ( ( UpdateAttributesOperation ) operation ) ; } else if ( operation instanceof StreamSegmentTruncateOperation ) { segmentMetadata . preProcessOperation ( ( StreamSegmentTruncateOperation ) operation ) ; } else if ( operation instanceof DeleteSegmentOperation ) { segmentMetadata . preProcessOperation ( ( DeleteSegmentOperation ) operation ) ; } } if ( operation instanceof MetadataCheckpointOperation ) { processMetadataOperation ( ( MetadataCheckpointOperation ) operation ) ; } else if ( operation instanceof StorageMetadataCheckpointOperation ) { processMetadataOperation ( ( StorageMetadataCheckpointOperation ) operation ) ; } else if ( operation instanceof StreamSegmentMapOperation ) { preProcessMetadataOperation ( ( StreamSegmentMapOperation ) operation ) ; } }
Pre - processes the given Operation . See OperationMetadataUpdater . preProcessOperation for more details on behavior .
23,271
/**
 * Accepts an operation into the pending transaction, dispatching on concrete type: segment
 * operations update the affected segment transactions (stamping lastUsed with the operation's
 * sequence number; merges touch both source and target), metadata checkpoints register a new
 * truncation point, and map operations are routed to acceptMetadataOperation. Rejects calls on
 * a sealed transaction.
 *
 * @param operation the operation to accept.
 * @throws MetadataUpdateException if no pending transaction exists for the segment.
 */
void acceptOperation ( Operation operation ) throws MetadataUpdateException { checkNotSealed ( ) ; if ( operation instanceof SegmentOperation ) { val segmentMetadata = getSegmentUpdateTransaction ( ( ( SegmentOperation ) operation ) . getStreamSegmentId ( ) ) ; segmentMetadata . setLastUsed ( operation . getSequenceNumber ( ) ) ; if ( operation instanceof StreamSegmentAppendOperation ) { segmentMetadata . acceptOperation ( ( StreamSegmentAppendOperation ) operation ) ; } else if ( operation instanceof StreamSegmentSealOperation ) { segmentMetadata . acceptOperation ( ( StreamSegmentSealOperation ) operation ) ; } else if ( operation instanceof MergeSegmentOperation ) { MergeSegmentOperation mto = ( MergeSegmentOperation ) operation ; SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction ( mto . getSourceSegmentId ( ) ) ; sourceMetadata . acceptAsSourceSegment ( mto ) ; sourceMetadata . setLastUsed ( operation . getSequenceNumber ( ) ) ; segmentMetadata . acceptAsTargetSegment ( mto , sourceMetadata ) ; } else if ( operation instanceof UpdateAttributesOperation ) { segmentMetadata . acceptOperation ( ( UpdateAttributesOperation ) operation ) ; } else if ( operation instanceof StreamSegmentTruncateOperation ) { segmentMetadata . acceptOperation ( ( StreamSegmentTruncateOperation ) operation ) ; } else if ( operation instanceof DeleteSegmentOperation ) { segmentMetadata . acceptOperation ( ( DeleteSegmentOperation ) operation ) ; } } if ( operation instanceof MetadataCheckpointOperation ) { this . newTruncationPoints . add ( operation . getSequenceNumber ( ) ) ; } else if ( operation instanceof StreamSegmentMapOperation ) { acceptMetadataOperation ( ( StreamSegmentMapOperation ) operation ) ; } }
Accepts the given Operation . The Operation s effects are reflected in the pending transaction . This method has no effect on Metadata Operations . See OperationMetadataUpdater . acceptOperation for more details on behavior .
23,272
/**
 * Returns the pending update transaction for the given segment.
 *
 * @param segmentId id of the segment.
 * @return the segment's pending update transaction; never null.
 * @throws MetadataUpdateException if no metadata entry exists for the segment.
 */
private SegmentMetadataUpdateTransaction getSegmentUpdateTransaction(long segmentId) throws MetadataUpdateException {
    SegmentMetadataUpdateTransaction txn = tryGetSegmentUpdateTransaction(segmentId);
    if (txn != null) {
        return txn;
    }
    throw new MetadataUpdateException(this.containerId,
            String.format("No metadata entry exists for Segment Id %d.", segmentId));
}
Gets all pending changes for the given Segment .
23,273
/**
 * Returns the pending update transaction for the given segment, creating and registering a
 * new one (backed by freshly created segment metadata) if none exists yet.
 *
 * @param segmentName name of the segment.
 * @param segmentId   id of the segment.
 * @return the existing or newly created update transaction.
 */
private SegmentMetadataUpdateTransaction getOrCreateSegmentUpdateTransaction(String segmentName, long segmentId) {
    SegmentMetadataUpdateTransaction txn = tryGetSegmentUpdateTransaction(segmentId);
    if (txn != null) {
        return txn;
    }
    txn = new SegmentMetadataUpdateTransaction(createSegmentMetadata(segmentName, segmentId), this.recoveryMode);
    this.segmentUpdates.put(segmentId, txn);
    return txn;
}
Gets an UpdateableSegmentMetadata for the given Segment . If already registered it returns that instance otherwise it creates and records a new Segment metadata .
23,274
/**
 * Attempts to get (or lazily create) the pending update transaction for the segment. The
 * lookup order is: existing pending transaction, then base metadata, then this transaction's
 * newly created segments; a hit in either metadata source wraps it in a new transaction and
 * caches it in segmentUpdates. Returns null when the segment is unknown everywhere.
 *
 * @param segmentId id of the segment.
 */
private SegmentMetadataUpdateTransaction tryGetSegmentUpdateTransaction ( long segmentId ) { SegmentMetadataUpdateTransaction sm = this . segmentUpdates . getOrDefault ( segmentId , null ) ; if ( sm == null ) { SegmentMetadata baseSegmentMetadata = this . baseMetadata . getStreamSegmentMetadata ( segmentId ) ; if ( baseSegmentMetadata == null ) { baseSegmentMetadata = this . newSegments . getOrDefault ( segmentId , null ) ; } if ( baseSegmentMetadata != null ) { sm = new SegmentMetadataUpdateTransaction ( baseSegmentMetadata , this . recoveryMode ) ; this . segmentUpdates . put ( segmentId , sm ) ; } } return sm ; }
Attempts to get a SegmentMetadataUpdateTransaction for an existing or new Segment .
23,275
/**
 * Creates fresh segment metadata for the given segment and registers it in both the id-keyed
 * and name-keyed maps of new segments.
 *
 * @param segmentName name of the segment.
 * @param segmentId   id of the segment.
 * @return the newly created, registered metadata.
 */
private UpdateableSegmentMetadata createSegmentMetadata(String segmentName, long segmentId) {
    UpdateableSegmentMetadata newMetadata = new StreamSegmentMetadata(segmentName, segmentId, this.containerId);
    this.newSegments.put(newMetadata.getId(), newMetadata);
    this.newSegmentNames.put(newMetadata.getName(), newMetadata.getId());
    return newMetadata;
}
Creates a new UpdateableSegmentMetadata for the given Segment and registers it .
23,276
/**
 * Writes a single byte at the current position and advances it.
 *
 * @param b the byte to write (low 8 bits used).
 * @throws IOException if the stream is closed or the buffer capacity is exhausted.
 */
public void write(int b) throws IOException {
    if (this.isClosed) {
        throw new IOException("OutputStream is closed.");
    }
    if (this.position >= this.length) {
        throw new IOException("Buffer capacity exceeded.");
    }
    // Post-increment folds the position bump into the store.
    this.array[this.offset + this.position++] = (byte) b;
}
region OutputStream and RandomAccessOutputStream Implementation
23,277
/**
 * Builds a master JWT for internal controller-to-segmentstore communication, granting
 * READ_UPDATE on all resources ("*").
 *
 * BUG FIX: JJWT's {@code setClaims} REPLACES the builder's entire claims map, so the original
 * order (setSubject, setAudience, then setClaims) silently discarded the subject and audience.
 * Claims are now installed first, then subject/audience are layered on top.
 *
 * @param tokenSigningKey key used to HMAC-SHA512 sign the token.
 * @return the compact serialized JWT.
 */
public static String retrieveMasterToken(String tokenSigningKey) {
    Map<String, Object> customClaims = new HashMap<>();
    customClaims.put("*", String.valueOf(READ_UPDATE));
    return Jwts.builder()
            .setClaims(customClaims)            // must come first: replaces the claims map
            .setSubject("segmentstoreresource")
            .setAudience("segmentstore")
            // NOTE(review): getBytes() uses the platform charset; left as-is because the
            // verifying side derives the key the same way — confirm before changing.
            .signWith(SignatureAlgorithm.HS512, tokenSigningKey.getBytes())
            .compact();
}
Retrieves a master token for internal controller to segmentstore communication .
23,278
/**
 * Decides whether the async read processor should request contents for this entry type:
 * Cache, Storage and Future entries are fetched; all other types are not.
 *
 * @param entryType           type of the read result entry.
 * @param streamSegmentOffset offset of the entry (unused in this decision).
 * @return true when contents should be requested.
 */
public boolean shouldRequestContents(ReadResultEntryType entryType, long streamSegmentOffset) {
    switch (entryType) {
        case Cache:
        case Storage:
        case Future:
            return true;
        default:
            return false;
    }
}
region AsyncReadResultHandler implementation
23,279
/**
 * Sets the contents of this MetadataCheckpointOperation. May be called at most once.
 *
 * @param contents the contents to set; must not be null.
 * @throws NullPointerException  if contents is null.
 * @throws IllegalStateException if contents were already set.
 */
public void setContents(ByteArraySegment contents) {
    // Same exception types/messages as the original Preconditions-based checks.
    if (contents == null) {
        throw new NullPointerException("contents");
    }
    if (this.contents != null) {
        throw new IllegalStateException("This operation has already had its contents set.");
    }
    this.contents = contents;
}
Sets the Contents of this MetadataCheckpointOperation .
23,280
/**
 * Compares two non-null ByteArraySegments of the same length using lexicographic bitwise
 * comparison, by delegating to the array-based overload over each segment's backing array and
 * offset. The equal-length requirement is enforced only via assert.
 *
 * @return negative/zero/positive per the usual comparator contract.
 */
int compare ( ByteArraySegment b1 , ByteArraySegment b2 ) { assert b1 . getLength ( ) == b2 . getLength ( ) ; return compare ( b1 . array ( ) , b1 . arrayOffset ( ) , b2 . array ( ) , b2 . arrayOffset ( ) , b1 . getLength ( ) ) ; }
Compares two non - null ByteArraySegments of the same length using lexicographic bitwise comparison .
23,281
/**
 * Lexicographic bitwise comparison of two byte ranges of equal length, treating bytes as
 * unsigned (0..255).
 *
 * @param b1      first array; @param offset1 start in b1.
 * @param b2      second array; @param offset2 start in b2.
 * @param length  number of bytes to compare.
 * @return the first non-zero unsigned byte difference, or 0 when the ranges are equal.
 */
int compare(byte[] b1, int offset1, byte[] b2, int offset2, int length) {
    for (int i = 0; i < length; i++) {
        int diff = (b1[offset1 + i] & 0xFF) - (b2[offset2 + i] & 0xFF);
        if (diff != 0) {
            return diff;
        }
    }
    return 0;
}
Compares two byte arrays from the given offsets using lexicographic bitwise comparison .
23,282
/**
 * Transitions the AbstractService to TERMINATED or FAILED. The effective exception is the run
 * exception, falling back to the recorded stop exception; CancellationException counts as a
 * clean stop. When both a stop and a (different) run exception exist, the run exception is
 * attached as suppressed and the stop exception wins.
 *
 * @param runException exception from the service's run loop; may be null.
 */
private void notifyStoppedOrFailed ( Throwable runException ) { final Throwable stopException = this . stopException . get ( ) ; if ( runException == null ) { runException = stopException ; } if ( runException instanceof CancellationException ) { runException = null ; } if ( runException == null ) { notifyStopped ( ) ; } else { if ( stopException != null && stopException != runException ) { stopException . addSuppressed ( runException ) ; runException = stopException ; } notifyFailed ( runException ) ; } log . info ( "{}: Stopped." , this . traceObjectId ) ; }
Notifies the AbstractService to enter the TERMINATED or FAILED state based on the current state of the Service and the given Exception .
23,283
/**
 * Issues (async, fire-and-forget) abort requests for every OPEN transaction on the stream.
 * Returns true when there were no active transactions, false when aborts were issued. Known
 * store exceptions during abort (IllegalState/WriteConflict/DataNotFound) are logged at debug
 * and swallowed; anything else is logged at warn and also swallowed, so this call never fails
 * due to an individual abort.
 *
 * @param context   operation context.
 * @param scope     scope name.
 * @param stream    stream name.
 * @param requestId id used for request-scoped logging.
 */
private CompletableFuture < Boolean > abortTransaction ( OperationContext context , String scope , String stream , long requestId ) { return streamMetadataStore . getActiveTxns ( scope , stream , context , executor ) . thenCompose ( activeTxns -> { if ( activeTxns == null || activeTxns . isEmpty ( ) ) { return CompletableFuture . completedFuture ( true ) ; } else { return Futures . allOf ( activeTxns . entrySet ( ) . stream ( ) . map ( txIdPair -> { CompletableFuture < Void > voidCompletableFuture ; if ( txIdPair . getValue ( ) . getTxnStatus ( ) . equals ( TxnStatus . OPEN ) ) { voidCompletableFuture = Futures . toVoid ( streamTransactionMetadataTasks . abortTxn ( scope , stream , txIdPair . getKey ( ) , null , context ) . exceptionally ( e -> { Throwable cause = Exceptions . unwrap ( e ) ; if ( cause instanceof StoreException . IllegalStateException || cause instanceof StoreException . WriteConflictException || cause instanceof StoreException . DataNotFoundException ) { log . debug ( requestId , "A known exception thrown during seal stream " + "while trying to abort transaction on stream {}/{}" , scope , stream , cause ) ; } else { log . warn ( requestId , "Exception thrown during seal stream while trying " + "to abort transaction on stream {}/{}" , scope , stream , cause ) ; } return null ; } ) ) ; } else { voidCompletableFuture = CompletableFuture . completedFuture ( null ) ; } return voidCompletableFuture ; } ) . collect ( Collectors . toList ( ) ) ) . thenApply ( v -> false ) ; } } ) ; }
A method that issues abort request for all outstanding transactions on the stream which are processed asynchronously . This method returns false if it found transactions to abort true otherwise .
23,284
/**
 * Starts an in-process ZooKeeper server on the loopback address at the configured port, using
 * a Netty connection factory, and blocks until the server answers (or throws if it does not).
 * Guarded against double-start via compareAndSet on the server reference; requires initialize()
 * to have populated tmpDir first.
 * NOTE(review): keyStorePassword is passed for BOTH the keyStore and trustStore password-path
 * arguments of waitForServerUp — confirm the two stores intentionally share a password.
 *
 * @throws Exception if the server cannot be configured or fails to come up.
 */
public void start ( ) throws Exception { Preconditions . checkState ( this . tmpDir . get ( ) != null , "Not Initialized." ) ; val s = new ZooKeeperServer ( this . tmpDir . get ( ) , this . tmpDir . get ( ) , ZooKeeperServer . DEFAULT_TICK_TIME ) ; if ( ! this . server . compareAndSet ( null , s ) ) { s . shutdown ( ) ; throw new IllegalStateException ( "Already started." ) ; } this . serverFactory . set ( NettyServerCnxnFactory . createFactory ( ) ) ; val address = LOOPBACK_ADDRESS + ":" + this . zkPort ; log . info ( "Starting Zookeeper server at " + address + " ..." ) ; this . serverFactory . get ( ) . configure ( new InetSocketAddress ( LOOPBACK_ADDRESS , this . zkPort ) , 1000 , secureZK ) ; this . serverFactory . get ( ) . startup ( s ) ; if ( ! waitForServerUp ( this . zkPort , this . secureZK , this . trustStore , this . keyStore , this . keyStorePassword , this . keyStorePassword ) ) { throw new IllegalStateException ( "ZooKeeper server failed to start" ) ; } }
Starts the ZooKeeper Service in process .
23,285
/**
 * Blocks until the local ZooKeeper instance on the given port answers, choosing the SSL or
 * plain probe based on {@code secureZk}.
 *
 * @param zkPort                 local ZooKeeper port.
 * @param secureZk               whether the server runs with TLS.
 * @param trustStore             trust store path (TLS only).
 * @param keyStore               key store path (TLS only).
 * @param keyStorePasswordPath   path to the key store password file (TLS only).
 * @param trustStorePasswordPath path to the trust store password file (TLS only).
 * @return true when the server came up within the connection timeout.
 */
public static boolean waitForServerUp(int zkPort, boolean secureZk, String trustStore, String keyStore,
                                      String keyStorePasswordPath, String trustStorePasswordPath) {
    String address = LOOPBACK_ADDRESS + ":" + zkPort;
    if (!secureZk) {
        return LocalBookKeeper.waitForServerUp(address, LocalBookKeeper.CONNECTION_TIMEOUT);
    }
    return waitForSSLServerUp(address, LocalBookKeeper.CONNECTION_TIMEOUT, trustStore, keyStore,
            keyStorePasswordPath, trustStorePasswordPath);
}
Blocks the current thread and awaits ZooKeeper to start running locally on the given port .
23,286
/**
 * Entry point for running ZooKeeper out-of-process (e.g. via ProcessStarter). Reads the port,
 * secure flag and key/trust store paths from system properties, exits with -1 on bad or
 * missing arguments, then initializes/starts the runner and sleeps forever so the process
 * stays alive until killed.
 * NOTE(review): the error message mentions only the port property even though other property
 * parsing can also fail inside the try block.
 */
public static void main ( String [ ] args ) throws Exception { int zkPort ; boolean secureZK = false ; String zkKeyStore ; String zkKeyStorePasswd = null ; String zkTrustStore = null ; try { zkPort = Integer . parseInt ( System . getProperty ( PROPERTY_ZK_PORT ) ) ; secureZK = Boolean . parseBoolean ( System . getProperty ( PROPERTY_SECURE_ZK , "false" ) ) ; zkKeyStore = System . getProperty ( PROPERTY_ZK_KEY_STORE ) ; zkKeyStorePasswd = System . getProperty ( PROPERTY_ZK_KEY_STORE_PASSWORD ) ; zkTrustStore = System . getProperty ( PROPERTY_ZK_TRUST_STORE ) ; } catch ( Exception ex ) { System . out . println ( String . format ( "Invalid or missing arguments (via system properties). Expected: %s(int). (%s)" , PROPERTY_ZK_PORT , ex . getMessage ( ) ) ) ; System . exit ( - 1 ) ; return ; } ZooKeeperServiceRunner runner = new ZooKeeperServiceRunner ( zkPort , secureZK , zkKeyStore , zkKeyStorePasswd , zkTrustStore ) ; runner . initialize ( ) ; runner . start ( ) ; Thread . sleep ( Long . MAX_VALUE ) ; }
Main method that can be used to start ZooKeeper out - of - process using ZooKeeperServiceRunner . This is used when invoking this class via ProcessStarter .
23,287
/**
 * Translates an HDFS-specific exception into the Pravega-equivalent StreamSegmentException.
 * Exceptions with no Pravega equivalent are rethrown unchecked.
 *
 * @param segmentName Name of the Segment the exception relates to.
 * @param e           The exception to translate (RemoteExceptions are unwrapped first).
 * @return The translated StreamSegmentException.
 */
static <T> StreamSegmentException convertException(String segmentName, Throwable e) {
    // RemoteException wraps the real server-side cause; unwrap it before classifying.
    Throwable cause = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
    if (cause instanceof PathNotFoundException || cause instanceof FileNotFoundException) {
        return new StreamSegmentNotExistsException(segmentName, cause);
    }
    if (cause instanceof FileAlreadyExistsException || cause instanceof AlreadyBeingCreatedException) {
        return new StreamSegmentExistsException(segmentName, cause);
    }
    if (cause instanceof AclException) {
        // NOTE(review): ACL failures appear to be how this adapter marks sealed segments in HDFS
        // (read-only permissions) — confirm against the writer-side seal implementation.
        return new StreamSegmentSealedException(segmentName, cause);
    }
    // No known mapping: propagate as-is without forcing a checked declaration.
    throw Exceptions.sneakyThrow(cause);
}
Translates HDFS-specific exceptions into their Pravega-equivalent exceptions.
23,288
/**
 * Reads the next batch of Operations from the OperationLog (data source), starting after the
 * last read sequence number recorded in the writer state.
 *
 * @param ignored Unused (signature shape required by the async pipeline chaining).
 * @return A CompletableFuture with an Iterator over the read Operations, or completing with
 * null if the read timed out with no items — a normal, non-exceptional outcome.
 */
private CompletableFuture<Iterator<Operation>> readData(Void ignored) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "readData");
    try {
        Duration readTimeout = getReadTimeout();
        return this.dataSource
                .read(this.state.getLastReadSequenceNumber(), this.config.getMaxItemsToReadAtOnce(), readTimeout)
                .thenApply(result -> {
                    LoggerHelpers.traceLeave(log, this.traceObjectId, "readData", traceId);
                    return result;
                })
                .exceptionally(ex -> {
                    ex = Exceptions.unwrap(ex);
                    if (ex instanceof TimeoutException) {
                        // An empty read within the allotted timeout is expected; surface it as a
                        // null result so the next stage can simply skip this iteration.
                        log.debug("{}: Iteration[{}] No items were read during allotted timeout of {}ms", this.traceObjectId, this.state.getIterationId(), readTimeout.toMillis());
                        return null;
                    } else {
                        throw new CompletionException(ex);
                    }
                });
    } catch (Throwable ex) {
        // read() may also fail synchronously: treat a synchronous timeout the same way as an
        // asynchronous one (handled, empty result); anything else becomes a failed future.
        Throwable realEx = Exceptions.unwrap(ex);
        if (realEx instanceof TimeoutException) {
            logErrorHandled(realEx);
            return CompletableFuture.completedFuture(null);
        } else {
            return Futures.failedFuture(ex);
        }
    }
}
Reads data from the OperationLog .
23,289
/**
 * Processes all the Operations in the given read result, in order, advancing the last-read
 * sequence number only after each operation has been successfully processed.
 *
 * @param readResult Iterator over the Operations to process. May be null (the preceding read
 *                   timed out with no items), in which case only the stage stats are logged.
 * @return A CompletableFuture that completes when all operations have been processed, or the
 * loop stopped early because this component is shutting down.
 */
private CompletableFuture<Void> processReadResult(Iterator<Operation> readResult) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "processReadResult");
    InputReadStageResult result = new InputReadStageResult(this.state);
    if (readResult == null) {
        // Nothing was read this iteration (timeout); just record the (empty) stage stats.
        logStageEvent("InputRead", result);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "processReadResult", traceId);
        return CompletableFuture.completedFuture(null);
    }
    return Futures.loop(
            () -> canRun() && readResult.hasNext(), // Stop early if shutting down.
            () -> {
                Operation op = readResult.next();
                return processOperation(op).thenRun(() -> {
                    // Advance the read pointer only after successful processing, so a failure
                    // causes the operation to be re-read on the next iteration.
                    this.state.setLastReadSequenceNumber(op.getSequenceNumber());
                    result.operationProcessed(op);
                });
            },
            this.executor)
            .thenRun(() -> {
                logStageEvent("InputRead", result);
                LoggerHelpers.traceLeave(log, this.traceObjectId, "processReadResult", traceId);
            });
}
Processes all the operations in the given ReadResult .
23,290
/**
 * Flushes all eligible processors to Storage, in parallel. Does not perform any merges.
 * A "Flush" stage event is logged only if any bytes were flushed/merged or items processed.
 *
 * @param ignored Unused (signature shape required by the async pipeline chaining).
 * @return A CompletableFuture that completes when every eligible processor has been flushed
 * and the aggregate result has been recorded.
 */
private CompletableFuture<Void> flush(Void ignored) {
    checkRunning();
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flush");
    // Kick off a flush on every processor that reports it needs one.
    val pendingFlushes = this.processors.values().stream()
            .filter(ProcessorCollection::mustFlush)
            .map(p -> p.flush(this.config.getFlushTimeout()))
            .collect(Collectors.toList());
    return Futures.allOfWithResults(pendingFlushes).thenAcceptAsync(flushOutcomes -> {
        // Fold the individual outcomes into a single stage-level result.
        FlushStageResult aggregate = new FlushStageResult();
        for (val outcome : flushOutcomes) {
            aggregate.withFlushResult(outcome);
        }
        boolean didAnyWork = aggregate.getFlushedBytes() + aggregate.getMergedBytes() + aggregate.count > 0;
        if (didAnyWork) {
            logStageEvent("Flush", aggregate);
        }
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flush", traceId);
    }, this.executor);
}
Flushes eligible operations to Storage if necessary . Does not perform any mergers .
23,291
/**
 * Removes from the processor map all ProcessorCollections that are (or just became) closed.
 * Each processor is first given a chance to close via closeIfNecessary().
 */
private void cleanup() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "cleanup");
    // Collect the ids of everything that is closed, then evict them from the map.
    val closedSegmentIds = this.processors.values().stream()
            .map(this::closeIfNecessary)
            .filter(ProcessorCollection::isClosed)
            .map(ProcessorCollection::getId)
            .collect(Collectors.toList());
    for (val segmentId : closedSegmentIds) {
        this.processors.remove(segmentId);
    }
    LoggerHelpers.traceLeave(log, this.traceObjectId, "cleanup", traceId, closedSegmentIds.size());
}
Cleans up all SegmentAggregators that are currently closed .
23,292
/**
 * Acknowledges (truncates) operations that were flushed to Storage, up to the closest valid
 * truncation point at or below the highest committed sequence number. No-ops if that point
 * does not advance past what was already truncated.
 *
 * @param ignored Unused (signature shape required by the async pipeline chaining).
 * @return A CompletableFuture that completes when the acknowledgement (if any) is done.
 */
private CompletableFuture<Void> acknowledge(Void ignored) {
    checkRunning();
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "acknowledge");
    long highestCommitted = this.ackCalculator.getHighestCommittedSequenceNumber(this.processors.values());
    long truncationPoint = this.dataSource.getClosestValidTruncationPoint(highestCommitted);
    if (truncationPoint <= this.state.getLastTruncatedSequenceNumber()) {
        // Nothing new to truncate.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "acknowledge", traceId, Operation.NO_SEQUENCE_NUMBER);
        return CompletableFuture.completedFuture(null);
    }
    return this.dataSource
            .acknowledge(truncationPoint, this.config.getAckTimeout())
            .thenRun(() -> {
                this.state.setLastTruncatedSequenceNumber(truncationPoint);
                logStageEvent("Acknowledged", "SeqNo=" + truncationPoint);
                LoggerHelpers.traceLeave(log, this.traceObjectId, "acknowledge", traceId, truncationPoint);
            });
}
Acknowledges (truncates) operations that were successfully flushed to Storage.
23,293
/**
 * Gets (or creates and initializes) the ProcessorCollection for the given StreamSegment Id.
 * An existing-but-closed processor is evicted and replaced; a brand-new SegmentAggregator is
 * only registered in the map after its async initialization succeeds.
 *
 * @param streamSegmentId Id of the StreamSegment to get the processor for.
 * @return A CompletableFuture with the (initialized) ProcessorCollection, or failing with
 * DataCorruptionException if no such segment is registered in the metadata.
 */
private CompletableFuture<ProcessorCollection> getProcessor(long streamSegmentId) {
    ProcessorCollection existingProcessor = this.processors.getOrDefault(streamSegmentId, null);
    if (existingProcessor != null) {
        if (closeIfNecessary(existingProcessor).isClosed()) {
            // Existing processor became eligible for closing; evict it and build a new one below.
            this.processors.remove(streamSegmentId);
        } else {
            return CompletableFuture.completedFuture(existingProcessor);
        }
    }
    UpdateableSegmentMetadata segmentMetadata = this.dataSource.getStreamSegmentMetadata(streamSegmentId);
    if (segmentMetadata == null) {
        return Futures.failedFuture(new DataCorruptionException(String.format("No StreamSegment with id '%d' is registered in the metadata.", streamSegmentId)));
    }
    SegmentAggregator newAggregator = new SegmentAggregator(segmentMetadata, this.dataSource, this.storage, this.config, this.timer, this.executor);
    ProcessorCollection pc = new ProcessorCollection(newAggregator, this.createProcessors.apply(segmentMetadata));
    try {
        CompletableFuture<Void> init = newAggregator.initialize(this.config.getFlushTimeout());
        // NOTE(review): an async init failure closes only the aggregator, not the whole
        // ProcessorCollection (the synchronous catch below closes pc) — confirm the extra
        // processors need no cleanup on async failure.
        Futures.exceptionListener(init, ex -> newAggregator.close());
        return init.thenApply(ignored -> {
            // Register only after successful initialization.
            this.processors.put(streamSegmentId, pc);
            return pc;
        });
    } catch (Exception ex) {
        pc.close();
        throw ex;
    }
}
Gets or creates a SegmentAggregator (and its associated processors) for the given StreamSegment Id.
23,294
/**
 * Traces the fact that a method has exited normally, logging the elapsed time (in microseconds)
 * since the matching trace-enter call, plus any result values.
 *
 * @param log          The Logger to write to (only if TRACE is enabled).
 * @param method       Name of the method that is exiting.
 * @param traceEnterId Id returned by the matching trace-enter call.
 * @param args         Optional result values to include in the message.
 */
public static void traceLeave(Logger log, String method, long traceEnterId, Object... args) {
    if (!log.isTraceEnabled()) {
        return;
    }
    Object elapsed = ELAPSED_MICRO.apply(traceEnterId);
    if (args.length > 0) {
        log.trace("LEAVE {}@{} {} (elapsed={}us).", method, traceEnterId, args, elapsed);
    } else {
        log.trace("LEAVE {}@{} (elapsed={}us).", method, traceEnterId, elapsed);
    }
}
Traces the fact that a method has exited normally .
23,295
/**
 * Validates a user-created stream name: letters (upper or lower case), digits, '.' and '-' only.
 *
 * @param name The stream name to validate.
 * @return The (unmodified) name, for fluent use.
 * @throws NullPointerException     If name is null.
 * @throws IllegalArgumentException If name contains any disallowed characters or is empty.
 */
public static String validateUserStreamName(String name) {
    Preconditions.checkNotNull(name);
    // Fix: the previous message claimed only "a-z" was allowed, but \p{Alnum} also accepts A-Z.
    Preconditions.checkArgument(name.matches("[\\p{Alnum}\\.\\-]+"), "Name must be a-z, A-Z, 0-9, ., -.");
    return name;
}
Validates a user-created stream name.
23,296
/**
 * Validates an internal stream name: the same character set as user stream names, optionally
 * preceded by a single internal-name prefix character.
 *
 * @param name The stream name to validate.
 * @return The (unmodified) name, for fluent use.
 * @throws NullPointerException     If name is null.
 * @throws IllegalArgumentException If name does not match the allowed pattern.
 */
public static String validateStreamName(String name) {
    Preconditions.checkNotNull(name);
    String pattern = "[" + INTERNAL_NAME_PREFIX + "]?[\\p{Alnum}\\.\\-]+";
    boolean valid = name.matches(pattern);
    Preconditions.checkArgument(valid, "Name must be " + pattern);
    return name;
}
Validates an internal stream name .
23,297
/**
 * Gets (or creates and initializes) the AttributeIndex for the given Segment. Concurrent callers
 * for the same segment share a single initialization: the map holds a placeholder future that the
 * winning creator completes (or fails) once initialization finishes.
 *
 * @param streamSegmentId Id of the Segment to get the AttributeIndex for.
 * @param timeout         Timeout for the initialization, if one is needed.
 * @return A CompletableFuture with the AttributeIndex, or failing with
 * StreamSegmentNotExistsException if the segment is deleted.
 */
public CompletableFuture<AttributeIndex> forSegment(long streamSegmentId, Duration timeout) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    // NOTE(review): assumes the segment is registered in the container metadata (sm non-null);
    // a missing mapping would NPE on isDeleted() — confirm callers guarantee registration.
    SegmentMetadata sm = this.containerMetadata.getStreamSegmentMetadata(streamSegmentId);
    if (sm.isDeleted()) {
        return Futures.failedFuture(new StreamSegmentNotExistsException(sm.getName()));
    }
    CompletableFuture<AttributeIndex> result;
    AtomicReference<SegmentAttributeBTreeIndex> toInitialize = new AtomicReference<>();
    synchronized (this.attributeIndices) {
        // Atomically either fetch the existing (possibly still-initializing) future, or install a
        // fresh placeholder that we are now responsible for completing.
        result = this.attributeIndices.computeIfAbsent(streamSegmentId, id -> {
            toInitialize.set(new SegmentAttributeBTreeIndex(sm, this.storage, this.cache, this.config, this.executor));
            return new CompletableFuture<>();
        });
    }
    if (toInitialize.get() == null) {
        // Another caller created (or is creating) the index; share their future.
        return result;
    } else {
        try {
            toInitialize.get().initialize(timeout)
                    .thenRun(() -> this.cacheManager.register(toInitialize.get()))
                    .whenComplete((r, ex) -> {
                        if (ex == null) {
                            result.complete(toInitialize.get());
                        } else {
                            // Fail the shared future and clean up the map entry.
                            indexInitializationFailed(streamSegmentId, result, ex);
                        }
                    });
        } catch (Throwable ex) {
            if (!Exceptions.mustRethrow(ex)) {
                indexInitializationFailed(streamSegmentId, result, ex);
            }
            throw ex;
        }
    }
    return result;
}
region ContainerAttributeIndex Implementation
23,298
/**
 * Attempts to locate something in a TableBucket that matches a particular key, by reading the
 * entry at the given offset and, if it does not match, following the chain of backpointers to
 * earlier entries until a match is found or the chain ends.
 *
 * @param soughtKey    The key to look for.
 * @param bucketOffset Segment offset of the newest entry in the bucket, where the search starts.
 * @param timer        Timer governing the remaining time for all reads and backpointer lookups.
 * @return A CompletableFuture with the located result, or null if the backpointer chain was
 * exhausted without a match; failed if any read/lookup fails.
 */
CompletableFuture<ResultT> find(ArrayView soughtKey, long bucketOffset, TimeoutTimer timer) {
    int maxReadLength = getMaxReadLength();
    AtomicLong offset = new AtomicLong(bucketOffset);
    CompletableFuture<ResultT> result = new CompletableFuture<>();
    // Loop until `result` is completed, either by a definitive answer or by end-of-chain.
    Futures.loop(
            () -> !result.isDone(),
            () -> {
                ReadResult readResult = this.segment.read(offset.get(), maxReadLength, timer.getRemaining());
                val reader = getReader(soughtKey, offset.get(), timer);
                AsyncReadResultProcessor.process(readResult, reader, this.executor);
                return reader.getResult().thenComposeAsync(r -> {
                    SearchContinuation sc = processResult(r, soughtKey);
                    if (sc == SearchContinuation.ResultFound || sc == SearchContinuation.NoResult) {
                        // Definitive answer (found, or provably absent): stop the loop.
                        result.complete(r);
                    } else {
                        // Inconclusive: follow the backpointer to the previous entry in the bucket.
                        // A negative offset marks the end of the chain (no match anywhere).
                        return this.getBackpointer.apply(this.segment, offset.get(), timer.getRemaining())
                                .thenAccept(newOffset -> {
                                    offset.set(newOffset);
                                    if (newOffset < 0) {
                                        result.complete(null);
                                    }
                                });
                    }
                    return CompletableFuture.completedFuture(null);
                }, this.executor);
            },
            this.executor)
            .exceptionally(ex -> {
                // Propagate any failure from the loop body to the caller-visible future.
                result.completeExceptionally(ex);
                return null;
            });
    return result;
}
Attempts to locate something in a TableBucket that matches a particular key .
23,299
/**
 * Writes a single byte at the given position in the underlying buffer, overwriting whatever
 * was there. The value is narrowed to its low 8 bits, as in OutputStream.write(int).
 *
 * @param byteValue      The value to write (low 8 bits are used).
 * @param streamPosition Position within the buffer to write at.
 * @throws IndexOutOfBoundsException If streamPosition is outside the buffer bounds.
 */
public void write(int byteValue, int streamPosition) {
    Preconditions.checkElementIndex(streamPosition, this.buf.length, "streamPosition");
    byte narrowed = (byte) byteValue;
    this.buf[streamPosition] = narrowed;
}
region RandomAccessOutputStream Implementation