idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
37,000
/**
 * Gets the total stored byte count for a bucket by summing the byte counts
 * reported for each storage class (Standard, Standard-IA, Reduced Redundancy).
 *
 * @param bucketName name of the S3 bucket to measure
 * @param client CloudWatch client used to retrieve the metric data
 * @return combined number of stored bytes across all storage types
 */
private long getTotalBytes(String bucketName, AmazonCloudWatch client) {
    final String metric = "BucketSizeBytes";
    long total = 0;
    for (String storageType : new String[] {"StandardStorage",
                                            "StandardIAStorage",
                                            "ReducedRedundancyStorage"}) {
        total += getMetricData(bucketName, metric, storageType, client);
    }
    return total;
}
Gets the total byte count for all storage types and combines them to produce a single stored bytes value
37,001
/**
 * Creates the CloudWatch request used to ask for S3 bucket metrics: the
 * "Maximum" statistic over the last 24 hours (UTC) with a 360-second period,
 * dimensioned by bucket name and storage type.
 */
private GetMetricStatisticsRequest buildRequest(String bucketName, String metricName, String storageType) {
    Calendar utcCal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    Date windowEnd = utcCal.getTime();
    utcCal.add(Calendar.DATE, -1); // window start is 24 hours before the end
    Date windowStart = utcCal.getTime();

    List<Dimension> dims = new ArrayList<>();
    dims.add(new Dimension().withName("BucketName").withValue(bucketName));
    dims.add(new Dimension().withName("StorageType").withValue(storageType));

    GetMetricStatisticsRequest req = new GetMetricStatisticsRequest();
    req.setMetricName(metricName);
    req.setNamespace("AWS/S3");
    req.setPeriod(360);
    req.setStatistics(Collections.singletonList("Maximum"));
    req.setStartTime(windowStart);
    req.setEndTime(windowEnd);
    req.setDimensions(dims);
    return req;
}
Create the request to ask for bucket metrics
37,002
/**
 * Writes a zero-length file to the work directory to mark the completion of a run.
 */
private void writeCompletionFile() {
    File completionFile = getCompletionFile();
    try {
        if (completionFile.createNewFile()) {
            log.info("successfully created completion marker file: {}",
                     completionFile.getAbsolutePath());
        } else {
            // createNewFile() returns false when the file already exists.
            // (Typo fix: was "unexpectably".)
            log.warn("completion marker file unexpectedly exists already "
                     + "- something may be amiss: {}", completionFile.getAbsolutePath());
        }
    } catch (IOException e) {
        // Pass the exception itself so the stack trace is preserved in the log,
        // instead of logging only e.getMessage().
        log.error("Unable to create the completion file " + completionFile.getAbsolutePath(), e);
    }
}
Writes zero length file to the work directory to mark the completion of a run .
37,003
/**
 * Loads the morsels from the persistent state if there are any; otherwise
 * loads the queue from the source.
 *
 * @return a queue of morsels, de-duplicated while preserving order
 */
private Queue<T> loadMorselQueue() {
    Queue<T> queue = createQueue();
    // LinkedHashSet removes duplicates while keeping the persisted order
    queue.addAll(new LinkedHashSet<>(this.stateManager.getMorsels()));
    if (queue.isEmpty()) {
        loadMorselQueueFromSource(queue);
    }
    return queue;
}
Loads the morsels from the persistent state if there are any; otherwise it loads all other morsels based on the duplication policy manager.
37,004
/**
 * Returns true if the path should be skipped. A path is included when the
 * inclusions list is null/empty or the path matches an inclusion; an included
 * path is still excluded when it matches an item in the exclusions list.
 *
 * @param path the path to check
 * @return true when the path is excluded (i.e. should be skipped)
 */
public boolean isExcluded(String path) {
    boolean hasInclusions = inclusions != null && !inclusions.isEmpty();
    if (hasInclusions && !matchesList(path, inclusions, false)) {
        log.debug("{} does not match an inclusion: skipping...", path);
        return true;
    }
    if (exclusions != null && matchesList(path, exclusions, true)) {
        log.debug("{} matches exclusions: skipping...", path);
        return true;
    }
    return false;
}
Returns true if the path is included and not excluded . A path is included if the inclusions list is null or empty or the path matches an item in the list .
37,005
/**
 * Returns an Iterator containing the difference of the elements in the
 * provided Iterators: all elements contained by iterA and not contained by
 * iterB. Elements of iterB absent from iterA are ignored.
 *
 * <p>Uses a temporary cache for membership tests and a temp file to spool the
 * difference; both are now released even when iteration or I/O fails.
 *
 * @throws IOException if writing the temporary difference file fails
 */
public static Iterator<String> difference(Iterator<String> iterA, Iterator<String> iterB) throws IOException {
    String cacheName = "compare-" + System.currentTimeMillis();
    cacheManager.addCache(cacheName);
    Cache cache = cacheManager.getCache(cacheName);
    try {
        // Load iterB into the cache for fast membership checks.
        while (iterB.hasNext()) {
            cache.put(new Element(iterB.next(), null));
        }
        int diffCnt = 0;
        File diffFile = new File(System.getProperty("java.io.tmpdir")
                + File.separator + "diff-" + System.currentTimeMillis() + ".txt");
        // try-with-resources guarantees the writer is closed on any exit path
        // (the original leaked the FileWriter if iterA.next()/write() threw).
        try (FileWriter fileWriter = new FileWriter(diffFile)) {
            while (iterA.hasNext()) {
                String item = iterA.next();
                if (!cache.isKeyInCache(item)) {
                    fileWriter.write(item + "\n");
                    diffCnt++;
                    if (diffCnt % 100 == 0) {
                        fileWriter.flush();
                    }
                }
            }
        }
        if (diffCnt > 0) {
            return new FileLineIterator(diffFile);
        }
        diffFile.delete();
        return new ArrayList<String>(0).iterator();
    } finally {
        // Always release the temporary cache, even on failure.
        cache.removeAll();
        cacheManager.removeCache(cache.getName());
    }
}
Returns an Iterator containing the difference of the elements contained in the provided Iterators. The returned Iterator contains all elements that are contained by iterA and not contained by iterB. iterB may also contain elements not present in iterA; these are simply ignored.
37,006
/**
 * Returns the JavaScript resources required by the given chart: any plugin
 * resources declared via the {@code JqPlotPlugin} annotation on the chart
 * class, plus the highlighter resource when a highlighter is configured.
 * (Method name typo "retrive" is kept for source compatibility.)
 */
public static List<String> retriveJavaScriptResources(Chart<?> chart) {
    List<String> resources = new ArrayList<String>();
    Class<?> chartClass = chart.getClass();
    if (chartClass.isAnnotationPresent(JqPlotPlugin.class)) {
        for (JqPlotResources plugin : chartClass.getAnnotation(JqPlotPlugin.class).values()) {
            resources.add(plugin.getResource());
        }
    }
    if (chart.getChartConfiguration().getHighlighter() != null) {
        resources.add(JqPlotResources.Highlighter.getResource());
    }
    return resources;
}
Retorna os recursos de javascript
37,007
/**
 * Adds a single serie to this chart.
 *
 * @param serie the serie to add
 * @return this chart, for call chaining
 */
// NOTE(review): when getSeries() returns null, the freshly created ArrayList is
// only a local variable and is never attached back to the chart, so the added
// serie appears to be discarded — confirm whether getSeries() can return null.
public AbstractChart<T, S> addSerie(Serie serie) {
    Collection<Serie> series = getSeries();
    if (series == null) {
        series = new ArrayList<Serie>();
    }
    series.add(serie);
    return this;
}
Adiciona uma serie
37,008
/**
 * Adds a collection of series to this chart.
 *
 * @param series the series to add, added in the order supplied by the caller
 * @return this chart, for call chaining
 */
public AbstractChart<T, S> addSeries(Serie... series) {
    Collection<Serie> chartSeries = getSeries();
    if (chartSeries == null) {
        // NOTE(review): this new list is never attached back to the chart, so the
        // added series would be lost — confirm getSeries() can actually return null.
        chartSeries = new ArrayList<Serie>();
    }
    // Add in caller order. The previous implementation iterated the varargs
    // array backwards and therefore inserted the series in reverse order,
    // which contradicts the documented intent of "adding" a collection.
    for (Serie serie : series) {
        chartSeries.add(serie);
    }
    return this;
}
Add a collection of series
37,009
/**
 * Ensures that the named system property is set with a valid file path. If the
 * property is unset, the supplied default path is stored in it. If the
 * resulting path does not resolve to an existing file, the JVM exits with
 * status 1 after logging the error; otherwise the properties file is loaded.
 *
 * @param propertyFileSystemProperty name of the system property holding the path
 * @param defaultPropertyFilePath fallback path used when the property is unset
 */
public static void loadFromSystemProperty(String propertyFileSystemProperty, String defaultPropertyFilePath) {
    String path = System.getProperty(propertyFileSystemProperty);
    if (path == null) {
        path = defaultPropertyFilePath;
        System.setProperty(propertyFileSystemProperty, path);
        log.info("Using default " + propertyFileSystemProperty + " value: " + path
                + ". To override default specify java commandline param -D"
                + propertyFileSystemProperty + "=/your/prop/file/path/here");
    } else {
        log.info("Using user-defined " + propertyFileSystemProperty + " property: " + path);
    }
    if (!new File(path).exists()) {
        log.error(path + " does not exist. It is required to run this application. Exiting...");
        System.exit(1);
    } else {
        try {
            SystemPropertyLoader.load(path);
        } catch (IOException e) {
            // Use the logger (with the throwable, preserving the stack trace)
            // instead of the former e.printStackTrace().
            log.error("Unable to load properties from " + path, e);
        }
    }
}
Ensures that a specified system property is set with a valid file path. If the system property is not set, the specified default value will be set instead. If the resulting system property value does not resolve to an existing file, the system will exit after logging the error.
37,010
/**
 * Creates a StorageProvider that captures storage events and passes them to
 * the given audit queue, wrapping the provider built from the credentials.
 *
 * @param credentials credentials identifying the underlying provider
 * @param accountSubdomain account subdomain recorded with audit events
 * @param auditQueue queue that receives the audit events
 * @return an auditing wrapper around the created provider
 */
public StorageProvider createWithAudit(StorageProviderCredentials credentials,
                                       String accountSubdomain,
                                       TaskQueue auditQueue) {
    StorageProvider target = create(credentials);
    return new AuditStorageProvider(target,
                                    accountSubdomain,
                                    credentials.getProviderId(),
                                    credentials.getProviderType().getName(),
                                    new SimpleUserUtil(),
                                    auditQueue);
}
Creates a StorageProvider which captures events and passes them to the audit queue .
37,011
/**
 * Creates a StorageProvider matching the provider type in the credentials.
 *
 * @throws RuntimeException when the provider type is not supported
 */
public StorageProvider create(StorageProviderCredentials credentials) {
    StorageProviderType type = credentials.getProviderType();
    if (type.equals(StorageProviderType.AMAZON_S3)) {
        return new S3StorageProvider(credentials.getAccessKey(),
                                     credentials.getSecretKey(),
                                     credentials.getOptions());
    }
    if (type.equals(StorageProviderType.AMAZON_GLACIER)) {
        return new GlacierStorageProvider(credentials.getAccessKey(),
                                          credentials.getSecretKey(),
                                          credentials.getOptions());
    }
    if (type.equals(StorageProviderType.CHRONOPOLIS)) {
        return new ChronopolisStorageProvider(credentials.getAccessKey(),
                                              credentials.getSecretKey());
    }
    throw new RuntimeException(type + " is not a supported storage provider type");
}
Creates a StorageProvider
37,012
/**
 * Wraps the given watcher so that its callbacks are dispatched on the event
 * executor rather than on the thread that delivers the WatchedEvent.
 *
 * @param watcher the watcher to wrap; may be null
 * @return a delegating watcher, or null when the input watcher is null
 */
private Watcher wrapWatcher(final Watcher watcher) {
    if (watcher == null) {
        return null;
    }
    return new Watcher() {
        public void process(final WatchedEvent event) {
            // Drop events arriving after shutdown; there is no executor to run them.
            if (eventExecutor.isShutdown()) {
                LOG.debug("Already shutdown. Discarding event: {}", event);
                return;
            }
            eventExecutor.execute(new Runnable() {
                public void run() {
                    try {
                        watcher.process(event);
                    } catch (Throwable t) {
                        // Shield the executor thread from misbehaving watchers.
                        LOG.error("Watcher throws exception.", t);
                    }
                }
            });
        }
    };
}
Wraps the given watcher to be called from the event executor .
37,013
/**
 * Creates a deep copy of the given auth-info multimap; each byte[] value is
 * cloned so later mutation of the original arrays cannot affect the copy.
 */
private Multimap<String, byte[]> copyAuthInfo(Multimap<String, byte[]> authInfos) {
    Multimap<String, byte[]> copy = ArrayListMultimap.create();
    for (Map.Entry<String, byte[]> entry : authInfos.entries()) {
        byte[] value = entry.getValue();
        byte[] clonedValue = (value == null) ? null : Arrays.copyOf(value, value.length);
        copy.put(entry.getKey(), clonedValue);
    }
    return copy;
}
Creates a deep copy of the given authInfos multimap .
37,014
/**
 * Creates a new ZooKeeper connection with this instance registered as the
 * (executor-wrapped) watcher, then applies every configured auth info.
 *
 * @throws IOException if the connection cannot be created
 */
private ZooKeeper createZooKeeper() throws IOException {
    ZooKeeper zooKeeper = new ZooKeeper(zkStr, sessionTimeout, wrapWatcher(this));
    for (Map.Entry<String, byte[]> entry : authInfos.entries()) {
        zooKeeper.addAuthInfo(entry.getKey(), entry.getValue());
    }
    return zooKeeper;
}
Creates a new ZooKeeper connection .
37,015
/**
 * Recovers the lease on the given file when it lives on HDFS; a no-op for any
 * other FileSystem implementation.
 */
public void recoverFileLease(final FileSystem fs, final Path p, Configuration conf) throws IOException {
    if (fs instanceof DistributedFileSystem) {
        recoverDFSFileLease((DistributedFileSystem) fs, p, conf);
    }
}
Recover the lease from HDFS retrying multiple times .
37,016
/**
 * Makes a single attempt to recover the HDFS lease on the given path.
 *
 * @param dfs the filesystem holding the file
 * @param nbAttempt attempt number, used only in the log message detail
 * @param p path whose lease should be recovered
 * @param startWaiting timestamp recovery began, used only in log message detail
 * @return true if the lease was recovered on this attempt
 * @throws FileNotFoundException if HDFS reports that the file does not exist
 */
boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p,
                     final long startWaiting) throws FileNotFoundException {
    boolean recovered = false;
    try {
        recovered = dfs.recoverLease(p);
        LOG.info("recoverLease=" + recovered + ", " + getLogMessageDetail(nbAttempt, p, startWaiting));
    } catch (IOException e) {
        // A LeaseExpiredException whose message mentions a missing file really
        // means the file is gone; surface that as a FileNotFoundException.
        if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) {
            throw new FileNotFoundException("The given file wasn't found at " + p);
        } else if (e instanceof FileNotFoundException) {
            throw (FileNotFoundException) e;
        }
        // Any other IOException: log and report "not recovered" so the caller can retry.
        LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e);
    }
    return recovered;
}
Try to recover the lease .
37,017
/**
 * Reflectively invokes the HDFS-4525 isFileClosed method when available.
 *
 * @param dfs filesystem to invoke the method on
 * @param m the reflected isFileClosed method
 * @param p path to query
 * @return the result of isFileClosed, or false when the invocation fails
 */
private boolean isFileClosed(final DistributedFileSystem dfs, final Method m, final Path p) {
    try {
        return (Boolean) m.invoke(dfs, p);
    } catch (SecurityException e) {
        LOG.warn("No access", e);
        return false;
    } catch (Exception e) {
        LOG.warn("Failed invocation for " + p.toString(), e);
        return false;
    }
}
Call HDFS - 4525 isFileClosed if it is available .
37,018
/**
 * Entry point for basic verification of transaction system availability and
 * functioning. Accepts an optional single "-v" flag for verbose output; any
 * other argument combination prints the usage line (and still proceeds).
 */
public static void main(String[] args) throws Exception {
    boolean verbose = args.length == 1 && "-v".equals(args[0]);
    if (args.length > 1 || (args.length == 1 && !verbose)) {
        System.out.println("USAGE: TransactionServiceClient [-v]");
    }
    doMain(verbose, new ConfigurationFactory().get());
}
Utility to be used for basic verification of transaction system availability and functioning
37,019
/**
 * Generic execution-with-retry wrapper for thrift operations, centralizing the
 * somewhat complex retry logic so callers avoid repetitive code.
 *
 * @param operation the operation to execute against a thrift client
 * @param provider client provider to use; when null, this instance's default
 *                 clientProvider is used
 * @return the operation's result
 * @throws Exception when the retry strategy gives up; the TException is
 *                   wrapped as the cause
 */
private <T> T execute(Operation<T> operation, ThriftClientProvider provider) throws Exception {
    RetryStrategy retryStrategy = retryStrategyProvider.newRetryStrategy();
    while (true) {
        // fall back to the default provider on first pass (and keep it thereafter)
        if (provider == null) {
            provider = this.clientProvider;
        }
        // try-with-resources returns the client to the provider after each attempt
        try (CloseableThriftClient closeable = provider.getCloseableClient()) {
            return operation.execute(closeable.getThriftClient());
        } catch (TException te) {
            boolean retry = retryStrategy.failOnce();
            if (!retry) {
                // full stack trace only at debug level to keep error logs compact
                String message = "Thrift error for " + operation + ": " + te.getMessage();
                LOG.error(message);
                LOG.debug(message, te);
                throw new Exception(message, te);
            } else {
                // let the strategy sleep/back off before looping again
                retryStrategy.beforeRetry();
                String msg = "Retrying " + operation.getName() + " after Thrift error: " + te.getMessage();
                LOG.info(msg);
                LOG.debug(msg, te);
            }
        }
    }
}
This is a generic method implementing the somewhat complex execution and retry logic for operations to avoid repetitive code .
37,020
/**
 * Determines the combined return code based on the sub-filter's return code.
 * The sub-filter may only exclude cells already included by the transaction
 * filter (its INCLUDE defers to the transaction filter's code); its SKIP is
 * strengthened to NEXT_COL unless the transaction filter said plain INCLUDE.
 */
protected ReturnCode determineReturnCode(ReturnCode txFilterCode, ReturnCode subFilterCode) {
    if (subFilterCode == ReturnCode.INCLUDE) {
        return txFilterCode;
    }
    if (subFilterCode == ReturnCode.INCLUDE_AND_NEXT_COL) {
        return ReturnCode.INCLUDE_AND_NEXT_COL;
    }
    if (subFilterCode == ReturnCode.SKIP) {
        return (txFilterCode == ReturnCode.INCLUDE) ? ReturnCode.SKIP : ReturnCode.NEXT_COL;
    }
    return subFilterCode;
}
Determines the return code of TransactionVisibilityFilter based on the sub-filter's return code. The sub-filter can only exclude cells included by TransactionVisibilityFilter, i.e. the sub-filter's INCLUDE will be ignored. This behavior makes sure that the sub-filter only sees cell versions valid for the given transaction. If the sub-filter needs to see older versions of a cell, this method can be overridden.
37,021
/**
 * Ensures that family delete markers are present in the columns requested for
 * the scan: every family that names explicit columns also gets the
 * family-delete qualifier added.
 *
 * @return the same Scan instance, mutated in place
 */
private Scan projectFamilyDeletes(Scan scan) {
    for (Map.Entry<byte[], NavigableSet<byte[]>> familyEntry : scan.getFamilyMap().entrySet()) {
        NavigableSet<byte[]> requestedColumns = familyEntry.getValue();
        boolean hasExplicitColumns = requestedColumns != null && !requestedColumns.isEmpty();
        if (hasExplicitColumns) {
            scan.addColumn(familyEntry.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER);
        }
    }
    return scan;
}
Ensures that family delete markers are present in the columns requested for any scan operation .
37,022
/**
 * Ensures that family delete markers are present in the columns requested for
 * the get: every family that names explicit columns also gets the
 * family-delete qualifier added.
 *
 * @return the same Get instance, mutated in place
 */
private Get projectFamilyDeletes(Get get) {
    for (Map.Entry<byte[], NavigableSet<byte[]>> familyEntry : get.getFamilyMap().entrySet()) {
        NavigableSet<byte[]> requestedColumns = familyEntry.getValue();
        boolean hasExplicitColumns = requestedColumns != null && !requestedColumns.isEmpty();
        if (hasExplicitColumns) {
            get.addColumn(familyEntry.getKey(), TxConstants.FAMILY_DELETE_QUALIFIER);
        }
    }
    return get;
}
Ensures that family delete markers are present in the columns requested for any get operation .
37,023
/**
 * Creates the filter used to restrict returned data to what is visible for the
 * current transaction. Derived classes can override this method to customize
 * the filter.
 *
 * @param tx the current transaction
 * @param type the scan type being performed
 * @param filter an additional filter to combine with the visibility filter
 * @return the visibility filter for the transaction
 */
protected Filter getTransactionFilter(Transaction tx, ScanType type, Filter filter) {
    return TransactionFilters.getVisibilityFilter(tx, ttlByFamily, allowEmptyValues, type, filter);
}
Derived classes can override this method to customize the filter used to return data visible for the current transaction .
37,024
/**
 * Adds a new transaction-aware to participate in the transaction. When a
 * transaction is already in progress, the new participant is immediately
 * started on it.
 *
 * @return true if the participant was newly added
 */
public boolean addTransactionAware(TransactionAware txAware) {
    if (!txAwares.add(txAware)) {
        return false;
    }
    if (currentTx != null) {
        txAware.startTx(currentTx);
    }
    return true;
}
Adds a new transaction - aware to participate in the transaction .
37,025
/**
 * Aborts the given transaction and rolls back all data set changes across the
 * registered transaction-awares. If any rollback fails, the transaction is
 * invalidated instead of aborted. If an exception is caught during rollback it
 * is rethrown (wrapped in a TransactionFailureException) only after all
 * remaining transaction-awares have completed rollback; if an existing
 * exception is passed in, that exception is thrown regardless of rollback
 * success — i.e. this method always throws the first exception it encounters.
 *
 * @param cause an existing failure to rethrow; may be null
 */
public void abort(TransactionFailureException cause) throws TransactionFailureException {
    if (currentTx == null) {
        // no transaction in progress - nothing to abort
        return;
    }
    try {
        boolean success = true;
        for (TransactionAware txAware : txAwares) {
            try {
                if (!txAware.rollbackTx()) {
                    success = false;
                }
            } catch (Throwable e) {
                String message = String.format("Unable to roll back changes in transaction-aware '%s' for transaction %d. ",
                                               txAware.getTransactionAwareName(), currentTx.getTransactionId());
                LOG.warn(message, e);
                // remember only the first failure; later ones are logged but not thrown
                if (cause == null) {
                    cause = new TransactionFailureException(message, e);
                }
                success = false;
            }
        }
        if (success) {
            txClient.abort(currentTx);
        } else {
            // not all changes were rolled back - invalidate so the tx's writes stay hidden
            txClient.invalidate(currentTx.getTransactionId());
        }
        if (cause != null) {
            throw cause;
        }
    } finally {
        // clear the current transaction even if abort/invalidate throws
        currentTx = null;
    }
}
Aborts the given transaction and rolls back all data set changes . If rollback fails the transaction is invalidated . If an exception is caught during rollback the exception is rethrown wrapped into a TransactionFailureException after all remaining TransactionAwares have completed rollback . If an existing exception is passed in that exception is thrown in either case whether the rollback is successful or not . In other words this method always throws the first exception that it encounters .
37,026
/**
 * Initializes the endpoint strategy from the discovery service client so it
 * can be reused every time a new client is created. Skipped (with an info log)
 * when no DiscoveryServiceClient was provided.
 */
private void initDiscovery() {
    if (discoveryServiceClient == null) {
        LOG.info("No DiscoveryServiceClient provided. Skipping service discovery.");
        return;
    }
    String serviceName = configuration.get(TxConstants.Service.CFG_DATA_TX_DISCOVERY_SERVICE_NAME,
                                           TxConstants.Service.DEFAULT_DATA_TX_DISCOVERY_SERVICE_NAME);
    endpointStrategy = new TimeLimitEndpointStrategy(
        new RandomEndpointStrategy(discoveryServiceClient.discover(serviceName)),
        2, TimeUnit.SECONDS);
}
Initialize the service discovery client we will reuse that every time we need to create a new client .
37,027
/**
 * Used by a writer to obtain a FenceWait, which waits on a fence so that the
 * writer's changes become visible to all readers with in-progress transactions.
 *
 * @param fenceId id of the fence to wait on
 * @param txClient transaction system client used to drive the fence transaction
 * @return a FenceWait wrapping a transaction context over a WriteFence
 */
public static FenceWait prepareWait(byte[] fenceId, TransactionSystemClient txClient)
    throws TransactionFailureException, InterruptedException, TimeoutException {
    return new DefaultFenceWait(new TransactionContext(txClient, new WriteFence(fenceId)));
}
Used by a writer to wait on a fence so that changes are visible to all readers with in - progress transactions .
37,028
/**
 * Encodes the given transaction edit using a specific codec. Only exposed for
 * use by tests.
 *
 * @throws IOException if writing to the output fails
 */
static void encode(TransactionEdit src, DataOutput out, TransactionEditCodec codec) throws IOException {
    // write the codec version first so the matching codec can be selected on decode
    out.writeByte(codec.getVersion());
    codec.encode(src, out);
}
Encodes the given transaction edit using a specific codec . Note that this is only exposed for use by tests .
37,029
/**
 * Registers every codec named in the configuration with this provider, falling
 * back to the default codec classes when none are configured. There can be
 * only one codec per version; a later registration for the same version
 * replaces the earlier one in the map.
 */
private void initialize(Configuration configuration) {
    String[] configuredNames = configuration.getTrimmedStrings(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    List<Class> codecClasses = Lists.newArrayList();
    if (configuredNames != null) {
        for (String className : configuredNames) {
            try {
                codecClasses.add(Class.forName(className));
            } catch (ClassNotFoundException cnfe) {
                LOG.warn("Unable to load class configured for "
                        + TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES + ": " + className, cnfe);
            }
        }
    }
    if (codecClasses.isEmpty()) {
        codecClasses.addAll(Arrays.asList(TxConstants.Persist.DEFAULT_TX_SNAPHOT_CODEC_CLASSES));
    }
    for (Class<?> codecClass : codecClasses) {
        try {
            SnapshotCodec codec = (SnapshotCodec) codecClass.newInstance();
            codecs.put(codec.getVersion(), codec);
            LOG.debug("Using snapshot codec {} for snapshots of version {}",
                      codecClass.getName(), codec.getVersion());
        } catch (Exception e) {
            LOG.warn("Error instantiating snapshot codec {}. Skipping.", codecClass.getName(), e);
        }
    }
}
Register all codec specified in the configuration with this provider . There can only be one codec for a given version .
37,030
/**
 * Retrieves the current snapshot codec, i.e. the codec registered with the
 * highest known version.
 *
 * @throws IllegalStateException if no codecs are registered
 */
private SnapshotCodec getCurrentCodec() {
    if (codecs.isEmpty()) {
        // plain string literal — the former String.format had no format arguments
        throw new IllegalStateException("No codecs are registered.");
    }
    return codecs.get(codecs.lastKey());
}
Retrieve the current snapshot codec that is the codec with the highest known version .
37,031
/**
 * Reads the persisted version number from the stream and returns the codec
 * registered for that version.
 *
 * @param in stream positioned at the start of the persisted snapshot
 * @return the codec matching the version read from the stream
 */
private SnapshotCodec getCodec(InputStream in) {
    BinaryDecoder decoder = new BinaryDecoder(in);
    int persistedVersion;
    try {
        persistedVersion = decoder.readInt();
    } catch (IOException e) {
        // read failures are not recoverable here; rethrow unchecked
        LOG.error("Unable to read transaction state version: ", e);
        throw Throwables.propagate(e);
    }
    return getCodecForVersion(persistedVersion);
}
Return the appropriate codec for the version in InputStream
37,032
/**
 * Atomically swaps out the pending-writes list and returns the writes
 * accumulated so far; subsequent writes accumulate in a fresh list.
 */
private List<Entry> getPendingWrites() {
    synchronized (this) {
        List<Entry> drained = this.pendingWrites;
        this.pendingWrites = new LinkedList<>();
        return drained;
    }
}
Atomically swaps out the pending writes list and returns the writes accumulated so far; subsequent writes will accumulate in a new list.
37,033
/**
 * Returns the maximum timestamp to use for time-range operations for the given
 * transaction: one past the write pointer, guarding against overflow when the
 * write pointer is already Long.MAX_VALUE.
 */
public static long getMaxVisibleTimestamp(Transaction tx) {
    long writePointer = tx.getWritePointer();
    if (writePointer == Long.MAX_VALUE) {
        return writePointer;
    }
    return writePointer + 1;
}
Returns the maximum timestamp to use for time - range operations based on the given transaction .
37,034
/**
 * Returns the max TTL across the given per-family TTL values. A TTL of zero
 * or less means "no TTL" and is treated as Long.MAX_VALUE; an empty map
 * likewise yields Long.MAX_VALUE.
 */
private static long getMaxTTL(Map<byte[], Long> ttlByFamily) {
    long max = 0;
    for (long familyTTL : ttlByFamily.values()) {
        long effective = (familyTTL <= 0) ? Long.MAX_VALUE : familyTTL;
        if (effective > max) {
            max = effective;
        }
    }
    return (max == 0) ? Long.MAX_VALUE : max;
}
Returns the max TTL for the given TTL values . Returns Long . MAX_VALUE if any of the column families has no TTL set .
37,035
/**
 * Starts a daemon thread that periodically emits the sizes of the core
 * transaction data structures as gauge metrics.
 */
private void startMetricsThread() {
    LOG.info("Starting periodic Metrics Emitter thread, frequency = " + METRICS_POLL_INTERVAL);
    this.metricsThread = new DaemonThreadExecutor("tx-metrics") {
        // Shared by the periodic run and the final emit on shutdown; the two
        // callbacks previously duplicated these four gauge calls verbatim.
        private void emitSizeMetrics() {
            txMetricsCollector.gauge("committing.size", committingChangeSets.size());
            txMetricsCollector.gauge("committed.size", committedChangeSets.size());
            txMetricsCollector.gauge("inprogress.size", inProgress.size());
            txMetricsCollector.gauge("invalid.size", invalidArray.length);
        }

        public void doRun() {
            emitSizeMetrics();
        }

        protected void onShutdown() {
            // emit one final snapshot so the last state is recorded
            emitSizeMetrics();
        }

        public long getSleepMillis() {
            return METRICS_POLL_INTERVAL;
        }
    };
    metricsThread.start();
}
Emits Transaction Data structures size as metrics
37,036
/**
 * Takes a snapshot of the transaction state and serializes it into the given
 * output stream.
 *
 * @return true if a snapshot was taken and written; false if none was available
 * @throws IOException if writing the snapshot fails
 */
public boolean takeSnapshot(OutputStream out) throws IOException {
    TransactionSnapshot snapshot = getSnapshot();
    if (snapshot == null) {
        return false;
    }
    persistor.writeSnapshot(out, snapshot);
    return true;
}
Take a snapshot of the transaction state and serialize it into the given output stream .
37,037
/**
 * Restores the initial in-memory transaction state from a snapshot. Must only
 * be called on a pristine instance: every precondition below asserts that no
 * state has been populated yet.
 *
 * @param snapshot the snapshot to restore from
 */
private void restoreSnapshot(TransactionSnapshot snapshot) {
    LOG.info("Restoring transaction state from snapshot at " + snapshot.getTimestamp());
    // guard against restoring over already-initialized state
    Preconditions.checkState(lastSnapshotTime == 0, "lastSnapshotTime has been set!");
    Preconditions.checkState(readPointer == 0, "readPointer has been set!");
    Preconditions.checkState(lastWritePointer == 0, "lastWritePointer has been set!");
    Preconditions.checkState(invalid.isEmpty(), "invalid list should be empty!");
    Preconditions.checkState(inProgress.isEmpty(), "inProgress map should be empty!");
    Preconditions.checkState(committingChangeSets.isEmpty(), "committingChangeSets should be empty!");
    Preconditions.checkState(committedChangeSets.isEmpty(), "committedChangeSets should be empty!");
    LOG.info("Restoring snapshot of state: " + snapshot);
    lastSnapshotTime = snapshot.getTimestamp();
    readPointer = snapshot.getReadPointer();
    lastWritePointer = snapshot.getWritePointer();
    invalid.addAll(snapshot.getInvalid());
    // migrate old-format in-progress entries (see txnBackwardsCompatCheck) before restoring
    inProgress.putAll(txnBackwardsCompatCheck(defaultLongTimeout, longTimeoutTolerance, snapshot.getInProgress()));
    committingChangeSets.putAll(snapshot.getCommittingChangeSets());
    committedChangeSets.putAll(snapshot.getCommittedChangeSets());
}
Restore the initial in - memory transaction state from a snapshot .
37,038
/**
 * Checks whether in-progress transactions need to be migrated to carry an
 * expiration time and a transaction type, and migrates them in place if so.
 * Required for backwards compatibility: long-running transactions used to be
 * represented with expiration time -1. Can be removed once SnapshotCodec
 * version 1 is no longer supported.
 *
 * @param defaultLongTimeout timeout to assign to migrated long transactions
 * @param longTimeoutTolerance tolerance used to decide whether an entry's
 *        expiration is "close enough" to the long-timeout-derived value to be
 *        treated as a long transaction
 * @param inProgress map of in-progress transactions; mutated in place
 * @return the same (mutated) inProgress map
 */
public static Map<Long, InProgressTx> txnBackwardsCompatCheck(int defaultLongTimeout,
                                                              long longTimeoutTolerance,
                                                              Map<Long, InProgressTx> inProgress) {
    for (Map.Entry<Long, InProgressTx> entry : inProgress.entrySet()) {
        long writePointer = entry.getKey();
        long expiration = entry.getValue().getExpiration();
        // Entries with no type and either a negative expiration or an expiration
        // within tolerance of the default long timeout are treated as LONG.
        if (entry.getValue().getType() == null
                && (expiration < 0
                    || (getTxExpirationFromWritePointer(writePointer, defaultLongTimeout) - expiration
                        < longTimeoutTolerance))) {
            long newExpiration = getTxExpirationFromWritePointer(writePointer, defaultLongTimeout);
            InProgressTx compatTx = new InProgressTx(entry.getValue().getVisibilityUpperBound(),
                                                     newExpiration,
                                                     TransactionType.LONG,
                                                     entry.getValue().getCheckpointWritePointers());
            entry.setValue(compatTx);
        } else if (entry.getValue().getType() == null) {
            // remaining untyped entries default to SHORT with their original expiration
            InProgressTx compatTx = new InProgressTx(entry.getValue().getVisibilityUpperBound(),
                                                     entry.getValue().getExpiration(),
                                                     TransactionType.SHORT,
                                                     entry.getValue().getCheckpointWritePointers());
            entry.setValue(compatTx);
        }
    }
    return inProgress;
}
Check if in - progress transactions need to be migrated to have expiration time and type if so do the migration . This is required for backwards compatibility when long running transactions were represented with expiration time - 1 . This can be removed when we stop supporting SnapshotCodec version 1 .
37,039
/**
 * Resets the state of the transaction manager: snapshots the current state,
 * clears it, then snapshots again so the cleared state is persisted.
 */
public void resetState() {
    this.logWriteLock.lock();
    try {
        // snapshot the existing state before wiping it, then persist the cleared state
        doSnapshot(false);
        clear();
        doSnapshot(false);
    } catch (IOException e) {
        // LOG.error already records the stack trace; the former
        // e.printStackTrace() call was redundant and bypassed the logger.
        LOG.error("Snapshot failed when resetting state!", e);
    } finally {
        this.logWriteLock.unlock();
    }
}
Resets the state of the transaction manager .
37,040
private void abortService ( String message , Throwable error ) { if ( isRunning ( ) ) { LOG . error ( "Aborting transaction manager due to: " + message , error ) ; notifyFailed ( error ) ; } }
Immediately shuts down the service without going through the normal close process .
37,041
/**
 * Starts a short transaction with the given timeout.
 *
 * @param timeoutInSeconds transaction timeout; must be positive
 * @return the newly started transaction
 */
public Transaction startShort(int timeoutInSeconds) {
    Preconditions.checkArgument(timeoutInSeconds > 0, "timeout must be positive but is %s", timeoutInSeconds);
    txMetricsCollector.rate("start.short");
    Stopwatch stopwatch = new Stopwatch().start();
    Transaction tx = startTx(getTxExpiration(timeoutInSeconds), TransactionType.SHORT);
    txMetricsCollector.histogram("start.short.latency", (int) stopwatch.elapsedMillis());
    return tx;
}
Start a short transaction with a given timeout .
37,042
/**
 * Starts a long transaction with the default long timeout. Long transactions
 * do not participate in conflict detection, and aborting one moves it to the
 * invalid list because its writes are assumed not to be rollback-able.
 *
 * @return the newly started transaction
 */
public Transaction startLong() {
    txMetricsCollector.rate("start.long");
    Stopwatch stopwatch = new Stopwatch().start();
    Transaction tx = startTx(getTxExpiration(defaultLongTimeout), TransactionType.LONG);
    txMetricsCollector.histogram("start.long.latency", (int) stopwatch.elapsedMillis());
    return tx;
}
Start a long transaction. Long transactions do not participate in conflict detection. Also, aborting a long transaction moves it to the invalid list because we assume that its writes cannot be rolled back.
37,043
/**
 * Removes the given transaction ids from the invalid list.
 *
 * @param invalidTxIds ids to remove from the invalid list
 * @return true if the truncation succeeded
 */
public boolean truncateInvalidTx(Set<Long> invalidTxIds) {
    txMetricsCollector.rate("truncateInvalidTx");
    Stopwatch timer = new Stopwatch().start();
    // the log read lock is held for the whole operation; the in-memory state
    // mutation itself additionally happens under the monitor
    this.logReadLock.lock();
    try {
        boolean success;
        synchronized (this) {
            ensureAvailable();
            success = doTruncateInvalidTx(invalidTxIds);
        }
        // the edit is appended outside the monitor but still under the log read lock
        appendToLog(TransactionEdit.createTruncateInvalidTx(invalidTxIds));
        txMetricsCollector.histogram("truncateInvalidTx.latency", (int) timer.elapsedMillis());
        return success;
    } finally {
        this.logReadLock.unlock();
    }
}
Removes the given transaction ids from the invalid list .
37,044
/**
 * Removes from the invalid list all transaction ids started before the given
 * time.
 *
 * @param time cutoff; invalid transactions started before it are removed
 * @return true if the truncation succeeded
 * @throws InvalidTruncateTimeException propagated from doTruncateInvalidTxBefore
 */
public boolean truncateInvalidTxBefore(long time) throws InvalidTruncateTimeException {
    txMetricsCollector.rate("truncateInvalidTxBefore");
    Stopwatch timer = new Stopwatch().start();
    // same locking pattern as truncateInvalidTx: log read lock around the whole
    // operation, monitor only around the in-memory state mutation
    this.logReadLock.lock();
    try {
        boolean success;
        synchronized (this) {
            ensureAvailable();
            success = doTruncateInvalidTxBefore(time);
        }
        appendToLog(TransactionEdit.createTruncateInvalidTxBefore(time));
        txMetricsCollector.histogram("truncateInvalidTxBefore.latency", (int) timer.elapsedMillis());
        return success;
    } finally {
        this.logReadLock.unlock();
    }
}
Removes all transaction ids started before the given time from invalid list .
37,045
/**
 * Creates a new Transaction for the given write pointer and type, capturing
 * all in-progress transaction ids (including each one's checkpoint write
 * pointers) and the first in-progress short transaction. Only called from the
 * transaction-start path, which is already synchronized.
 *
 * @param writePointer write pointer for the new transaction
 * @param type type of the new transaction
 */
private Transaction createTransaction(long writePointer, TransactionType type) {
    // track the first (oldest) in-progress short transaction, if any
    long firstShortTx = Transaction.NO_TX_IN_PROGRESS;
    LongArrayList inProgressIds = new LongArrayList(inProgress.size());
    for (Map.Entry<Long, InProgressTx> entry : inProgress.entrySet()) {
        long txId = entry.getKey();
        inProgressIds.add(txId);
        // a transaction's checkpoint write pointers count as in-progress too
        LongArrayList childIds = entry.getValue().getCheckpointWritePointers();
        if (childIds != null) {
            for (int i = 0; i < childIds.size(); i++) {
                inProgressIds.add(childIds.get(i));
            }
        }
        if (firstShortTx == Transaction.NO_TX_IN_PROGRESS && !entry.getValue().isLongRunning()) {
            firstShortTx = txId;
        }
    }
    return new Transaction(readPointer, writePointer, invalidArray,
                           inProgressIds.toLongArray(), firstShortTx, type);
}
Creates a new Transaction. This method only gets called from start transaction, which is already synchronized.
37,046
/**
 * Logs a summary of the transaction data-structure sizes. Called from the tx
 * service every 10 seconds as a stopgap, because the current metrics system is
 * not flexible enough to add new metrics.
 */
public void logStatistics() {
    StringBuilder stats = new StringBuilder("Transaction Statistics: ");
    stats.append("write pointer = ").append(lastWritePointer)
         .append(", invalid = ").append(invalid.size())
         .append(", in progress = ").append(inProgress.size())
         .append(", committing = ").append(committingChangeSets.size())
         .append(", committed = ").append(committedChangeSets.size());
    LOG.info(stats.toString());
}
Called from the tx service every 10 seconds . This hack is needed because current metrics system is not flexible when it comes to adding new metrics .
37,047
/**
 * Sets up the common resources required by all clients: the Guice injector,
 * the ZooKeeper client, the transaction service client, the test table, and
 * the HBase connection.
 *
 * @throws IOException if table creation or connection setup fails
 */
public void init() throws IOException {
    Injector injector = Guice.createInjector(
        new ConfigModule(conf),
        new ZKModule(),
        new DiscoveryModules().getDistributedModules(),
        new TransactionModules().getDistributedModules(),
        new TransactionClientModule());
    // ZooKeeper must be running before the transaction client can be used
    zkClient = injector.getInstance(ZKClientService.class);
    zkClient.startAndWait();
    txClient = injector.getInstance(TransactionServiceClient.class);
    createTableIfNotExists(conf, TABLE, new byte[][]{FAMILY});
    conn = HConnectionManager.createConnection(conf);
}
Sets up common resources required by all clients .
37,048
/**
 * Starts all clients, waits for each to complete, and releases their
 * resources.
 *
 * @throws IOException if a client fails to initialize
 * @throws InterruptedException if interrupted while waiting for a client
 */
public void run() throws IOException, InterruptedException {
    List<Client> clients = new ArrayList<>(totalClients);
    for (int id = 0; id < totalClients; id++) {
        Client client = new Client(id, totalClients, iterations);
        client.init(txClient, conn.getTable(TABLE));
        client.start();
        clients.add(client);
    }
    for (Client client : clients) {
        client.join();
        Closeables.closeQuietly(client);
    }
}
Runs all clients and waits for them to complete .
37,049
/**
 * Frees the underlying resources common to all clients: the connection and
 * the ZooKeeper client. Close failures on the connection are ignored.
 */
public void close() {
    if (conn != null) {
        try {
            conn.close();
        } catch (IOException ignored) {
            // best-effort close; nothing useful to do on failure
        }
    }
    if (zkClient != null) {
        zkClient.stopAndWait();
    }
}
Frees up the underlying resources common to all clients .
37,050
/**
 * Reads a length-prefixed byte sequence: first an int giving the number of
 * bytes, then exactly that many bytes.
 *
 * @return the bytes read
 * @throws EOFException if the stream ends before all bytes are read
 * @throws IOException if reading from the underlying input fails
 */
public byte[] readBytes() throws IOException {
    int length = readInt();
    byte[] bytes = new byte[length];
    int offset = 0;
    // loop because read() may return fewer bytes than requested
    while (offset < length) {
        int read = input.read(bytes, offset, length - offset);
        if (read == -1) {
            throw new EOFException();
        }
        offset += read;
    }
    return bytes;
}
Read a byte sequence . First read an int to indicate how many bytes to read then that many bytes .
37,051
/**
 * Starts the accept and selector threads so the server can begin handling
 * clients.
 *
 * @return true if all threads started; false if setup failed with IOException
 */
protected boolean startThreads() {
    LOGGER.info("Starting {}", TThreadedSelectorServerWithFix.class.getSimpleName());
    try {
        for (int i = 0; i < args.selectorThreads; ++i) {
            selectorThreads.add(new SelectorThread(args.acceptQueueSizePerThread));
        }
        acceptThread = new AcceptThread((TNonblockingServerTransport) serverTransport_,
                                        createSelectorThreadLoadBalancer(selectorThreads));
        stopped_ = false;
        // selector threads are started before the accept thread
        for (SelectorThread thread : selectorThreads) {
            thread.start();
        }
        acceptThread.start();
        return true;
    } catch (IOException e) {
        LOGGER.error("Failed to start threads!", e);
        return false;
    }
}
Start the accept and selector threads running to deal with clients .
37,052
/**
 * Stops serving: marks the server stopped, stops listening, and wakes the
 * accept and selector threads so they can observe the stop flag and exit.
 * Null-safe against threads that were never created.
 */
public void stop() {
  stopped_ = true;
  stopListening();
  if (acceptThread != null) {
    acceptThread.wakeupSelector();
  }
  if (selectorThreads == null) {
    return;
  }
  for (SelectorThread selector : selectorThreads) {
    if (selector != null) {
      selector.wakeupSelector();
    }
  }
}
Stop serving and shut everything down .
37,053
/**
 * Queues the invocation on the invoker executor instead of invoking it
 * immediately. When no worker pool is configured, the invocation runs
 * inline on the calling (selector) thread.
 *
 * @return true if the invocation was run or queued; false if the executor
 *         rejected it
 */
protected boolean requestInvoke(FrameBuffer frameBuffer) {
  Runnable task = getRunnable(frameBuffer);
  if (invoker == null) {
    // No thread pool: handle the invocation inline on this thread.
    task.run();
    return true;
  }
  try {
    invoker.execute(task);
    return true;
  } catch (RejectedExecutionException rx) {
    LOGGER.warn("ExecutorService rejected execution!", rx);
    return false;
  }
}
We override the standard invoke method here to queue the invocation for the invoker service instead of immediately invoking it. If there is no thread pool, the invocation is handled inline on this thread.
37,054
/**
 * Creates the default invoker pool when none was supplied.
 *
 * @return a fixed thread pool of {@code options.workerThreads} threads, or
 *         null when workerThreads is not positive (invocations then run
 *         inline on the selector threads)
 */
protected static ExecutorService createDefaultExecutor(Args options) {
  if (options.workerThreads > 0) {
    return Executors.newFixedThreadPool(options.workerThreads);
  }
  return null;
}
Helper to create the invoker if one is not specified
37,055
/**
 * Writes a single int as a zig-zag encoded varint: the sign bit is folded
 * into the low bit, then the value is emitted 7 bits at a time, low bits
 * first, with the high bit of each byte marking continuation.
 *
 * @return this encoder, for chaining
 * @throws IOException if the underlying stream fails
 */
public BinaryEncoder writeInt(int i) throws IOException {
  // Zig-zag: maps small negative values to small unsigned ones.
  int v = (i << 1) ^ (i >> 31);
  while ((v & ~0x7f) != 0) {
    output.write(0x80 | (v & 0x7f));
    v >>>= 7;
  }
  output.write(v);
  return this;
}
write a single int value .
37,056
/**
 * Writes a single long as a zig-zag encoded varint; same format as
 * {@link #writeInt(int)} extended to 64 bits (up to 10 bytes).
 *
 * @return this encoder, for chaining
 * @throws IOException if the underlying stream fails
 */
public BinaryEncoder writeLong(long l) throws IOException {
  // Zig-zag: fold the sign into the low bit so magnitude drives the length.
  long v = (l << 1) ^ (l >> 63);
  while ((v & ~0x7fL) != 0) {
    output.write((int) (0x80 | (v & 0x7f)));
    v >>>= 7;
  }
  output.write((int) v);
  return this;
}
write a single long int value .
37,057
/**
 * Writes a byte sequence: first the length as a varint (via writeLong — the
 * varint format is identical to writeInt for non-negative lengths), then the
 * raw bytes.
 *
 * @return this encoder, for chaining
 * @throws IOException if the underlying stream fails
 */
public BinaryEncoder writeBytes(byte[] bytes) throws IOException {
  writeLong(bytes.length);
  output.write(bytes, 0, bytes.length);
  return this;
}
write a sequence of bytes . First writes the number of bytes as an int then the bytes themselves .
37,058
/**
 * Reads the next raw key/value record from the sequence file and, when the key
 * matches the expected NUM_ENTRIES_APPENDED marker, decodes and returns the entry
 * count from the first 4 bytes of the value (big-endian, via Ints.fromByteArray).
 *
 * @return the number of entries appended, or -1 when the reader is exhausted
 * @throws IOException if the record's key is not the expected marker
 */
public int readMarker ( SequenceFile . Reader reader ) throws IOException { if ( valueBytes == null ) { valueBytes = reader . createValueBytes ( ) ; } rawKey . reset ( ) ; rawValue . reset ( ) ; int status = reader . nextRaw ( rawKey , valueBytes ) ; if ( status == - 1 ) { return - 1 ; } if ( isMarkerValid ( ) ) { valueBytes . writeUncompressedBytes ( rawValue ) ; rawValue . flush ( ) ; return Ints . fromByteArray ( rawValue . getData ( ) ) ; } throw new IOException ( String . format ( "Invalid key for num entries appended found %s, expected : %s" , new String ( rawKey . getData ( ) ) , TxConstants . TransactionLog . NUM_ENTRIES_APPENDED ) ) ; }
Reads the marker record and returns the number of entries appended; throws IOException on an invalid key, since we can recover without any consequence.
37,059
/**
 * Attempts to initialize the Configuration and TransactionStateStorage.
 * Obtaining the Configuration may legitimately fail until the service has
 * fully started, so failures are logged (not thrown) and initialization can
 * be retried later; {@code initialized} is set only on success.
 *
 * Fix: the caught exception is now passed to the logger so the stack trace
 * is preserved (previously only getMessage() was recorded).
 */
private void tryInit() {
  try {
    Configuration conf = getSnapshotConfiguration();
    if (conf != null) {
      this.storage = new HDFSTransactionStateStorage(conf, new SnapshotCodecProvider(conf),
          new TxMetricsCollector());
      this.storage.startAndWait();
      // Configured in seconds; cache refresh interval is tracked in millis.
      this.snapshotRefreshFrequency = conf.getLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL,
          TxConstants.Manager.DEFAULT_TX_SNAPSHOT_INTERVAL) * 1000;
      this.initialized = true;
    } else {
      LOG.info("Could not load configuration");
    }
  } catch (Exception e) {
    LOG.info("Failed to initialize TransactionStateCache due to: " + e.getMessage(), e);
  }
}
Try to initialize the Configuration and TransactionStateStorage instances . Obtaining the Configuration may fail until ReactorServiceMain has been started .
37,060
/**
 * Main entry point: calls init/start in the same sequence jsvc would, then
 * blocks until the JVM shutdown hook has run stop() and destroy(). The latch
 * guarantees main() does not return before shutdown cleanup completes; any
 * error during shutdown is logged rather than propagated from the hook.
 */
public void doMain ( final String [ ] args ) throws Exception { final CountDownLatch shutdownLatch = new CountDownLatch ( 1 ) ; Runtime . getRuntime ( ) . addShutdownHook ( new Thread ( ) { public void run ( ) { try { try { TransactionServiceMain . this . stop ( ) ; } finally { try { TransactionServiceMain . this . destroy ( ) ; } finally { shutdownLatch . countDown ( ) ; } } } catch ( Throwable t ) { LOG . error ( "Exception when shutting down: " + t . getMessage ( ) , t ) ; } } } ) ; init ( args ) ; start ( ) ; shutdownLatch . await ( ) ; }
The main method . It simply call methods in the same sequence as if the program is started by jsvc .
37,061
/**
 * Invoked by jsvc to start the program: wires up the Guice injector, starts
 * the ZooKeeper client, then starts the transaction service.
 *
 * Fix: a startup failure was previously reported via System.err with only the
 * message; it is now also logged with the full stack trace. The original
 * swallow-and-continue behavior is preserved for callers.
 *
 * @throws Exception declared for the jsvc lifecycle contract
 */
public void start ( ) throws Exception {
  Injector injector = Guice.createInjector(
      new ConfigModule(conf),
      new ZKModule(),
      new DiscoveryModules().getDistributedModules(),
      new TransactionModules().getDistributedModules(),
      new TransactionClientModule());
  ZKClientService zkClientService = injector.getInstance(ZKClientService.class);
  zkClientService.startAndWait();
  txService = injector.getInstance(TransactionService.class);
  try {
    LOG.info("Starting {}", getClass().getSimpleName());
    txService.startAndWait();
  } catch (Exception e) {
    LOG.error("Failed to start service", e);
    System.err.println("Failed to start service: " + e.getMessage());
  }
}
Invoked by jsvc to start the program .
37,062
/**
 * Invoked by jsvc to stop the program. Safe to call when the service was never
 * started (txService == null) or already stopped. Shutdown failures are logged
 * and echoed to stderr rather than propagated, so the jsvc stop sequence can
 * continue.
 */
public void stop ( ) { LOG . info ( "Stopping {}" , getClass ( ) . getSimpleName ( ) ) ; if ( txService == null ) { return ; } try { if ( txService . isRunning ( ) ) { txService . stopAndWait ( ) ; } } catch ( Throwable e ) { LOG . error ( "Failed to shutdown transaction service." , e ) ; System . err . println ( "Failed to shutdown transaction service: " + e . getMessage ( ) ) ; e . printStackTrace ( System . err ) ; } }
Invoked by jsvc to stop the program .
37,063
/**
 * Returns a pooled element if one is available, otherwise creates a new one.
 * If either polling or creation throws, the semaphore permit acquired by the
 * caller is released before the exception is rethrown.
 *
 * @throws E if element creation fails
 */
private T getOrCreate() throws E {
  try {
    final T pooled = elements.poll();
    return pooled != null ? pooled : create();
  } catch (Exception e) {
    // Give back the permit so the pool's capacity accounting stays correct.
    semaphore.release();
    throw e;
  }
}
Polls an element from the pool or creates a new one; releases the semaphore before rethrowing if this method throws any exception.
37,064
/** Lexicographic "less than or equal" for pairs (a1,a2) vs (b1,b2). */
private static boolean leq(int a1, int a2, int b1, int b2) {
  if (a1 != b1) {
    return a1 < b1;
  }
  return a2 <= b2;
}
Lexicographic order for pairs .
37,065
/** Lexicographic "less than or equal" for triples, delegating to the pair form. */
private static boolean leq(int a1, int a2, int a3, int b1, int b2, int b3) {
  if (a1 != b1) {
    return a1 < b1;
  }
  return leq(a2, a3, b2, b3);
}
Lexicographic order for triples .
37,066
/**
 * Stably sorts the bucket [leftPtr, rightPtr] by insertion sort, keyed by the
 * bucket pointers in sufPtrMap at the given offset, then delegates to
 * updatePtrAndRefineBuckets_SaBucket for further refinement. Insertion sort is
 * used deliberately: stability preserves the order of equal keys.
 */
private void insSortUpdateRecurse_SaBucket ( int leftPtr , int rightPtr , int offset , int q ) { int rightTmpPtr = leftPtr + 1 ; while ( rightTmpPtr <= rightPtr ) { int tempValue = suffixArray [ rightTmpPtr ] ; int tempHashValue = sufPtrMap [ suffixArray [ rightTmpPtr ] + offset ] ; int leftTmpPtr = rightTmpPtr ; while ( leftTmpPtr > leftPtr && sufPtrMap [ suffixArray [ leftTmpPtr - 1 ] + offset ] > tempHashValue ) { suffixArray [ leftTmpPtr ] = suffixArray [ leftTmpPtr - 1 ] ; leftTmpPtr -- ; } suffixArray [ leftTmpPtr ] = tempValue ; rightTmpPtr ++ ; } updatePtrAndRefineBuckets_SaBucket ( leftPtr , rightPtr , offset , q ) ; }
Stably sorts a bucket at a refinement level regarding sort keys that are bucket pointers in sufPtrMap with offset .
37,067
/**
 * Completely sorts a bucket of exactly two suffixes (at leftPtr and rightPtr):
 * advances both in steps of q until their bucket pointers differ, swaps them if
 * out of order, and records both final positions in sufPtrMap.
 */
private void computeBucketSize2_SaBucket ( int leftPtr , int rightPtr , int offset , int q ) { int suffix1 = suffixArray [ leftPtr ] + offset ; int suffix2 = suffixArray [ rightPtr ] + offset ; while ( sufPtrMap [ suffix1 ] == sufPtrMap [ suffix2 ] ) { suffix1 += q ; suffix2 += q ; } if ( sufPtrMap [ suffix1 ] > sufPtrMap [ suffix2 ] ) { int tmpSwap = suffixArray [ leftPtr ] ; suffixArray [ leftPtr ] = suffixArray [ rightPtr ] ; suffixArray [ rightPtr ] = tmpSwap ; } sufPtrMap [ suffixArray [ leftPtr ] ] = leftPtr ; sufPtrMap [ suffixArray [ rightPtr ] ] = rightPtr ; }
Completely sorts buckets of size 2 .
37,068
/**
 * Scans the bucket [leftPtr, rightPtr] in steps of q to find the smallest lcp
 * (starting from offset) at which not all suffixes share the same bucket
 * pointer. The result becomes the new offset for the next refinement round.
 *
 * @return the depth at which the suffixes in this bucket first differ
 */
private int computeDiffDepthBucket_SaBucket ( int leftPtr , int rightPtr , int offset , int q ) { int lcp = offset ; while ( true ) { int runPtr = leftPtr ; int a = suffixArray [ rightPtr ] ; int tmpPtr = sufPtrMap [ a + lcp ] ; while ( runPtr < rightPtr ) { if ( sufPtrMap [ suffixArray [ runPtr ] + lcp ] != tmpPtr ) { return lcp ; } runPtr ++ ; } lcp += q ; } }
Computes (approximately) the LCP of all suffixes in this bucket. It will be the new offset.
37,069
/**
 * Builds the q-gram buckets, the suffix array sorted up to prefix length q, and
 * the bucket-pointer table sufPtrMap, maintaining a rolling q-gram hash over a
 * general alphabet (multiply/modulo form). Positions past the end of the string
 * get decreasing negative sentinel values so comparisons terminate.
 *
 * @return the bucket boundary table
 */
private int [ ] determineAll_Buckets_Sarray_Sptrmap ( int q ) { int [ ] buckets = determineAll_Buckets_Sarray ( q ) ; int strLen = length ; sufPtrMap = new int [ strLen + 2 * q + 1 ] ; int alphabetSize = alphabet . size ; int mappedUcharArray = 0 ; int tempPower = 1 ; int hashCode = 0 ; int i ; for ( i = q - 1 ; i >= 0 ; i -- ) { hashCode += seq [ start + mappedUcharArray + i ] * tempPower ; tempPower *= alphabetSize ; } int tempModulo = kbs_power_Ulong ( alphabetSize , q - 1 ) ; mappedUcharArray += q ; int j ; for ( j = 0 ; j < strLen - 1 ; j ++ ) { sufPtrMap [ j ] = ( buckets [ hashCode + 1 ] ) - 1 ; hashCode -= ( seq [ start + mappedUcharArray - q ] ) * tempModulo ; hashCode *= alphabetSize ; hashCode += seq [ start + mappedUcharArray ] ; mappedUcharArray ++ ; } sufPtrMap [ j ] = buckets [ hashCode ] ; int beginPtr = - 1 ; for ( j = strLen ; j <= strLen + 2 * q ; j ++ ) { sufPtrMap [ j ] = beginPtr -- ; } return buckets ; }
Constructs all buckets w . r . t . q - gram size q the up to prefix q sorted suffix array and the bucket - pointer table .
37,070
/**
 * Variant of determineAll_Buckets_Sarray_Sptrmap specialized for alphabet sizes
 * that are a power of two: the rolling q-gram hash is maintained with shifts and
 * masks instead of multiply/modulo. Positions past the string end get decreasing
 * negative sentinels.
 *
 * @return the bucket boundary table
 * @throws RuntimeException if the alphabet size is not a power of two
 */
private int [ ] determinePower2Alpha_Buckets_Sarray_Sptrmap ( int q ) { int strLen = length ; int exp2 = kbs_getExp_Ulong ( 2 , alphabet . size ) ; if ( exp2 < 0 ) { throw new RuntimeException ( "value out of bounds" ) ; } int [ ] buckets = determinePower2Alpha_Buckets_Sarray ( q ) ; this . sufPtrMap = new int [ strLen + 2 * q + 1 ] ; int mappedUcharArray = 0 ; int hashCode = 0 ; int j ; for ( j = 0 ; j < q ; j ++ ) { hashCode = hashCode << exp2 ; hashCode += seq [ start + mappedUcharArray + j ] ; } int tempModulo = 0 ; tempModulo = ~ tempModulo ; tempModulo = tempModulo << ( exp2 * ( q - 1 ) ) ; tempModulo = ~ tempModulo ; mappedUcharArray += q ; for ( j = 0 ; j < strLen - 1 ; j ++ ) { sufPtrMap [ j ] = ( buckets [ hashCode + 1 ] ) - 1 ; hashCode = hashCode & tempModulo ; hashCode = hashCode << exp2 ; hashCode = hashCode | seq [ start + mappedUcharArray ] ; mappedUcharArray ++ ; } sufPtrMap [ j ] = buckets [ hashCode ] ; int beginPtr = - 1 ; for ( j = strLen ; j <= strLen + 2 * q ; j ++ ) { sufPtrMap [ j ] = beginPtr -- ; } return buckets ; }
Constructs all buckets w . r . t . q - gram size q the up to prefix length q sorted suffix array and the bucket - pointer table .
37,071
/**
 * Merge-forward with an internal buffer: the left half [first, middle) is
 * swapped into the buffer, then merged back with the right half [middle, last).
 * Entries that compare equal are marked with bitwise negation (~) so later
 * passes can recognize already-compared runs; the rotating temporary t holds
 * the value displaced from the slot currently being filled.
 */
private void ssMergeForward ( int PA , int first , int middle , int last , int buf , int depth ) { int a , b , c , bufend ; int t , r ; bufend = buf + ( middle - first ) - 1 ; ssBlockSwap ( buf , first , middle - first ) ; for ( t = SA [ a = first ] , b = buf , c = middle ; ; ) { r = ssCompare ( PA + SA [ b ] , PA + SA [ c ] , depth ) ; if ( r < 0 ) { do { SA [ a ++ ] = SA [ b ] ; if ( bufend <= b ) { SA [ bufend ] = t ; return ; } SA [ b ++ ] = SA [ a ] ; } while ( SA [ b ] < 0 ) ; } else if ( r > 0 ) { do { SA [ a ++ ] = SA [ c ] ; SA [ c ++ ] = SA [ a ] ; if ( last <= c ) { while ( b < bufend ) { SA [ a ++ ] = SA [ b ] ; SA [ b ++ ] = SA [ a ] ; } SA [ a ] = SA [ b ] ; SA [ b ] = t ; return ; } } while ( SA [ c ] < 0 ) ; } else { SA [ c ] = ~ SA [ c ] ; do { SA [ a ++ ] = SA [ b ] ; if ( bufend <= b ) { SA [ bufend ] = t ; return ; } SA [ b ++ ] = SA [ a ] ; } while ( SA [ b ] < 0 ) ; do { SA [ a ++ ] = SA [ c ] ; SA [ c ++ ] = SA [ a ] ; if ( last <= c ) { while ( b < bufend ) { SA [ a ++ ] = SA [ b ] ; SA [ b ++ ] = SA [ a ] ; } SA [ a ] = SA [ b ] ; SA [ b ] = t ; return ; } } while ( SA [ c ] < 0 ) ; } } }
Merge - forward with internal buffer .
37,072
/**
 * Insertion sort for small groups of suffixes in SA[first, last), comparing via
 * ssCompare at the given depth. A tie (r == 0) is recorded by negating the SA
 * entry (~) so later phases can detect equal substrings.
 */
private void ssInsertionSort ( int PA , int first , int last , int depth ) { int i , j ; int t , r ; for ( i = last - 2 ; first <= i ; -- i ) { for ( t = SA [ i ] , j = i + 1 ; 0 < ( r = ssCompare ( PA + t , PA + SA [ j ] , depth ) ) ; ) { do { SA [ j - 1 ] = SA [ j ] ; } while ( ( ++ j < last ) && ( SA [ j ] < 0 ) ) ; if ( last <= j ) { break ; } } if ( r == 0 ) { SA [ j ] = ~ SA [ j ] ; } SA [ j - 1 ] = t ; } }
Insertionsort for small size groups
37,073
/**
 * Returns whichever of v1..v5 indexes the median of five suffix keys, comparing
 * the characters at depth Td via the classic 6-comparison median-of-5 network.
 * Only the local copies of v1..v5 are permuted; SA itself is not modified.
 */
private int ssMedian5 ( int Td , int PA , int v1 , int v2 , int v3 , int v4 , int v5 ) { int t ; if ( T [ start + Td + SA [ PA + SA [ v2 ] ] ] > T [ start + Td + SA [ PA + SA [ v3 ] ] ] ) { t = v2 ; v2 = v3 ; v3 = t ; } if ( T [ start + Td + SA [ PA + SA [ v4 ] ] ] > T [ start + Td + SA [ PA + SA [ v5 ] ] ] ) { t = v4 ; v4 = v5 ; v5 = t ; } if ( T [ start + Td + SA [ PA + SA [ v2 ] ] ] > T [ start + Td + SA [ PA + SA [ v4 ] ] ] ) { t = v2 ; v2 = v4 ; v4 = t ; t = v3 ; v3 = v5 ; v5 = t ; } if ( T [ start + Td + SA [ PA + SA [ v1 ] ] ] > T [ start + Td + SA [ PA + SA [ v3 ] ] ] ) { t = v1 ; v1 = v3 ; v3 = t ; } if ( T [ start + Td + SA [ PA + SA [ v1 ] ] ] > T [ start + Td + SA [ PA + SA [ v4 ] ] ] ) { t = v1 ; v1 = v4 ; v4 = t ; t = v3 ; v3 = v5 ; v5 = t ; } if ( T [ start + Td + SA [ PA + SA [ v3 ] ] ] > T [ start + Td + SA [ PA + SA [ v4 ] ] ] ) { return v4 ; } return v3 ; }
Returns the median of five elements
37,074
/**
 * Binary partition for substrings over SA[first, last): elements satisfying the
 * depth condition are gathered at the front and marked by bitwise negation (~)
 * so subsequent phases can skip them.
 *
 * @return the split point a; [first, a) holds the marked elements
 */
private int ssPartition ( int PA , int first , int last , int depth ) { int a , b ; int t ; for ( a = first - 1 , b = last ; ; ) { for ( ; ( ++ a < b ) && ( ( SA [ PA + SA [ a ] ] + depth ) >= ( SA [ PA + SA [ a ] + 1 ] + 1 ) ) ; ) { SA [ a ] = ~ SA [ a ] ; } for ( ; ( a < -- b ) && ( ( SA [ PA + SA [ b ] ] + depth ) < ( SA [ PA + SA [ b ] + 1 ] + 1 ) ) ; ) { } if ( b <= a ) { break ; } t = ~ SA [ b ] ; SA [ b ] = SA [ a ] ; SA [ a ] = t ; } if ( first < a ) { SA [ first ] = ~ SA [ first ] ; } return a ; }
Binary partition for substrings .
37,075
/**
 * Simple top-down heapsort on SA[sa, sa + size), keyed by the characters at
 * depth Td. Even sizes are handled by pre-positioning the last element before
 * the heap is built, then re-fixing the root once.
 */
private void ssHeapSort ( int Td , int PA , int sa , int size ) { int i , m , t ; m = size ; if ( ( size % 2 ) == 0 ) { m -- ; if ( T [ start + Td + SA [ PA + SA [ sa + ( m / 2 ) ] ] ] < T [ start + Td + SA [ PA + SA [ sa + m ] ] ] ) { swapInSA ( sa + m , sa + ( m / 2 ) ) ; } } for ( i = m / 2 - 1 ; 0 <= i ; -- i ) { ssFixDown ( Td , PA , sa , i , m ) ; } if ( ( size % 2 ) == 0 ) { swapInSA ( sa , sa + m ) ; ssFixDown ( Td , PA , sa , 0 , m ) ; } for ( i = m - 1 ; 0 < i ; -- i ) { t = SA [ sa ] ; SA [ sa ] = SA [ sa + i ] ; ssFixDown ( Td , PA , sa , 0 , i ) ; SA [ sa + i ] = t ; } }
Simple top - down heapsort .
37,076
/**
 * Tandem-repeat sort (trsort): each outer pass doubles the effective comparison
 * depth (ISAd advances by its current distance from ISA) and intro-sorts the
 * groups that are still unsorted. Negative SA entries encode the length of runs
 * of already-sorted suffixes so they can be skipped; TRBudget bounds the work
 * per pass. Terminates when a pass finds nothing unsorted.
 */
private void trSort ( int ISA , int n , int depth ) { TRBudget budget = new TRBudget ( trIlg ( n ) * 2 / 3 , n ) ; int ISAd ; int first , last ; int t , skip , unsorted ; for ( ISAd = ISA + depth ; - n < SA [ 0 ] ; ISAd += ISAd - ISA ) { first = 0 ; skip = 0 ; unsorted = 0 ; do { if ( ( t = SA [ first ] ) < 0 ) { first -= t ; skip += t ; } else { if ( skip != 0 ) { SA [ first + skip ] = skip ; skip = 0 ; } last = SA [ ISA + t ] + 1 ; if ( 1 < ( last - first ) ) { budget . count = 0 ; trIntroSort ( ISA , ISAd , first , last , budget ) ; if ( budget . count != 0 ) { unsorted += budget . count ; } else { skip = first - last ; } } else if ( ( last - first ) == 1 ) { skip = - 1 ; } first = last ; } } while ( first < n ) ; if ( skip != 0 ) { SA [ first + skip ] = skip ; } if ( unsorted == 0 ) { break ; } } }
Tandem repeat sort
37,077
/**
 * Returns whichever of v1..v5 indexes the median of five elements keyed by
 * SA[ISAd + SA[v]], using the 6-comparison median-of-5 network. Only the local
 * copies of v1..v5 are permuted; SA is not modified.
 */
private int trMedian5 ( int ISAd , int v1 , int v2 , int v3 , int v4 , int v5 ) { int t ; if ( SA [ ISAd + SA [ v2 ] ] > SA [ ISAd + SA [ v3 ] ] ) { t = v2 ; v2 = v3 ; v3 = t ; } if ( SA [ ISAd + SA [ v4 ] ] > SA [ ISAd + SA [ v5 ] ] ) { t = v4 ; v4 = v5 ; v5 = t ; } if ( SA [ ISAd + SA [ v2 ] ] > SA [ ISAd + SA [ v4 ] ] ) { t = v2 ; v2 = v4 ; v4 = t ; t = v3 ; v3 = v5 ; v5 = t ; } if ( SA [ ISAd + SA [ v1 ] ] > SA [ ISAd + SA [ v3 ] ] ) { t = v1 ; v1 = v3 ; v3 = t ; } if ( SA [ ISAd + SA [ v1 ] ] > SA [ ISAd + SA [ v4 ] ] ) { t = v1 ; v1 = v4 ; v4 = t ; t = v3 ; v3 = v5 ; v5 = t ; } if ( SA [ ISAd + SA [ v3 ] ] > SA [ ISAd + SA [ v4 ] ] ) { return v4 ; } return v3 ; }
Returns the median of five elements .
37,078
/**
 * Derives the sorted order of the middle partition [a, b) from the already
 * sorted left [first, a) and right [b, last) partitions: suffixes whose
 * predecessor at distance {@code depth} ranks into the middle group (ISA value
 * v == b - 1) are appended from the left end and prepended from the right end,
 * updating their ISA entries as they are placed.
 */
private void trCopy ( int ISA , int first , int a , int b , int last , int depth ) { int c , d , e ; int s , v ; v = b - 1 ; for ( c = first , d = a - 1 ; c <= d ; ++ c ) { s = SA [ c ] - depth ; if ( ( 0 <= s ) && ( SA [ ISA + s ] == v ) ) { SA [ ++ d ] = s ; SA [ ISA + s ] = d ; } } for ( c = last - 1 , e = d + 1 , d = b ; e < d ; -- c ) { s = SA [ c ] - depth ; if ( ( 0 <= s ) && ( SA [ ISA + s ] == v ) ) { SA [ -- d ] = s ; SA [ ISA + s ] = d ; } } }
sort suffixes of middle partition by using sorted order of suffixes of left and right partition .
37,079
/**
 * Adds a sequence to the suffix tree: every item is appended to the master
 * sequence, followed by a terminal marker unique to this sequence.
 */
void add(S sequence) {
  for (I item : sequence) {
    masterSequence.add(item);
  }
  // The terminal distinguishes where this sequence ends inside masterSequence.
  masterSequence.add(new SequenceTerminal<>(sequence));
}
Adds a Sequence to the suffix tree .
37,080
/**
 * Returns an iterator over the items of the master sequence (including
 * terminals). Returns null from next() once the sequence is exhausted;
 * remove() is unsupported.
 *
 * Bug fix: the bound in next() was {@code <=}, which allowed a read one past
 * the end of masterSequence and raised IndexOutOfBoundsException instead of
 * returning null.
 */
public Iterator < Object > iterator ( ) {
  return new Iterator<Object>() {
    int currentPosition = 0;

    @Override
    public boolean hasNext() {
      return masterSequence.size() > currentPosition;
    }

    @Override
    public Object next() {
      // Exclusive bound: get(size()) would throw.
      if (currentPosition < masterSequence.size()) {
        return masterSequence.get(currentPosition++);
      }
      return null;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("Remove is not supported.");
    }
  };
}
Retrieves an iterator for the sequence .
37,081
/**
 * Inserts the given suffix at the supplied active point on this edge. If the
 * next item along the edge equals the suffix's end item, the active point just
 * advances (implicit insertion). Otherwise the edge is split at the active
 * point, the suffix is shortened, the active point is updated, and — unless the
 * suffix is now empty (the empty branch is intentionally a no-op) — the
 * remaining suffix is re-inserted into the tree.
 */
void insert ( Suffix < T , S > suffix , ActivePoint < T , S > activePoint ) { Object item = suffix . getEndItem ( ) ; Object nextItem = getItemAt ( activePoint . getLength ( ) ) ; if ( item . equals ( nextItem ) ) { activePoint . incrementLength ( ) ; } else { split ( suffix , activePoint ) ; suffix . decrement ( ) ; activePoint . updateAfterInsert ( suffix ) ; if ( suffix . isEmpty ( ) ) { } else tree . insert ( suffix ) ; } }
Insert the given suffix at the supplied active point .
37,082
/**
 * Splits this edge at the active point so the suffix can diverge: creates an
 * internal break node with two children — a fresh edge for the new (diverging)
 * item, and an edge carrying the remainder of this edge (inheriting its end and
 * terminal) — then truncates this edge to end at the split. The break node is
 * registered for suffix linking and the tree's insert count is bumped.
 */
private void split ( Suffix < T , S > suffix , ActivePoint < T , S > activePoint ) { Node < T , S > breakNode = new Node < > ( this , sequence , tree ) ; Edge < T , S > newEdge = new Edge < > ( suffix . getEndPosition ( ) - 1 , breakNode , sequence , tree ) ; breakNode . insert ( newEdge ) ; Edge < T , S > oldEdge = new Edge < > ( start + activePoint . getLength ( ) , breakNode , sequence , tree ) ; oldEdge . end = end ; oldEdge . terminal = this . terminal ; breakNode . insert ( oldEdge ) ; this . terminal = breakNode ; end = start + activePoint . getLength ( ) ; tree . setSuffixLink ( breakNode ) ; tree . incrementInsertCount ( ) ; }
Splits the edge to enable the insertion of supplied suffix at the supplied active point .
37,083
/**
 * Returns the item at the given position within this edge.
 *
 * @throws IllegalArgumentException if position exceeds the edge length
 */
@SuppressWarnings("unchecked")
T getItemAt(int position) {
  final int edgeLength = getLength();
  if (position <= edgeLength) {
    return (T) sequence.getItem(start + position);
  }
  throw new IllegalArgumentException("Index " + position + " is greater than " + edgeLength + " - the length of this edge.");
}
Retrieves the item at given position within the current edge .
37,084
/**
 * Returns an iterator over the items of this edge. For an open-ended edge
 * (end == -1) iteration stops when a SequenceTerminal is encountered; otherwise
 * it stops one item before getEnd(). Note that hasNext is only recomputed as a
 * side effect of next(), so the first hasNext() call always reports true.
 * remove() is unsupported.
 */
public Iterator < T > iterator ( ) { return new Iterator < T > ( ) { private int currentPosition = start ; private boolean hasNext = true ; public boolean hasNext ( ) { return hasNext ; } @ SuppressWarnings ( "unchecked" ) public T next ( ) { if ( end == - 1 ) hasNext = ! sequence . getItem ( currentPosition ) . getClass ( ) . equals ( SequenceTerminal . class ) ; else hasNext = currentPosition < getEnd ( ) - 1 ; return ( T ) sequence . getItem ( currentPosition ++ ) ; } public void remove ( ) { throw new UnsupportedOperationException ( "The remove method is not supported." ) ; } } ; }
Retrieves an iterator that steps over the items in this edge .
37,085
/**
 * Determines whether this island competes with the given one: two islands are
 * competitors if some horizontal or vertical line passes through both, i.e.
 * any pair of their coordinates shares a row or a column.
 */
public boolean isCompetitor(Island isl) {
  for (Coordinate theirs : isl) {
    for (Coordinate ours : islandCoordinates) {
      if (theirs.sameColumn(ours) || theirs.sameRow(ours)) {
        return true;
      }
    }
  }
  return false;
}
Two islands are competitors if there is a horizontal or vertical line which goes through both islands
37,086
/**
 * Transforms this LCP interval into an IntStream covering the token positions
 * of every occurrence: for each suffix-array entry in [start, end), the range
 * [tokenPosition, tokenPosition + length).
 *
 * Fix: the original built the result by repeatedly wrapping IntStream.concat,
 * which the JDK documents as risking StackOverflowError for deeply nested
 * chains; flatMap produces the same elements in the same order without the
 * nesting.
 */
public IntStream getAllOccurrencesAsRanges ( ) {
  return IntStream.range(start, end)
      .flatMap(i -> {
        int tokenPosition = tokenIndex.suffix_array[i];
        return IntStream.range(tokenPosition, tokenPosition + length);
      });
}
Transforms the LCP interval into an IntStream covering the token ranges of all occurrences.
37,087
/**
 * Quadratic sort for small subarrays — a variant of selection sort chosen so
 * group numbers in V can be updated consistently. Each pass pulls the minimum
 * key to the front, gathers equal keys immediately behind it, and closes that
 * group via update_group. A trailing singleton group is marked fully sorted by
 * setting I[pa] = -1.
 */
private void select_sort_split ( int p , int n ) { int pa , pb , pi , pn ; int f , v ; pa = p ; pn = p + n - 1 ; while ( pa < pn ) { for ( pi = pb = pa + 1 , f = KEY ( pa ) ; pi <= pn ; ++ pi ) if ( ( v = KEY ( pi ) ) < f ) { f = v ; SWAP ( pi , pa ) ; pb = pa + 1 ; } else if ( v == f ) { SWAP ( pi , pb ) ; ++ pb ; } update_group ( pa , pb - 1 ) ; pa = pb ; } if ( pa == pn ) { V [ start + I [ pa ] ] = pa ; I [ pa ] = - 1 ; } }
Quadratic sorting method to use for small subarrays . To be able to update group numbers consistently a variant of selection sorting is used .
37,088
/**
 * Builds a suffix array and its LCP array for a character sequence, using the
 * supplied suffix-array construction algorithm.
 */
public static SuffixData createWithLCP(CharSequence s, ISuffixArrayBuilder builder) {
  final CharSequenceAdapter adapter = new CharSequenceAdapter(builder);
  final int[] suffixArray = adapter.buildSuffixArray(s);
  return new SuffixData(suffixArray, computeLCP(adapter.input, 0, s.length(), suffixArray));
}
Create a suffix array and an LCP array for a given character sequence use the given algorithm for building the suffix array .
37,089
/**
 * Builds a suffix array and its LCP array for an int-symbol sequence using the
 * default algorithm, decorated to tolerate arbitrary symbol values and the
 * required extra trailing cells.
 */
public static SuffixData createWithLCP(int[] input, int start, int length) {
  return createWithLCP(input, start, length,
      new DensePositiveDecorator(new ExtraTrailingCellsDecorator(defaultAlgorithm(), 3)));
}
Create a suffix array and an LCP array for a given input sequence of symbols .
37,090
/**
 * Builds a suffix array and its LCP array for an int-symbol sequence with a
 * caller-chosen suffix-array construction strategy.
 */
public static SuffixData createWithLCP(int[] input, int start, int length, ISuffixArrayBuilder builder) {
  final int[] suffixArray = builder.buildSuffixArray(input, start, length);
  return new SuffixData(suffixArray, computeLCP(input, start, length, suffixArray));
}
Create a suffix array and an LCP array for a given input sequence of symbols and a custom suffix array building strategy .
37,091
/**
 * Builds a suffix array and its LCP array for a generic object array, using a
 * caller-chosen suffix-array construction strategy and the given comparator to
 * order elements.
 */
public static <T> SuffixData createWithLCP(T[] input, ISuffixArrayBuilder builder, Comparator<? super T> comparator) {
  final GenericArrayAdapter adapter = new GenericArrayAdapter(builder, comparator);
  final int[] suffixArray = adapter.buildSuffixArray(input);
  return new SuffixData(suffixArray, computeLCP(adapter.input, 0, input.length, suffixArray));
}
Create a suffix array and an LCP array for a given generic array and a custom suffix array building strategy using the given T object comparator .
37,092
/**
 * Returns the maximum value in input[start, start + length).
 * Requires length >= 1 (asserted).
 */
static int max(int[] input, int start, int length) {
  assert length >= 1;
  int result = input[start];
  for (int index = start + 1; index < start + length; index++) {
    if (input[index] > result) {
      result = input[index];
    }
  }
  return result;
}
Determine the maximum value in a slice of an array .
37,093
/**
 * Returns the minimum value in input[start, start + length).
 * Requires length >= 1 (asserted).
 */
static int min(int[] input, int start, int length) {
  assert length >= 1;
  int result = input[start];
  for (int index = start + 1; index < start + length; index++) {
    if (input[index] < result) {
      result = input[index];
    }
  }
  return result;
}
Determine the minimum value in a slice of an array .
37,094
/**
 * Computes both the minimum and maximum of input[start, start + length) in a
 * single pass and returns them as a MinMax pair.
 */
static MinMax minmax(int[] input, final int start, final int length) {
  int lo = input[start];
  int hi = lo;
  for (int index = start + 1; index < start + length; index++) {
    final int v = input[index];
    if (v < lo) {
      lo = v;
    }
    if (v > hi) {
      hi = v;
    }
  }
  return new MinMax(lo, hi);
}
Calculate minimum and maximum value for a slice of an array .
37,095
/**
 * Returns the item located the given distance from the end of this suffix
 * (distance 0 is the last item).
 *
 * Fix: the out-of-range exception message previously ended abruptly with
 * ": " and no detail; it now reports the suffix bounds.
 *
 * @throws IllegalArgumentException if the distance reaches before the start
 */
public Object getItemXFromEnd ( int distanceFromEnd ) {
  if ((end - distanceFromEnd) < start) {
    throw new IllegalArgumentException(distanceFromEnd
        + " extends before the start of this suffix: start=" + start + ", end=" + end);
  }
  return sequence.getItem(end - distanceFromEnd);
}
Retrieves the item the given distance from the end of the suffix .
37,096
/**
 * Renders this matrix as an HTML table with the archipelago's preferred matches
 * highlighted. mat[row] records the matched column per row (0 = no match, as
 * Java zero-initializes the array); a matched cell is pushed right with a
 * colspan filler and shown as a light-green "M" cell.
 *
 * @param arch the preferred matches to highlight
 * @return HTML table markup
 */
public String toHtml ( Archipelago arch ) { int mat [ ] = new int [ rowNum ( ) ] ; for ( Island isl : arch . getIslands ( ) ) { for ( Coordinate c : isl ) { mat [ c . row ] = c . column ; } } StringBuilder result = new StringBuilder ( "<table>\n<tr><td></td>\n" ) ; ArrayList < String > colLabels = columnLabels ( ) ; for ( String cLabel : colLabels ) { result . append ( "<td>" ) . append ( cLabel ) . append ( "</td>" ) ; } result . append ( "</tr>\n" ) ; ArrayList < String > rLabels = rowLabels ( ) ; int row = 0 ; for ( String label : rLabels ) { result . append ( "<tr><td>" ) . append ( label ) . append ( "</td>" ) ; if ( mat [ row ] > 0 ) { result . append ( "<td colspan=\"" ) . append ( mat [ row ] ) . append ( "\"></td>" ) . append ( "<td BGCOLOR=\"lightgreen\">M</td>" ) ; } result . append ( "</tr>\n" ) ; row ++ ; } result . append ( "</table>" ) ; return result . toString ( ) ; }
Renders the matrix as an HTML table; arch holds the preferred matches to highlight.
37,097
/**
 * Inserts the suffix at this node (the active point's node). If an edge already
 * starts with the suffix's end item, the active point moves onto that edge
 * (recording a suffix link first when applicable). Otherwise a new leaf edge is
 * created (capturing sequence terminals via saveSequenceTerminal), the suffix
 * is shortened, the active point updated, suffix links wired, and — unless the
 * suffix is now empty (the empty branch is intentionally a no-op) — the
 * remainder is re-inserted into the tree.
 */
@ SuppressWarnings ( "unchecked" ) void insert ( Suffix < T , S > suffix , ActivePoint < T , S > activePoint ) { Object item = suffix . getEndItem ( ) ; if ( edges . containsKey ( item ) ) { if ( tree . isNotFirstInsert ( ) && activePoint . getNode ( ) != tree . getRoot ( ) ) tree . setSuffixLink ( activePoint . getNode ( ) ) ; activePoint . setEdge ( edges . get ( item ) ) ; activePoint . incrementLength ( ) ; } else { saveSequenceTerminal ( item ) ; Edge < T , S > newEdge = new Edge < > ( suffix . getEndPosition ( ) - 1 , this , sequence , tree ) ; edges . put ( ( T ) suffix . getEndItem ( ) , newEdge ) ; suffix . decrement ( ) ; activePoint . updateAfterInsert ( suffix ) ; if ( tree . isNotFirstInsert ( ) && ! this . equals ( tree . getRoot ( ) ) ) { tree . getLastNodeInserted ( ) . setSuffixLink ( this ) ; } if ( suffix . isEmpty ( ) ) { } else tree . insert ( suffix ) ; } }
Inserts the suffix at the given active point .
37,098
/**
 * Registers the given edge as a child of this node, keyed by its first item.
 *
 * @throws IllegalArgumentException if an edge starting with the same item
 *         already exists on this node
 */
void insert(Edge<T, S> edge) {
  final T startItem = edge.getStartItem();
  if (edges.containsKey(startItem)) {
    throw new IllegalArgumentException("Item " + startItem + " already exists in node " + toString());
  }
  edges.put(startItem, edge);
}
Inserts the given edge as a child of this node . The edge must not already exist as child or an IllegalArgumentException will be thrown .
37,099
/**
 * Copies the sequence into an Object array and appends the terminal marker as
 * the final element.
 *
 * Improvement: the terminal is appended to the list and the array produced via
 * List.toArray, replacing the original's manual element-by-element copy into a
 * hand-sized array (identical result, less code).
 *
 * @return a new array of length sequence-size + 1, ending with the terminal
 */
static < I , S extends Iterable < I > > Object [ ] addTerminalToSequence ( S sequence , SequenceTerminal < S > terminatingObject ) {
  ArrayList<Object> items = new ArrayList<>();
  for (I item : sequence) {
    items.add(item);
  }
  items.add(terminatingObject);
  return items.toArray();
}
Appends a SequenceTerminal element to a supplied array .