idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
8,800 | public static String [ ] splitByComma ( String input , boolean allowEmpty ) { if ( input == null ) { return null ; } String [ ] splitWithEmptyValues = trim ( input ) . split ( "\\s*,\\s*" , - 1 ) ; return allowEmpty ? splitWithEmptyValues : subtraction ( splitWithEmptyValues , new String [ ] { "" } ) ; } | Splits String value with comma used as a separator . The whitespaces around values are trimmed . |
8,801 | public static String [ ] subtraction ( String [ ] arr1 , String [ ] arr2 ) { if ( arr1 == null || arr1 . length == 0 || arr2 == null || arr2 . length == 0 ) { return arr1 ; } List < String > list = new ArrayList < String > ( Arrays . asList ( arr1 ) ) ; list . removeAll ( Arrays . asList ( arr2 ) ) ; return list . toArray ( new String [ 0 ] ) ; } | Returns subtraction between given String arrays . |
8,802 | protected boolean evictInternal ( Object key ) { Data keyData = toDataWithStrategy ( key ) ; MapOperation operation = operationProvider . createEvictOperation ( name , keyData , false ) ; return ( Boolean ) invokeOperation ( keyData , operation ) ; } | Evicts a key from a map . |
8,803 | protected void loadInternal ( Set < K > keys , Iterable < Data > dataKeys , boolean replaceExistingValues ) { if ( dataKeys == null ) { dataKeys = convertToData ( keys ) ; } Map < Integer , List < Data > > partitionIdToKeys = getPartitionIdToKeysMap ( dataKeys ) ; Iterable < Entry < Integer , List < Data > > > entries = partitionIdToKeys . entrySet ( ) ; for ( Entry < Integer , List < Data > > entry : entries ) { Integer partitionId = entry . getKey ( ) ; List < Data > correspondingKeys = entry . getValue ( ) ; Operation operation = createLoadAllOperation ( correspondingKeys , replaceExistingValues ) ; operationService . invokeOnPartition ( SERVICE_NAME , operation , partitionId ) ; } waitUntilLoaded ( ) ; } | Maps keys to corresponding partitions and sends operations to them . |
8,804 | private void parse ( String version ) { String [ ] tokens = StringUtil . tokenizeVersionString ( version ) ; this . major = Byte . valueOf ( tokens [ 0 ] ) ; this . minor = Byte . valueOf ( tokens [ 1 ] ) ; if ( tokens . length > 3 && tokens [ 3 ] != null ) { this . patch = Byte . valueOf ( tokens [ 3 ] ) ; } } | populate this Version s major minor patch from given String |
8,805 | public static String toPrettyString ( long size , MemoryUnit unit ) { if ( unit . toGigaBytes ( size ) >= PRETTY_FORMAT_LIMIT ) { return unit . toGigaBytes ( size ) + " GB" ; } if ( unit . toMegaBytes ( size ) >= PRETTY_FORMAT_LIMIT ) { return unit . toMegaBytes ( size ) + " MB" ; } if ( unit . toKiloBytes ( size ) >= PRETTY_FORMAT_LIMIT ) { return unit . toKiloBytes ( size ) + " KB" ; } if ( size % MemoryUnit . K == 0 ) { return unit . toKiloBytes ( size ) + " KB" ; } return size + " bytes" ; } | Utility method to create a pretty format representation of given value in given unit . |
8,806 | public void put ( K key , V value ) { checkNotNull ( key , "Key cannot be null" ) ; checkNotNull ( value , "Value cannot be null" ) ; Set < V > values = backingMap . get ( key ) ; if ( values == null ) { values = new HashSet < V > ( ) ; backingMap . put ( key , values ) ; } values . add ( value ) ; } | Associate value to a given key . It has no effect if the value is already associated with the key . |
8,807 | public Set < V > get ( K key ) { checkNotNull ( key , "Key cannot be null" ) ; return backingMap . get ( key ) ; } | Return Set of values associated with a given key |
8,808 | public static < KeyIn , ValueIn , ValueOut > Supplier < KeyIn , ValueIn , ValueOut > all ( ) { return new AcceptAllSupplier ( null ) ; } | The predefined Supplier selects all values and does not perform any kind of data transformation . Input value types need to match the aggregations expected value type to make this Supplier work . |
8,809 | public long newId ( ) { for ( ; ; ) { Block block = this . block ; long res = block . next ( ) ; if ( res != Long . MIN_VALUE ) { return res ; } synchronized ( this ) { if ( block != this . block ) { continue ; } this . block = new Block ( batchIdSupplier . newIdBatch ( batchSize ) , validity ) ; } } } | Return next ID from current batch or get new batch from supplier if current batch is spent or expired . |
8,810 | public IcmpFailureDetectorConfig setIntervalMilliseconds ( int intervalMilliseconds ) { if ( intervalMilliseconds < MIN_INTERVAL_MILLIS ) { throw new ConfigurationException ( format ( "Interval can't be set to less than %d milliseconds." , MIN_INTERVAL_MILLIS ) ) ; } this . intervalMilliseconds = intervalMilliseconds ; return this ; } | Sets the time in milliseconds between each ping This value can not be smaller than 1000 milliseconds |
8,811 | PartitionRuntimeState createMigrationCommitPartitionState ( MigrationInfo migrationInfo ) { lock . lock ( ) ; try { if ( ! partitionStateManager . isInitialized ( ) ) { return null ; } List < MigrationInfo > completedMigrations = migrationManager . getCompletedMigrationsCopy ( ) ; InternalPartition [ ] partitions = partitionStateManager . getPartitionsCopy ( ) ; int partitionId = migrationInfo . getPartitionId ( ) ; InternalPartitionImpl partition = ( InternalPartitionImpl ) partitions [ partitionId ] ; migrationManager . applyMigration ( partition , migrationInfo ) ; migrationInfo . setStatus ( MigrationStatus . SUCCESS ) ; completedMigrations . add ( migrationInfo ) ; int committedVersion = getPartitionStateVersion ( ) + 1 ; return new PartitionRuntimeState ( partitions , completedMigrations , committedVersion ) ; } finally { lock . unlock ( ) ; } } | Creates a transient PartitionRuntimeState to commit given migration . Result migration is applied to partition table and migration is added to completed - migrations set . Version of created partition table is incremented by 1 . |
8,812 | @ SuppressWarnings ( "checkstyle:npathcomplexity" ) void publishPartitionRuntimeState ( ) { if ( ! partitionStateManager . isInitialized ( ) ) { return ; } if ( ! node . isMaster ( ) ) { return ; } if ( ! areMigrationTasksAllowed ( ) ) { return ; } PartitionRuntimeState partitionState = createPartitionStateInternal ( ) ; if ( partitionState == null ) { return ; } if ( logger . isFineEnabled ( ) ) { logger . fine ( "Publishing partition state, version: " + partitionState . getVersion ( ) ) ; } PartitionStateOperation op = new PartitionStateOperation ( partitionState , false ) ; OperationService operationService = nodeEngine . getOperationService ( ) ; Collection < Member > members = node . clusterService . getMembers ( ) ; for ( Member member : members ) { if ( ! member . localMember ( ) ) { try { operationService . send ( op , member . getAddress ( ) ) ; } catch ( Exception e ) { logger . finest ( e ) ; } } } } | Called on the master node to publish the current partition state to all cluster nodes . It will not publish the partition state if the partitions have not yet been initialized there is ongoing repartitioning or a node is joining the cluster . |
8,813 | public static void checkNearCacheConfig ( String mapName , NearCacheConfig nearCacheConfig , NativeMemoryConfig nativeMemoryConfig , boolean isClient ) { checkNotNativeWhenOpenSource ( nearCacheConfig . getInMemoryFormat ( ) ) ; checkLocalUpdatePolicy ( mapName , nearCacheConfig . getLocalUpdatePolicy ( ) ) ; checkEvictionConfig ( nearCacheConfig . getEvictionConfig ( ) , true ) ; checkOnHeapNearCacheMaxSizePolicy ( nearCacheConfig ) ; checkNearCacheNativeMemoryConfig ( nearCacheConfig . getInMemoryFormat ( ) , nativeMemoryConfig , getBuildInfo ( ) . isEnterprise ( ) ) ; if ( isClient && nearCacheConfig . isCacheLocalEntries ( ) ) { throw new IllegalArgumentException ( "The Near Cache option `cache-local-entries` is not supported in " + "client configurations." ) ; } checkPreloaderConfig ( nearCacheConfig , isClient ) ; } | Checks preconditions to create a map proxy with Near Cache . |
8,814 | private static void checkLocalUpdatePolicy ( String mapName , LocalUpdatePolicy localUpdatePolicy ) { if ( localUpdatePolicy != INVALIDATE ) { throw new IllegalArgumentException ( format ( "Wrong `local-update-policy` option is selected for `%s` map Near Cache." + " Only `%s` option is supported but found `%s`" , mapName , INVALIDATE , localUpdatePolicy ) ) ; } } | Checks IMap s supported Near Cache local update policy configuration . |
8,815 | public static boolean checkAndLogPropertyDeprecated ( HazelcastProperties properties , HazelcastProperty hazelcastProperty ) { if ( properties . containsKey ( hazelcastProperty ) ) { LOGGER . warning ( "Property " + hazelcastProperty . getName ( ) + " is deprecated. Use configuration object/element instead." ) ; return properties . getBoolean ( hazelcastProperty ) ; } return false ; } | Checks if given group property is defined within given Hazelcast properties . Logs a warning when the property is defined . |
8,816 | @ SuppressWarnings ( "checkstyle:magicnumber" ) public static String toMD5String ( String str ) { try { MessageDigest md = MessageDigest . getInstance ( "MD5" ) ; if ( md == null || str == null ) { return null ; } byte [ ] byteData = md . digest ( str . getBytes ( Charset . forName ( "UTF-8" ) ) ) ; StringBuilder sb = new StringBuilder ( ) ; for ( byte aByteData : byteData ) { sb . append ( Integer . toString ( ( aByteData & 0xff ) + 0x100 , 16 ) . substring ( 1 ) ) ; } return sb . toString ( ) ; } catch ( NoSuchAlgorithmException ignored ) { return null ; } } | Converts given string to MD5 hash |
8,817 | public void setAllFrom ( OnDemandIndexStats onDemandStats ) { this . creationTime = onDemandStats . getCreationTime ( ) ; this . hitCount = onDemandStats . getHitCount ( ) ; this . queryCount = onDemandStats . getQueryCount ( ) ; this . averageHitSelectivity = onDemandStats . getAverageHitSelectivity ( ) ; this . averageHitLatency = onDemandStats . getAverageHitLatency ( ) ; this . insertCount = onDemandStats . getInsertCount ( ) ; this . totalInsertLatency = onDemandStats . getTotalInsertLatency ( ) ; this . updateCount = onDemandStats . getUpdateCount ( ) ; this . totalUpdateLatency = onDemandStats . getTotalUpdateLatency ( ) ; this . removeCount = onDemandStats . getRemoveCount ( ) ; this . totalRemoveLatency = onDemandStats . getTotalRemoveLatency ( ) ; this . memoryCost = onDemandStats . getMemoryCost ( ) ; } | Sets all the values in this stats to the corresponding values in the given on - demand stats . |
8,818 | protected final void invalidateAllKeysInNearCaches ( ) { if ( mapContainer . hasInvalidationListener ( ) ) { int partitionId = getPartitionId ( ) ; Invalidator invalidator = getNearCacheInvalidator ( ) ; if ( partitionId == getNodeEngine ( ) . getPartitionService ( ) . getPartitionId ( name ) ) { invalidator . invalidateAllKeys ( name , getCallerUuid ( ) ) ; } invalidator . resetPartitionMetaData ( name , getPartitionId ( ) ) ; } } | This method helps to add clearing Near Cache event only from one - partition which matches partitionId of the map name . |
8,819 | public int accumulatorInfoCountOfMap ( String mapName ) { ConcurrentMap < String , AccumulatorInfo > accumulatorInfo = cacheInfoPerMap . get ( mapName ) ; if ( accumulatorInfo == null ) { return 0 ; } else { return accumulatorInfo . size ( ) ; } } | only for testing |
8,820 | private void commitSource ( ) { int partitionId = getPartitionId ( ) ; InternalPartitionServiceImpl partitionService = getService ( ) ; PartitionReplicaManager replicaManager = partitionService . getReplicaManager ( ) ; ILogger logger = getLogger ( ) ; int sourceNewReplicaIndex = migrationInfo . getSourceNewReplicaIndex ( ) ; if ( sourceNewReplicaIndex < 0 ) { clearPartitionReplicaVersions ( partitionId ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( "Replica versions are cleared in source after migration. partitionId=" + partitionId ) ; } } else if ( migrationInfo . getSourceCurrentReplicaIndex ( ) != sourceNewReplicaIndex && sourceNewReplicaIndex > 1 ) { for ( ServiceNamespace namespace : replicaManager . getNamespaces ( partitionId ) ) { long [ ] versions = updatePartitionReplicaVersions ( replicaManager , partitionId , namespace , sourceNewReplicaIndex - 1 ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( "Replica versions are set after SHIFT DOWN migration. partitionId=" + partitionId + " namespace: " + namespace + " replica versions=" + Arrays . toString ( versions ) ) ; } } } } | Updates the replica versions on the migration source if the replica index has changed . |
8,821 | private void rollbackDestination ( ) { int partitionId = getPartitionId ( ) ; InternalPartitionServiceImpl partitionService = getService ( ) ; PartitionReplicaManager replicaManager = partitionService . getReplicaManager ( ) ; ILogger logger = getLogger ( ) ; int destinationCurrentReplicaIndex = migrationInfo . getDestinationCurrentReplicaIndex ( ) ; if ( destinationCurrentReplicaIndex == - 1 ) { clearPartitionReplicaVersions ( partitionId ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( "Replica versions are cleared in destination after failed migration. partitionId=" + partitionId ) ; } } else { int replicaOffset = migrationInfo . getDestinationCurrentReplicaIndex ( ) <= 1 ? 1 : migrationInfo . getDestinationCurrentReplicaIndex ( ) ; for ( ServiceNamespace namespace : replicaManager . getNamespaces ( partitionId ) ) { long [ ] versions = updatePartitionReplicaVersions ( replicaManager , partitionId , namespace , replicaOffset - 1 ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( "Replica versions are rolled back in destination after failed migration. partitionId=" + partitionId + " namespace: " + namespace + " replica versions=" + Arrays . toString ( versions ) ) ; } } } } | Updates the replica versions on the migration destination . |
8,822 | boolean isMetadataGroupLeader ( ) { CPMemberInfo localCPMember = getLocalCPMember ( ) ; if ( localCPMember == null ) { return false ; } RaftNode raftNode = raftService . getRaftNode ( getMetadataGroupId ( ) ) ; return raftNode != null && ! raftNode . isTerminatedOrSteppedDown ( ) && localCPMember . equals ( raftNode . getLeader ( ) ) ; } | could return stale information |
8,823 | private static int getNextEntryEventTypeId ( ) { int higherTypeId = Integer . MIN_VALUE ; int i = 0 ; EntryEventType [ ] values = EntryEventType . values ( ) ; for ( EntryEventType value : values ) { int typeId = value . getType ( ) ; if ( i == 0 ) { higherTypeId = typeId ; } else { if ( typeId > higherTypeId ) { higherTypeId = typeId ; } } i ++ ; } int eventFlagPosition = Integer . numberOfTrailingZeros ( higherTypeId ) ; return 1 << ++ eventFlagPosition ; } | Returns next event type ID . |
8,824 | private void doInBackup ( List < DelayedEntry > delayedEntries ) { writeBehindProcessor . callBeforeStoreListeners ( delayedEntries ) ; removeFinishedStoreOperationsFromQueues ( mapName , delayedEntries ) ; writeBehindProcessor . callAfterStoreListeners ( delayedEntries ) ; } | Process write - behind queues on backup partitions . It is a fake processing and it only removes entries from queues and does not persist any of them . |
8,825 | public static < E > void setMax ( E obj , AtomicLongFieldUpdater < E > updater , long value ) { for ( ; ; ) { long current = updater . get ( obj ) ; if ( current >= value ) { return ; } if ( updater . compareAndSet ( obj , current , value ) ) { return ; } } } | Atomically sets the max value . |
8,826 | private void removeExistingKeys ( Collection < Data > keys ) { if ( keys == null || keys . isEmpty ( ) ) { return ; } Storage storage = recordStore . getStorage ( ) ; keys . removeIf ( storage :: containsKey ) ; } | Removes keys from the provided collection which are contained in the partition record store . |
8,827 | public void addTaskAndWakeup ( Runnable task ) { taskQueue . add ( task ) ; if ( selectMode != SELECT_NOW ) { selector . wakeup ( ) ; } } | Adds a task to be executed by the NioThread and wakes up the selector so that it will eventually pick up the task . |
8,828 | void publishTaskState ( String taskName , Map stateSnapshot , ScheduledTaskStatisticsImpl statsSnapshot , ScheduledTaskResult result ) { if ( logger . isFinestEnabled ( ) ) { log ( FINEST , "Publishing state, to replicas. State: " + stateSnapshot ) ; } Operation op = new SyncStateOperation ( getName ( ) , taskName , stateSnapshot , statsSnapshot , result ) ; createInvocationBuilder ( op ) . invoke ( ) . join ( ) ; } | State is published after every run . When replicas get promoted they start with the latest state . |
8,829 | private Future removeExistingKeys ( List < Data > keys ) { OperationService operationService = mapServiceContext . getNodeEngine ( ) . getOperationService ( ) ; Operation operation = new RemoveFromLoadAllOperation ( name , keys ) ; return operationService . invokeOnPartition ( MapService . SERVICE_NAME , operation , partitionId ) ; } | Removes keys already present in the partition record store from the provided keys list . This is done by sending a partition operation . This operation is supposed to be invoked locally and the provided parameter is supposed to be thread - safe as it will be mutated directly from the partition thread . |
8,830 | private List < Future > doBatchLoad ( List < Data > keys ) { Queue < List < Data > > batchChunks = createBatchChunks ( keys ) ; int size = batchChunks . size ( ) ; List < Future > futures = new ArrayList < > ( size ) ; while ( ! batchChunks . isEmpty ( ) ) { List < Data > chunk = batchChunks . poll ( ) ; List < Data > keyValueSequence = loadAndGet ( chunk ) ; if ( keyValueSequence . isEmpty ( ) ) { continue ; } futures . add ( sendOperation ( keyValueSequence ) ) ; } return futures ; } | Loads the values for the provided keys in batches and invokes partition operations to put the loaded entry batches into the record store . |
8,831 | private Queue < List < Data > > createBatchChunks ( List < Data > keys ) { Queue < List < Data > > chunks = new LinkedList < > ( ) ; int loadBatchSize = getLoadBatchSize ( ) ; int page = 0 ; List < Data > tmpKeys ; while ( ( tmpKeys = getBatchChunk ( keys , loadBatchSize , page ++ ) ) != null ) { chunks . add ( tmpKeys ) ; } return chunks ; } | Returns a queue of key batches |
8,832 | private List < Data > loadAndGet ( List < Data > keys ) { try { Map entries = mapDataStore . loadAll ( keys ) ; return getKeyValueSequence ( entries ) ; } catch ( Throwable t ) { logger . warning ( "Could not load keys from map store" , t ) ; throw ExceptionUtil . rethrow ( t ) ; } } | Loads the provided keys from the underlying map store and transforms them to a list of alternating serialised key - value pairs . |
8,833 | private List < Data > getKeyValueSequence ( Map < ? , ? > entries ) { if ( entries == null || entries . isEmpty ( ) ) { return Collections . emptyList ( ) ; } List < Data > keyValueSequence = new ArrayList < > ( entries . size ( ) * 2 ) ; for ( Map . Entry < ? , ? > entry : entries . entrySet ( ) ) { Object key = entry . getKey ( ) ; Object value = entry . getValue ( ) ; Data dataKey = mapServiceContext . toData ( key ) ; Data dataValue = mapServiceContext . toData ( value ) ; keyValueSequence . add ( dataKey ) ; keyValueSequence . add ( dataValue ) ; } return keyValueSequence ; } | Transforms a map to a list of serialised alternating key - value pairs . |
8,834 | private Future < ? > sendOperation ( List < Data > keyValueSequence ) { OperationService operationService = mapServiceContext . getNodeEngine ( ) . getOperationService ( ) ; Operation operation = createOperation ( keyValueSequence ) ; return operationService . invokeOnPartition ( MapService . SERVICE_NAME , operation , partitionId ) ; } | Invokes an operation to put the provided key - value pairs to the partition record store . |
8,835 | private Operation createOperation ( List < Data > keyValueSequence ) { NodeEngine nodeEngine = mapServiceContext . getNodeEngine ( ) ; MapOperationProvider operationProvider = mapServiceContext . getMapOperationProvider ( name ) ; MapOperation operation = operationProvider . createPutFromLoadAllOperation ( name , keyValueSequence ) ; operation . setNodeEngine ( nodeEngine ) ; operation . setPartitionId ( partitionId ) ; OperationAccessor . setCallerAddress ( operation , nodeEngine . getThisAddress ( ) ) ; operation . setCallerUuid ( nodeEngine . getLocalMember ( ) . getUuid ( ) ) ; operation . setServiceName ( MapService . SERVICE_NAME ) ; return operation ; } | Returns an operation to put the provided key - value pairs into the partition record store . |
8,836 | private void removeUnloadableKeys ( Collection < Data > keys ) { if ( keys == null || keys . isEmpty ( ) ) { return ; } keys . removeIf ( key -> ! mapDataStore . loadable ( key ) ) ; } | Removes unloadable keys from the provided key collection . |
8,837 | public static boolean allDone ( Collection < Future > futures ) { for ( Future f : futures ) { if ( ! f . isDone ( ) ) { return false ; } } return true ; } | Check if all futures are done |
8,838 | public static void checkAllDone ( Collection < Future > futures ) throws Exception { for ( Future f : futures ) { if ( f . isDone ( ) ) { f . get ( ) ; } } } | Rethrow exception of the first future that completed with an exception |
8,839 | public static List < Future > getAllDone ( Collection < Future > futures ) { List < Future > doneFutures = new ArrayList < Future > ( ) ; for ( Future f : futures ) { if ( f . isDone ( ) ) { doneFutures . add ( f ) ; } } return doneFutures ; } | Get all futures that are done |
8,840 | public String getProperty ( String name ) { String value = properties . getProperty ( name ) ; return value != null ? value : System . getProperty ( name ) ; } | Returns the value for a named property . If it has not been previously set it will try to get the value from the system properties . |
8,841 | public Config setCacheConfigs ( Map < String , CacheSimpleConfig > cacheConfigs ) { this . cacheConfigs . clear ( ) ; this . cacheConfigs . putAll ( cacheConfigs ) ; for ( final Entry < String , CacheSimpleConfig > entry : this . cacheConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of cache configurations mapped by config name . The config name may be a pattern with which the configuration was initially obtained . |
8,842 | public Config setAtomicLongConfigs ( Map < String , AtomicLongConfig > atomicLongConfigs ) { this . atomicLongConfigs . clear ( ) ; this . atomicLongConfigs . putAll ( atomicLongConfigs ) ; for ( Entry < String , AtomicLongConfig > entry : atomicLongConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of AtomicLong configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,843 | public Config setAtomicReferenceConfigs ( Map < String , AtomicReferenceConfig > atomicReferenceConfigs ) { this . atomicReferenceConfigs . clear ( ) ; this . atomicReferenceConfigs . putAll ( atomicReferenceConfigs ) ; for ( Entry < String , AtomicReferenceConfig > entry : atomicReferenceConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of AtomicReference configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,844 | public Config setCountDownLatchConfigs ( Map < String , CountDownLatchConfig > countDownLatchConfigs ) { this . countDownLatchConfigs . clear ( ) ; this . countDownLatchConfigs . putAll ( countDownLatchConfigs ) ; for ( Entry < String , CountDownLatchConfig > entry : countDownLatchConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of CountDownLatch configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,845 | public Config setReliableTopicConfigs ( Map < String , ReliableTopicConfig > reliableTopicConfigs ) { this . reliableTopicConfigs . clear ( ) ; this . reliableTopicConfigs . putAll ( reliableTopicConfigs ) ; for ( Entry < String , ReliableTopicConfig > entry : reliableTopicConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of reliable topic configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,846 | public Config setExecutorConfigs ( Map < String , ExecutorConfig > executorConfigs ) { this . executorConfigs . clear ( ) ; this . executorConfigs . putAll ( executorConfigs ) ; for ( Entry < String , ExecutorConfig > entry : executorConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of executor configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,847 | public Config setDurableExecutorConfigs ( Map < String , DurableExecutorConfig > durableExecutorConfigs ) { this . durableExecutorConfigs . clear ( ) ; this . durableExecutorConfigs . putAll ( durableExecutorConfigs ) ; for ( Entry < String , DurableExecutorConfig > entry : durableExecutorConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of durable executor configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,848 | public Config setScheduledExecutorConfigs ( Map < String , ScheduledExecutorConfig > scheduledExecutorConfigs ) { this . scheduledExecutorConfigs . clear ( ) ; this . scheduledExecutorConfigs . putAll ( scheduledExecutorConfigs ) ; for ( Entry < String , ScheduledExecutorConfig > entry : scheduledExecutorConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of scheduled executor configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,849 | public Config setCardinalityEstimatorConfigs ( Map < String , CardinalityEstimatorConfig > cardinalityEstimatorConfigs ) { this . cardinalityEstimatorConfigs . clear ( ) ; this . cardinalityEstimatorConfigs . putAll ( cardinalityEstimatorConfigs ) ; for ( Entry < String , CardinalityEstimatorConfig > entry : cardinalityEstimatorConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of cardinality estimator configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,850 | public Config setPNCounterConfigs ( Map < String , PNCounterConfig > pnCounterConfigs ) { this . pnCounterConfigs . clear ( ) ; this . pnCounterConfigs . putAll ( pnCounterConfigs ) ; for ( Entry < String , PNCounterConfig > entry : pnCounterConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of PN counter configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,851 | public Config setSemaphoreConfigs ( Map < String , SemaphoreConfig > semaphoreConfigs ) { this . semaphoreConfigs . clear ( ) ; this . semaphoreConfigs . putAll ( semaphoreConfigs ) ; for ( final Entry < String , SemaphoreConfig > entry : this . semaphoreConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of semaphore configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,852 | public Config setWanReplicationConfigs ( Map < String , WanReplicationConfig > wanReplicationConfigs ) { this . wanReplicationConfigs . clear ( ) ; this . wanReplicationConfigs . putAll ( wanReplicationConfigs ) ; for ( final Entry < String , WanReplicationConfig > entry : this . wanReplicationConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of WAN replication configurations mapped by config name . |
8,853 | public Config setJobTrackerConfigs ( Map < String , JobTrackerConfig > jobTrackerConfigs ) { this . jobTrackerConfigs . clear ( ) ; this . jobTrackerConfigs . putAll ( jobTrackerConfigs ) ; for ( final Entry < String , JobTrackerConfig > entry : this . jobTrackerConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of job tracker configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,854 | public Config setQuorumConfigs ( Map < String , QuorumConfig > quorumConfigs ) { this . quorumConfigs . clear ( ) ; this . quorumConfigs . putAll ( quorumConfigs ) ; for ( final Entry < String , QuorumConfig > entry : this . quorumConfigs . entrySet ( ) ) { entry . getValue ( ) . setName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of split - brain protection configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,855 | public Config setMapEventJournalConfigs ( Map < String , EventJournalConfig > eventJournalConfigs ) { this . mapEventJournalConfigs . clear ( ) ; this . mapEventJournalConfigs . putAll ( eventJournalConfigs ) ; for ( Entry < String , EventJournalConfig > entry : eventJournalConfigs . entrySet ( ) ) { entry . getValue ( ) . setMapName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of map event journal configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,856 | public Config setCacheEventJournalConfigs ( Map < String , EventJournalConfig > eventJournalConfigs ) { this . cacheEventJournalConfigs . clear ( ) ; this . cacheEventJournalConfigs . putAll ( eventJournalConfigs ) ; for ( Entry < String , EventJournalConfig > entry : eventJournalConfigs . entrySet ( ) ) { entry . getValue ( ) . setCacheName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of cache event journal configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,857 | public Config setMapMerkleTreeConfigs ( Map < String , MerkleTreeConfig > merkleTreeConfigs ) { this . mapMerkleTreeConfigs . clear ( ) ; this . mapMerkleTreeConfigs . putAll ( merkleTreeConfigs ) ; for ( Entry < String , MerkleTreeConfig > entry : merkleTreeConfigs . entrySet ( ) ) { entry . getValue ( ) . setMapName ( entry . getKey ( ) ) ; } return this ; } | Sets the map of map merkle configurations mapped by config name . The config name may be a pattern with which the configuration will be obtained in the future . |
8,858 | public String getLicenseKey ( ) { SecurityManager sm = System . getSecurityManager ( ) ; if ( sm != null ) { sm . checkPermission ( new HazelcastRuntimePermission ( "com.hazelcast.config.Config.getLicenseKey" ) ) ; } return licenseKey ; } | Returns the license key for this hazelcast instance . The license key is used to enable enterprise features . |
8,859 | public ClientReliableTopicConfig getReliableTopicConfig ( String name ) { return ConfigUtils . getConfig ( configPatternMatcher , reliableTopicConfigMap , name , ClientReliableTopicConfig . class , new BiConsumer < ClientReliableTopicConfig , String > ( ) { public void accept ( ClientReliableTopicConfig clientReliableTopicConfig , String name ) { clientReliableTopicConfig . setName ( name ) ; } } ) ; } | Gets the ClientReliableTopicConfig for a given reliable topic name . |
8,860 | public ClientConfig setLabels ( Set < String > labels ) { Preconditions . isNotNull ( labels , "labels" ) ; this . labels . clear ( ) ; this . labels . addAll ( labels ) ; return this ; } | Set labels for the client . Deletes old labels if added earlier . |
8,861 | public static Predicate [ ] acceptVisitor ( Predicate [ ] predicates , Visitor visitor , Indexes indexes ) { Predicate [ ] target = predicates ; boolean copyCreated = false ; for ( int i = 0 ; i < predicates . length ; i ++ ) { Predicate predicate = predicates [ i ] ; if ( predicate instanceof VisitablePredicate ) { Predicate transformed = ( ( VisitablePredicate ) predicate ) . accept ( visitor , indexes ) ; if ( transformed != predicate ) { if ( ! copyCreated ) { copyCreated = true ; target = createCopy ( target ) ; } target [ i ] = transformed ; } } } return target ; } | Accept visitor by all predicates . It treats the input array as immutable . |
8,862 | public static PrettyPrint indentWithSpaces ( int number ) { if ( number < 0 ) { throw new IllegalArgumentException ( "number is negative" ) ; } char [ ] chars = new char [ number ] ; Arrays . fill ( chars , ' ' ) ; return new PrettyPrint ( chars ) ; } | Print every value on a separate line . Use the given number of spaces for indentation . |
8,863 | public static void delete ( File f ) { if ( ! f . exists ( ) ) { return ; } File [ ] subFiles = f . listFiles ( ) ; if ( subFiles != null ) { for ( File sf : subFiles ) { delete ( sf ) ; } } if ( ! f . delete ( ) ) { throw new HazelcastException ( "Failed to delete " + f ) ; } } | Ensures that the file described by the supplied parameter does not exist after the method returns . If the file didn t exist returns silently . If the file could not be deleted fails with an exception . If the file is a directory its children are recursively deleted . |
/**
 * Copies {@code source} file to {@code target}, creating the target if necessary.
 * If the target is a directory, the new file is nested under it keeping the
 * source's name; otherwise the content is copied to the target file itself.
 *
 * @param source      existing regular file to copy from
 * @param target      destination file or directory
 * @param sourceCount number of bytes to transfer; if not positive, the whole file is copied
 * @throws IllegalArgumentException if the source does not exist or is not a regular file
 * @throws HazelcastException       if the target directory cannot be created or the copy fails
 */
public static void copyFile(File source, File target, long sourceCount) {
    if (!source.exists()) {
        throw new IllegalArgumentException("Source does not exist " + source.getAbsolutePath());
    }
    if (!source.isFile()) {
        throw new IllegalArgumentException("Source is not a file " + source.getAbsolutePath());
    }
    // NOTE(review): a non-existent target is created as a DIRECTORY via mkdirs(),
    // so the copy always ends up nested under it in that case — confirm intended.
    if (!target.exists() && !target.mkdirs()) {
        throw new HazelcastException("Could not create the target directory " + target.getAbsolutePath());
    }
    final File destination = target.isDirectory() ? new File(target, source.getName()) : target;
    FileInputStream in = null;
    FileOutputStream out = null;
    try {
        in = new FileInputStream(source);
        out = new FileOutputStream(destination);
        final FileChannel inChannel = in.getChannel();
        final FileChannel outChannel = out.getChannel();
        // Non-positive sourceCount means "copy everything".
        final long transferCount = sourceCount > 0 ? sourceCount : inChannel.size();
        inChannel.transferTo(0, transferCount, outChannel);
    } catch (Exception e) {
        throw new HazelcastException("Error occurred while copying file", e);
    } finally {
        closeResource(in);
        closeResource(out);
    }
}
8,865 | public ClientNetworkConfig addAddress ( String ... addresses ) { isNotNull ( addresses , "addresses" ) ; for ( String address : addresses ) { isNotNull ( address , "address" ) ; checkHasText ( address . trim ( ) , "member must contain text" ) ; } Collections . addAll ( addressList , addresses ) ; return this ; } | Adds given addresses to candidate address list that client will use to establish initial connection |
8,866 | public ClientNetworkConfig setAddresses ( List < String > addresses ) { isNotNull ( addresses , "addresses" ) ; addressList . clear ( ) ; addressList . addAll ( addresses ) ; return this ; } | required for spring module |
8,867 | public ClientNetworkConfig addOutboundPort ( int port ) { if ( outboundPorts == null ) { outboundPorts = new HashSet < Integer > ( ) ; } outboundPorts . add ( port ) ; return this ; } | Add outbound port to the outbound port list |
8,868 | public ClientNetworkConfig addOutboundPortDefinition ( String portDef ) { if ( outboundPortDefinitions == null ) { outboundPortDefinitions = new HashSet < String > ( ) ; } outboundPortDefinitions . add ( portDef ) ; return this ; } | Add outbound port definition to the outbound port definition list |
/**
 * Triggers destruction of the target Raft group via the metadata group manager.
 * Please note that {@code targetGroupId} is the Raft group that is being
 * destroyed, not the (metadata) group executing this operation.
 *
 * @return the id of the group whose destruction was triggered
 */
public Object run(MetadataRaftGroupManager metadataGroupManager, long commitIndex) {
    metadataGroupManager.triggerDestroyRaftGroup(targetGroupId);
    return targetGroupId;
}
/**
 * Notifies services that migration started (only for the first fragment),
 * invokes all sent migration tasks, and records success/failure. Always runs
 * the after-migrate hook once processing has begun; if the migration was
 * already cancelled, only logs that fact.
 */
private void doRun() {
    if (migrationInfo.startProcessing()) {
        try {
            if (firstFragment) {
                // Before-migration callbacks must fire exactly once per migration.
                executeBeforeMigrations();
            }
            for (Operation migrationOperation : fragmentMigrationState.getMigrationOperations()) {
                runMigrationOperation(migrationOperation);
            }
            success = true;
        } catch (Throwable e) {
            failureReason = e;
            getLogger().severe("Error while executing replication operations " + migrationInfo, e);
        } finally {
            // Runs on both success and failure paths.
            afterMigrate();
        }
    } else {
        logMigrationCancelled();
    }
}
8,871 | public InternalIndex match ( String attribute , QueryContext . IndexMatchHint matchHint ) { Record record = registry . get ( attribute ) ; if ( record == null ) { return null ; } switch ( matchHint ) { case NONE : case PREFER_ORDERED : InternalIndex ordered = record . ordered ; return ordered == null ? record . unordered : ordered ; case PREFER_UNORDERED : InternalIndex unordered = record . unordered ; return unordered == null ? record . ordered : unordered ; default : throw new IllegalStateException ( "unexpected match hint: " + matchHint ) ; } } | Matches an index for the given attribute and match hint . |
8,872 | void removeRegistrations ( String topic ) { Collection < Registration > all = registrations . remove ( topic ) ; if ( all == null ) { return ; } for ( Registration reg : all ) { registrationIdMap . remove ( reg . getId ( ) ) ; pingNotifiableEventListener ( topic , reg , false ) ; } } | Removes all registrations for the specified topic and notifies the listeners and service of the listener deregistrations . |
/**
 * (Re)creates the evictor for this map container. When no eviction policy is
 * configured, installs a no-op evictor; otherwise builds an {@code EvictorImpl}
 * from the configured policy, a memory-based eviction checker and the node's
 * configured eviction batch size.
 * NOTE: this method is overridden (e.g. in other product variants).
 */
public void initEvictor() {
    MapEvictionPolicy mapEvictionPolicy = getMapEvictionPolicy();
    if (mapEvictionPolicy == null) {
        evictor = NULL_EVICTOR;
    } else {
        MemoryInfoAccessor memoryInfoAccessor = getMemoryInfoAccessor();
        EvictionChecker evictionChecker = new EvictionChecker(memoryInfoAccessor, mapServiceContext);
        NodeEngine nodeEngine = mapServiceContext.getNodeEngine();
        IPartitionService partitionService = nodeEngine.getPartitionService();
        int batchSize = nodeEngine.getProperties().getInteger(MAP_EVICTION_BATCH_SIZE);
        evictor = new EvictorImpl(mapEvictionPolicy, evictionChecker, partitionService, batchSize);
    }
}
8,874 | ConstructorFunction < Void , RecordFactory > createRecordFactoryConstructor ( final SerializationService serializationService ) { return notUsedArg -> { switch ( mapConfig . getInMemoryFormat ( ) ) { case BINARY : return new DataRecordFactory ( mapConfig , serializationService , partitioningStrategy ) ; case OBJECT : return new ObjectRecordFactory ( mapConfig , serializationService ) ; default : throw new IllegalArgumentException ( "Invalid storage format: " + mapConfig . getInMemoryFormat ( ) ) ; } } ; } | overridden in different context |
/**
 * Fails all replication operations with a {@code WrongTargetException} because
 * this node is no longer the backup replica with the sent index. Resolves the
 * member currently holding the replica (if any) so the exception can name the
 * expected target.
 */
private void nodeNotOwnsBackup(InternalPartitionImpl partition) {
    int partitionId = getPartitionId();
    int replicaIndex = getReplicaIndex();
    NodeEngine nodeEngine = getNodeEngine();
    ILogger logger = getLogger();
    if (logger.isFinestEnabled()) {
        int currentReplicaIndex = partition.getReplicaIndex(PartitionReplica.from(nodeEngine.getLocalMember()));
        logger.finest("This node is not backup replica of partitionId=" + partitionId
                + ", replicaIndex=" + replicaIndex + " anymore. current replicaIndex=" + currentReplicaIndex);
    }
    if (operations != null) {
        PartitionReplica replica = partition.getReplica(replicaIndex);
        Member targetMember = null;
        if (replica != null) {
            // Look up the member that actually owns this replica now.
            ClusterServiceImpl clusterService = (ClusterServiceImpl) nodeEngine.getClusterService();
            targetMember = clusterService.getMember(replica.address(), replica.uuid());
        }
        Throwable throwable = new WrongTargetException(nodeEngine.getLocalMember(), targetMember,
                partitionId, replicaIndex, getClass().getName());
        // Every queued operation gets the same failure cause.
        for (Operation op : operations) {
            prepareOperation(op);
            onOperationFailure(op, throwable);
        }
    }
}
/**
 * Writes a long as comma-grouped decimal text (e.g. 1,234,567). Hand-rolled
 * because NumberFormat generates a ton of garbage.
 */
@SuppressWarnings("checkstyle:magicnumber")
void writeLong(long value) {
    // Long.MIN_VALUE cannot be negated (-MIN_VALUE overflows), so special-case it.
    if (value == Long.MIN_VALUE) {
        write(STR_LONG_MIN_VALUE);
        return;
    }
    if (value < 0) {
        write('-');
        value = -value;
    }
    int digitsWithoutComma = 0;
    tmpSb.setLength(0);
    // Append digits least-significant first, inserting ',' after every 3 digits.
    do {
        digitsWithoutComma++;
        if (digitsWithoutComma == 4) {
            tmpSb.append(',');
            digitsWithoutComma = 1;
        }
        int mod = (int) (value % 10);
        tmpSb.append(DIGITS[mod]);
        value = value / 10;
    } while (value > 0);
    // The buffer holds the characters reversed; emit them back-to-front.
    for (int k = tmpSb.length() - 1; k >= 0; k--) {
        char c = tmpSb.charAt(k);
        write(c);
    }
}
/**
 * Appends "date time" (separated by a space) for the given epoch millis.
 * Reuses the instance's date/calendar fields because DateFormat generates
 * a ton of garbage; consequently this method is not thread-safe.
 */
private void appendDateTime(long epochMillis) {
    date.setTime(epochMillis);
    calendar.setTime(date);
    appendDate();
    write(' ');
    appendTime();
}
/**
 * Sets the in-memory format by enum name; this setter exists for
 * reflection-based configuration building.
 *
 * @throws NullPointerException     if {@code inMemoryFormat} is null
 * @throws IllegalArgumentException if the name is not a valid {@code InMemoryFormat}
 */
public NearCacheConfig setInMemoryFormat(String inMemoryFormat) {
    checkNotNull(inMemoryFormat, "In-Memory format cannot be null!");
    this.inMemoryFormat = InMemoryFormat.valueOf(inMemoryFormat);
    return this;
}
/**
 * Checks if we are the primary owner of the partition. Returns {@code false}
 * (with a finest-level log line) when there is no owner yet or the owner is
 * not identical to the local member.
 */
private boolean checkPartitionOwner() {
    InternalPartitionServiceImpl partitionService = getService();
    PartitionStateManager partitionStateManager = partitionService.getPartitionStateManager();
    InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(getPartitionId());
    PartitionReplica owner = partition.getOwnerReplicaOrNull();
    NodeEngine nodeEngine = getNodeEngine();
    if (owner == null || !owner.isIdentical(nodeEngine.getLocalMember())) {
        ILogger logger = getLogger();
        if (logger.isFinestEnabled()) {
            logger.finest("This node is not owner partition. Cannot process request. partitionId="
                    + getPartitionId() + ", replicaIndex=" + getReplicaIndex() + ", namespaces=" + namespaces);
        }
        return false;
    }
    return true;
}
8,880 | private void sendRetryResponse ( ) { NodeEngine nodeEngine = getNodeEngine ( ) ; int partitionId = getPartitionId ( ) ; int replicaIndex = getReplicaIndex ( ) ; PartitionReplicaSyncRetryResponse response = new PartitionReplicaSyncRetryResponse ( namespaces ) ; response . setPartitionId ( partitionId ) . setReplicaIndex ( replicaIndex ) ; Address target = getCallerAddress ( ) ; OperationService operationService = nodeEngine . getOperationService ( ) ; operationService . send ( response , target ) ; } | Send a response to the replica to retry the replica sync |
8,881 | private void sendResponse ( Collection < Operation > operations , ServiceNamespace ns ) { NodeEngine nodeEngine = getNodeEngine ( ) ; PartitionReplicaSyncResponse syncResponse = createResponse ( operations , ns ) ; Address target = getCallerAddress ( ) ; ILogger logger = getLogger ( ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( "Sending sync response to -> " + target + " for partitionId=" + getPartitionId ( ) + ", replicaIndex=" + getReplicaIndex ( ) + ", namespaces=" + ns ) ; } syncResponse . setTarget ( target ) ; OperationService operationService = nodeEngine . getOperationService ( ) ; operationService . send ( syncResponse , target ) ; } | Send a synchronization response to the caller replica containing the replication operations to be executed |
/**
 * Returns whether the URI points to a location from which Config objects can
 * be initialized. A scheme-less URI is treated as a system-property
 * placeholder: the raw scheme-specific part is resolved as a property name
 * and the result re-parsed before the scheme is checked against the
 * supported set.
 */
protected boolean isConfigLocation(URI location) {
    String scheme = location.getScheme();
    if (scheme == null) {
        // interpret as system property placeholder
        try {
            String resolvedPlaceholder = getProperty(location.getRawSchemeSpecificPart());
            if (resolvedPlaceholder == null) {
                return false;
            }
            location = new URI(resolvedPlaceholder);
            scheme = location.getScheme();
        } catch (URISyntaxException e) {
            // an unparseable resolved placeholder is simply not a config location
            return false;
        }
    }
    return (scheme != null && SUPPORTED_SCHEMES.contains(scheme.toLowerCase(StringUtil.LOCALE_INTERNAL)));
}
8,883 | private void addJavaScriptEngine ( List < String > factoryCandidates ) { factoryCandidates . add ( OSGiScriptEngineFactory . class . getName ( ) ) ; if ( ClassLoaderUtil . isClassDefined ( RHINO_SCRIPT_ENGINE_FACTORY ) ) { factoryCandidates . add ( RHINO_SCRIPT_ENGINE_FACTORY ) ; } else if ( ClassLoaderUtil . isClassDefined ( NASHORN_SCRIPT_ENGINE_FACTORY ) ) { factoryCandidates . add ( NASHORN_SCRIPT_ENGINE_FACTORY ) ; } else { logger . warning ( "No built-in JavaScript ScriptEngineFactory found." ) ; } } | Adds the JDK build - in JavaScript engine into the given list of scripting engine factories . |
/**
 * Calls the operation and returns the {@code CallStatus}: {@code WAIT} when a
 * blocking operation decides to park, otherwise {@code DONE_RESPONSE} or
 * {@code DONE_VOID} depending on whether the operation returns a response.
 */
public CallStatus call() throws Exception {
    if (this instanceof BlockingOperation) {
        BlockingOperation blockingOperation = (BlockingOperation) this;
        if (blockingOperation.shouldWait()) {
            return WAIT;
        }
    }
    run();
    return returnsResponse() ? DONE_RESPONSE : DONE_VOID;
}
/**
 * Sets the partition ID. Records whether the id exceeds the short range so
 * serialization knows to use the 32-bit encoding.
 */
public final Operation setPartitionId(int partitionId) {
    this.partitionId = partitionId;
    setFlag(partitionId > Short.MAX_VALUE, BITMASK_PARTITION_ID_32_BIT);
    return this;
}
/**
 * Marks this operation as not involved in an ongoing invocation by negating
 * its call id via CAS. Returns {@code false} when the operation was never
 * activated or was already deactivated; throws when a concurrent
 * re-activation races with this call.
 */
final boolean deactivate() {
    long c = callId;
    if (c <= 0) {
        // never activated, or already deactivated
        return false;
    }
    if (CALL_ID.compareAndSet(this, c, -c)) {
        return true;
    }
    // CAS failed: either someone else deactivated it (callId <= 0, fine)
    // or it was concurrently re-activated (callId > 0, a bug).
    if (callId > 0) {
        throw new IllegalStateException("Operation concurrently re-activated while executing deactivate(). " + this);
    }
    return false;
}
8,887 | public boolean apply ( Map . Entry mapEntry ) { if ( predicate != null ) { return predicate . apply ( mapEntry ) ; } return true ; } | Used for delegating filtering to inner predicate . |
8,888 | void setAnchor ( int page , Map . Entry anchor ) { SimpleImmutableEntry anchorEntry = new SimpleImmutableEntry ( page , anchor ) ; int anchorCount = anchorList . size ( ) ; if ( page < anchorCount ) { anchorList . set ( page , anchorEntry ) ; } else if ( page == anchorCount ) { anchorList . add ( anchorEntry ) ; } else { throw new IllegalArgumentException ( "Anchor index is not correct, expected: " + page + " found: " + anchorCount ) ; } } | After each query an anchor entry is set for that page . The anchor entry is the last entry of the query . |
8,889 | void heartbeat ( ) { if ( ! clusterService . isJoined ( ) ) { return ; } checkClockDrift ( heartbeatIntervalMillis ) ; final long clusterTime = clusterClock . getClusterTime ( ) ; if ( clusterService . isMaster ( ) ) { heartbeatWhenMaster ( clusterTime ) ; } else { heartbeatWhenSlave ( clusterTime ) ; } } | Send heartbeats and calculate clock drift . This method is expected to be called periodically because it calculates the clock drift based on the expected and actual invocation period . |
8,890 | private void resetHeartbeats ( ) { QuorumServiceImpl quorumService = nodeEngine . getQuorumService ( ) ; long now = clusterClock . getClusterTime ( ) ; for ( MemberImpl member : clusterService . getMemberImpls ( ) ) { heartbeatFailureDetector . heartbeat ( member , now ) ; quorumService . onHeartbeat ( member , now ) ; } } | Reset all heartbeats to the current cluster time . Called when system clock jump is detected . |
8,891 | public void remove ( Endpoint follower ) { FollowerState removed = followerStates . remove ( follower ) ; assert removed != null : "Unknown follower " + follower ; } | Removes a follower from leader maintained state . |
8,892 | public long [ ] matchIndices ( ) { long [ ] indices = new long [ followerStates . size ( ) + 1 ] ; int ix = 0 ; for ( FollowerState state : followerStates . values ( ) ) { indices [ ix ++ ] = state . matchIndex ( ) ; } return indices ; } | Returns an array of match indices for all followers . Additionally an empty slot is added at the end of indices array for leader itself . |
/**
 * Waits until the client is connected to the cluster or the timeout expires.
 * Does not wait if the client is already shutting down or shut down (returns
 * {@code false} immediately in that case).
 *
 * @return {@code true} if connected within the timeout, {@code false} otherwise
 * @throws InterruptedException if interrupted while waiting
 */
public boolean awaitConnected(long timeout, TimeUnit unit) throws InterruptedException {
    lock.lock();
    try {
        if (currentState.equals(CLIENT_CONNECTED)) {
            return true;
        }
        if (currentState.equals(SHUTTING_DOWN) || currentState.equals(SHUTDOWN)) {
            return false;
        }
        long duration = unit.toNanos(timeout);
        // Standard condition-wait loop: re-check state after every (possibly
        // spurious) wakeup; awaitNanos returns the remaining time.
        while (duration > 0) {
            duration = connectedCondition.awaitNanos(duration);
            if (currentState.equals(CLIENT_CONNECTED)) {
                return true;
            }
        }
        return false;
    } finally {
        lock.unlock();
    }
}
/**
 * Waits until the client is disconnected from the cluster or the timeout
 * expires. Does not wait if the client is already shutting down or shut down
 * (returns {@code true} immediately in that case).
 *
 * @return {@code true} if disconnected/shutting down within the timeout
 * @throws InterruptedException if interrupted while waiting
 */
public boolean awaitDisconnected(long timeout, TimeUnit unit) throws InterruptedException {
    lock.lock();
    try {
        if (currentState.equals(CLIENT_DISCONNECTED) || currentState.equals(SHUTTING_DOWN)
                || currentState.equals(SHUTDOWN)) {
            return true;
        }
        long duration = unit.toNanos(timeout);
        // Condition-wait loop: re-check after every wakeup; awaitNanos returns
        // the remaining time so spurious wakeups do not extend the deadline.
        while (duration > 0) {
            duration = disconnectedCondition.awaitNanos(duration);
            if (currentState.equals(CLIENT_DISCONNECTED) || currentState.equals(SHUTTING_DOWN)
                    || currentState.equals(SHUTDOWN)) {
                return true;
            }
        }
        return false;
    } finally {
        lock.unlock();
    }
}
8,895 | public void merge ( VectorClock other ) { for ( Entry < String , Long > entry : other . replicaTimestamps . entrySet ( ) ) { final String replicaId = entry . getKey ( ) ; final long mergingTimestamp = entry . getValue ( ) ; final long localTimestamp = replicaTimestamps . containsKey ( replicaId ) ? replicaTimestamps . get ( replicaId ) : Long . MIN_VALUE ; replicaTimestamps . put ( replicaId , Math . max ( localTimestamp , mergingTimestamp ) ) ; } } | Merges the provided vector clock into this one by taking the maximum of the logical timestamps for each replica . This method is not thread safe and concurrent access must be synchronized externally . |
8,896 | public boolean evict ( S evictableStore , EvictionPolicyEvaluator < A , E > evictionPolicyEvaluator , EvictionChecker evictionChecker , EvictionListener < A , E > evictionListener ) { if ( evictionChecker != null ) { if ( evictionChecker . isEvictionRequired ( ) ) { return evictInternal ( evictableStore , evictionPolicyEvaluator , evictionListener ) ; } else { return false ; } } else { return evictInternal ( evictableStore , evictionPolicyEvaluator , evictionListener ) ; } } | Does eviction if required . |
8,897 | private void registerWanPublisherMBeans ( WanReplicationService wanReplicationService ) { final Map < String , LocalWanStats > wanStats = wanReplicationService . getStats ( ) ; if ( wanStats == null ) { return ; } for ( Entry < String , LocalWanStats > replicationStatsEntry : wanStats . entrySet ( ) ) { final String wanReplicationName = replicationStatsEntry . getKey ( ) ; final LocalWanStats localWanStats = replicationStatsEntry . getValue ( ) ; final Map < String , LocalWanPublisherStats > publisherStats = localWanStats . getLocalWanPublisherStats ( ) ; for ( String targetGroupName : publisherStats . keySet ( ) ) { register ( new WanPublisherMBean ( wanReplicationService , wanReplicationName , targetGroupName , service ) ) ; } } } | Registers managed beans for all WAN publishers if any . |
8,898 | public V remove ( final int keyPartA , final int keyPartB ) { final long key = compoundKey ( keyPartA , keyPartB ) ; return map . remove ( key ) ; } | Remove a value from the map and return the value . |
8,899 | public void forEach ( final EntryConsumer < V > consumer ) { for ( Map . Entry < Long , V > entry : map . entrySet ( ) ) { Long compoundKey = entry . getKey ( ) ; final int keyPartA = ( int ) ( compoundKey >>> Integer . SIZE ) ; final int keyPartB = ( int ) ( compoundKey & LOWER_INT_MASK ) ; consumer . accept ( keyPartA , keyPartB , entry . getValue ( ) ) ; } } | Iterate over the entries of the map |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.