idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
8,900 | public void forEach ( final Consumer < V > consumer ) { for ( Map . Entry < Long , V > entry : map . entrySet ( ) ) { consumer . accept ( entry . getValue ( ) ) ; } } | Iterate over the values in the map |
8,901 | public static String removeModifierSuffix ( String fullName ) { int indexOfFirstOpeningToken = fullName . indexOf ( MODIFIER_OPENING_TOKEN ) ; if ( indexOfFirstOpeningToken == - 1 ) { return fullName ; } int indexOfSecondOpeningToken = fullName . lastIndexOf ( MODIFIER_OPENING_TOKEN ) ; if ( indexOfSecondOpeningToken != indexOfFirstOpeningToken ) { throw new IllegalArgumentException ( "Attribute name '" + fullName + "' is not valid as it contains more than one " + MODIFIER_OPENING_TOKEN ) ; } int indexOfFirstClosingToken = fullName . indexOf ( MODIFIER_CLOSING_TOKEN ) ; if ( indexOfFirstClosingToken != fullName . length ( ) - 1 ) { throw new IllegalArgumentException ( "Attribute name '" + fullName + "' is not valid as the last character is not " + MODIFIER_CLOSING_TOKEN ) ; } return fullName . substring ( 0 , indexOfFirstOpeningToken ) ; } | Remove modifier suffix from given fullName . |
8,902 | public static String getModifierSuffix ( String fullName , String baseName ) { if ( fullName . equals ( baseName ) ) { return null ; } int indexOfOpeningBracket = fullName . indexOf ( MODIFIER_OPENING_TOKEN ) ; return fullName . substring ( indexOfOpeningBracket , fullName . length ( ) ) ; } | Get modifier suffix if fullName contains any otherwise returns null . |
8,903 | public < T > T readDataAsObject ( ) throws IOException { Data data = readData ( ) ; return data == null ? null : ( T ) serializationService . toObject ( data ) ; } | a future optimization would be to skip the construction of the Data object . |
8,904 | private boolean isValidJoinMessage ( JoinMessage joinMessage ) { try { return validateJoinMessage ( joinMessage ) ; } catch ( ConfigMismatchException e ) { throw e ; } catch ( Exception e ) { return false ; } } | rethrows only on ConfigMismatchException ; in case of other exception returns false . |
8,905 | public void handleMasterResponse ( Address masterAddress , Address callerAddress ) { clusterServiceLock . lock ( ) ; try { if ( logger . isFineEnabled ( ) ) { logger . fine ( format ( "Handling master response %s from %s" , masterAddress , callerAddress ) ) ; } if ( clusterService . isJoined ( ) ) { if ( logger . isFineEnabled ( ) ) { logger . fine ( format ( "Ignoring master response %s from %s, this node is already joined" , masterAddress , callerAddress ) ) ; } return ; } if ( node . getThisAddress ( ) . equals ( masterAddress ) ) { logger . warning ( "Received my address as master address from " + callerAddress ) ; return ; } Address currentMaster = clusterService . getMasterAddress ( ) ; if ( currentMaster == null || currentMaster . equals ( masterAddress ) ) { setMasterAndJoin ( masterAddress ) ; return ; } if ( currentMaster . equals ( callerAddress ) ) { logger . warning ( format ( "Setting master to %s since %s says it is not master anymore" , masterAddress , currentMaster ) ) ; setMasterAndJoin ( masterAddress ) ; return ; } Connection conn = node . getEndpointManager ( MEMBER ) . getConnection ( currentMaster ) ; if ( conn != null && conn . isAlive ( ) ) { logger . info ( format ( "Ignoring master response %s from %s since this node has an active master %s" , masterAddress , callerAddress , currentMaster ) ) ; sendJoinRequest ( currentMaster , true ) ; } else { logger . warning ( format ( "Ambiguous master response! Received master response %s from %s. " + "This node has a master %s, but does not have an active connection to it. " + "Master field will be unset now." , masterAddress , callerAddress , currentMaster ) ) ; clusterService . setMasterAddress ( null ) ; } } finally { clusterServiceLock . unlock ( ) ; } } | Set master address if required . |
8,906 | private boolean shouldMergeTo ( Address thisAddress , Address targetAddress ) { String thisAddressStr = "[" + thisAddress . getHost ( ) + "]:" + thisAddress . getPort ( ) ; String targetAddressStr = "[" + targetAddress . getHost ( ) + "]:" + targetAddress . getPort ( ) ; if ( thisAddressStr . equals ( targetAddressStr ) ) { throw new IllegalArgumentException ( "Addresses should be different! This: " + thisAddress + ", Target: " + targetAddress ) ; } int result = thisAddressStr . compareTo ( targetAddressStr ) ; return result > 0 ; } | Determines whether this address should merge to target address and called when two sides are equal on all aspects . This is a pure function that must produce always the same output when called with the same parameters . This logic should not be changed otherwise compatibility will be broken . |
8,907 | public static TransactionContext getTransactionContext ( HazelcastInstance hazelcastInstance ) { TransactionContextHolder transactionContextHolder = ( TransactionContextHolder ) TransactionSynchronizationManager . getResource ( hazelcastInstance ) ; if ( transactionContextHolder == null ) { throw new NoTransactionException ( "No TransactionContext with actual transaction available for current thread" ) ; } return transactionContextHolder . getContext ( ) ; } | Returns the transaction context for the given Hazelcast instance bounded to the current thread . |
8,908 | protected MergePolicyConfig mergePolicyConfig ( boolean mergePolicyExist , String mergePolicy , int batchSize ) { if ( mergePolicyExist ) { MergePolicyConfig config = new MergePolicyConfig ( mergePolicy , batchSize ) ; return config ; } return new MergePolicyConfig ( ) ; } | returns a MergePolicyConfig based on given parameters if these exist or the default MergePolicyConfig |
8,909 | public void start ( ) { if ( ! raftIntegration . isReady ( ) ) { raftIntegration . schedule ( new Runnable ( ) { public void run ( ) { start ( ) ; } } , RAFT_NODE_INIT_DELAY_MILLIS , MILLISECONDS ) ; return ; } if ( logger . isFineEnabled ( ) ) { logger . fine ( "Starting raft node: " + localMember + " for " + groupId + " with " + state . memberCount ( ) + " members: " + state . members ( ) ) ; } raftIntegration . execute ( new PreVoteTask ( this , 0 ) ) ; scheduleLeaderFailureDetection ( ) ; } | Starts the periodic tasks such as voting leader failure - detection snapshot handling . |
8,910 | private void applyLogEntry ( LogEntry entry ) { if ( logger . isFineEnabled ( ) ) { logger . fine ( "Processing " + entry ) ; } Object response = null ; Object operation = entry . operation ( ) ; if ( operation instanceof RaftGroupCmd ) { if ( operation instanceof DestroyRaftGroupCmd ) { setStatus ( TERMINATED ) ; } else if ( operation instanceof UpdateRaftGroupMembersCmd ) { if ( state . lastGroupMembers ( ) . index ( ) < entry . index ( ) ) { setStatus ( UPDATING_GROUP_MEMBER_LIST ) ; UpdateRaftGroupMembersCmd op = ( UpdateRaftGroupMembersCmd ) operation ; updateGroupMembers ( entry . index ( ) , op . getMembers ( ) ) ; } assert status == UPDATING_GROUP_MEMBER_LIST : "STATUS: " + status ; assert state . lastGroupMembers ( ) . index ( ) == entry . index ( ) ; state . commitGroupMembers ( ) ; UpdateRaftGroupMembersCmd cmd = ( UpdateRaftGroupMembersCmd ) operation ; if ( cmd . getMember ( ) . equals ( localMember ) && cmd . getMode ( ) == MembershipChangeMode . REMOVE ) { setStatus ( STEPPED_DOWN ) ; invalidateFuturesUntil ( entry . index ( ) - 1 , new LeaderDemotedException ( localMember , null ) ) ; } else { setStatus ( ACTIVE ) ; } response = entry . index ( ) ; } else { response = new IllegalArgumentException ( "Invalid command: " + operation ) ; } } else { response = raftIntegration . runOperation ( operation , entry . index ( ) ) ; } if ( response == PostponedResponse . INSTANCE ) { return ; } completeFuture ( entry . index ( ) , response ) ; } | Applies the log entry by executing operation attached and set execution result to the related future if any available . |
8,911 | public void runQueryOperation ( Object operation , SimpleCompletableFuture resultFuture ) { long commitIndex = state . commitIndex ( ) ; Object result = raftIntegration . runOperation ( operation , commitIndex ) ; resultFuture . setResult ( result ) ; } | Executes query operation sets execution result to the future . |
8,912 | public boolean installSnapshot ( SnapshotEntry snapshot ) { long commitIndex = state . commitIndex ( ) ; if ( commitIndex > snapshot . index ( ) ) { logger . info ( "Ignored stale " + snapshot + ", commit index at: " + commitIndex ) ; return false ; } else if ( commitIndex == snapshot . index ( ) ) { logger . info ( "Ignored " + snapshot + " since commit index is same." ) ; return true ; } state . commitIndex ( snapshot . index ( ) ) ; int truncated = state . log ( ) . setSnapshot ( snapshot ) ; if ( truncated > 0 ) { logger . info ( truncated + " entries are truncated to install " + snapshot ) ; } raftIntegration . restoreSnapshot ( snapshot . operation ( ) , snapshot . index ( ) ) ; setStatus ( ACTIVE ) ; state . restoreGroupMembers ( snapshot . groupMembersLogIndex ( ) , snapshot . groupMembers ( ) ) ; printMemberState ( ) ; state . lastApplied ( snapshot . index ( ) ) ; invalidateFuturesUntil ( snapshot . index ( ) , new StaleAppendRequestException ( state . leader ( ) ) ) ; logger . info ( snapshot + " is installed." ) ; return true ; } | Restores the snapshot sent by the leader if it s not applied before . |
8,913 | public void updateGroupMembers ( long logIndex , Collection < Endpoint > members ) { state . updateGroupMembers ( logIndex , members ) ; printMemberState ( ) ; } | Updates Raft group members . |
8,914 | private int pollRemovedCountHolders ( AtomicReferenceArray < Queue < Integer > > removedCountHolders ) { int count = 0 ; for ( int i = 0 ; i < partitionCount ; i ++ ) { Queue < Integer > removalCounts = removedCountHolders . get ( i ) ; count += removalCounts . poll ( ) ; } return count ; } | should be called when noMissingMapWideEvent false otherwise polling can cause NPE |
8,915 | private void evictMap ( SampleableConcurrentHashMap < ? , ? > map , int triggeringEvictionSize , int afterEvictionSize ) { map . purgeStaleEntries ( ) ; int mapSize = map . size ( ) ; if ( mapSize - triggeringEvictionSize >= 0 ) { for ( SampleableConcurrentHashMap . SamplingEntry entry : map . getRandomSamples ( mapSize - afterEvictionSize ) ) { map . remove ( entry . getEntryKey ( ) ) ; } } } | It works on best effort basis . If multi - threaded calls involved it may evict all elements but it s unlikely . |
8,916 | public int drainTo ( Collection < E > collection ) { int size = queue . drainTo ( collection ) ; addCapacity ( - size ) ; return size ; } | Removes all elements from this queue and adds them to the given collection . |
8,917 | public static Data toHeapData ( Data data ) { if ( data == null || data instanceof HeapData ) { return data ; } return new HeapData ( data . toByteArray ( ) ) ; } | Converts Data to HeapData . Useful for offheap conversion . |
8,918 | public void setEntryCounting ( boolean enable ) { if ( enable ) { if ( ! entryCountingEnable ) { cacheContext . increaseEntryCount ( size ( ) ) ; } } else { if ( entryCountingEnable ) { cacheContext . decreaseEntryCount ( size ( ) ) ; } } this . entryCountingEnable = enable ; } | Called by only same partition thread . So there is no synchronization and visibility problem . |
8,919 | void setClusterTimeDiff ( long diff ) { if ( logger . isFineEnabled ( ) ) { logger . fine ( "Setting cluster time diff to " + diff + "ms." ) ; } if ( abs ( diff ) > abs ( maxClusterTimeDiff ) ) { maxClusterTimeDiff = diff ; } this . clusterTimeDiff = diff ; } | Set the cluster time diff and records the maximum observed cluster time diff |
8,920 | public static int calculateMaxPartitionSize ( int maxEntryCount , int partitionCount ) { final double balancedPartitionSize = ( double ) maxEntryCount / ( double ) partitionCount ; final double approximatedStdDev = Math . sqrt ( balancedPartitionSize ) ; int stdDevMultiplier ; if ( maxEntryCount <= STD_DEV_OF_5_THRESHOLD ) { stdDevMultiplier = STD_DEV_MULTIPLIER_5 ; } else if ( maxEntryCount > STD_DEV_OF_5_THRESHOLD && maxEntryCount <= MAX_ENTRY_COUNT_FOR_THRESHOLD_USAGE ) { stdDevMultiplier = STD_DEV_MULTIPLIER_3 ; } else { stdDevMultiplier = 0 ; } return ( int ) ( ( approximatedStdDev * stdDevMultiplier ) + balancedPartitionSize ) ; } | for calculating the estimated max size . |
8,921 | private void ensureQuorumPresent ( Operation op ) { QuorumServiceImpl quorumService = operationService . nodeEngine . getQuorumService ( ) ; quorumService . ensureQuorumPresent ( op ) ; } | Ensures that the quorum is present if the quorum is configured and the operation service is quorum aware . |
8,922 | public final void run ( ) { int partitionCount = partitionService . getPartitionCount ( ) ; latch = new CountDownLatch ( partitionCount ) ; for ( int partitionId = 0 ; partitionId < partitionCount ; partitionId ++ ) { operationExecutor . execute ( new CollectContainerRunnable ( partitionId ) ) ; } try { latch . await ( ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; } } | Collects the containers from the data structure in a thread - safe way . |
8,923 | public final void destroy ( ) { for ( Collection < C > containers : containersByPartitionId . values ( ) ) { for ( C container : containers ) { destroy ( container ) ; } } containersByPartitionId . clear ( ) ; onDestroy ( ) ; } | Destroys all collected containers . |
8,924 | private static int setAndGetAllListenerFlags ( ) { int listenerFlags = 0 ; EntryEventType [ ] values = EntryEventType . values ( ) ; for ( EntryEventType eventType : values ) { listenerFlags = listenerFlags | eventType . getType ( ) ; } return listenerFlags ; } | Sets and gets all listener flags . |
8,925 | protected void sendClientMessage ( Throwable throwable ) { if ( throwable instanceof ScheduledTaskResult . ExecutionExceptionDecorator ) { super . sendClientMessage ( throwable . getCause ( ) ) ; } else { super . sendClientMessage ( throwable ) ; } } | Exceptions may be wrapped in ExecutionExceptionDecorator the wrapped ExecutionException should be sent to the client . |
8,926 | private static void checkValidAlternative ( List < ClientConfig > alternativeClientConfigs ) { if ( alternativeClientConfigs . isEmpty ( ) ) { throw new InvalidConfigurationException ( "ClientFailoverConfig should have at least one client config." ) ; } ClientConfig mainConfig = alternativeClientConfigs . get ( 0 ) ; for ( ClientConfig alternativeClientConfig : alternativeClientConfigs . subList ( 1 , alternativeClientConfigs . size ( ) ) ) { checkValidAlternative ( mainConfig , alternativeClientConfig ) ; } } | For a client to be valid alternative all configurations should be equal except GroupConfig SecurityConfig Discovery related parts of NetworkConfig Credentials related configs |
8,927 | public static JsonArray array ( String ... strings ) { if ( strings == null ) { throw new NullPointerException ( "values is null" ) ; } JsonArray array = new JsonArray ( ) ; for ( String value : strings ) { array . add ( value ) ; } return array ; } | Creates a new JsonArray that contains the JSON representations of the given strings . |
8,928 | public static JsonValue parse ( String string ) { if ( string == null ) { throw new NullPointerException ( "string is null" ) ; } DefaultHandler handler = new DefaultHandler ( ) ; new JsonParser ( handler ) . parse ( string ) ; return handler . getValue ( ) ; } | Parses the given input string as JSON . The input must contain a valid JSON value optionally padded with whitespace . |
8,929 | public static long fastLongMix ( long k ) { final long phi = 0x9E3779B97F4A7C15L ; long h = k * phi ; h ^= h >>> 32 ; return h ^ ( h >>> 16 ) ; } | Hash function based on Knuth s multiplicative method . This version is faster than using Murmur hash but provides acceptable behavior . |
8,930 | private void flushAndRemoveQueryCaches ( PartitionMigrationEvent event ) { int partitionId = event . getPartitionId ( ) ; QueryCacheContext queryCacheContext = mapServiceContext . getQueryCacheContext ( ) ; PublisherContext publisherContext = queryCacheContext . getPublisherContext ( ) ; if ( event . getMigrationEndpoint ( ) == MigrationEndpoint . SOURCE ) { flushAccumulator ( publisherContext , partitionId ) ; removeAccumulator ( publisherContext , partitionId ) ; return ; } if ( isLocalPromotion ( event ) ) { removeAccumulator ( publisherContext , partitionId ) ; sendEndOfSequenceEvents ( publisherContext , partitionId ) ; return ; } } | Flush and remove query cache on this source partition . |
8,931 | public List < T > descendingKeys ( ) { List < T > list = new ArrayList < T > ( map . keySet ( ) ) ; sort ( list , new Comparator < T > ( ) { public int compare ( T o1 , T o2 ) { MutableLong l1 = map . get ( o1 ) ; MutableLong l2 = map . get ( o2 ) ; return compare ( l2 . value , l1 . value ) ; } private int compare ( long x , long y ) { return ( x < y ) ? - 1 : ( ( x == y ) ? 0 : 1 ) ; } } ) ; return list ; } | Returns a List of keys in descending value order . |
8,932 | public long get ( T item ) { MutableLong count = map . get ( item ) ; return count == null ? 0 : count . value ; } | Get current counter for an item item |
8,933 | public void set ( T item , long value ) { MutableLong entry = map . get ( item ) ; if ( entry == null ) { entry = MutableLong . valueOf ( value ) ; map . put ( item , entry ) ; total += value ; } else { total -= entry . value ; total += value ; entry . value = value ; } } | Set counter of item to value |
8,934 | public void add ( T item , long delta ) { MutableLong entry = map . get ( item ) ; if ( entry == null ) { entry = MutableLong . valueOf ( delta ) ; map . put ( item , entry ) ; } else { entry . value += delta ; } total += delta ; } | Add delta to the item |
8,935 | public long getAndSet ( T item , long value ) { MutableLong entry = map . get ( item ) ; if ( entry == null ) { entry = MutableLong . valueOf ( value ) ; map . put ( item , entry ) ; total += value ; return 0 ; } long oldValue = entry . value ; total = total - oldValue + value ; entry . value = value ; return oldValue ; } | Set counter for item and return previous value |
8,936 | public boolean imbalanceDetected ( LoadImbalance imbalance ) { long min = imbalance . minimumLoad ; long max = imbalance . maximumLoad ; if ( min == Long . MIN_VALUE || max == Long . MAX_VALUE ) { return false ; } long lowerBound = ( long ) ( MIN_MAX_RATIO_MIGRATION_THRESHOLD * max ) ; return min < lowerBound ; } | Checks if an imbalance was detected in the system |
8,937 | public MigratablePipeline findPipelineToMigrate ( LoadImbalance imbalance ) { Set < ? extends MigratablePipeline > candidates = imbalance . getPipelinesOwnedBy ( imbalance . srcOwner ) ; long migrationThreshold = ( long ) ( ( imbalance . maximumLoad - imbalance . minimumLoad ) * MAXIMUM_NO_OF_EVENTS_AFTER_MIGRATION_COEFFICIENT ) ; MigratablePipeline candidate = null ; long loadInSelectedPipeline = 0 ; for ( MigratablePipeline pipeline : candidates ) { long load = imbalance . getLoad ( pipeline ) ; if ( load > loadInSelectedPipeline ) { if ( load < migrationThreshold ) { loadInSelectedPipeline = load ; candidate = pipeline ; } } } return candidate ; } | Attempt to find a pipeline to migrate to a new NioThread . |
8,938 | public void addEventData ( CacheEventData cacheEventData ) { if ( events == null ) { events = new HashSet < CacheEventData > ( ) ; } this . events . add ( cacheEventData ) ; } | Helper method for adding multiple CacheEventData into this Set |
8,939 | public static < T > T newInstanceOrNull ( Class < ? extends T > clazz , Object ... params ) { Constructor < T > constructor = selectMatchingConstructor ( clazz , params ) ; if ( constructor == null ) { return null ; } try { return constructor . newInstance ( params ) ; } catch ( IllegalAccessException e ) { return null ; } catch ( InstantiationException e ) { return null ; } catch ( InvocationTargetException e ) { return null ; } } | Create a new instance of a given class . It will search for a constructor matching passed parameters . If a matching constructor is not found then it returns null . |
8,940 | public static < K , V > Map < K , V > aggregate ( Map < K , V > map1 , Map < K , V > map2 ) { return new AggregatingMap < K , V > ( map1 , map2 ) ; } | Creates new aggregating maps . |
8,941 | @ SuppressWarnings ( "unchecked" ) < T > ICompletableFuture < Map < Integer , T > > invokeAsync ( ) { assert ! invoked : "already invoked" ; invoked = true ; ensureNotCallingFromPartitionOperationThread ( ) ; invokeOnAllPartitions ( ) ; return future ; } | Executes all the operations on the partitions . |
8,942 | @ SuppressWarnings ( "unchecked" ) public < E > E read ( long sequence ) { checkReadSequence ( sequence ) ; return ( E ) items [ toIndex ( sequence ) ] ; } | Reads one item from the ringbuffer . |
8,943 | protected void markExpirable ( long expiryTime ) { if ( expiryTime > 0 && expiryTime < Long . MAX_VALUE ) { hasEntryWithExpiration = true ; } if ( isPrimary ( ) && hasEntryWithExpiration ) { cacheService . getExpirationManager ( ) . scheduleExpirationTask ( ) ; } } | This method marks current replica as expirable and also starts expiration task if necessary . |
8,944 | public static < T > T [ ] createCopy ( T [ ] src ) { return Arrays . copyOf ( src , src . length ) ; } | Create copy of the src array . |
8,945 | public static < T > T [ ] remove ( T [ ] src , T object ) { int index = indexOf ( src , object ) ; if ( index == - 1 ) { return src ; } T [ ] dst = ( T [ ] ) Array . newInstance ( src . getClass ( ) . getComponentType ( ) , src . length - 1 ) ; System . arraycopy ( src , 0 , dst , 0 , index ) ; if ( index < src . length - 1 ) { System . arraycopy ( src , index + 1 , dst , index , src . length - index - 1 ) ; } return dst ; } | Removes an item from the array . |
8,946 | public static < T > T [ ] append ( T [ ] array1 , T [ ] array2 ) { T [ ] dst = ( T [ ] ) Array . newInstance ( array1 . getClass ( ) . getComponentType ( ) , array1 . length + array2 . length ) ; System . arraycopy ( array1 , 0 , dst , 0 , array1 . length ) ; System . arraycopy ( array2 , 0 , dst , array1 . length , array2 . length ) ; return dst ; } | Appends 2 arrays . |
8,947 | public static < T > T [ ] replaceFirst ( T [ ] src , T oldValue , T [ ] newValues ) { int index = indexOf ( src , oldValue ) ; if ( index == - 1 ) { return src ; } T [ ] dst = ( T [ ] ) Array . newInstance ( src . getClass ( ) . getComponentType ( ) , src . length - 1 + newValues . length ) ; System . arraycopy ( src , 0 , dst , 0 , index ) ; System . arraycopy ( src , index + 1 , dst , index + newValues . length , src . length - index - 1 ) ; System . arraycopy ( newValues , 0 , dst , index , newValues . length ) ; return dst ; } | Replaces the first occurrence of the oldValue by the newValue . |
8,948 | public static String getInstanceName ( String instanceName , Config config ) { String name = instanceName ; if ( name == null || name . trim ( ) . length ( ) == 0 ) { name = createInstanceName ( config ) ; } return name ; } | Return real name for the hazelcast instance s instance |
8,949 | public VoteRequest toCandidate ( ) { role = RaftRole . CANDIDATE ; preCandidateState = null ; leaderState = null ; candidateState = new CandidateState ( majority ( ) ) ; candidateState . grantVote ( localEndpoint ) ; persistVote ( incrementTerm ( ) , localEndpoint ) ; return new VoteRequest ( localEndpoint , term , log . lastLogOrSnapshotTerm ( ) , log . lastLogOrSnapshotIndex ( ) ) ; } | Switches this node to candidate role . Clears pre - candidate and leader states . Initializes candidate state for current majority and grants vote for local endpoint as a candidate . |
8,950 | public void updateGroupMembers ( long logIndex , Collection < Endpoint > members ) { assert committedGroupMembers == lastGroupMembers : "Cannot update group members to: " + members + " at log index: " + logIndex + " because last group members: " + lastGroupMembers + " is different than committed group members: " + committedGroupMembers ; assert lastGroupMembers . index ( ) < logIndex : "Cannot update group members to: " + members + " at log index: " + logIndex + " because last group members: " + lastGroupMembers + " has a bigger log index." ; RaftGroupMembers newGroupMembers = new RaftGroupMembers ( logIndex , members , localEndpoint ) ; committedGroupMembers = lastGroupMembers ; lastGroupMembers = newGroupMembers ; if ( leaderState != null ) { for ( Endpoint endpoint : members ) { if ( ! committedGroupMembers . isKnownMember ( endpoint ) ) { leaderState . add ( endpoint , log . lastLogOrSnapshotIndex ( ) ) ; } } for ( Endpoint endpoint : committedGroupMembers . remoteMembers ( ) ) { if ( ! members . contains ( endpoint ) ) { leaderState . remove ( endpoint ) ; } } } } | Initializes the last applied group members with the members and logIndex . This method expects there s no uncommitted membership changes committed members are the same as the last applied members . |
8,951 | public void init ( boolean fromBackup ) { if ( ! fromBackup && store . isEnabled ( ) ) { Set < Long > keys = store . loadAllKeys ( ) ; if ( keys != null ) { long maxId = - 1 ; for ( Long key : keys ) { QueueItem item = new QueueItem ( this , key , null ) ; getItemQueue ( ) . offer ( item ) ; maxId = Math . max ( maxId , key ) ; } idGenerator = maxId + 1 ; } } } | Initializes the item queue with items from the queue store if the store is enabled and if item queue is not being initialized as a part of a backup operation . If the item queue is being initialized as a part of a backup operation then the operation is in charge of adding items to a queue and the items are not loaded from a queue store . |
8,952 | public void txnPollBackupReserve ( long itemId , String transactionId ) { QueueItem item = getBackupMap ( ) . remove ( itemId ) ; if ( item != null ) { txMap . put ( itemId , new TxQueueItem ( item ) . setPollOperation ( true ) . setTransactionId ( transactionId ) ) ; return ; } if ( txMap . remove ( itemId ) == null ) { logger . warning ( "Poll backup reserve failed, itemId: " + itemId + " is not found" ) ; } } | Makes a reservation for a poll operation . Should be executed as a part of the prepare phase for a transactional queue poll on the partition backup replica . The ID of the item being polled is determined by the partition owner . |
8,953 | public long offer ( Data data ) { QueueItem item = new QueueItem ( this , nextId ( ) , null ) ; if ( store . isEnabled ( ) ) { try { store . store ( item . getItemId ( ) , data ) ; } catch ( Exception e ) { throw new HazelcastException ( e ) ; } } if ( ! store . isEnabled ( ) || store . getMemoryLimit ( ) > getItemQueue ( ) . size ( ) ) { item . setData ( data ) ; } getItemQueue ( ) . offer ( item ) ; cancelEvictionIfExists ( ) ; return item . getItemId ( ) ; } | TX Methods Ends |
8,954 | public void offerBackup ( Data data , long itemId ) { QueueItem item = new QueueItem ( this , itemId , null ) ; if ( ! store . isEnabled ( ) || store . getMemoryLimit ( ) > getItemQueue ( ) . size ( ) ) { item . setData ( data ) ; } getBackupMap ( ) . put ( itemId , item ) ; } | Offers the item to the backup map . If the memory limit has been achieved the item data will not be kept in - memory . Executed on the backup replica |
8,955 | public void addAllBackup ( Map < Long , Data > dataMap ) { for ( Map . Entry < Long , Data > entry : dataMap . entrySet ( ) ) { QueueItem item = new QueueItem ( this , entry . getKey ( ) , null ) ; if ( ! store . isEnabled ( ) || store . getMemoryLimit ( ) > getItemQueue ( ) . size ( ) ) { item . setData ( entry . getValue ( ) ) ; } getBackupMap ( ) . put ( item . getItemId ( ) , item ) ; } } | Offers the items to the backup map in bulk . If the memory limit has been achieved the item data will not be kept in - memory . Executed on the backup replica |
8,956 | public void pollBackup ( long itemId ) { QueueItem item = getBackupMap ( ) . remove ( itemId ) ; if ( item != null ) { age ( item , Clock . currentTimeMillis ( ) ) ; } } | Polls an item on the backup replica . The item ID is predetermined when executing the poll operation on the partition owner . Executed on the backup replica |
8,957 | public long remove ( Data data ) { Iterator < QueueItem > iterator = getItemQueue ( ) . iterator ( ) ; while ( iterator . hasNext ( ) ) { QueueItem item = iterator . next ( ) ; if ( data . equals ( item . getData ( ) ) ) { if ( store . isEnabled ( ) ) { try { store . delete ( item . getItemId ( ) ) ; } catch ( Exception e ) { throw new HazelcastException ( e ) ; } } iterator . remove ( ) ; age ( item , Clock . currentTimeMillis ( ) ) ; scheduleEvictionIfEmpty ( ) ; return item . getItemId ( ) ; } } return - 1 ; } | iterates all items checks equality with data This method does not trigger store load . |
8,958 | public boolean contains ( Collection < Data > dataSet ) { for ( Data data : dataSet ) { boolean contains = false ; for ( QueueItem item : getItemQueue ( ) ) { if ( item . getData ( ) != null && item . getData ( ) . equals ( data ) ) { contains = true ; break ; } } if ( ! contains ) { return false ; } } return true ; } | Checks if the queue contains all items in the dataSet . This method does not trigger store load . |
8,959 | public List < Data > getAsDataList ( ) { List < Data > dataList = new ArrayList < Data > ( getItemQueue ( ) . size ( ) ) ; for ( QueueItem item : getItemQueue ( ) ) { if ( store . isEnabled ( ) && item . getData ( ) == null ) { try { load ( item ) ; } catch ( Exception e ) { throw new HazelcastException ( e ) ; } } dataList . add ( item . getData ( ) ) ; } return dataList ; } | Returns data in the queue . This method triggers store load . |
8,960 | public Deque < QueueItem > getItemQueue ( ) { if ( itemQueue == null ) { itemQueue = new LinkedList < QueueItem > ( ) ; if ( backupMap != null && ! backupMap . isEmpty ( ) ) { List < QueueItem > values = new ArrayList < QueueItem > ( backupMap . values ( ) ) ; Collections . sort ( values ) ; itemQueue . addAll ( values ) ; QueueItem lastItem = itemQueue . peekLast ( ) ; if ( lastItem != null ) { setId ( lastItem . itemId + ID_PROMOTION_OFFSET ) ; } backupMap . clear ( ) ; backupMap = null ; } if ( ! txMap . isEmpty ( ) ) { long maxItemId = Long . MIN_VALUE ; for ( TxQueueItem item : txMap . values ( ) ) { maxItemId = Math . max ( maxItemId , item . itemId ) ; } setId ( maxItemId + ID_PROMOTION_OFFSET ) ; } } return itemQueue ; } | Returns the item queue on the partition owner . This method will also move the items from the backup map if this member has been promoted from a backup replica to the partition owner and clear the backup map . |
8,961 | public Map < Long , QueueItem > getBackupMap ( ) { if ( backupMap == null ) { if ( itemQueue != null ) { backupMap = createHashMap ( itemQueue . size ( ) ) ; for ( QueueItem item : itemQueue ) { backupMap . put ( item . getItemId ( ) , item ) ; } itemQueue . clear ( ) ; itemQueue = null ; } else { backupMap = new HashMap < Long , QueueItem > ( ) ; } } return backupMap ; } | Return the map containing queue items when this instance is a backup replica . The map contains both items that are parts of different transactions and items which have already been committed to the queue . |
8,962 | protected long [ ] createItemIdArray ( ) { int size = operationList . size ( ) ; long [ ] itemIds = new long [ size ] ; for ( int i = 0 ; i < size ; i ++ ) { CollectionTxnOperation operation = ( CollectionTxnOperation ) operationList . get ( i ) ; itemIds [ i ] = CollectionTxnUtil . getItemId ( operation ) ; } return itemIds ; } | Creates an array of IDs for all operations in this transaction log . The ID is negative if the operation is a remove operation . |
8,963 | private RingbufferConfig getRingbufferConfig ( RingbufferService service , ObjectNamespace ns ) { final String serviceName = ns . getServiceName ( ) ; if ( RingbufferService . SERVICE_NAME . equals ( serviceName ) ) { return service . getRingbufferConfig ( ns . getObjectName ( ) ) ; } else if ( MapService . SERVICE_NAME . equals ( serviceName ) ) { final MapService mapService = getNodeEngine ( ) . getService ( MapService . SERVICE_NAME ) ; final MapEventJournal journal = mapService . getMapServiceContext ( ) . getEventJournal ( ) ; final EventJournalConfig journalConfig = journal . getEventJournalConfig ( ns ) ; return journal . toRingbufferConfig ( journalConfig , ns ) ; } else if ( CacheService . SERVICE_NAME . equals ( serviceName ) ) { final CacheService cacheService = getNodeEngine ( ) . getService ( CacheService . SERVICE_NAME ) ; final CacheEventJournal journal = cacheService . getEventJournal ( ) ; final EventJournalConfig journalConfig = journal . getEventJournalConfig ( ns ) ; return journal . toRingbufferConfig ( journalConfig , ns ) ; } else { throw new IllegalArgumentException ( "Unsupported ringbuffer service name " + serviceName ) ; } } | Returns the ringbuffer config for the provided namespace . The namespace provides information whether the requested ringbuffer is a ringbuffer that the user is directly interacting with through a ringbuffer proxy or if this is a backing ringbuffer for an event journal . If a ringbuffer configuration for an event journal is requested this method will expect the configuration for the relevant map or cache to be available . |
8,964 | public void handle ( Data key , String sourceUuid , UUID partitionUuid , long sequence ) { if ( ! localUuid . equals ( sourceUuid ) ) { if ( key == null ) { nearCache . clear ( ) ; } else { nearCache . invalidate ( serializeKeys ? key : serializationService . toObject ( key ) ) ; } } int partitionId = getPartitionIdOrDefault ( key ) ; checkOrRepairUuid ( partitionId , partitionUuid ) ; checkOrRepairSequence ( partitionId , sequence , false ) ; } | Handles a single invalidation |
8,965 | public void handle ( Collection < Data > keys , Collection < String > sourceUuids , Collection < UUID > partitionUuids , Collection < Long > sequences ) { Iterator < Data > keyIterator = keys . iterator ( ) ; Iterator < Long > sequenceIterator = sequences . iterator ( ) ; Iterator < UUID > partitionUuidIterator = partitionUuids . iterator ( ) ; Iterator < String > sourceUuidsIterator = sourceUuids . iterator ( ) ; while ( keyIterator . hasNext ( ) && sourceUuidsIterator . hasNext ( ) && partitionUuidIterator . hasNext ( ) && sequenceIterator . hasNext ( ) ) { handle ( keyIterator . next ( ) , sourceUuidsIterator . next ( ) , partitionUuidIterator . next ( ) , sequenceIterator . next ( ) ) ; } } | Handles batch invalidations |
8,966 | @ SuppressWarnings ( "WeakerAccess" ) public void register ( int errorCode , Class clazz , ExceptionFactory exceptionFactory ) { if ( intToFactory . containsKey ( errorCode ) ) { throw new HazelcastException ( "Code " + errorCode + " already used" ) ; } if ( ! clazz . equals ( exceptionFactory . createException ( "" , null ) . getClass ( ) ) ) { throw new HazelcastException ( "Exception factory did not produce an instance of expected class" ) ; } intToFactory . put ( errorCode , exceptionFactory ) ; } | method is used by Jet |
8,967 | protected boolean notHaveAnyExpirableRecord ( PartitionContainer partitionContainer ) { boolean notExist = true ; final ConcurrentMap < String , RecordStore > maps = partitionContainer . getMaps ( ) ; for ( RecordStore store : maps . values ( ) ) { if ( store . isExpirable ( ) ) { notExist = false ; break ; } } return notExist ; } | Checks whether the given partition has any expirable record ; if no expirable record exists in that partition , there is no need to fire an expiration operation .
8,968 | public RestApiConfig enableGroups ( RestEndpointGroup ... endpointGroups ) { if ( endpointGroups != null ) { enabledGroups . addAll ( Arrays . asList ( endpointGroups ) ) ; } return this ; } | Enables provided REST endpoint groups . It does not replace already enabled groups .
8,969 | public RestApiConfig disableGroups ( RestEndpointGroup ... endpointGroups ) { if ( endpointGroups != null ) { enabledGroups . removeAll ( Arrays . asList ( endpointGroups ) ) ; } return this ; } | Disables provided REST endpoint groups . |
8,970 | private int getLocalMemberListIndex ( ) { final Collection < Member > dataMembers = nodeEngine . getClusterService ( ) . getMembers ( DATA_MEMBER_SELECTOR ) ; int index = - 1 ; for ( Member dataMember : dataMembers ) { index ++ ; if ( dataMember . equals ( nodeEngine . getLocalMember ( ) ) ) { return index ; } } return index ; } | Returns the index of the local member in the membership list containing only data members . |
8,971 | public JsonArray add ( JsonValue value ) { if ( value == null ) { throw new NullPointerException ( "value is null" ) ; } values . add ( value ) ; return this ; } | Appends the specified JSON value to the end of this array . |
8,972 | public JsonArray set ( int index , JsonValue value ) { if ( value == null ) { throw new NullPointerException ( "value is null" ) ; } values . set ( index , value ) ; return this ; } | Replaces the element at the specified position in this array with the specified JSON value . |
8,973 | public Iterator < JsonValue > iterator ( ) { final Iterator < JsonValue > iterator = values . iterator ( ) ; return new Iterator < JsonValue > ( ) { public boolean hasNext ( ) { return iterator . hasNext ( ) ; } public JsonValue next ( ) { return iterator . next ( ) ; } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; } | Returns an iterator over the values of this array in document order . The returned iterator cannot be used to modify this array . |
8,974 | public BufferBuilder append ( ClientProtocolBuffer srcBuffer , int srcOffset , int length ) { ensureCapacity ( length ) ; srcBuffer . getBytes ( srcOffset , protocolBuffer . byteArray ( ) , position , length ) ; position += length ; return this ; } | Append a source buffer to the end of the internal buffer resizing the internal buffer as required . |
8,975 | public void appendEntries ( LogEntry ... newEntries ) { int lastTerm = lastLogOrSnapshotTerm ( ) ; long lastIndex = lastLogOrSnapshotIndex ( ) ; if ( ! checkAvailableCapacity ( newEntries . length ) ) { throw new IllegalStateException ( "Not enough capacity! Capacity: " + logs . getCapacity ( ) + ", Size: " + logs . size ( ) + ", New entries: " + newEntries . length ) ; } for ( LogEntry entry : newEntries ) { if ( entry . term ( ) < lastTerm ) { throw new IllegalArgumentException ( "Cannot append " + entry + " since its term is lower than last log term: " + lastTerm ) ; } if ( entry . index ( ) != lastIndex + 1 ) { throw new IllegalArgumentException ( "Cannot append " + entry + " since its index is bigger than (lastLogIndex + 1): " + ( lastIndex + 1 ) ) ; } logs . add ( entry ) ; lastIndex ++ ; lastTerm = Math . max ( lastTerm , entry . term ( ) ) ; } } | Appends new entries to the Raft log . |
8,976 | public JsonValue parse ( Reader reader , int buffersize ) throws IOException { if ( reader == null ) { throw new NullPointerException ( "reader is null" ) ; } if ( buffersize <= 0 ) { throw new IllegalArgumentException ( "buffersize is zero or negative" ) ; } this . reader = reader ; buffer = new char [ buffersize ] ; bufferOffset = 0 ; index = 0 ; fill = 0 ; line = 1 ; lineOffset = 0 ; current = 0 ; captureStart = - 1 ; read ( ) ; return readValue ( ) ; } | Reads a single value from the given reader and parses it as JsonValue . The input must be pointing to the beginning of a JsonLiteral not JsonArray or JsonObject . |
8,977 | public static long getThreadId ( ) { final Long threadId = THREAD_LOCAL . get ( ) ; if ( threadId != null ) { return threadId ; } return Thread . currentThread ( ) . getId ( ) ; } | Get the thread ID . |
8,978 | public static String createThreadName ( String hzName , String name ) { checkNotNull ( name , "name can't be null" ) ; return "hz." + hzName + "." + name ; } | Creates the thread name with prefix and notation .
8,979 | public V get ( SerializationService serializationService ) { if ( ! valueExists ) { assert serializationService != null ; value = serializationService . toObject ( serializedValue ) ; valueExists = true ; } return value ; } | get - or - deserialize - and - get |
8,980 | public DeferredValue < V > shallowCopy ( ) { if ( this == NULL_VALUE ) { return NULL_VALUE ; } DeferredValue < V > copy = new DeferredValue < V > ( ) ; if ( serializedValueExists ) { copy . serializedValueExists = true ; copy . serializedValue = serializedValue ; } if ( valueExists ) { copy . valueExists = true ; copy . value = value ; } return copy ; } | returns a new DeferredValue representing the same value as this |
8,981 | void updateMembers ( MembersView membersView ) { MemberMap currentMemberMap = memberMapRef . get ( ) ; Collection < MemberImpl > addedMembers = new LinkedList < > ( ) ; Collection < MemberImpl > removedMembers = new LinkedList < > ( ) ; ClusterHeartbeatManager clusterHeartbeatManager = clusterService . getClusterHeartbeatManager ( ) ; MemberImpl [ ] members = new MemberImpl [ membersView . size ( ) ] ; int memberIndex = 0 ; for ( MemberInfo memberInfo : membersView . getMembers ( ) ) { Address address = memberInfo . getAddress ( ) ; MemberImpl member = currentMemberMap . getMember ( address ) ; if ( member != null && member . getUuid ( ) . equals ( memberInfo . getUuid ( ) ) ) { member = createNewMemberImplIfChanged ( memberInfo , member ) ; members [ memberIndex ++ ] = member ; continue ; } if ( member != null ) { assert ! ( member . localMember ( ) && member . equals ( clusterService . getLocalMember ( ) ) ) : "Local " + member + " cannot be replaced with " + memberInfo ; removedMembers . add ( member ) ; } member = createMember ( memberInfo , memberInfo . getAttributes ( ) ) ; addedMembers . add ( member ) ; long now = clusterService . getClusterTime ( ) ; clusterHeartbeatManager . onHeartbeat ( member , now ) ; repairPartitionTableIfReturningMember ( member ) ; members [ memberIndex ++ ] = member ; } MemberMap newMemberMap = membersView . toMemberMap ( ) ; for ( MemberImpl member : currentMemberMap . getMembers ( ) ) { if ( ! newMemberMap . contains ( member . getAddress ( ) ) ) { removedMembers . add ( member ) ; } } setMembers ( MemberMap . createNew ( membersView . getVersion ( ) , members ) ) ; for ( MemberImpl member : removedMembers ) { closeConnection ( member . getAddress ( ) , "Member left event received from master" ) ; handleMemberRemove ( memberMapRef . get ( ) , member ) ; } clusterService . getClusterJoinManager ( ) . insertIntoRecentlyJoinedMemberSet ( addedMembers ) ; sendMembershipEvents ( currentMemberMap . 
getMembers ( ) , addedMembers ) ; removeFromMissingMembers ( members ) ; clusterHeartbeatManager . heartbeat ( ) ; clusterService . printMemberList ( ) ; node . getNodeExtension ( ) . scheduleClusterVersionAutoUpgrade ( ) ; } | handles both new and left members |
8,982 | protected String decrypt ( String encryptedStr ) throws Exception { String [ ] split = encryptedStr . split ( ":" ) ; checkTrue ( split . length == 3 , "Wrong format of the encrypted variable (" + encryptedStr + ")" ) ; byte [ ] salt = Base64 . getDecoder ( ) . decode ( split [ 0 ] . getBytes ( UTF8_CHARSET ) ) ; checkTrue ( salt . length == saltLengthBytes , "Salt length doesn't match." ) ; int iterations = Integer . parseInt ( split [ 1 ] ) ; byte [ ] encryptedVal = Base64 . getDecoder ( ) . decode ( split [ 2 ] . getBytes ( UTF8_CHARSET ) ) ; return new String ( transform ( Cipher . DECRYPT_MODE , encryptedVal , salt , iterations ) , UTF8_CHARSET ) ; } | Decrypts the given encrypted variable .
8,983 | public void shutdownInvocations ( ) { logger . finest ( "Shutting down invocations" ) ; invocationRegistry . shutdown ( ) ; invocationMonitor . shutdown ( ) ; inboundResponseHandlerSupplier . shutdown ( ) ; try { invocationMonitor . awaitTermination ( TERMINATION_TIMEOUT_MILLIS ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; } } | Shuts down invocation infrastructure . New invocation requests will be rejected after shutdown and all pending invocations will be notified with a failure response . |
8,984 | public static void markPartitionAsIndexed ( int partitionId , InternalIndex [ ] indexes ) { for ( InternalIndex index : indexes ) { index . markPartitionAsIndexed ( partitionId ) ; } } | Marks the given partition as indexed by the given indexes . |
8,985 | public static void markPartitionAsUnindexed ( int partitionId , InternalIndex [ ] indexes ) { for ( InternalIndex index : indexes ) { index . markPartitionAsUnindexed ( partitionId ) ; } } | Marks the given partition as unindexed by the given indexes . |
8,986 | public void destroyIndexes ( ) { InternalIndex [ ] indexesSnapshot = getIndexes ( ) ; indexes = EMPTY_INDEXES ; compositeIndexes = EMPTY_INDEXES ; indexesByName . clear ( ) ; attributeIndexRegistry . clear ( ) ; converterCache . clear ( ) ; for ( InternalIndex index : indexesSnapshot ) { index . destroy ( ) ; } } | Destroys and then removes all the indexes from this indexes instance . |
8,987 | public void putEntry ( QueryableEntry queryableEntry , Object oldValue , Index . OperationSource operationSource ) { InternalIndex [ ] indexes = getIndexes ( ) ; for ( InternalIndex index : indexes ) { index . putEntry ( queryableEntry , oldValue , operationSource ) ; } } | Inserts a new queryable entry into this indexes instance or updates the existing one . |
8,988 | public void removeEntry ( Data key , Object value , Index . OperationSource operationSource ) { InternalIndex [ ] indexes = getIndexes ( ) ; for ( InternalIndex index : indexes ) { index . removeEntry ( key , value , operationSource ) ; } } | Removes the entry from this indexes instance identified by the given key and value . |
8,989 | @ SuppressWarnings ( "unchecked" ) public Set < QueryableEntry > query ( Predicate predicate ) { stats . incrementQueryCount ( ) ; if ( ! haveAtLeastOneIndex ( ) || ! ( predicate instanceof IndexAwarePredicate ) ) { return null ; } IndexAwarePredicate indexAwarePredicate = ( IndexAwarePredicate ) predicate ; QueryContext queryContext = queryContextProvider . obtainContextFor ( this ) ; if ( ! indexAwarePredicate . isIndexed ( queryContext ) ) { return null ; } Set < QueryableEntry > result = indexAwarePredicate . filter ( queryContext ) ; if ( result != null ) { stats . incrementIndexedQueryCount ( ) ; queryContext . applyPerQueryStats ( ) ; } return result ; } | Performs a query on this indexes instance using the given predicate . |
8,990 | private void verifyNodeStarted ( ) { NodeEngineImpl nodeEngine = ( NodeEngineImpl ) getNodeEngine ( ) ; nodeStartCompleted = nodeEngine . getNode ( ) . getNodeExtension ( ) . isStartCompleted ( ) ; if ( ! nodeStartCompleted ) { throw new IllegalStateException ( "Migration operation is received before startup is completed. " + "Sender: " + getCallerAddress ( ) ) ; } } | Verifies that the node startup is completed . |
8,991 | private void verifyPartitionStateVersion ( ) { InternalPartitionService partitionService = getService ( ) ; int localPartitionStateVersion = partitionService . getPartitionStateVersion ( ) ; if ( partitionStateVersion != localPartitionStateVersion ) { if ( getNodeEngine ( ) . getThisAddress ( ) . equals ( migrationInfo . getMaster ( ) ) ) { return ; } throw new PartitionStateVersionMismatchException ( partitionStateVersion , localPartitionStateVersion ) ; } } | Verifies that the sent partition state version matches the local version or this node is master . |
8,992 | final void verifyMaster ( ) { NodeEngine nodeEngine = getNodeEngine ( ) ; Address masterAddress = nodeEngine . getMasterAddress ( ) ; if ( ! migrationInfo . getMaster ( ) . equals ( masterAddress ) ) { throw new IllegalStateException ( "Migration initiator is not master node! => " + toString ( ) ) ; } if ( getMigrationParticipantType ( ) == MigrationParticipant . SOURCE && ! masterAddress . equals ( getCallerAddress ( ) ) ) { throw new IllegalStateException ( "Caller is not master node! => " + toString ( ) ) ; } } | Verifies that the local master is equal to the migration master . |
8,993 | private void verifyMigrationParticipant ( ) { Member localMember = getNodeEngine ( ) . getLocalMember ( ) ; if ( getMigrationParticipantType ( ) == MigrationParticipant . SOURCE ) { if ( migrationInfo . getSourceCurrentReplicaIndex ( ) == 0 && ! migrationInfo . getSource ( ) . isIdentical ( localMember ) ) { throw new IllegalStateException ( localMember + " is the migration source but has a different identity! Migration: " + migrationInfo ) ; } verifyPartitionOwner ( ) ; verifyExistingDestination ( ) ; } else if ( getMigrationParticipantType ( ) == MigrationParticipant . DESTINATION ) { if ( ! migrationInfo . getDestination ( ) . isIdentical ( localMember ) ) { throw new IllegalStateException ( localMember + " is the migration destination but has a different identity! Migration: " + migrationInfo ) ; } } } | Checks if the local member matches the migration source or destination if this node is the migration source or destination . |
8,994 | private void verifyPartitionOwner ( ) { InternalPartition partition = getPartition ( ) ; PartitionReplica owner = partition . getOwnerReplicaOrNull ( ) ; if ( owner == null ) { throw new RetryableHazelcastException ( "Cannot migrate at the moment! Owner of the partition is null => " + migrationInfo ) ; } if ( ! owner . isIdentical ( getNodeEngine ( ) . getLocalMember ( ) ) ) { throw new RetryableHazelcastException ( "Owner of partition is not this node! => " + toString ( ) ) ; } } | Verifies that this node is the owner of the partition . |
8,995 | final void verifyExistingDestination ( ) { PartitionReplica destination = migrationInfo . getDestination ( ) ; Member target = getNodeEngine ( ) . getClusterService ( ) . getMember ( destination . address ( ) , destination . uuid ( ) ) ; if ( target == null ) { throw new TargetNotMemberException ( "Destination of migration could not be found! => " + toString ( ) ) ; } } | Verifies that the destination is a cluster member . |
8,996 | private void verifyClusterState ( ) { NodeEngineImpl nodeEngine = ( NodeEngineImpl ) getNodeEngine ( ) ; ClusterState clusterState = nodeEngine . getClusterService ( ) . getClusterState ( ) ; if ( ! clusterState . isMigrationAllowed ( ) ) { throw new IllegalStateException ( "Cluster state does not allow migrations! " + clusterState ) ; } } | Verifies that the current cluster state allows migrations .
8,997 | void setActiveMigration ( ) { InternalPartitionServiceImpl partitionService = getService ( ) ; MigrationManager migrationManager = partitionService . getMigrationManager ( ) ; MigrationInfo currentActiveMigration = migrationManager . setActiveMigration ( migrationInfo ) ; if ( currentActiveMigration != null ) { if ( migrationInfo . equals ( currentActiveMigration ) ) { migrationInfo = currentActiveMigration ; return ; } throw new RetryableHazelcastException ( "Cannot set active migration to " + migrationInfo + ". Current active migration is " + currentActiveMigration ) ; } PartitionStateManager partitionStateManager = partitionService . getPartitionStateManager ( ) ; if ( ! partitionStateManager . trySetMigratingFlag ( migrationInfo . getPartitionId ( ) ) ) { throw new RetryableHazelcastException ( "Cannot set migrating flag, " + "probably previous migration's finalization is not completed yet." ) ; } } | Sets the active migration and the partition migration flag . |
8,998 | @ SuppressWarnings ( "unchecked" ) private < T > T extract ( I input ) { if ( attributePath == null ) { if ( input instanceof Map . Entry ) { return ( T ) ( ( Map . Entry ) input ) . getValue ( ) ; } } else if ( input instanceof Extractable ) { return ( T ) ( ( Extractable ) input ) . getAttributeValue ( attributePath ) ; } throw new IllegalArgumentException ( "Can't extract " + attributePath + " from the given input" ) ; } | Extract the value of the given attributePath from the given entry . |
8,999 | final void initInvocationTarget ( ) throws Exception { Member previousTargetMember = targetMember ; T target = getInvocationTarget ( ) ; if ( target == null ) { remote = false ; throw newTargetNullException ( ) ; } targetMember = toTargetMember ( target ) ; if ( targetMember != null ) { targetAddress = targetMember . getAddress ( ) ; } else { targetAddress = toTargetAddress ( target ) ; } memberListVersion = context . clusterService . getMemberListVersion ( ) ; if ( targetMember == null ) { if ( previousTargetMember != null ) { throw new MemberLeftException ( previousTargetMember ) ; } if ( ! ( isJoinOperation ( op ) || isWanReplicationOperation ( op ) ) ) { throw new TargetNotMemberException ( target , op . getPartitionId ( ) , op . getClass ( ) . getName ( ) , op . getServiceName ( ) ) ; } } if ( op instanceof TargetAware ) { ( ( TargetAware ) op ) . setTarget ( targetAddress ) ; } remote = ! context . thisAddress . equals ( targetAddress ) ; } | Initializes the invocation target . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.