idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
9,000 | void notifyBackupComplete ( ) { int newBackupAcksCompleted = BACKUP_ACKS_RECEIVED . incrementAndGet ( this ) ; Object pendingResponse = this . pendingResponse ; if ( pendingResponse == VOID ) { return ; } int backupAcksExpected = this . backupsAcksExpected ; if ( backupAcksExpected < newBackupAcksCompleted ) { return ; } if ( backupAcksExpected != newBackupAcksCompleted ) { return ; } complete ( pendingResponse ) ; } | this method can be called concurrently |
9,001 | boolean detectAndHandleTimeout ( long heartbeatTimeoutMillis ) { if ( skipTimeoutDetection ( ) ) { return false ; } HeartbeatTimeout heartbeatTimeout = detectTimeout ( heartbeatTimeoutMillis ) ; if ( heartbeatTimeout == TIMEOUT ) { complete ( HEARTBEAT_TIMEOUT ) ; return true ; } else { return false ; } } | Checks if this Invocation has received a heartbeat in time . |
9,002 | boolean detectAndHandleBackupTimeout ( long timeoutMillis ) { boolean backupsCompleted = backupsAcksExpected == backupsAcksReceived ; long responseReceivedMillis = pendingResponseReceivedMillis ; long expirationTime = responseReceivedMillis + timeoutMillis ; boolean timeout = expirationTime > 0 && expirationTime < Clock . currentTimeMillis ( ) ; boolean responseReceived = pendingResponse != VOID ; if ( backupsCompleted || ! responseReceived || ! timeout ) { return false ; } if ( shouldFailOnIndeterminateOperationState ( ) ) { complete ( new IndeterminateOperationStateException ( this + " failed because backup acks missed." ) ) ; return true ; } boolean targetDead = context . clusterService . getMember ( targetAddress ) == null ; if ( targetDead ) { resetAndReInvoke ( ) ; return false ; } complete ( pendingResponse ) ; return true ; } | gets called from the monitor - thread |
9,003 | public Class < ? > handleClassNotFoundException ( String name ) throws ClassNotFoundException { if ( ! enabled ) { throw new ClassNotFoundException ( "User Code Deployment is not enabled. Cannot find class " + name ) ; } return locator . handleClassNotFoundException ( name ) ; } | called by User Code Deployment classloader on this member |
9,004 | public ScheduledEntry < K , V > get ( K key ) { synchronized ( mutex ) { if ( scheduleType . equals ( ScheduleType . FOR_EACH ) ) { return getByCompositeKey ( key ) ; } Integer second = secondsOfKeys . get ( key ) ; if ( second != null ) { Map < Object , ScheduledEntry < K , V > > entries = scheduledEntries . get ( second ) ; if ( entries != null ) { return entries . get ( key ) ; } } return null ; } } | in the case of composite keys this method will return only one scheduled entry with no ordering guarantee |
9,005 | private void scanQuorums ( ) { for ( QuorumImpl quorum : quorums . values ( ) ) { if ( quorum . isHeartbeatAware ( ) ) { this . heartbeatAware = true ; } if ( quorum . isPingAware ( ) ) { this . pingAware = true ; } } } | scan quorums for heartbeat - aware and ping - aware implementations and set corresponding flags |
9,006 | protected final void initSrcBuffer ( int sizeBytes ) { ChannelOptions config = channel . options ( ) ; src = ( S ) newByteBuffer ( sizeBytes , config . getOption ( DIRECT_BUF ) ) ; } | Initializes the src buffer . Should only be called by InboundHandler implementations that have a ByteBuffer as source . |
9,007 | public String getQuorumName ( String cacheName ) { CacheConfig cacheConfig = getCacheConfig ( cacheName ) ; if ( cacheConfig == null ) { return null ; } return cacheConfig . getQuorumName ( ) ; } | Gets the name of the quorum associated with specified cache |
9,008 | AcquireResult acquire ( AcquireInvocationKey key , boolean wait ) { SemaphoreEndpoint endpoint = key . endpoint ( ) ; SessionSemaphoreState state = sessionStates . get ( key . sessionId ( ) ) ; if ( state != null && state . containsInvocation ( endpoint . threadId ( ) , key . invocationUid ( ) ) ) { return new AcquireResult ( key . permits ( ) , Collections . < AcquireInvocationKey > emptyList ( ) ) ; } Collection < AcquireInvocationKey > cancelled = cancelWaitKeys ( endpoint , key . invocationUid ( ) ) ; if ( ! isAvailable ( key . permits ( ) ) ) { if ( wait ) { addWaitKey ( endpoint , key ) ; } return new AcquireResult ( 0 , cancelled ) ; } assignPermitsToInvocation ( endpoint , key . invocationUid ( ) , key . permits ( ) ) ; return new AcquireResult ( key . permits ( ) , cancelled ) ; } | Assigns permits to the endpoint if sufficient number of permits are available . If there are no sufficient number of permits and the second argument is true a wait key is created and added to the wait queue . Permits are not assigned if the acquire request is a retry of a successful acquire request of a session - aware proxy . Permits are assigned again if the acquire request is a retry of a successful acquire request of a sessionless proxy . If the acquire request is a retry of an endpoint that resides in the wait queue with the same invocation uid a duplicate wait key is added to the wait queue because cancelling the previous wait key can cause the caller to fail . If the acquire request is a new request of an endpoint that resides in the wait queue with a different invocation uid the existing wait key is cancelled because it means the caller has stopped waiting for response of the previous invocation . |
9,009 | ReleaseResult change ( SemaphoreEndpoint endpoint , UUID invocationUid , int permits ) { if ( permits == 0 ) { return ReleaseResult . failed ( Collections . < AcquireInvocationKey > emptyList ( ) ) ; } Collection < AcquireInvocationKey > cancelled = cancelWaitKeys ( endpoint , invocationUid ) ; long sessionId = endpoint . sessionId ( ) ; if ( sessionId != NO_SESSION_ID ) { SessionSemaphoreState state = sessionStates . get ( sessionId ) ; if ( state == null ) { state = new SessionSemaphoreState ( ) ; sessionStates . put ( sessionId , state ) ; } long threadId = endpoint . threadId ( ) ; if ( state . containsInvocation ( threadId , invocationUid ) ) { Collection < AcquireInvocationKey > c = Collections . emptyList ( ) ; return ReleaseResult . successful ( c , c ) ; } state . invocationRefUids . put ( threadId , Tuple2 . of ( invocationUid , permits ) ) ; } available += permits ; initialized = true ; Collection < AcquireInvocationKey > acquired = permits > 0 ? assignPermitsToWaitKeys ( ) : Collections . < AcquireInvocationKey > emptyList ( ) ; return ReleaseResult . successful ( acquired , cancelled ) ; } | Changes the number of permits by adding the given permit value . Permits are not changed if it is a retry of a previous successful change request of a session - aware proxy . Permits are changed again if it is a retry of a successful change request of a sessionless proxy . If number of permits increase new assignments can be done . Returns completed wait keys after successful change if there are any . Returns cancelled wait keys of the same endpoint if there are any . |
9,010 | protected void onSessionClose ( long sessionId , Map < Long , Object > responses ) { SessionSemaphoreState state = sessionStates . get ( sessionId ) ; if ( state != null ) { if ( state . acquiredPermits > 0 ) { SemaphoreEndpoint endpoint = new SemaphoreEndpoint ( sessionId , 0 ) ; ReleaseResult result = release ( endpoint , newUnsecureUUID ( ) , state . acquiredPermits ) ; assert result . cancelled . isEmpty ( ) ; for ( AcquireInvocationKey key : result . acquired ) { responses . put ( key . commitIndex ( ) , Boolean . TRUE ) ; } } sessionStates . remove ( sessionId ) ; } } | Releases permits of the closed session . |
9,011 | public static long roundCapacity ( long requestedCapacity ) { if ( requestedCapacity > MAX_LONG_CAPACITY ) { throw new IllegalArgumentException ( requestedCapacity + " is greater than max allowed capacity[" + MAX_LONG_CAPACITY + "]." ) ; } return Math . max ( MIN_CAPACITY , QuickMath . nextPowerOfTwo ( requestedCapacity ) ) ; } | Round the capacity to the next allowed value . |
9,012 | public static int nextCapacity ( int current ) { assert current > 0 && Long . bitCount ( current ) == 1 : "Capacity must be a power of two." ; if ( current < MIN_CAPACITY / 2 ) { current = MIN_CAPACITY / 2 ; } current <<= 1 ; if ( current < 0 ) { throw new RuntimeException ( "Maximum capacity exceeded." ) ; } return current ; } | Returns the next possible capacity counting from the current buffers size . |
9,013 | int getNodeId ( long nanoTime ) { int nodeId = this . nodeId ; if ( nodeId != NODE_ID_OUT_OF_RANGE && nextNodeIdUpdate <= nanoTime ) { int newNodeId = getNodeEngine ( ) . getClusterService ( ) . getMemberListJoinVersion ( ) ; assert newNodeId >= 0 : "newNodeId=" + newNodeId ; newNodeId += nodeIdOffset ; nextNodeIdUpdate = nanoTime + NODE_ID_UPDATE_INTERVAL_NS ; if ( newNodeId != nodeId ) { nodeId = newNodeId ; if ( ( nodeId & - 1 << BITS_NODE_ID ) != 0 ) { outOfRangeMembers . add ( getNodeEngine ( ) . getClusterService ( ) . getLocalMember ( ) . getUuid ( ) ) ; logger . severe ( "Node ID is out of range (" + nodeId + "), this member won't be able to generate IDs. " + "Cluster restart is recommended." ) ; nodeId = NODE_ID_OUT_OF_RANGE ; } this . nodeId = nodeId ; if ( logger . isFineEnabled ( ) ) { logger . fine ( "Node ID assigned to '" + name + "': " + nodeId ) ; } } } return nodeId ; } | package - visible for tests |
9,014 | public static JsonSchemaNode createSchema ( JsonParser parser ) throws IOException { JsonSchemaNode dummy = new JsonSchemaStructNode ( null ) ; JsonSchemaStructNode parent = ( JsonSchemaStructNode ) dummy ; JsonToken currentToken = parser . nextToken ( ) ; int nameLocation = - 1 ; if ( currentToken == null ) { return null ; } while ( currentToken != null ) { if ( currentToken . isStructStart ( ) ) { JsonSchemaStructNode structNode = new JsonSchemaStructNode ( parent ) ; JsonSchemaNameValue nameValue = new JsonSchemaNameValue ( nameLocation , structNode ) ; parent . addChild ( nameValue ) ; parent = structNode ; nameLocation = - 1 ; } else if ( currentToken == JsonToken . FIELD_NAME ) { nameLocation = ( int ) getTokenLocation ( parser ) ; } else if ( currentToken . isStructEnd ( ) ) { parent = parent . getParent ( ) ; nameLocation = - 1 ; } else { JsonSchemaTerminalNode terminalNode = new JsonSchemaTerminalNode ( parent ) ; terminalNode . setValueStartLocation ( ( int ) getTokenLocation ( parser ) ) ; JsonSchemaNameValue nameValue = new JsonSchemaNameValue ( nameLocation , terminalNode ) ; parent . addChild ( nameValue ) ; nameLocation = - 1 ; } currentToken = parser . nextToken ( ) ; } JsonSchemaNameValue nameValue = ( ( JsonSchemaStructNode ) dummy ) . getChild ( 0 ) ; if ( nameValue == null ) { return null ; } dummy = nameValue . getValue ( ) ; dummy . setParent ( null ) ; return dummy ; } | Creates a description out of a JsonValue . The parser must be pointing to the start of the input . |
9,015 | private static void checkMapMergePolicyWhenStatisticsAreDisabled ( String mergePolicyClass , List < Class > requiredMergeTypes ) { for ( Class < ? > requiredMergeType : requiredMergeTypes ) { if ( MergingLastStoredTime . class . isAssignableFrom ( requiredMergeType ) || MergingExpirationTime . class . isAssignableFrom ( requiredMergeType ) ) { throw new InvalidConfigurationException ( "The merge policy " + mergePolicyClass + " requires the merge type " + requiredMergeType . getName ( ) + ", which is just provided if the map statistics are enabled." ) ; } } } | Checks if the configured merge policy requires merge types which are just available if map statistics are enabled . |
9,016 | protected Throwable unwrap ( Throwable throwable ) { if ( throwable instanceof ExecutionException && throwable . getCause ( ) != null ) { return throwable . getCause ( ) ; } return throwable ; } | this method should not be needed ; but there is a difference between client and server how it handles async throwables |
9,017 | public final boolean complete ( Object value ) { for ( ; ; ) { final Object oldState = state ; if ( isDone ( oldState ) ) { warnIfSuspiciousDoubleCompletion ( oldState , value ) ; return false ; } if ( compareAndSetState ( oldState , value ) ) { onComplete ( ) ; unblockAll ( oldState , defaultExecutor ) ; return true ; } } } | Can be called multiple times but only the first answer will lead to the future getting triggered . All subsequent complete calls are ignored . |
9,018 | private void warnIfSuspiciousDoubleCompletion ( Object s0 , Object s1 ) { if ( s0 != s1 && ! ( s0 instanceof CancellationException ) && ! ( s1 instanceof CancellationException ) ) { logger . warning ( String . format ( "Future.complete(Object) on completed future. " + "Request: %s, current value: %s, offered value: %s" , invocationToString ( ) , s0 , s1 ) ) ; } } | warns when a completed future is completed again e . g . when it received a response but before it cleans up itself it receives a HazelcastInstanceNotActiveException |
9,019 | static < K , V > CacheConfig < K , V > getCacheConfig ( HazelcastClientInstanceImpl client , String cacheName , String simpleCacheName ) { ClientMessage request = CacheGetConfigCodec . encodeRequest ( cacheName , simpleCacheName ) ; try { int partitionId = client . getClientPartitionService ( ) . getPartitionId ( cacheName ) ; ClientInvocation clientInvocation = new ClientInvocation ( client , request , cacheName , partitionId ) ; Future < ClientMessage > future = clientInvocation . invoke ( ) ; ClientMessage responseMessage = future . get ( ) ; SerializationService serializationService = client . getSerializationService ( ) ; return deserializeCacheConfig ( client , responseMessage , serializationService , clientInvocation ) ; } catch ( Exception e ) { throw rethrow ( e ) ; } } | Gets the cache configuration from the server . |
9,020 | static < K , V > CacheConfig < K , V > createCacheConfig ( HazelcastClientInstanceImpl client , CacheConfig < K , V > newCacheConfig ) { try { String nameWithPrefix = newCacheConfig . getNameWithPrefix ( ) ; int partitionId = client . getClientPartitionService ( ) . getPartitionId ( nameWithPrefix ) ; Object resolvedConfig = resolveCacheConfigWithRetry ( client , newCacheConfig , partitionId ) ; Data configData = client . getSerializationService ( ) . toData ( resolvedConfig ) ; ClientMessage request = CacheCreateConfigCodec . encodeRequest ( configData , true ) ; ClientInvocation clientInvocation = new ClientInvocation ( client , request , nameWithPrefix , partitionId ) ; Future < ClientMessage > future = clientInvocation . invoke ( ) ; final ClientMessage response = future . get ( ) ; final Data data = CacheCreateConfigCodec . decodeResponse ( response ) . response ; return resolveCacheConfig ( client , clientInvocation , data ) ; } catch ( Exception e ) { throw rethrow ( e ) ; } } | Creates a new cache configuration on Hazelcast members . |
9,021 | static int validateAndGetArrayQuantifierFromCurrentToken ( String token , String fullPath ) { String quantifier = extractArgumentsFromAttributeName ( token ) ; if ( quantifier == null ) { throw new IllegalArgumentException ( "Malformed quantifier in " + fullPath ) ; } int index = Integer . parseInt ( quantifier ) ; if ( index < 0 ) { throw new IllegalArgumentException ( "Array index " + index + " cannot be negative in " + fullPath ) ; } return index ; } | Extracts and validates the quantifier from the given path token |
9,022 | static int getPortableArrayCellPosition ( BufferObjectDataInput in , int streamPosition , int cellIndex ) throws IOException { return in . readInt ( streamPosition + cellIndex * Bits . INT_SIZE_IN_BYTES ) ; } | Calculates and reads the position of the Portable object stored in a Portable array under the given index . |
9,023 | static int getStreamPositionOfTheField ( FieldDefinition fd , BufferObjectDataInput in , int offset ) throws IOException { int pos = in . readInt ( offset + fd . getIndex ( ) * Bits . INT_SIZE_IN_BYTES ) ; short len = in . readShort ( pos ) ; return pos + Bits . SHORT_SIZE_IN_BYTES + len + 1 ; } | Calculates the position of the given field in the portable byte stream |
9,024 | static int getArrayLengthOfTheField ( FieldDefinition fd , BufferObjectDataInput in , int offset ) throws IOException { int originalPos = in . position ( ) ; try { int pos = getStreamPositionOfTheField ( fd , in , offset ) ; in . position ( pos ) ; return in . readInt ( ) ; } finally { in . position ( originalPos ) ; } } | Reads the length of the given array . It does not validate if the current position is actually an array - has to be taken care of by the caller . |
9,025 | static void validateFactoryAndClass ( FieldDefinition fd , int factoryId , int classId , String fullPath ) { if ( factoryId != fd . getFactoryId ( ) ) { throw new IllegalArgumentException ( "Invalid factoryId! Expected: " + fd . getFactoryId ( ) + ", Current: " + factoryId + " in path " + fullPath ) ; } if ( classId != fd . getClassId ( ) ) { throw new IllegalArgumentException ( "Invalid classId! Expected: " + fd . getClassId ( ) + ", Current: " + classId + " in path " + fullPath ) ; } } | Validates if the given factoryId and classId match the ones from the fieldDefinition |
9,026 | public static List < DiscoveryStrategyConfig > map ( List < AliasedDiscoveryConfig < ? > > aliasedDiscoveryConfigs ) { List < DiscoveryStrategyConfig > result = new ArrayList < DiscoveryStrategyConfig > ( ) ; for ( AliasedDiscoveryConfig config : aliasedDiscoveryConfigs ) { if ( config . isEnabled ( ) ) { result . add ( createDiscoveryStrategyConfig ( config ) ) ; } } return result ; } | Maps aliased discovery strategy configs into discovery strategy configs . |
9,027 | @ SuppressWarnings ( "checkstyle:npathcomplexity" ) public void verify ( ) { int countEnabled = 0 ; if ( getTcpIpConfig ( ) . isEnabled ( ) ) { countEnabled ++ ; } if ( getMulticastConfig ( ) . isEnabled ( ) ) { countEnabled ++ ; } if ( getAwsConfig ( ) . isEnabled ( ) ) { countEnabled ++ ; } if ( getGcpConfig ( ) . isEnabled ( ) ) { countEnabled ++ ; } if ( getAzureConfig ( ) . isEnabled ( ) ) { countEnabled ++ ; } if ( getKubernetesConfig ( ) . isEnabled ( ) ) { countEnabled ++ ; } if ( getEurekaConfig ( ) . isEnabled ( ) ) { countEnabled ++ ; } if ( countEnabled > 1 ) { throw new InvalidConfigurationException ( "Multiple join configuration cannot be enabled at the same time. Enable only " + "one of: TCP/IP, Multicast, AWS, GCP, Azure, Kubernetes, or Eureka" ) ; } verifyDiscoveryProviderConfig ( ) ; } | Verifies this JoinConfig is valid . At most a single joiner should be active . |
9,028 | private void verifyDiscoveryProviderConfig ( ) { Collection < DiscoveryStrategyConfig > discoveryStrategyConfigs = discoveryConfig . getDiscoveryStrategyConfigs ( ) ; if ( discoveryStrategyConfigs . size ( ) > 0 ) { if ( getMulticastConfig ( ) . isEnabled ( ) ) { throw new InvalidConfigurationException ( "Multicast and DiscoveryProviders join can't be enabled at the same time" ) ; } } } | Verifies this JoinConfig is valid . When Discovery SPI enabled other discovery methods should be disabled |
9,029 | protected void removeAllRecordStoresOfAllMaps ( boolean onShutdown , boolean onRecordStoreDestroy ) { for ( PartitionContainer partitionContainer : partitionContainers ) { if ( partitionContainer != null ) { removeRecordStoresFromPartitionMatchingWith ( allRecordStores ( ) , partitionContainer . getPartitionId ( ) , onShutdown , onRecordStoreDestroy ) ; } } } | Removes all record stores from all partitions . |
9,030 | public static void unregisterCacheObject ( String cacheManagerName , String name , boolean stats ) { synchronized ( mBeanServer ) { ObjectName objectName = calculateObjectName ( cacheManagerName , name , stats ) ; Set < ObjectName > registeredObjectNames = mBeanServer . queryNames ( objectName , null ) ; if ( isRegistered ( cacheManagerName , name , stats ) ) { for ( ObjectName registeredObjectName : registeredObjectNames ) { try { mBeanServer . unregisterMBean ( registeredObjectName ) ; } catch ( InstanceNotFoundException e ) { ignore ( e ) ; } catch ( Exception e ) { throw new CacheException ( "Error unregistering object instance " + registeredObjectName + ". Error was " + e . getMessage ( ) , e ) ; } } } } } | Unregisters the MXBean if it is already registered . |
9,031 | public MessageFlyweight set ( boolean value ) { buffer . putByte ( index + offset , ( byte ) ( value ? 1 : 0 ) ) ; index += Bits . BYTE_SIZE_IN_BYTES ; return this ; } | region SET Overloads |
9,032 | public boolean getBoolean ( ) { byte result = buffer . getByte ( index + offset ) ; index += Bits . BYTE_SIZE_IN_BYTES ; return result != 0 ; } | region GET Overloads |
9,033 | public static long readLongAttribute ( String attributeName , long defaultValue ) { try { String methodName = "get" + attributeName ; OperatingSystemMXBean systemMXBean = OPERATING_SYSTEM_MX_BEAN ; Method method = systemMXBean . getClass ( ) . getMethod ( methodName ) ; method . setAccessible ( true ) ; Object value = method . invoke ( systemMXBean ) ; if ( value == null ) { return defaultValue ; } if ( value instanceof Long ) { return ( Long ) value ; } if ( value instanceof Double ) { double v = ( Double ) value ; return Math . round ( v * PERCENTAGE_MULTIPLIER ) ; } if ( value instanceof Number ) { return ( ( Number ) value ) . longValue ( ) ; } } catch ( RuntimeException re ) { throw re ; } catch ( Exception ignored ) { ignore ( ignored ) ; } return defaultValue ; } | Reads a long attribute from OperatingSystemMXBean . |
9,034 | boolean addCompletedMigration ( MigrationInfo migrationInfo ) { if ( migrationInfo . getStatus ( ) != MigrationStatus . SUCCESS && migrationInfo . getStatus ( ) != MigrationStatus . FAILED ) { throw new IllegalArgumentException ( "Migration doesn't seem completed: " + migrationInfo ) ; } if ( migrationInfo . getInitialPartitionVersion ( ) <= 0 || migrationInfo . getPartitionVersionIncrement ( ) <= 0 ) { throw new IllegalArgumentException ( "Partition state versions are not set: " + migrationInfo ) ; } partitionServiceLock . lock ( ) ; try { boolean added = completedMigrations . add ( migrationInfo ) ; if ( added ) { stats . incrementCompletedMigrations ( ) ; } return added ; } finally { partitionServiceLock . unlock ( ) ; } } | Adds the migration to the set of completed migrations and increases the completed migration counter . Acquires the partition service lock to update the migrations . |
9,035 | private void evictCompletedMigrations ( Collection < MigrationInfo > migrations ) { partitionServiceLock . lock ( ) ; try { completedMigrations . removeAll ( migrations ) ; } finally { partitionServiceLock . unlock ( ) ; } } | Evicts completed migrations from the list |
9,036 | void triggerControlTask ( ) { migrationQueue . clear ( ) ; if ( ! node . getClusterService ( ) . isJoined ( ) ) { logger . fine ( "Node is not joined, will not trigger ControlTask" ) ; return ; } if ( ! node . isMaster ( ) ) { logger . fine ( "Node is not master, will not trigger ControlTask" ) ; return ; } migrationQueue . add ( new ControlTask ( ) ) ; if ( logger . isFinestEnabled ( ) ) { logger . finest ( "Migration queue is cleared and control task is scheduled" ) ; } } | Clears the migration queue and triggers the control task . Called on the master node . |
9,037 | void applyMigration ( InternalPartitionImpl partition , MigrationInfo migrationInfo ) { final PartitionReplica [ ] members = Arrays . copyOf ( partition . getReplicas ( ) , InternalPartition . MAX_REPLICA_COUNT ) ; if ( migrationInfo . getSourceCurrentReplicaIndex ( ) > - 1 ) { members [ migrationInfo . getSourceCurrentReplicaIndex ( ) ] = null ; } if ( migrationInfo . getDestinationCurrentReplicaIndex ( ) > - 1 ) { members [ migrationInfo . getDestinationCurrentReplicaIndex ( ) ] = null ; } members [ migrationInfo . getDestinationNewReplicaIndex ( ) ] = migrationInfo . getDestination ( ) ; if ( migrationInfo . getSourceNewReplicaIndex ( ) > - 1 ) { members [ migrationInfo . getSourceNewReplicaIndex ( ) ] = migrationInfo . getSource ( ) ; } partition . setReplicas ( members ) ; } | Mutates the partition state and applies the migration . |
9,038 | static Selector newSelector ( ILogger logger ) { checkNotNull ( logger , "logger" ) ; Selector selector ; try { selector = Selector . open ( ) ; } catch ( IOException e ) { throw new HazelcastException ( "Failed to open a Selector" , e ) ; } boolean optimize = Boolean . parseBoolean ( System . getProperty ( "hazelcast.io.optimizeselector" , "true" ) ) ; if ( optimize ) { optimize ( selector , logger ) ; } return selector ; } | Creates a new Selector and will optimize it if possible . |
9,039 | static SelectionKeysSet optimize ( Selector selector , ILogger logger ) { checkNotNull ( selector , "selector" ) ; checkNotNull ( logger , "logger" ) ; try { SelectionKeysSet set = new SelectionKeysSet ( ) ; Class < ? > selectorImplClass = findOptimizableSelectorClass ( selector ) ; if ( selectorImplClass == null ) { return null ; } Field selectedKeysField = selectorImplClass . getDeclaredField ( "selectedKeys" ) ; selectedKeysField . setAccessible ( true ) ; Field publicSelectedKeysField = selectorImplClass . getDeclaredField ( "publicSelectedKeys" ) ; publicSelectedKeysField . setAccessible ( true ) ; selectedKeysField . set ( selector , set ) ; publicSelectedKeysField . set ( selector , set ) ; logger . finest ( "Optimized Selector: " + selector . getClass ( ) . getName ( ) ) ; return set ; } catch ( Throwable t ) { logger . finest ( "Failed to optimize Selector: " + selector . getClass ( ) . getName ( ) , t ) ; return null ; } } | Tries to optimize the provided Selector . |
9,040 | private boolean isTryRecoverSucceeded ( ConcurrentMap < Integer , Long > brokenSequences ) { int numberOfBrokenSequences = brokenSequences . size ( ) ; InvokerWrapper invokerWrapper = context . getInvokerWrapper ( ) ; SubscriberContext subscriberContext = context . getSubscriberContext ( ) ; SubscriberContextSupport subscriberContextSupport = subscriberContext . getSubscriberContextSupport ( ) ; List < Future < Object > > futures = new ArrayList < > ( numberOfBrokenSequences ) ; for ( Map . Entry < Integer , Long > entry : brokenSequences . entrySet ( ) ) { Integer partitionId = entry . getKey ( ) ; Long sequence = entry . getValue ( ) ; Object recoveryOperation = subscriberContextSupport . createRecoveryOperation ( mapName , cacheId , sequence , partitionId ) ; Future < Object > future = ( Future < Object > ) invokerWrapper . invokeOnPartitionOwner ( recoveryOperation , partitionId ) ; futures . add ( future ) ; } Collection < Object > results = FutureUtil . returnWithDeadline ( futures , 1 , MINUTES ) ; int successCount = 0 ; for ( Object object : results ) { Boolean resolvedResponse = subscriberContextSupport . resolveResponseForRecoveryOperation ( object ) ; if ( TRUE . equals ( resolvedResponse ) ) { successCount ++ ; } } return successCount == numberOfBrokenSequences ; } | This tries to reset cursor position of the accumulator to the supplied sequence if that sequence is still there it will be succeeded otherwise query cache content stays inconsistent . |
9,041 | @ SuppressWarnings ( "checkstyle:magicnumber" ) private void firstHeartbeat ( long firstHeartbeatEstimateMillis ) { long stdDeviationMillis = firstHeartbeatEstimateMillis / 4 ; heartbeatHistory . add ( firstHeartbeatEstimateMillis - stdDeviationMillis ) ; heartbeatHistory . add ( firstHeartbeatEstimateMillis + stdDeviationMillis ) ; } | bootstrap with 2 entries with rather high standard deviation |
9,042 | private double phi ( long timestampMillis ) { long timeDiffMillis ; double meanMillis ; double stdDeviationMillis ; synchronized ( heartbeatHistory ) { long lastTimestampMillis = lastHeartbeatMillis ; if ( lastTimestampMillis == NO_HEARTBEAT_TIMESTAMP ) { return 0.0 ; } timeDiffMillis = timestampMillis - lastTimestampMillis ; meanMillis = heartbeatHistory . mean ( ) ; stdDeviationMillis = ensureValidStdDeviation ( heartbeatHistory . stdDeviation ( ) ) ; } return phi ( timeDiffMillis , meanMillis + acceptableHeartbeatPauseMillis , stdDeviationMillis ) ; } | The suspicion level of the accrual failure detector . |
9,043 | public static List < String > parseDelimitedString ( String value , char delimiter , boolean trim ) { if ( value == null ) { value = "" ; } List < String > list = new ArrayList < String > ( ) ; StringBuilder sb = new StringBuilder ( ) ; int expecting = ( CHAR | DELIMITER | START_QUOTE ) ; for ( int i = 0 ; i < value . length ( ) ; i ++ ) { char character = value . charAt ( i ) ; boolean isEscaped = isEscaped ( value , i ) ; boolean isDelimiter = isDelimiter ( delimiter , character , isEscaped ) ; boolean isQuote = isQuote ( character , isEscaped ) ; if ( isDelimiter && ( ( expecting & DELIMITER ) > 0 ) ) { addPart ( list , sb , trim ) ; sb . delete ( 0 , sb . length ( ) ) ; expecting = ( CHAR | DELIMITER | START_QUOTE ) ; } else if ( isQuote && ( ( expecting & START_QUOTE ) > 0 ) ) { sb . append ( character ) ; expecting = CHAR | END_QUOTE ; } else if ( isQuote && ( ( expecting & END_QUOTE ) > 0 ) ) { sb . append ( character ) ; expecting = ( CHAR | START_QUOTE | DELIMITER ) ; } else if ( ( expecting & CHAR ) > 0 ) { sb . append ( character ) ; } else { String message = String . format ( "Invalid delimited string [%s] for delimiter: %s" , value , delimiter ) ; throw new IllegalArgumentException ( message ) ; } } if ( sb . length ( ) > 0 ) { addPart ( list , sb , trim ) ; } return list ; } | Parses delimited string and returns an array containing the tokens . This parser obeys quotes so the delimiter character will be ignored if it is inside of a quote . This method assumes that the quote character is not included in the set of delimiter characters . |
9,044 | public static void validateResults ( Map < Integer , Object > results ) { for ( Object result : results . values ( ) ) { if ( result != null && result instanceof CacheClearResponse ) { Object response = ( ( CacheClearResponse ) result ) . getResponse ( ) ; if ( response instanceof Throwable ) { ExceptionUtil . sneakyThrow ( ( Throwable ) response ) ; } } } } | Cache clear response validator loop on results to validate that no exception exists on the result map . Throws the first exception in the map . |
9,045 | public static < K , V > void validateNotNull ( K key , V value ) { checkNotNull ( key , NULL_KEY_IS_NOT_ALLOWED ) ; checkNotNull ( value , NULL_VALUE_IS_NOT_ALLOWED ) ; } | Validates that key value pair are both not null . |
9,046 | public static < K , V > void validateNotNull ( K key , V value1 , V value2 ) { checkNotNull ( key , NULL_KEY_IS_NOT_ALLOWED ) ; checkNotNull ( value1 , NULL_VALUE_IS_NOT_ALLOWED ) ; checkNotNull ( value2 , NULL_VALUE_IS_NOT_ALLOWED ) ; } | Validates that key and multi values are not null . |
9,047 | public static < K , V > void validateNotNull ( Map < ? extends K , ? extends V > map ) { checkNotNull ( map , "map is null" ) ; boolean containsNullKey = false ; boolean containsNullValue = false ; try { containsNullKey = map . containsKey ( null ) ; } catch ( NullPointerException e ) { ignore ( e ) ; } try { containsNullValue = map . containsValue ( null ) ; } catch ( NullPointerException e ) { ignore ( e ) ; } if ( containsNullKey ) { throw new NullPointerException ( NULL_KEY_IS_NOT_ALLOWED ) ; } if ( containsNullValue ) { throw new NullPointerException ( NULL_VALUE_IS_NOT_ALLOWED ) ; } } | This validator ensures that no key or value is null in the provided map . |
9,048 | public static < K > void validateConfiguredTypes ( CacheConfig cacheConfig , K key ) throws ClassCastException { Class keyType = cacheConfig . getKeyType ( ) ; validateConfiguredKeyType ( keyType , key ) ; } | Validates that the configured key matches the provided key . |
9,049 | public static < K , V > void validateConfiguredTypes ( CacheConfig cacheConfig , K key , V value ) throws ClassCastException { Class keyType = cacheConfig . getKeyType ( ) ; Class valueType = cacheConfig . getValueType ( ) ; validateConfiguredKeyType ( keyType , key ) ; validateConfiguredValueType ( valueType , value ) ; } | Validates the configured key and value types matches the provided key value types . |
9,050 | public static < K > void validateConfiguredKeyType ( Class < K > keyType , K key ) throws ClassCastException { if ( Object . class != keyType ) { if ( ! keyType . isAssignableFrom ( key . getClass ( ) ) ) { throw new ClassCastException ( "Key '" + key + "' is not assignable to " + keyType ) ; } } } | Validates the key with key type . |
9,051 | public static < V > void validateConfiguredValueType ( Class < V > valueType , V value ) throws ClassCastException { if ( Object . class != valueType ) { if ( ! valueType . isAssignableFrom ( value . getClass ( ) ) ) { throw new ClassCastException ( "Value '" + value + "' is not assignable to " + valueType ) ; } } } | Validates the value with value type . |
9,052 | boolean extendLeaseTime ( String caller , long threadId , long leaseTime ) { if ( ! isLockedBy ( caller , threadId ) ) { return false ; } this . blockReads = true ; if ( expirationTime < Long . MAX_VALUE ) { setExpirationTime ( expirationTime - Clock . currentTimeMillis ( ) + leaseTime ) ; } return true ; } | This method is used to extend the already locked resource in the prepare phase of the transactions . It also marks the resource true to block reads . |
9,053 | void init ( String path ) { this . path = checkHasText ( path , "path cannot be null or empty" ) ; this . index = 0 ; this . offset = 0 ; this . nextSplit = StringUtil . indexOf ( path , '.' , 0 ) ; this . token = null ; if ( nextSplit == 0 ) { throw new IllegalArgumentException ( "The path cannot begin with a dot: " + path ) ; } } | Inits the cursor with the given path and sets the current position to the first token . |
9,054 | public void publishInternal ( Object message ) { topicStats . incrementPublishes ( ) ; topicService . publishMessage ( name , message , multithreaded ) ; } | Publishes the message and increases the local statistics for the number of published messages . |
9,055 | private Version getClusterOrNodeVersion ( ) { if ( node . getClusterService ( ) != null && ! node . getClusterService ( ) . getClusterVersion ( ) . isUnknown ( ) ) { return node . getClusterService ( ) . getClusterVersion ( ) ; } else { String overriddenClusterVersion = node . getProperties ( ) . getString ( GroupProperty . INIT_CLUSTER_VERSION ) ; return ( overriddenClusterVersion != null ) ? MemberVersion . of ( overriddenClusterVersion ) . asVersion ( ) : node . getVersion ( ) . asVersion ( ) ; } } | otherwise if not overridden use current node s codebase version |
9,056 | public void parse ( String string ) { if ( string == null ) { throw new NullPointerException ( "string is null" ) ; } int bufferSize = Math . max ( MIN_BUFFER_SIZE , Math . min ( DEFAULT_BUFFER_SIZE , string . length ( ) ) ) ; try { parse ( new StringReader ( string ) , bufferSize ) ; } catch ( IOException exception ) { throw new RuntimeException ( exception ) ; } } | Parses the given input string . The input must contain a valid JSON value optionally padded with whitespace . |
9,057 | private List < Member > getNonLocalReplicaAddresses ( ) { final Collection < Member > dataMembers = nodeEngine . getClusterService ( ) . getMembers ( DATA_MEMBER_SELECTOR ) ; final ArrayList < Member > nonLocalDataMembers = new ArrayList < Member > ( dataMembers ) ; nonLocalDataMembers . remove ( nodeEngine . getLocalMember ( ) ) ; return nonLocalDataMembers ; } | Return the list of non - local CRDT replicas in the cluster . |
9,058 | public void setIndexStats ( Map < String , LocalIndexStatsImpl > indexStats ) { this . mutableIndexStats . clear ( ) ; if ( indexStats != null ) { this . mutableIndexStats . putAll ( indexStats ) ; } } | Sets the per - index stats of this map stats to the given per - index stats . |
9,059 | private int getMatchingPoint ( String pattern , String itemName ) { int index = pattern . indexOf ( '*' ) ; if ( index == - 1 ) { return - 1 ; } String firstPart = pattern . substring ( 0 , index ) ; if ( ! itemName . startsWith ( firstPart ) ) { return - 1 ; } String secondPart = pattern . substring ( index + 1 ) ; if ( ! itemName . endsWith ( secondPart ) ) { return - 1 ; } return firstPart . length ( ) + secondPart . length ( ) ; } | This method returns the higher value the better the matching is . |
9,060 | private int getMatchingPoint ( String pattern , String itemName ) { int index = pattern . indexOf ( '*' ) ; if ( index == - 1 ) { return - 1 ; } String firstPart = pattern . substring ( 0 , index ) ; int indexFirstPart = itemName . indexOf ( firstPart , 0 ) ; if ( indexFirstPart == - 1 ) { return - 1 ; } String secondPart = pattern . substring ( index + 1 ) ; int indexSecondPart = itemName . indexOf ( secondPart , index + 1 ) ; if ( indexSecondPart == - 1 ) { return - 1 ; } return firstPart . length ( ) + secondPart . length ( ) ; } | This method returns higher values the better the matching is . |
9,061 | public JsonGetterContext getContext ( String queryPath ) { JsonGetterContext context = internalCache . get ( queryPath ) ; if ( context != null ) { return context ; } context = new JsonGetterContext ( queryPath ) ; JsonGetterContext previousContextValue = internalCache . putIfAbsent ( queryPath , context ) ; if ( previousContextValue == null ) { cleanupIfNeccessary ( context ) ; return context ; } else { return previousContextValue ; } } | Returns an existing or newly created context for given query path . If maximum cache size is reached then some entries are evicted . The newly created entry is not evicted . |
9,062 | private void addStatsOfNoDataIncludedMaps ( Map statsPerMap ) { ProxyService proxyService = nodeEngine . getProxyService ( ) ; Collection < String > mapNames = proxyService . getDistributedObjectNames ( SERVICE_NAME ) ; for ( String mapName : mapNames ) { if ( ! statsPerMap . containsKey ( mapName ) ) { statsPerMap . put ( mapName , EMPTY_LOCAL_MAP_STATS ) ; } } } | Some maps may have a proxy but no data has been put yet . Think of one created a proxy but not put any data in it . By calling this method we are returning an empty stats object for those maps . This is helpful to monitor those kind of maps . |
9,063 | private void addReplicaStatsOf ( RecordStore recordStore , LocalMapOnDemandCalculatedStats onDemandStats ) { if ( ! hasRecords ( recordStore ) ) { return ; } long backupEntryCount = 0 ; long backupEntryMemoryCost = 0 ; int totalBackupCount = recordStore . getMapContainer ( ) . getTotalBackupCount ( ) ; for ( int replicaNumber = 1 ; replicaNumber <= totalBackupCount ; replicaNumber ++ ) { int partitionId = recordStore . getPartitionId ( ) ; Address replicaAddress = getReplicaAddress ( partitionId , replicaNumber , totalBackupCount ) ; if ( ! isReplicaAvailable ( replicaAddress , totalBackupCount ) ) { printWarning ( partitionId , replicaNumber ) ; continue ; } if ( isReplicaOnThisNode ( replicaAddress ) ) { backupEntryMemoryCost += recordStore . getOwnedEntryCost ( ) ; backupEntryCount += recordStore . size ( ) ; } } if ( NATIVE != recordStore . getMapContainer ( ) . getMapConfig ( ) . getInMemoryFormat ( ) ) { onDemandStats . incrementHeapCost ( backupEntryMemoryCost ) ; } onDemandStats . incrementBackupEntryMemoryCost ( backupEntryMemoryCost ) ; onDemandStats . incrementBackupEntryCount ( backupEntryCount ) ; onDemandStats . setBackupCount ( recordStore . getMapContainer ( ) . getMapConfig ( ) . getTotalBackupCount ( ) ) ; } | Calculates and adds replica partition stats . |
9,064 | private Address getReplicaAddress ( int partitionId , int replicaNumber , int backupCount ) { IPartition partition = partitionService . getPartition ( partitionId ) ; Address replicaAddress = partition . getReplicaAddress ( replicaNumber ) ; if ( replicaAddress == null ) { replicaAddress = waitForReplicaAddress ( replicaNumber , partition , backupCount ) ; } return replicaAddress ; } | Gets replica address . Waits if necessary . |
9,065 | private Address waitForReplicaAddress ( int replica , IPartition partition , int backupCount ) { int tryCount = RETRY_COUNT ; Address replicaAddress = null ; while ( replicaAddress == null && partitionService . getMaxAllowedBackupCount ( ) >= backupCount && tryCount -- > 0 ) { sleep ( ) ; replicaAddress = partition . getReplicaAddress ( replica ) ; } return replicaAddress ; } | Waits partition table update to get replica address if current replica address is null . |
9,066 | public static < E1 > ConcurrentConveyorSingleQueue < E1 > concurrentConveyorSingleQueue ( E1 submitterGoneItem , QueuedPipe < E1 > queue ) { return new ConcurrentConveyorSingleQueue < E1 > ( submitterGoneItem , queue ) ; } | Creates a new concurrent conveyor with a single queue . |
9,067 | public Operation prepareReplicationOperation ( PartitionReplicationEvent event ) { if ( event . getReplicaIndex ( ) > 1 ) { return null ; } List < XATransactionDTO > migrationData = new ArrayList < XATransactionDTO > ( ) ; InternalPartitionService partitionService = nodeEngine . getPartitionService ( ) ; for ( Map . Entry < SerializableXID , List < XATransaction > > entry : transactions . entrySet ( ) ) { SerializableXID xid = entry . getKey ( ) ; int partitionId = partitionService . getPartitionId ( xid ) ; List < XATransaction > xaTransactionList = entry . getValue ( ) ; for ( XATransaction xaTransaction : xaTransactionList ) { if ( partitionId == event . getPartitionId ( ) ) { migrationData . add ( new XATransactionDTO ( xaTransaction ) ) ; } } } if ( migrationData . isEmpty ( ) ) { return null ; } else { return new XaReplicationOperation ( migrationData , event . getPartitionId ( ) , event . getReplicaIndex ( ) ) ; } } | Migration related methods |
9,068 | private static Method getMethod ( Object source , String methodName , String name ) { try { Method method = source . getClass ( ) . getMethod ( methodName ) ; method . setAccessible ( true ) ; return method ; } catch ( Exception e ) { if ( LOGGER . isFinestEnabled ( ) ) { LOGGER . log ( Level . FINEST , "Unable to register OperatingSystemMXBean method " + methodName + " used for probe " + name , e ) ; } return null ; } } | Returns a method from the given source object . |
9,069 | public void setDiscoveryStrategyConfigs ( List < DiscoveryStrategyConfig > discoveryStrategyConfigs ) { this . discoveryStrategyConfigs = discoveryStrategyConfigs == null ? new ArrayList < DiscoveryStrategyConfig > ( 1 ) : discoveryStrategyConfigs ; } | Sets the strategy configurations for this discovery config . |
9,070 | static MapKeyLoader . Role assignRole ( boolean isPartitionOwner , boolean isMapNamePartition , boolean isMapNamePartitionFirstReplica ) { if ( isMapNamePartition ) { if ( isPartitionOwner ) { return MapKeyLoader . Role . SENDER ; } else { if ( isMapNamePartitionFirstReplica ) { return MapKeyLoader . Role . SENDER_BACKUP ; } else { return MapKeyLoader . Role . NONE ; } } } else { return isPartitionOwner ? MapKeyLoader . Role . RECEIVER : MapKeyLoader . Role . NONE ; } } | Returns the role for the map key loader based on the passed parameters . The partition owner of the map name partition is the sender . The first replica of the map name partition is the sender backup . Other partition owners are receivers and other partition replicas do not have a role . |
9,071 | public static String [ ] parseOutCompositeIndexComponents ( String name ) { String [ ] components = COMMA_PATTERN . split ( name , - 1 ) ; if ( components . length == 1 ) { return null ; } if ( components . length > MAX_INDEX_COMPONENTS ) { throw new IllegalArgumentException ( "Too many composite index attributes: " + name ) ; } Set < String > seenComponents = new HashSet < String > ( components . length ) ; for ( int i = 0 ; i < components . length ; ++ i ) { String component = PredicateUtils . canonicalizeAttribute ( components [ i ] ) ; components [ i ] = component ; if ( component . isEmpty ( ) ) { throw new IllegalArgumentException ( "Empty composite index attribute: " + name ) ; } if ( ! seenComponents . add ( component ) ) { throw new IllegalArgumentException ( "Duplicate composite index attribute: " + name ) ; } } return components ; } | Parses the given index name into components . |
9,072 | @ SuppressWarnings ( "unchecked" ) public < P extends DiagnosticsPlugin > P getPlugin ( Class < P > pluginClass ) { return ( P ) pluginsMap . get ( pluginClass ) ; } | Gets the plugin for a given plugin class . This method should be used if the plugin instance is required within some data - structure outside of the Diagnostics . |
9,073 | public boolean register ( Invocation invocation ) { final long callId ; boolean force = invocation . op . isUrgent ( ) || invocation . isRetryCandidate ( ) ; try { callId = force ? callIdSequence . forceNext ( ) : callIdSequence . next ( ) ; } catch ( HazelcastOverloadException e ) { throw new HazelcastOverloadException ( "Failed to start invocation due to overload: " + invocation , e ) ; } try { setCallId ( invocation . op , callId ) ; } catch ( IllegalStateException e ) { callIdSequence . complete ( ) ; throw e ; } invocations . put ( callId , invocation ) ; if ( ! alive ) { invocation . notifyError ( new HazelcastInstanceNotActiveException ( ) ) ; return false ; } return true ; } | Registers an invocation . |
9,074 | public static Permission getPermission ( String name , String serviceName , String ... actions ) { PermissionFactory permissionFactory = PERMISSION_FACTORY_MAP . get ( serviceName ) ; if ( permissionFactory == null ) { throw new IllegalArgumentException ( "No permissions found for service: " + serviceName ) ; } return permissionFactory . create ( name , actions ) ; } | Creates a permission |
9,075 | protected final SplitBrainMergeCheckResult sendSplitBrainJoinMessageAndCheckResponse ( Address target , SplitBrainJoinMessage request ) { SplitBrainJoinMessage response = sendSplitBrainJoinMessage ( target , request ) ; return clusterService . getClusterJoinManager ( ) . shouldMerge ( response ) ; } | Sends a split brain join request to the target address and checks the response to see if this node should merge to the target address . |
9,076 | private SplitBrainJoinMessage sendSplitBrainJoinMessage ( Address target , SplitBrainJoinMessage request ) { if ( logger . isFineEnabled ( ) ) { logger . fine ( "Sending SplitBrainJoinMessage to " + target ) ; } Connection conn = node . getEndpointManager ( MEMBER ) . getOrConnect ( target , true ) ; long timeout = SPLIT_BRAIN_CONN_TIMEOUT_MILLIS ; while ( conn == null ) { timeout -= SPLIT_BRAIN_SLEEP_TIME_MILLIS ; if ( timeout < 0 ) { logger . fine ( "Returning null timeout<0, " + timeout ) ; return null ; } try { Thread . sleep ( SPLIT_BRAIN_SLEEP_TIME_MILLIS ) ; } catch ( InterruptedException e ) { currentThread ( ) . interrupt ( ) ; return null ; } conn = node . getEndpointManager ( MEMBER ) . getConnection ( target ) ; } NodeEngine nodeEngine = node . nodeEngine ; Future future = nodeEngine . getOperationService ( ) . createInvocationBuilder ( ClusterServiceImpl . SERVICE_NAME , new SplitBrainMergeValidationOp ( request ) , target ) . setTryCount ( 1 ) . invoke ( ) ; try { return ( SplitBrainJoinMessage ) future . get ( SPLIT_BRAIN_JOIN_CHECK_TIMEOUT_SECONDS , TimeUnit . SECONDS ) ; } catch ( TimeoutException e ) { logger . fine ( "Timeout during join check!" , e ) ; } catch ( Exception e ) { logger . warning ( "Error during join check!" , e ) ; } return null ; } | Sends a split brain join request to the target address and returns the response . |
9,077 | public static void setExpirationTimes ( long operationTTLMillis , long operationMaxIdleMillis , Record record , MapConfig mapConfig , boolean consultMapConfig ) { long ttlMillis = pickTTLMillis ( operationTTLMillis , record . getTtl ( ) , mapConfig , consultMapConfig ) ; long maxIdleMillis = pickMaxIdleMillis ( operationMaxIdleMillis , record . getMaxIdle ( ) , mapConfig , consultMapConfig ) ; record . setTtl ( ttlMillis ) ; record . setMaxIdle ( maxIdleMillis ) ; setExpirationTime ( record ) ; } | Updates records TTL and expiration time . |
9,078 | private static long pickTTLMillis ( long operationTTLMillis , long existingTTLMillis , MapConfig mapConfig , boolean consultMapConfig ) { if ( operationTTLMillis > 0 ) { return checkedTime ( operationTTLMillis ) ; } if ( consultMapConfig && operationTTLMillis < 0 && mapConfig . getTimeToLiveSeconds ( ) > 0 ) { return checkedTime ( SECONDS . toMillis ( mapConfig . getTimeToLiveSeconds ( ) ) ) ; } if ( operationTTLMillis < 0 ) { return checkedTime ( existingTTLMillis ) ; } return Long . MAX_VALUE ; } | Decides if TTL millis should to be set on record . |
9,079 | public static long calculateExpirationWithDelay ( long timeInMillis , long delayMillis , boolean backup ) { checkNotNegative ( timeInMillis , "timeInMillis can't be negative" ) ; if ( backup ) { long delayedTime = timeInMillis + delayMillis ; if ( delayedTime < 0 ) { return Long . MAX_VALUE ; } else { return delayedTime ; } } return timeInMillis ; } | On backup partitions this method delays key s expiration . |
9,080 | AcquireResult acquire ( LockInvocationKey key , boolean wait ) { LockEndpoint endpoint = key . endpoint ( ) ; UUID invocationUid = key . invocationUid ( ) ; RaftLockOwnershipState memorized = ownerInvocationRefUids . get ( Tuple2 . of ( endpoint , invocationUid ) ) ; if ( memorized != null ) { AcquireStatus status = memorized . isLocked ( ) ? SUCCESSFUL : FAILED ; return new AcquireResult ( status , memorized . getFence ( ) , Collections . < LockInvocationKey > emptyList ( ) ) ; } if ( owner == null ) { owner = key ; } if ( endpoint . equals ( owner . endpoint ( ) ) ) { if ( lockCount == lockCountLimit ) { ownerInvocationRefUids . put ( Tuple2 . of ( endpoint , invocationUid ) , NOT_LOCKED ) ; return AcquireResult . failed ( Collections . < LockInvocationKey > emptyList ( ) ) ; } lockCount ++ ; ownerInvocationRefUids . put ( Tuple2 . of ( endpoint , invocationUid ) , lockOwnershipState ( ) ) ; return AcquireResult . acquired ( owner . commitIndex ( ) ) ; } Collection < LockInvocationKey > cancelledWaitKeys = cancelWaitKeys ( endpoint , invocationUid ) ; if ( wait ) { addWaitKey ( endpoint , key ) ; return AcquireResult . waitKeyAdded ( cancelledWaitKeys ) ; } return AcquireResult . failed ( cancelledWaitKeys ) ; } | Assigns the lock to the endpoint if the lock is not held . Lock count is incremented if the endpoint already holds the lock . If some other endpoint holds the lock and the second argument is true a wait key is created and added to the wait queue . Lock count is not incremented if the lock request is a retry of the lock holder . If the lock request is a retry of a lock endpoint that resides in the wait queue with the same invocation uid a retry wait key wait key is attached to the original wait key . If the lock request is a new request of a lock endpoint that resides in the wait queue with a different invocation uid the existing wait key is cancelled because it means the caller has stopped waiting for response of the previous invocation . 
If the invocation uid is same with one of the previous invocations of the current lock owner memorized result of the previous invocation is returned . |
9,081 | protected void onSessionClose ( long sessionId , Map < Long , Object > responses ) { removeInvocationRefUids ( sessionId ) ; if ( owner != null && owner . sessionId ( ) == sessionId ) { ReleaseResult result = doRelease ( owner . endpoint ( ) , newUnsecureUUID ( ) , lockCount ) ; for ( LockInvocationKey key : result . completedWaitKeys ( ) ) { responses . put ( key . commitIndex ( ) , result . ownership ( ) . getFence ( ) ) ; } } } | Releases the lock if the current lock holder s session is closed . |
9,082 | protected Collection < Long > getActivelyAttachedSessions ( ) { return owner != null ? Collections . singleton ( owner . sessionId ( ) ) : Collections . < Long > emptyList ( ) ; } | Returns session id of the current lock holder or an empty collection if the lock is not held |
9,083 | private boolean containsKeyAndValue ( Object key ) { ReplicatedRecord replicatedRecord = getStorage ( ) . get ( marshall ( key ) ) ; return replicatedRecord != null && replicatedRecord . getValue ( ) != null ; } | IMPORTANT >> Increments hit counter |
9,084 | public Collection < Map . Entry < Address , List < Integer > > > getPartitions ( PartitionTableView partitionTableView ) { Map < Address , List < Integer > > partitionsMap = new HashMap < Address , List < Integer > > ( ) ; int partitionCount = partitionTableView . getLength ( ) ; for ( int partitionId = 0 ; partitionId < partitionCount ; partitionId ++ ) { PartitionReplica owner = partitionTableView . getReplica ( partitionId , 0 ) ; if ( owner == null ) { partitionsMap . clear ( ) ; return partitionsMap . entrySet ( ) ; } Address clientOwnerAddress = clientAddressOf ( owner . address ( ) ) ; if ( clientOwnerAddress == null ) { partitionsMap . clear ( ) ; return partitionsMap . entrySet ( ) ; } List < Integer > indexes = partitionsMap . get ( clientOwnerAddress ) ; if ( indexes == null ) { indexes = new LinkedList < Integer > ( ) ; partitionsMap . put ( clientOwnerAddress , indexes ) ; } indexes . add ( partitionId ) ; } return partitionsMap . entrySet ( ) ; } | If any partition does not have an owner this method returns empty collection |
9,085 | public final void migrateTo ( MemoryAllocator newMalloc ) { baseAddress = move ( baseAddress , capacity ( ) , malloc , newMalloc ) ; malloc = newMalloc ; auxMalloc = null ; } | Migrates the backing memory region to a new allocator freeing the current region . Memory allocated by the new allocator must be accessible by the same accessor as the current one . |
9,086 | protected final SlotAssignmentResult ensure0 ( long key1 , long key2 ) { assertValid ( ) ; final long size = size ( ) ; if ( size == expansionThreshold ( ) ) { resizeTo ( CapacityUtil . nextCapacity ( capacity ( ) ) ) ; } long slot = keyHash ( key1 , key2 ) & mask ( ) ; while ( isSlotAssigned ( slot ) ) { if ( equal ( key1OfSlot ( slot ) , key2OfSlot ( slot ) , key1 , key2 ) ) { slotAssignmentResult . setAddress ( valueAddrOfSlot ( slot ) ) ; slotAssignmentResult . setNew ( false ) ; return slotAssignmentResult ; } slot = ( slot + 1 ) & mask ( ) ; } setSize ( size + 1 ) ; putKey ( baseAddress , slot , key1 , key2 ) ; slotAssignmentResult . setAddress ( valueAddrOfSlot ( slot ) ) ; slotAssignmentResult . setNew ( true ) ; return slotAssignmentResult ; } | These protected final methods will be called from the subclasses |
9,087 | protected void resizeTo ( long newCapacity ) { final long oldCapacity = capacity ( ) ; final long oldAllocatedSize = HEADER_SIZE + oldCapacity * slotLength ; final MemoryAllocator oldMalloc ; final long oldAddress ; if ( auxMalloc != null ) { final long size = size ( ) ; oldAddress = move ( baseAddress , oldCapacity , malloc , auxMalloc ) ; oldMalloc = auxMalloc ; auxAllocateAndAdjustFields ( oldAddress , size , oldCapacity , newCapacity ) ; } else { oldMalloc = malloc ; oldAddress = baseAddress ; allocateArrayAndAdjustFields ( size ( ) , newCapacity ) ; } rehash ( oldCapacity , oldAddress ) ; oldMalloc . free ( oldAddress - HEADER_SIZE , oldAllocatedSize ) ; } | Allocates a new slot array with the requested size and moves all the assigned slots from the current array into the new one . |
9,088 | private long move ( long fromBaseAddress , long capacity , MemoryAllocator fromMalloc , MemoryAllocator toMalloc ) { final long allocatedSize = HEADER_SIZE + capacity * slotLength ; final long toBaseAddress = toMalloc . allocate ( allocatedSize ) + HEADER_SIZE ; mem . copyMemory ( fromBaseAddress - HEADER_SIZE , toBaseAddress - HEADER_SIZE , allocatedSize ) ; fromMalloc . free ( fromBaseAddress - HEADER_SIZE , allocatedSize ) ; return toBaseAddress ; } | Copies a block from one allocator to another then frees the source block . |
9,089 | public void addGetTimeNanos ( long duration ) { for ( ; ; ) { long nanos = getCacheTimeTakenNanos ; if ( nanos <= Long . MAX_VALUE - duration ) { if ( GET_CACHE_TIME_TAKEN_NANOS . compareAndSet ( this , nanos , nanos + duration ) ) { return ; } } else { if ( GET_CACHE_TIME_TAKEN_NANOS . compareAndSet ( this , nanos , duration ) ) { clear ( ) ; return ; } } } } | Increments the getCache time accumulator . |
9,090 | public void addPutTimeNanos ( long duration ) { for ( ; ; ) { long nanos = putTimeTakenNanos ; if ( nanos <= Long . MAX_VALUE - duration ) { if ( PUT_TIME_TAKEN_NANOS . compareAndSet ( this , nanos , nanos + duration ) ) { return ; } } else { if ( PUT_TIME_TAKEN_NANOS . compareAndSet ( this , nanos , duration ) ) { clear ( ) ; return ; } } } } | Increments the put time accumulator . |
9,091 | public void addRemoveTimeNanos ( long duration ) { for ( ; ; ) { long nanos = removeTimeTakenNanos ; if ( nanos <= Long . MAX_VALUE - duration ) { if ( REMOVE_TIME_TAKEN_NANOS . compareAndSet ( this , nanos , nanos + duration ) ) { return ; } } else { if ( REMOVE_TIME_TAKEN_NANOS . compareAndSet ( this , nanos , duration ) ) { clear ( ) ; return ; } } } } | Increments the remove time accumulator . |
9,092 | @ SuppressFBWarnings ( "NP_BOOLEAN_RETURN_NULL" ) static Boolean isObjectLayoutCompressedOopsOrNull ( ) { if ( ! UNSAFE_AVAILABLE ) { return null ; } Integer referenceSize = ReferenceSizeEstimator . getReferenceSizeOrNull ( ) ; if ( referenceSize == null ) { return null ; } return referenceSize != UNSAFE . addressSize ( ) ; } | Fallback when checking CompressedOopsEnabled . |
9,093 | private static PortablePosition navigateToPathTokenWithoutQuantifier ( PortableNavigatorContext ctx , PortablePathCursor path ) throws IOException { if ( path . isLastToken ( ) ) { return createPositionForReadAccess ( ctx , path ) ; } else { if ( ! navigateContextToNextPortableTokenFromPortableField ( ctx ) ) { return nilNotLeafPosition ( ) ; } } return null ; } | Token without quantifier . It means it s just a simple field not an array . |
9,094 | private static PortablePosition navigateToPathTokenWithAnyQuantifierInPortableArray ( PortableNavigatorContext ctx , PortablePathCursor path , NavigationFrame frame ) throws IOException { if ( frame == null ) { int len = getArrayLengthOfTheField ( ctx ) ; PortablePosition result = doValidateArrayLengthForAnyQuantifier ( len , path . isLastToken ( ) ) ; if ( result != null ) { return result ; } ctx . populateAnyNavigationFrames ( path . index ( ) , len ) ; int cellIndex = 0 ; result = doNavigateToPortableArrayCell ( ctx , path , cellIndex ) ; if ( result != null ) { return result ; } } else { PortablePosition result = doNavigateToPortableArrayCell ( ctx , path , frame . arrayIndex ) ; if ( result != null ) { return result ; } } return null ; } | navigation in PORTABLE array |
9,095 | private static PortablePosition navigateToPathTokenWithAnyQuantifierInPrimitiveArray ( PortableNavigatorContext ctx , PortablePathCursor path , NavigationFrame frame ) throws IOException { if ( frame == null ) { if ( path . isLastToken ( ) ) { int len = getArrayLengthOfTheField ( ctx ) ; PortablePosition result = doValidateArrayLengthForAnyQuantifier ( len , path . isLastToken ( ) ) ; if ( result != null ) { return result ; } ctx . populateAnyNavigationFrames ( path . index ( ) , len ) ; return createPositionForReadAccess ( ctx , path , 0 ) ; } throw createWrongUseOfAnyOperationException ( ctx , path . path ( ) ) ; } else { if ( path . isLastToken ( ) ) { return createPositionForReadAccess ( ctx , path , frame . arrayIndex ) ; } throw createWrongUseOfAnyOperationException ( ctx , path . path ( ) ) ; } } | navigation in PRIMITIVE array |
9,096 | private static boolean navigateContextToNextPortableTokenFromPortableField ( PortableNavigatorContext ctx ) throws IOException { BufferObjectDataInput in = ctx . getIn ( ) ; int pos = getStreamPositionOfTheField ( ctx ) ; in . position ( pos ) ; boolean isNull = in . readBoolean ( ) ; if ( isNull ) { return false ; } int factoryId = in . readInt ( ) ; int classId = in . readInt ( ) ; int versionId = in . readInt ( ) ; ctx . advanceContextToNextPortableToken ( factoryId , classId , versionId ) ; return true ; } | returns true if managed to advance false if advance failed due to null field |
9,097 | private static void navigateContextToNextPortableTokenFromPortableArrayCell ( PortableNavigatorContext ctx , PortablePathCursor path , int index ) throws IOException { BufferObjectDataInput in = ctx . getIn ( ) ; int pos = getStreamPositionOfTheField ( ctx ) ; in . position ( pos ) ; in . readInt ( ) ; int factoryId = in . readInt ( ) ; int classId = in . readInt ( ) ; validateFactoryAndClass ( ctx . getCurrentFieldDefinition ( ) , factoryId , classId , path . path ( ) ) ; final int cellOffset = in . position ( ) + index * Bits . INT_SIZE_IN_BYTES ; in . position ( cellOffset ) ; int portablePosition = in . readInt ( ) ; in . position ( portablePosition ) ; int versionId = in . readInt ( ) ; ctx . advanceContextToNextPortableToken ( factoryId , classId , versionId ) ; } | this navigation always succeeds since the caller validates if the index is inbound |
9,098 | private static PortablePosition createPositionForReadAccess ( PortableNavigatorContext ctx , PortablePathCursor path ) throws IOException { int notArrayCellAccessIndex = - 1 ; return createPositionForReadAccess ( ctx , path , notArrayCellAccessIndex ) ; } | Special case of the position creation where there s no quantifier so the index does not count . |
9,099 | public String handleCommand ( final String command ) throws InterruptedException { if ( lock . tryLock ( 1 , TimeUnit . SECONDS ) ) { try { return doHandleCommand ( command ) ; } finally { lock . unlock ( ) ; } } return "'" + command + "' execution is timed out!" ; } | Runs a command on the console . Will not run exit quit shutdown their upper - case or mixed case counterparts . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.