idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
21,600
public void publishExpired ( Cache < K , V > cache , K key , V value ) { publish ( cache , EventType . EXPIRED , key , value , null , false ) ; }
Publishes a expire event for the entry to all of the interested listeners .
21,601
public void awaitSynchronous ( ) { List < CompletableFuture < Void > > futures = pending . get ( ) ; if ( futures . isEmpty ( ) ) { return ; } try { CompletableFuture . allOf ( futures . toArray ( new CompletableFuture < ? > [ 0 ] ) ) . join ( ) ; } catch ( CompletionException e ) { logger . log ( Level . WARNING , null , e ) ; } finally { futures . clear ( ) ; } }
Blocks until all of the synchronous listeners have finished processing the events this thread published .
21,602
@ SuppressWarnings ( "PMD.LinguisticNaming" ) boolean setAt ( int item , int seedIndex ) { int hash = seeded ( item , seedIndex ) ; int index = hash >>> tableShift ; long previous = table [ index ] ; table [ index ] |= bitmask ( hash ) ; return ( table [ index ] != previous ) ; }
Sets the membership flag for the computed bit location .
21,603
static int seeded ( int item , int i ) { long hash = SEED [ i ] * item ; hash += hash >>> 32 ; return ( int ) hash ; }
Applies the independent hash function for the given seed index .
21,604
private void reads ( ) { int index = random . nextInt ( ) ; for ( ; ; ) { Integer key = ints [ index ++ & MASK ] ; cache . get ( key ) ; calls . increment ( ) ; } }
Spins forever reading from the cache .
21,605
private void writes ( ) { int index = random . nextInt ( ) ; for ( ; ; ) { Integer key = ints [ index ++ & MASK ] ; cache . put ( key , Boolean . TRUE ) ; calls . increment ( ) ; } }
Spins forever writing into the cache .
21,606
@ SuppressWarnings ( "unchecked" ) private Segment < K , V > segmentForHash ( int h ) { long u = ( ( ( h >>> segmentShift ) & segmentMask ) << SSHIFT ) + SBASE ; return ( Segment < K , V > ) UNSAFE . getObjectVolatile ( segments , u ) ; }
Get the segment for the given hash
21,607
@ SuppressWarnings ( "unchecked" ) static final < K , V > HashEntry < K , V > entryForHash ( Segment < K , V > seg , int h ) { HashEntry < K , V > [ ] tab ; return ( seg == null || ( tab = seg . table ) == null ) ? null : ( HashEntry < K , V > ) UNSAFE . getObjectVolatile ( tab , ( ( long ) ( ( ( tab . length - 1 ) & h ) ) << TSHIFT ) + TBASE ) ; }
Gets the table entry for the given segment and hash
21,608
private void onLirWarmupMiss ( Node node ) { node . moveToTop ( StackType . S ) ; node . status = Status . LIR ; sizeHot ++ ; }
Records a miss when the hot set is not full .
21,609
private void onHirWarmupMiss ( Node node ) { node . status = Status . HIR_RESIDENT ; node . moveToTop ( StackType . Q ) ; }
Records a miss when the cold set is not full .
21,610
private void onFullMiss ( Node node ) { node . status = Status . HIR_RESIDENT ; if ( residentSize >= maximumSize ) { evict ( ) ; } boolean isInStack = node . isInStack ( StackType . S ) ; node . moveToTop ( StackType . S ) ; if ( isInStack ) { node . status = Status . LIR ; sizeHot ++ ; Node bottom = headS . prevS ; checkState ( bottom . status == Status . LIR ) ; bottom . status = Status . HIR_RESIDENT ; bottom . removeFrom ( StackType . S ) ; bottom . moveToTop ( StackType . Q ) ; sizeHot -- ; pruneStack ( ) ; } else { node . moveToTop ( StackType . Q ) ; } }
Records a miss when the hot and cold set are full .
21,611
private void printLirs ( ) { System . out . println ( "** LIRS stack TOP **" ) ; for ( Node n = headS . nextS ; n != headS ; n = n . nextS ) { checkState ( n . isInS ) ; if ( n . status == Status . HIR_NON_RESIDENT ) { System . out . println ( "<NR> " + n . key ) ; } else if ( n . status == Status . HIR_RESIDENT ) { System . out . println ( "<RH> " + n . key ) ; } else { System . out . println ( "<RL> " + n . key ) ; } } System . out . println ( "** LIRS stack BOTTOM **" ) ; System . out . println ( "\n** LIRS queue END **" ) ; for ( Node n = headQ . nextQ ; n != headQ ; n = n . nextQ ) { checkState ( n . isInQ ) ; System . out . println ( n . key ) ; } System . out . println ( "** LIRS queue front **" ) ; System . out . println ( "\nLIRS EVICTED PAGE sequence:" ) ; for ( int i = 0 ; i < evicted . size ( ) ; i ++ ) { System . out . println ( "<" + i + "> " + evicted . get ( i ) ) ; } }
Prints out the internal state of the policy .
21,612
private void onMiss ( long key ) { Node node = new Node ( key ) ; node . type = QueueType . HOT ; node . appendToTail ( headHot ) ; data . put ( key , node ) ; sizeHot ++ ; if ( sizeHot > maxHot ) { Node demoted = headHot . next ; demoted . remove ( ) ; sizeHot -- ; demoted . appendToTail ( headCold ) ; demoted . type = QueueType . COLD ; sizeCold ++ ; evict ( ) ; } }
Adds the entry to the cache as HOT overflowing to the COLD queue and evicts if necessary .
21,613
protected Stream < String > lines ( ) throws IOException { InputStream input = readFiles ( ) ; Reader reader = new InputStreamReader ( input , UTF_8 ) ; return new BufferedReader ( reader ) . lines ( ) . map ( String :: trim ) . onClose ( ( ) -> Closeables . closeQuietly ( input ) ) ; }
Returns a stream of each line in the trace file .
21,614
private String getRequestUrl ( String line ) { int end = line . length ( ) - 2 ; while ( line . charAt ( end ) != ' ' ) { end -- ; } int start = end - 1 ; while ( line . charAt ( start ) != ' ' ) { start -- ; } return line . substring ( start + 1 , end ) ; }
Returns the request URL .
21,615
private String getPath ( String url ) { int index = url . indexOf ( '/' , 7 ) ; if ( index == - 1 ) { return url ; } String cleansed = url . substring ( index + 1 ) ; for ( int i = 0 ; i < SEARCH_LIST . length ; i ++ ) { cleansed = StringUtils . replace ( cleansed , SEARCH_LIST [ i ] , REPLACEMENT_LIST [ i ] ) ; } return cleansed ; }
Returns the path segment of the URL .
21,616
public boolean isAllowed ( String path ) { for ( String filter : STARTS_WITH_FILTER ) { if ( path . startsWith ( filter ) ) { return false ; } } for ( String filter : CONTAINS_FILTER ) { if ( path . contains ( filter ) ) { return false ; } } return true ; }
Returns if the path should be included . The request is ignored if it is a search query a page revision related to users or user management or talk pages .
21,617
private void addFieldAndGetter ( String varName ) { context . nodeSubtype . addField ( NODE , varName ) . addMethod ( newGetter ( Strength . STRONG , NODE , varName , Visibility . IMMEDIATE ) ) . addMethod ( newSetter ( NODE , varName , Visibility . IMMEDIATE ) ) ; }
Adds a simple field accessor and mutator for the variable .
21,618
public void print ( ) throws IOException { results . sort ( comparator ( ) ) ; String report = assemble ( results ) ; String output = settings . report ( ) . output ( ) ; if ( output . equalsIgnoreCase ( "console" ) ) { System . out . println ( report ) ; } else { Files . write ( Paths . get ( output ) , report . getBytes ( UTF_8 ) ) ; } }
Writes the report to the output destination .
21,619
private Comparator < PolicyStats > comparator ( ) { Comparator < PolicyStats > comparator = makeComparator ( ) ; return settings . report ( ) . ascending ( ) ? comparator : comparator . reversed ( ) ; }
Returns a comparator that sorts by the specified column .
21,620
private void addConstructorByKey ( ) { context . constructorByKey = MethodSpec . constructorBuilder ( ) . addParameter ( keySpec ) ; context . constructorByKey . addParameter ( keyRefQueueSpec ) ; addCommonParameters ( context . constructorByKey ) ; if ( isBaseClass ( ) ) { callSiblingConstructor ( ) ; } else { callParentByKey ( ) ; } }
Adds the constructor by key to the node type .
21,621
private void addConstructorByKeyRef ( ) { context . constructorByKeyRef = MethodSpec . constructorBuilder ( ) . addParameter ( keyRefSpec ) ; addCommonParameters ( context . constructorByKeyRef ) ; if ( isBaseClass ( ) ) { assignKeyRefAndValue ( ) ; } else { callParentByKeyRef ( ) ; } }
Adds the constructor by key reference to the node type .
21,622
private void onHit ( Node node ) { policyStats . recordHit ( ) ; int newCount = node . freq . count + 1 ; FrequencyNode freqN = ( node . freq . next . count == newCount ) ? node . freq . next : new FrequencyNode ( newCount , node . freq ) ; node . remove ( ) ; if ( node . freq . isEmpty ( ) ) { node . freq . remove ( ) ; } node . freq = freqN ; node . append ( ) ; }
Moves the entry to the next higher frequency list creating it if necessary .
21,623
private void onMiss ( long key ) { FrequencyNode freq1 = ( freq0 . next . count == 1 ) ? freq0 . next : new FrequencyNode ( 1 , freq0 ) ; Node node = new Node ( key , freq1 ) ; policyStats . recordMiss ( ) ; data . put ( key , node ) ; node . append ( ) ; evict ( node ) ; }
Adds the entry creating an initial frequency list of 1 if necessary and evicts if needed .
21,624
Node nextVictim ( Node candidate ) { if ( policy == EvictionPolicy . MFU ) { return freq0 . prev . nextNode . next ; } Node victim = freq0 . next . nextNode . next ; if ( victim == candidate ) { victim = ( victim . next == victim . prev ) ? victim . freq . next . nextNode . next : victim . next ; } return victim ; }
Returns the next victim excluding the newly added candidate . This exclusion is required so that a candidate has a fair chance to be used rather than always rejected due to existing entries having a high frequency from the distant past .
21,625
private void evictEntry ( Node node ) { data . remove ( node . key ) ; node . remove ( ) ; if ( node . freq . isEmpty ( ) ) { node . freq . remove ( ) ; } }
Removes the entry .
21,626
@ GuardedBy ( "evictionLock" ) void setMaximumSize ( long maximum ) { requireArgument ( maximum >= 0 ) ; if ( maximum == maximum ( ) ) { return ; } long max = Math . min ( maximum , MAXIMUM_CAPACITY ) ; long window = max - ( long ) ( PERCENT_MAIN * max ) ; long mainProtected = ( long ) ( PERCENT_MAIN_PROTECTED * ( max - window ) ) ; setMaximum ( max ) ; setWindowMaximum ( window ) ; setMainProtectedMaximum ( mainProtected ) ; setHitsInSample ( 0 ) ; setMissesInSample ( 0 ) ; setStepSize ( - HILL_CLIMBER_STEP_PERCENT * max ) ; if ( ( frequencySketch ( ) != null ) && ! isWeighted ( ) && ( weightedSize ( ) >= ( max >>> 1 ) ) ) { frequencySketch ( ) . ensureCapacity ( max ) ; } }
Sets the maximum weighted size of the cache . The caller may need to perform a maintenance cycle to eagerly evicts entries until the cache shrinks to the appropriate size .
21,627
@ GuardedBy ( "evictionLock" ) int evictFromWindow ( ) { int candidates = 0 ; Node < K , V > node = accessOrderWindowDeque ( ) . peek ( ) ; while ( windowWeightedSize ( ) > windowMaximum ( ) ) { if ( node == null ) { break ; } Node < K , V > next = node . getNextInAccessOrder ( ) ; if ( node . getWeight ( ) != 0 ) { node . makeMainProbation ( ) ; accessOrderWindowDeque ( ) . remove ( node ) ; accessOrderProbationDeque ( ) . add ( node ) ; candidates ++ ; setWindowWeightedSize ( windowWeightedSize ( ) - node . getPolicyWeight ( ) ) ; } node = next ; } return candidates ; }
Evicts entries from the window space into the main space while the window size exceeds a maximum .
21,628
@ GuardedBy ( "evictionLock" ) boolean admit ( K candidateKey , K victimKey ) { int victimFreq = frequencySketch ( ) . frequency ( victimKey ) ; int candidateFreq = frequencySketch ( ) . frequency ( candidateKey ) ; if ( candidateFreq > victimFreq ) { return true ; } else if ( candidateFreq <= 5 ) { return false ; } int random = ThreadLocalRandom . current ( ) . nextInt ( ) ; return ( ( random & 127 ) == 0 ) ; }
Determines if the candidate should be accepted into the main space as determined by its frequency relative to the victim . A small amount of randomness is used to protect against hash collision attacks where the victim s frequency is artificially raised so that no new entries are admitted .
21,629
@ GuardedBy ( "evictionLock" ) void expireEntries ( ) { long now = expirationTicker ( ) . read ( ) ; expireAfterAccessEntries ( now ) ; expireAfterWriteEntries ( now ) ; expireVariableEntries ( now ) ; }
Expires entries that have expired by access write or variable .
21,630
@ GuardedBy ( "evictionLock" ) void expireAfterAccessEntries ( long now ) { if ( ! expiresAfterAccess ( ) ) { return ; } expireAfterAccessEntries ( accessOrderWindowDeque ( ) , now ) ; if ( evicts ( ) ) { expireAfterAccessEntries ( accessOrderProbationDeque ( ) , now ) ; expireAfterAccessEntries ( accessOrderProtectedDeque ( ) , now ) ; } }
Expires entries in the access - order queue .
21,631
@ GuardedBy ( "evictionLock" ) void expireAfterAccessEntries ( AccessOrderDeque < Node < K , V > > accessOrderDeque , long now ) { long duration = expiresAfterAccessNanos ( ) ; for ( ; ; ) { Node < K , V > node = accessOrderDeque . peekFirst ( ) ; if ( ( node == null ) || ( ( now - node . getAccessTime ( ) ) < duration ) ) { return ; } evictEntry ( node , RemovalCause . EXPIRED , now ) ; } }
Expires entries in an access - order queue .
21,632
@ GuardedBy ( "evictionLock" ) void expireAfterWriteEntries ( long now ) { if ( ! expiresAfterWrite ( ) ) { return ; } long duration = expiresAfterWriteNanos ( ) ; for ( ; ; ) { final Node < K , V > node = writeOrderDeque ( ) . peekFirst ( ) ; if ( ( node == null ) || ( ( now - node . getWriteTime ( ) ) < duration ) ) { break ; } evictEntry ( node , RemovalCause . EXPIRED , now ) ; } }
Expires entries on the write - order queue .
21,633
@ SuppressWarnings ( "ShortCircuitBoolean" ) boolean hasExpired ( Node < K , V > node , long now ) { return ( expiresAfterAccess ( ) && ( now - node . getAccessTime ( ) >= expiresAfterAccessNanos ( ) ) ) | ( expiresAfterWrite ( ) && ( now - node . getWriteTime ( ) >= expiresAfterWriteNanos ( ) ) ) | ( expiresVariable ( ) && ( now - node . getVariableTime ( ) >= 0 ) ) ; }
Returns if the entry has expired .
21,634
@ GuardedBy ( "evictionLock" ) void decreaseWindow ( ) { if ( windowMaximum ( ) <= 1 ) { return ; } long quota = Math . min ( - adjustment ( ) , Math . max ( 0 , windowMaximum ( ) - 1 ) ) ; setMainProtectedMaximum ( mainProtectedMaximum ( ) + quota ) ; setWindowMaximum ( windowMaximum ( ) - quota ) ; for ( int i = 0 ; i < QUEUE_TRANSFER_THRESHOLD ; i ++ ) { Node < K , V > candidate = accessOrderWindowDeque ( ) . peek ( ) ; if ( candidate == null ) { break ; } int weight = candidate . getPolicyWeight ( ) ; if ( quota < weight ) { break ; } quota -= weight ; setMainProtectedWeightedSize ( mainProtectedWeightedSize ( ) + weight ) ; setWindowWeightedSize ( windowWeightedSize ( ) - weight ) ; accessOrderWindowDeque ( ) . remove ( candidate ) ; accessOrderProbationDeque ( ) . add ( candidate ) ; candidate . makeMainProbation ( ) ; } setMainProtectedMaximum ( mainProtectedMaximum ( ) - quota ) ; setWindowMaximum ( windowMaximum ( ) + quota ) ; setAdjustment ( - quota ) ; }
Decreases the size of the admission window and increases the main s protected region .
21,635
@ GuardedBy ( "evictionLock" ) void demoteFromMainProtected ( ) { long mainProtectedMaximum = mainProtectedMaximum ( ) ; long mainProtectedWeightedSize = mainProtectedWeightedSize ( ) ; if ( mainProtectedWeightedSize <= mainProtectedMaximum ) { return ; } for ( int i = 0 ; i < QUEUE_TRANSFER_THRESHOLD ; i ++ ) { if ( mainProtectedWeightedSize <= mainProtectedMaximum ) { break ; } Node < K , V > demoted = accessOrderProtectedDeque ( ) . poll ( ) ; if ( demoted == null ) { break ; } demoted . makeMainProbation ( ) ; accessOrderProbationDeque ( ) . add ( demoted ) ; mainProtectedWeightedSize -= demoted . getPolicyWeight ( ) ; } setMainProtectedWeightedSize ( mainProtectedWeightedSize ) ; }
Transfers the nodes from the protected to the probation region if it exceeds the maximum .
21,636
@ SuppressWarnings ( "FutureReturnValueIgnored" ) void refreshIfNeeded ( Node < K , V > node , long now ) { if ( ! refreshAfterWrite ( ) ) { return ; } K key ; V oldValue ; long oldWriteTime = node . getWriteTime ( ) ; long refreshWriteTime = ( now + ASYNC_EXPIRY ) ; if ( ( ( now - oldWriteTime ) > refreshAfterWriteNanos ( ) ) && ( ( key = node . getKey ( ) ) != null ) && ( ( oldValue = node . getValue ( ) ) != null ) && node . casWriteTime ( oldWriteTime , refreshWriteTime ) ) { try { CompletableFuture < V > refreshFuture ; long startTime = statsTicker ( ) . read ( ) ; if ( isAsync ) { @ SuppressWarnings ( "unchecked" ) CompletableFuture < V > future = ( CompletableFuture < V > ) oldValue ; if ( Async . isReady ( future ) ) { @ SuppressWarnings ( "NullAway" ) CompletableFuture < V > refresh = future . thenCompose ( value -> cacheLoader . asyncReload ( key , value , executor ) ) ; refreshFuture = refresh ; } else { node . casWriteTime ( refreshWriteTime , oldWriteTime ) ; return ; } } else { @ SuppressWarnings ( "NullAway" ) CompletableFuture < V > refresh = cacheLoader . asyncReload ( key , oldValue , executor ) ; refreshFuture = refresh ; } refreshFuture . whenComplete ( ( newValue , error ) -> { long loadTime = statsTicker ( ) . read ( ) - startTime ; if ( error != null ) { logger . log ( Level . WARNING , "Exception thrown during refresh" , error ) ; node . casWriteTime ( refreshWriteTime , oldWriteTime ) ; statsCounter ( ) . recordLoadFailure ( loadTime ) ; return ; } @ SuppressWarnings ( "unchecked" ) V value = ( isAsync && ( newValue != null ) ) ? ( V ) refreshFuture : newValue ; boolean [ ] discard = new boolean [ 1 ] ; compute ( key , ( k , currentValue ) -> { if ( currentValue == null ) { return value ; } else if ( ( currentValue == oldValue ) && ( node . 
getWriteTime ( ) == refreshWriteTime ) ) { return value ; } discard [ 0 ] = true ; return currentValue ; } , false , false , true ) ; if ( discard [ 0 ] && hasRemovalListener ( ) ) { notifyRemoval ( key , value , RemovalCause . REPLACED ) ; } if ( newValue == null ) { statsCounter ( ) . recordLoadFailure ( loadTime ) ; } else { statsCounter ( ) . recordLoadSuccess ( loadTime ) ; } } ) ; } catch ( Throwable t ) { node . casWriteTime ( refreshWriteTime , oldWriteTime ) ; logger . log ( Level . SEVERE , "Exception thrown when submitting refresh task" , t ) ; } } }
Asynchronously refreshes the entry if eligible .
21,637
void scheduleAfterWrite ( ) { for ( ; ; ) { switch ( drainStatus ( ) ) { case IDLE : casDrainStatus ( IDLE , REQUIRED ) ; scheduleDrainBuffers ( ) ; return ; case REQUIRED : scheduleDrainBuffers ( ) ; return ; case PROCESSING_TO_IDLE : if ( casDrainStatus ( PROCESSING_TO_IDLE , PROCESSING_TO_REQUIRED ) ) { return ; } continue ; case PROCESSING_TO_REQUIRED : return ; default : throw new IllegalStateException ( ) ; } } }
Conditionally schedules the asynchronous maintenance task after a write operation . If the task status was IDLE or REQUIRED then the maintenance task is scheduled immediately . If it is already processing then it is set to transition to REQUIRED upon completion so that a new execution is triggered by the next operation .
21,638
void scheduleDrainBuffers ( ) { if ( drainStatus ( ) >= PROCESSING_TO_IDLE ) { return ; } if ( evictionLock . tryLock ( ) ) { try { int drainStatus = drainStatus ( ) ; if ( drainStatus >= PROCESSING_TO_IDLE ) { return ; } lazySetDrainStatus ( PROCESSING_TO_IDLE ) ; executor ( ) . execute ( drainBuffersTask ) ; } catch ( Throwable t ) { logger . log ( Level . WARNING , "Exception thrown when submitting maintenance task" , t ) ; maintenance ( null ) ; } finally { evictionLock . unlock ( ) ; } } }
Attempts to schedule an asynchronous task to apply the pending operations to the page replacement policy . If the executor rejects the task then it is run directly .
21,639
@ GuardedBy ( "evictionLock" ) void drainKeyReferences ( ) { if ( ! collectKeys ( ) ) { return ; } Reference < ? extends K > keyRef ; while ( ( keyRef = keyReferenceQueue ( ) . poll ( ) ) != null ) { Node < K , V > node = data . get ( keyRef ) ; if ( node != null ) { evictEntry ( node , RemovalCause . COLLECTED , 0L ) ; } } }
Drains the weak key references queue .
21,640
@ GuardedBy ( "evictionLock" ) void onAccess ( Node < K , V > node ) { if ( evicts ( ) ) { K key = node . getKey ( ) ; if ( key == null ) { return ; } frequencySketch ( ) . increment ( key ) ; if ( node . inWindow ( ) ) { reorder ( accessOrderWindowDeque ( ) , node ) ; } else if ( node . inMainProbation ( ) ) { reorderProbation ( node ) ; } else { reorder ( accessOrderProtectedDeque ( ) , node ) ; } setHitsInSample ( hitsInSample ( ) + 1 ) ; } else if ( expiresAfterAccess ( ) ) { reorder ( accessOrderWindowDeque ( ) , node ) ; } if ( expiresVariable ( ) ) { timerWheel ( ) . reschedule ( node ) ; } }
Updates the node s location in the page replacement policy .
21,641
@ GuardedBy ( "evictionLock" ) void reorderProbation ( Node < K , V > node ) { if ( ! accessOrderProbationDeque ( ) . contains ( node ) ) { return ; } else if ( node . getPolicyWeight ( ) > mainProtectedMaximum ( ) ) { return ; } setMainProtectedWeightedSize ( mainProtectedWeightedSize ( ) + node . getPolicyWeight ( ) ) ; accessOrderProbationDeque ( ) . remove ( node ) ; accessOrderProtectedDeque ( ) . add ( node ) ; node . makeMainProtected ( ) ; }
Promote the node from probation to protected on an access .
21,642
static < K , V > void reorder ( LinkedDeque < Node < K , V > > deque , Node < K , V > node ) { if ( deque . contains ( node ) ) { deque . moveToBack ( node ) ; } }
Updates the node s location in the policy s deque .
21,643
@ GuardedBy ( "evictionLock" ) void drainWriteBuffer ( ) { if ( ! buffersWrites ( ) ) { return ; } for ( int i = 0 ; i < WRITE_BUFFER_MAX ; i ++ ) { Runnable task = writeBuffer ( ) . poll ( ) ; if ( task == null ) { return ; } task . run ( ) ; } lazySetDrainStatus ( PROCESSING_TO_REQUIRED ) ; }
Drains the write buffer .
21,644
static < K , V > SerializationProxy < K , V > makeSerializationProxy ( BoundedLocalCache < ? , ? > cache , boolean isWeighted ) { SerializationProxy < K , V > proxy = new SerializationProxy < > ( ) ; proxy . weakKeys = cache . collectKeys ( ) ; proxy . weakValues = cache . nodeFactory . weakValues ( ) ; proxy . softValues = cache . nodeFactory . softValues ( ) ; proxy . isRecordingStats = cache . isRecordingStats ( ) ; proxy . removalListener = cache . removalListener ( ) ; proxy . ticker = cache . expirationTicker ( ) ; proxy . writer = cache . writer ; if ( cache . expiresAfterAccess ( ) ) { proxy . expiresAfterAccessNanos = cache . expiresAfterAccessNanos ( ) ; } if ( cache . expiresAfterWrite ( ) ) { proxy . expiresAfterWriteNanos = cache . expiresAfterWriteNanos ( ) ; } if ( cache . expiresVariable ( ) ) { proxy . expiry = cache . expiry ( ) ; } if ( cache . evicts ( ) ) { if ( isWeighted ) { proxy . weigher = cache . weigher ; proxy . maximumWeight = cache . maximum ( ) ; } else { proxy . maximumSize = cache . maximum ( ) ; } } return proxy ; }
Creates a serialization proxy based on the common configuration shared by all cache types .
21,645
public void advance ( long currentTimeNanos ) { long previousTimeNanos = nanos ; try { nanos = currentTimeNanos ; for ( int i = 0 ; i < SHIFT . length ; i ++ ) { long previousTicks = ( previousTimeNanos >> SHIFT [ i ] ) ; long currentTicks = ( currentTimeNanos >> SHIFT [ i ] ) ; if ( ( currentTicks - previousTicks ) <= 0L ) { break ; } expire ( i , previousTicks , currentTicks ) ; } } catch ( Throwable t ) { nanos = previousTimeNanos ; throw t ; } }
Advances the timer and evicts entries that have expired .
21,646
void expire ( int index , long previousTicks , long currentTicks ) { Node < K , V > [ ] timerWheel = wheel [ index ] ; int start , end ; if ( ( currentTicks - previousTicks ) >= timerWheel . length ) { end = timerWheel . length ; start = 0 ; } else { long mask = SPANS [ index ] - 1 ; start = ( int ) ( previousTicks & mask ) ; end = 1 + ( int ) ( currentTicks & mask ) ; } int mask = timerWheel . length - 1 ; for ( int i = start ; i < end ; i ++ ) { Node < K , V > sentinel = timerWheel [ ( i & mask ) ] ; Node < K , V > prev = sentinel . getPreviousInVariableOrder ( ) ; Node < K , V > node = sentinel . getNextInVariableOrder ( ) ; sentinel . setPreviousInVariableOrder ( sentinel ) ; sentinel . setNextInVariableOrder ( sentinel ) ; while ( node != sentinel ) { Node < K , V > next = node . getNextInVariableOrder ( ) ; node . setPreviousInVariableOrder ( null ) ; node . setNextInVariableOrder ( null ) ; try { if ( ( ( node . getVariableTime ( ) - nanos ) > 0 ) || ! cache . evictEntry ( node , RemovalCause . EXPIRED , nanos ) ) { Node < K , V > newSentinel = findBucket ( node . getVariableTime ( ) ) ; link ( newSentinel , node ) ; } node = next ; } catch ( Throwable t ) { node . setPreviousInVariableOrder ( sentinel . getPreviousInVariableOrder ( ) ) ; node . setNextInVariableOrder ( next ) ; sentinel . getPreviousInVariableOrder ( ) . setNextInVariableOrder ( node ) ; sentinel . setPreviousInVariableOrder ( prev ) ; throw t ; } } } }
Expires entries or reschedules into the proper bucket if still active .
21,647
Node < K , V > findBucket ( long time ) { long duration = time - nanos ; int length = wheel . length - 1 ; for ( int i = 0 ; i < length ; i ++ ) { if ( duration < SPANS [ i + 1 ] ) { int ticks = ( int ) ( time >> SHIFT [ i ] ) ; int index = ticks & ( wheel [ i ] . length - 1 ) ; return wheel [ i ] [ index ] ; } } return wheel [ length ] [ 0 ] ; }
Determines the bucket that the timer event should be added to .
21,648
void link ( Node < K , V > sentinel , Node < K , V > node ) { node . setPreviousInVariableOrder ( sentinel . getPreviousInVariableOrder ( ) ) ; node . setNextInVariableOrder ( sentinel ) ; sentinel . getPreviousInVariableOrder ( ) . setNextInVariableOrder ( node ) ; sentinel . setPreviousInVariableOrder ( node ) ; }
Adds the entry at the tail of the bucket s list .
21,649
void unlink ( Node < K , V > node ) { Node < K , V > next = node . getNextInVariableOrder ( ) ; if ( next != null ) { Node < K , V > prev = node . getPreviousInVariableOrder ( ) ; next . setPreviousInVariableOrder ( prev ) ; prev . setNextInVariableOrder ( next ) ; } }
Removes the entry from its bucket if scheduled .
21,650
private void process ( long key ) { IntPriorityQueue times = accessTimes . get ( key ) ; int lastAccess = times . dequeueInt ( ) ; boolean found = data . remove ( lastAccess ) ; if ( times . isEmpty ( ) ) { data . add ( infiniteTimestamp -- ) ; accessTimes . remove ( key ) ; } else { data . add ( times . firstInt ( ) ) ; } if ( found ) { policyStats . recordHit ( ) ; } else { policyStats . recordMiss ( ) ; if ( data . size ( ) > maximumSize ) { evict ( ) ; } } }
Performs the cache operations for the given key .
21,651
void parseOption ( String option ) { if ( option . isEmpty ( ) ) { return ; } @ SuppressWarnings ( "StringSplitter" ) String [ ] keyAndValue = option . split ( SPLIT_KEY_VALUE ) ; requireArgument ( keyAndValue . length <= 2 , "key-value pair %s with more than one equals sign" , option ) ; String key = keyAndValue [ 0 ] . trim ( ) ; String value = ( keyAndValue . length == 1 ) ? null : keyAndValue [ 1 ] . trim ( ) ; configure ( key , value ) ; }
Parses and applies the configuration option .
21,652
public static Set < String > cacheNames ( Config config ) { return config . hasPath ( "caffeine.jcache" ) ? Collections . unmodifiableSet ( config . getObject ( "caffeine.jcache" ) . keySet ( ) ) : Collections . emptySet ( ) ; }
Retrieves the names of the caches defined in the configuration resource .
21,653
public static < K , V > CaffeineConfiguration < K , V > defaults ( Config config ) { return new Configurator < K , V > ( config , "default" ) . configure ( ) ; }
Retrieves the default cache settings from the configuration resource .
21,654
public static < K , V > Optional < CaffeineConfiguration < K , V > > from ( Config config , String cacheName ) { CaffeineConfiguration < K , V > configuration = null ; try { if ( config . hasPath ( "caffeine.jcache." + cacheName ) ) { configuration = new Configurator < K , V > ( config , cacheName ) . configure ( ) ; } } catch ( ConfigException . BadPath e ) { logger . log ( Level . WARNING , "Failed to load cache configuration" , e ) ; } return Optional . ofNullable ( configuration ) ; }
Retrieves the cache s settings from the configuration resource if defined .
21,655
void linkFirst ( final E e ) { final E f = first ; first = e ; if ( f == null ) { last = e ; } else { setPrevious ( f , e ) ; setNext ( e , f ) ; } }
Links the element to the front of the deque so that it becomes the first element .
21,656
protected void tryReset ( boolean added ) { additions += step ; if ( ! added ) { return ; } if ( additions < period ) { return ; } int count = 0 ; for ( int i = 0 ; i < table . length ; i ++ ) { count += Long . bitCount ( table [ i ] & ONE_MASK ) ; table [ i ] = ( table [ i ] >>> 1 ) & RESET_MASK ; } additions = ( additions >>> 1 ) - ( count >>> 2 ) ; doorkeeper . clear ( ) ; }
Reduces every counter by half of its original value . To reduce the truncation error the sample is reduced by the number of counters with an odd value .
21,657
@ SuppressWarnings ( "PMD.AvoidDeeplyNestedIfStmts" ) private V getOrLoad ( K key ) { boolean statsEnabled = statistics . isEnabled ( ) ; long start = statsEnabled ? ticker . read ( ) : 0L ; long millis = 0L ; Expirable < V > expirable = cache . getIfPresent ( key ) ; if ( ( expirable != null ) && ! expirable . isEternal ( ) ) { millis = nanosToMillis ( ( start == 0L ) ? ticker . read ( ) : start ) ; if ( expirable . hasExpired ( millis ) ) { Expirable < V > expired = expirable ; cache . asMap ( ) . computeIfPresent ( key , ( k , e ) -> { if ( e == expired ) { dispatcher . publishExpired ( this , key , expired . get ( ) ) ; statistics . recordEvictions ( 1 ) ; return null ; } return e ; } ) ; expirable = null ; } } if ( expirable == null ) { expirable = cache . get ( key ) ; statistics . recordMisses ( 1L ) ; } else { statistics . recordHits ( 1L ) ; } V value = null ; if ( expirable != null ) { setAccessExpirationTime ( expirable , millis ) ; value = copyValue ( expirable ) ; } if ( statsEnabled ) { statistics . recordGetTime ( ticker . read ( ) - start ) ; } return value ; }
Retrieves the value from the cache loading it if necessary .
21,658
@ SuppressWarnings ( "PMD.AvoidCatchingNPE" ) private Map < K , V > getAll ( Set < ? extends K > keys , boolean updateAccessTime ) { requireNotClosed ( ) ; boolean statsEnabled = statistics . isEnabled ( ) ; long start = statsEnabled ? ticker . read ( ) : 0L ; try { Map < K , Expirable < V > > entries = getAndFilterExpiredEntries ( keys , updateAccessTime ) ; if ( entries . size ( ) != keys . size ( ) ) { List < K > keysToLoad = keys . stream ( ) . filter ( key -> ! entries . containsKey ( key ) ) . collect ( Collectors . < K > toList ( ) ) ; entries . putAll ( cache . getAll ( keysToLoad ) ) ; } Map < K , V > result = copyMap ( entries ) ; if ( statsEnabled ) { statistics . recordGetTime ( ticker . read ( ) - start ) ; } return result ; } catch ( NullPointerException | IllegalStateException | ClassCastException | CacheException e ) { throw e ; } catch ( RuntimeException e ) { throw new CacheException ( e ) ; } finally { dispatcher . awaitSynchronous ( ) ; } }
Returns the entries loading if necessary and optionally updates their access expiry time .
21,659
private void addTimeConstructorAssignment ( MethodSpec . Builder constructor , String field ) { constructor . addStatement ( "$T.UNSAFE.putLong(this, $N, $N)" , UNSAFE_ACCESS , offsetName ( field ) , "now" ) ; }
Adds a long constructor assignment .
21,660
private static String encode ( String className ) { return Feature . makeEnumName ( className ) . replaceFirst ( "STRONG_KEYS" , "P" ) . replaceFirst ( "WEAK_KEYS" , "F" ) . replaceFirst ( "_STRONG_VALUES" , "S" ) . replaceFirst ( "_WEAK_VALUES" , "W" ) . replaceFirst ( "_SOFT_VALUES" , "D" ) . replaceFirst ( "_EXPIRE_ACCESS" , "A" ) . replaceFirst ( "_EXPIRE_WRITE" , "W" ) . replaceFirst ( "_REFRESH_WRITE" , "R" ) . replaceFirst ( "_MAXIMUM" , "M" ) . replaceFirst ( "_WEIGHT" , "W" ) . replaceFirst ( "_SIZE" , "S" ) ; }
Returns an encoded form of the class name for compact use .
21,661
/** Returns every policy instance configured for this simulation run. */
public static Set<Policy> policies(BasicSettings settings) {
  return settings.policies().stream()
      .flatMap(policyName -> policy(settings, policyName).stream())
      .collect(toSet());
}
Returns all of the policies that have been configured for simulation .
21,662
/** Returns all variations of the named policy; fails if the name is unregistered. */
public static Set<Policy> policy(BasicSettings settings, String name) {
  String lookupKey = name.toLowerCase(US);
  Function<Config, Set<Policy>> factory = FACTORIES.get(lookupKey);
  checkNotNull(factory, "%s not found", name);
  return factory.apply(settings.config());
}
Returns all of the policy variations that have been configured .
21,663
/**
 * Returns a sequence of events based on the configured synthetic distribution.
 *
 * @throws IllegalStateException if the distribution name is not recognized
 */
public static LongStream generate(BasicSettings settings) {
  int events = settings.synthetic().events();
  switch (settings.synthetic().distribution().toLowerCase(US)) {
    case "counter":
      return counter(settings.synthetic().counter().start(), events);
    case "exponential":
      return exponential(settings.synthetic().exponential().mean(), events);
    case "hotspot":
      HotspotSettings hotspot = settings.synthetic().hotspot();
      // FIX: hotspot(..) is declared as (lowerBound, upperBound, hotsetFraction,
      // hotOpnFraction, events); the two fractions were previously passed swapped.
      return Synthetic.hotspot(hotspot.lowerBound(), hotspot.upperBound(),
          hotspot.hotsetFraction(), hotspot.hotOpnFraction(), events);
    case "zipfian":
      return zipfian(settings.synthetic().zipfian().items(),
          settings.synthetic().zipfian().constant(), events);
    case "scrambled-zipfian":
      return scrambledZipfian(settings.synthetic().zipfian().items(),
          settings.synthetic().zipfian().constant(), events);
    case "skewed-zipfian-latest":
      return skewedZipfianLatest(settings.synthetic().zipfian().items(), events);
    case "uniform":
      UniformSettings uniform = settings.synthetic().uniform();
      return uniform(uniform.lowerBound(), uniform.upperBound(), events);
    default:
      throw new IllegalStateException(
          "Unknown distribution: " + settings.synthetic().distribution());
  }
}
Returns a sequence of events based on the setting s distribution .
21,664
/**
 * Returns events following a hotspot distribution, where a fraction of the
 * operations target the hot subset of the key range. Hot items occupy the low
 * end of the interval; both sets are sampled uniformly.
 */
public static LongStream hotspot(int lowerBound, int upperBound,
    double hotsetFraction, double hotOpnFraction, int events) {
  NumberGenerator generator =
      new HotspotIntegerGenerator(lowerBound, upperBound, hotsetFraction, hotOpnFraction);
  return generate(generator, events);
}
Returns a sequence of events resembling a hotspot distribution, where x% of operations access y% of data items. The parameters specify the bounds for the numbers, the percentage of the interval which comprises the hot set, and the percentage of operations that access the hot set. Numbers of the hot set are always smaller than any number in the cold set. Elements from the hot set and the cold set are chosen using a uniform distribution.
21,665
/** Returns events where item popularity follows a zipfian distribution. */
public static LongStream zipfian(int items, double constant, int events) {
  NumberGenerator generator = new ZipfianGenerator(items, constant);
  return generate(generator, events);
}
Returns a sequence of events where some items are more popular than others according to a zipfian distribution .
21,666
/** Returns events drawn uniformly at random from the inclusive interval. */
public static LongStream uniform(int lowerBound, int upperBound, int events) {
  NumberGenerator generator = new UniformLongGenerator(lowerBound, upperBound);
  return generate(generator, events);
}
Returns a sequence of events where items are selected uniformly randomly from the interval inclusively .
21,667
/** Returns a stream of {@code count} values produced by the generator. */
private static LongStream generate(NumberGenerator generator, long count) {
  return LongStream.range(0, count)
      .map(unused -> generator.nextValue().longValue());
}
Returns a sequence of items constructed by the generator .
21,668
/** Inserts the entry at the tail, recording it {@code gain} times, then evicts if needed. */
private void onMiss(long key) {
  for (int count = 0; count < gain; count++) {
    admittor.record(key);
  }
  Node node = new Node(key);
  node.appendToTail(head);
  data.put(key, node);
  evict(node);
}
Adds the entry evicting if necessary .
21,669
/**
 * When over capacity, pits the candidate against the LRU victim: the victim is
 * evicted if the candidate is admitted (or the policy adapts); otherwise the
 * candidate itself is evicted and recorded as feedback.
 */
private void evict(Node candidate) {
  if (data.size() <= maximumSize) {
    return;
  }
  Node victim = head.next;
  Node loser;
  // adapt(..) is only consulted when admission is denied (short-circuit).
  if (admittor.admit(candidate.key, victim.key) || adapt(candidate)) {
    loser = victim;
  } else {
    loser = candidate;
    feedback.put(candidate.key);
  }
  data.remove(loser.key);
  loser.remove();
  policyStats.recordEviction();
}
If the size exceeds the maximum then the candidate and victim are evaluated and one is evicted .
21,670
/** Returns the name of the offset constant for the given camelCase variable. */
public static String offsetName(String varName) {
  String constant = CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, varName);
  return constant + "_OFFSET";
}
Returns the offset constant to this variable .
21,671
/** Creates a protected static final field holding the Unsafe address offset of the variable. */
public static FieldSpec newFieldOffset(String className, String varName) {
  String constantName = CaseFormat.LOWER_CAMEL.to(CaseFormat.UPPER_UNDERSCORE, varName);
  FieldSpec.Builder builder = FieldSpec.builder(long.class, offsetName(varName),
      Modifier.PROTECTED, Modifier.STATIC, Modifier.FINAL);
  builder.initializer("$T.objectFieldOffset($T.class, $L.$L)",
      UNSAFE_ACCESS, ClassName.bestGuess(className), LOCAL_CACHE_FACTORY, constantName);
  return builder.build();
}
Creates a protected static final field holding an Unsafe address offset.
21,672
/**
 * Returns a newly created cache if a definition for the name exists in the
 * external settings file, or {@code null} otherwise.
 */
public <K, V> CacheProxy<K, V> tryToCreateFromExternalSettings(String cacheName) {
  Optional<CaffeineConfiguration<K, V>> configuration =
      TypesafeConfigurator.from(rootConfig, cacheName);
  return configuration
      .map(config -> createCache(cacheName, config))
      .orElse(null);
}
Returns a newly created cache instance if a definition is found in the external settings file .
21,673
/** Builds a cache from the given configuration, resolved against the defaults. */
public <K, V> CacheProxy<K, V> createCache(String cacheName, Configuration<K, V> configuration) {
  CaffeineConfiguration<K, V> resolved = resolveConfigurationFor(configuration);
  return new Builder<>(cacheName, resolved).build();
}
Returns a fully constructed cache based on the cache configuration.
21,674
/**
 * Copies the given configuration and overlays it on top of the default settings.
 * A CaffeineConfiguration is copied directly; a CompleteConfiguration has its
 * settings merged into the defaults; otherwise only the basic settings apply.
 */
@SuppressWarnings("PMD.AccessorMethodGeneration")
private <K, V> CaffeineConfiguration<K, V> resolveConfigurationFor(
    Configuration<K, V> configuration) {
  if (configuration instanceof CaffeineConfiguration<?, ?>) {
    return new CaffeineConfiguration<>((CaffeineConfiguration<K, V>) configuration);
  }
  CaffeineConfiguration<K, V> merged = TypesafeConfigurator.defaults(rootConfig);
  if (configuration instanceof CompleteConfiguration<?, ?>) {
    CompleteConfiguration<K, V> complete = (CompleteConfiguration<K, V>) configuration;
    merged.setReadThrough(complete.isReadThrough());
    merged.setWriteThrough(complete.isWriteThrough());
    merged.setManagementEnabled(complete.isManagementEnabled());
    merged.setStatisticsEnabled(complete.isStatisticsEnabled());
    // Replace the template's listeners with the ones supplied by the caller.
    merged.getCacheEntryListenerConfigurations()
        .forEach(merged::removeCacheEntryListenerConfiguration);
    complete.getCacheEntryListenerConfigurations()
        .forEach(merged::addCacheEntryListenerConfiguration);
    merged.setCacheLoaderFactory(complete.getCacheLoaderFactory());
    merged.setCacheWriterFactory(complete.getCacheWriterFactory());
    merged.setExpiryPolicyFactory(complete.getExpiryPolicyFactory());
  }
  merged.setTypes(configuration.getKeyType(), configuration.getValueType());
  merged.setStoreByValue(configuration.isStoreByValue());
  return merged;
}
Copies the configuration and overlays it on top of the default settings .
21,675
/** Streams the trace events to the policy actors in fixed-size batches. */
private void broadcast() {
  try (LongStream events = eventStream()) {
    LongArrayList batch = new LongArrayList(batchSize);
    PrimitiveIterator.OfLong iterator = events.iterator();
    while (iterator.hasNext()) {
      batch.add(iterator.nextLong());
      if (batch.size() == batchSize) {
        router.route(batch, self());
        batch = new LongArrayList(batchSize);
      }
    }
    // Flush the final partial batch, then signal completion.
    router.route(batch, self());
    router.route(FINISH, self());
  } catch (Exception e) {
    context().system().log().error(e, "");
    context().stop(self());
  }
}
Broadcast the trace events to all of the policy actors .
21,676
/** Creates and watches one policy actor per configured policy, returning their routees. */
private List<Routee> makeRoutes() {
  return Registry.policies(settings).stream()
      .map(policy -> {
        ActorRef actor = context().actorOf(Props.create(PolicyActor.class, policy));
        context().watch(actor);
        return new ActorRefRoutee(actor);
      })
      .collect(toList());
}
Returns the actors to broadcast trace events to .
21,677
/** Accumulates the stats; when the last policy reports, prints results and stops. */
private void reportStats(PolicyStats stats) throws IOException {
  reporter.add(stats);
  remaining--;
  if (remaining == 0) {
    reporter.print();
    context().stop(self());
    System.out.println("Executed in " + stopwatch);
  }
}
Adds the stats to the reporter, prints the results once all policies have reported, and stops the simulator.
21,678
/**
 * Returns whether the active network is Wi-Fi.
 * Requires the caller to hold the ACCESS_NETWORK_STATE permission.
 */
public static boolean isWifiConnected(Context context) {
  ConnectivityManager manager =
      (ConnectivityManager) context.getSystemService(Context.CONNECTIVITY_SERVICE);
  NetworkInfo info = manager.getActiveNetworkInfo();
  if (info == null) {
    return false;
  }
  return info.getType() == ConnectivityManager.TYPE_WIFI;
}
This method requires the caller to hold the permission ACCESS_NETWORK_STATE .
21,679
/** Unwraps a context chain to find the hosting Activity, or null if none is found. */
public static Activity scanForActivity(Context context) {
  Context current = context;
  while (current != null) {
    if (current instanceof Activity) {
      return (Activity) current;
    }
    if (!(current instanceof ContextWrapper)) {
      return null;
    }
    current = ((ContextWrapper) current).getBaseContext();
  }
  return null;
}
Get activity from context object
21,680
/** Unwraps a context chain to find the hosting AppCompatActivity, or null if none. */
public static AppCompatActivity getAppCompActivity(Context context) {
  Context current = context;
  while (current != null) {
    if (current instanceof AppCompatActivity) {
      return (AppCompatActivity) current;
    }
    if (!(current instanceof ContextThemeWrapper)) {
      return null;
    }
    current = ((ContextThemeWrapper) current).getBaseContext();
  }
  return null;
}
Get AppCompatActivity from context
21,681
/** Clears the saved playback progress: all entries when url is null, else just that url's. */
public static void clearSavedProgress(Context context, Object url) {
  SharedPreferences prefs =
      context.getSharedPreferences("JZVD_PROGRESS", Context.MODE_PRIVATE);
  if (url == null) {
    prefs.edit().clear().apply();
  } else {
    prefs.edit().putLong("newVersion:" + url.toString(), 0).apply();
  }
}
if url == null clear all progress
21,682
/**
 * Returns a copy of {@code src} truncated or zero-padded to {@code length} bytes.
 *
 * @param src the array to copy from
 * @param length the length of the returned array
 * @return a new array of exactly {@code length} bytes
 * @throws NegativeArraySizeException if {@code length} is negative
 */
public static byte[] copyOf(byte[] src, int length) {
  // Arrays.copyOf performs the identical truncate-or-zero-pad copy the manual
  // allocate-and-arraycopy did, using an intrinsic.
  return java.util.Arrays.copyOf(src, length);
}
byte array copy .
21,683
/** Logs the measurement details of each time window at debug level for the app. */
private void logMeasureResult(MeasureResult measureResult, long timeWindow,
    long leastWindowCount, double averageExceptionRate,
    double leastWindowExceptionRateMultiple) {
  if (measureResult == null) {
    return;
  }
  MeasureModel measureModel = measureResult.getMeasureModel();
  String appName = measureModel.getAppName();
  if (!LOGGER.isDebugEnabled(appName)) {
    return;
  }
  String service = measureModel.getService();
  List<InvocationStat> stats = measureModel.getInvocationStats();
  List<MeasureResultDetail> details = measureResult.getAllMeasureResultDetails();

  StringBuilder message = new StringBuilder();
  message.append("measure info: service[" + service + "];stats{");
  for (InvocationStat stat : stats) {
    message.append(stat.getDimension().getIp());
    message.append(",");
  }
  if (stats.size() > 0) {
    // Drop the trailing comma from the ip list.
    message.deleteCharAt(message.length() - 1);
  }
  message.append("};details{");
  message.append("timeWindow[" + timeWindow + "];leastWindowCount[" + leastWindowCount
      + "];averageExceptionRate[" + averageExceptionRate
      + "];leastWindowExceptionRateMultiple[" + leastWindowExceptionRateMultiple + "];");
  message.append("detail[");
  for (MeasureResultDetail detail : details) {
    String ip = detail.getInvocationStatDimension().getIp();
    double abnormalRate = detail.getAbnormalRate();
    long windowCount = detail.getLeastWindowCount();
    String state = detail.getMeasureState().name();
    message.append("(ip:" + ip + ",abnormalRate:" + abnormalRate
        + ",invocationLeastWindowCount:" + windowCount + ",measureState:" + state + ")");
  }
  message.append("]");
  LOGGER.debugWithApp(appName, message.toString());
}
Print the measurement result details for each time window .
21,684
/**
 * Returns the reference context of the last invocation, filling in its result
 * code if available, and optionally clearing the context afterwards.
 */
public static RpcReferenceContext lastReferenceContext(boolean clear) {
  try {
    RpcInvokeContext invokeContext = RpcInvokeContext.getContext();
    RpcReferenceContext referenceContext =
        (RpcReferenceContext) invokeContext.get(RemotingConstants.INVOKE_CTX_RPC_REF_CTX);
    if (referenceContext != null) {
      String resultCode =
          (String) invokeContext.get(RemotingConstants.INVOKE_CTX_RPC_RESULT_CODE);
      if (resultCode != null) {
        referenceContext.setResultCode(ResultCodeEnum.getResultCode(resultCode));
      }
    }
    return referenceContext;
  } finally {
    if (clear) {
      clearReferenceContext();
    }
  }
}
get the last reference invoke information
21,685
/** Returns the current service context, optionally clearing it afterwards. */
public static RpcServiceContext currentServiceContext(boolean clear) {
  try {
    return (RpcServiceContext) RpcInvokeContext.getContext()
        .get(RemotingConstants.INVOKE_CTX_RPC_SER_CTX);
  } finally {
    if (clear) {
      clearServiceContext();
    }
  }
}
get current service context
21,686
/**
 * Maps an HTTP content type to a serialize type code.
 *
 * @throws SofaRpcException if the content type is blank or unsupported
 */
public static byte getSerializeTypeByContentType(String contentType) throws SofaRpcException {
  if (StringUtils.isNotBlank(contentType)) {
    String normalized = contentType.toLowerCase();
    boolean isJson = normalized.contains("text/plain")
        || normalized.contains("text/html")
        || normalized.contains("application/json");
    if (isJson) {
      return getSerializeTypeByName(RpcConstants.SERIALIZE_JSON);
    }
    if (normalized.contains(RpcConstants.SERIALIZE_PROTOBUF)) {
      return getSerializeTypeByName(RpcConstants.SERIALIZE_PROTOBUF);
    }
    if (normalized.contains(RpcConstants.SERIALIZE_HESSIAN)) {
      return getSerializeTypeByName(RpcConstants.SERIALIZE_HESSIAN2);
    }
  }
  throw new SofaRpcException(RpcErrorType.SERVER_DESERIALIZE,
      "Unsupported content type " + contentType + " in http protocol, please set HTTP HEAD: '"
          + RemotingConstants.HEAD_SERIALIZE_TYPE + "'.");
}
Parse serialize type from content type
21,687
/** Registers a config listener for the interface config; null listeners are ignored. */
public void addConfigListener(AbstractInterfaceConfig config, ConfigListener listener) {
  if (listener == null) {
    return;
  }
  RegistryUtils.initOrAddList(configListenerMap, config, listener);
}
Add config listener .
21,688
/** Converts each child node's url under the provider path into a ProviderInfo. */
static List<ProviderInfo> convertUrlsToProviders(String providerPath,
    List<ChildData> currentData) throws UnsupportedEncodingException {
  if (CommonUtils.isEmpty(currentData)) {
    return new ArrayList<ProviderInfo>();
  }
  List<ProviderInfo> providers = new ArrayList<ProviderInfo>(currentData.size());
  for (ChildData childData : currentData) {
    providers.add(convertUrlToProvider(providerPath, childData));
  }
  return providers;
}
Convert url to provider list .
21,689
/** Returns whether the named service is a provider invoker exposing the given method. */
public boolean checkService(String serviceName, String methodName) {
  Invoker invoker = invokerMap.get(serviceName);
  if (!(invoker instanceof ProviderProxyInvoker)) {
    return false;
  }
  return getMethod(serviceName, methodName) != null;
}
Check service exists
21,690
/** Lazily builds and caches the proxy instance; subsequent calls return the same proxy. */
public synchronized T refer() {
  if (proxyIns == null) {
    referenceConfig = new ReferenceConfig<T>();
    covert(consumerConfig, referenceConfig);
    proxyIns = referenceConfig.get();
  }
  return proxyIns;
}
Refers to the service and returns the proxy instance.
21,691
/**
 * Parses the module load list to decide whether the module should be loaded.
 * "*" or the module name enables it; a later "!name" or "-name" entry disables
 * it again (exclusions only take effect after a prior match).
 */
static boolean needLoad(String moduleLoadList, String moduleName) {
  String[] entries = StringUtils.splitWithCommaOrSemicolon(moduleLoadList);
  boolean load = false;
  for (String entry : entries) {
    if (StringUtils.ALL.equals(entry)) {
      load = true;
    } else if (entry.equals(moduleName)) {
      load = true;
    } else if (load && (entry.equals("!" + moduleName) || entry.equals("-" + moduleName))) {
      load = false;
      break;
    }
  }
  return load;
}
parse module load config
21,692
/**
 * Stores (or removes, when value is null) the fault-tolerance config for the
 * app, then recalculates whether the feature is enabled.
 */
public static void putAppConfig(String appName, FaultToleranceConfig value) {
  if (appName == null) {
    if (LOGGER.isWarnEnabled()) {
      LOGGER.warn("App name is null when put fault-tolerance config");
    }
    return;
  }
  if (value == null) {
    APP_CONFIGS.remove(appName);
    if (LOGGER.isInfoEnabled(appName)) {
      LOGGER.infoWithApp(appName, "Remove a resource, key[" + appName + "]");
    }
  } else {
    APP_CONFIGS.put(appName, value);
    if (LOGGER.isInfoEnabled(appName)) {
      LOGGER.infoWithApp(appName, "Get a new resource, value[" + value + "]");
    }
  }
  calcEnable();
}
Put app config .
21,693
/** Returns the app's fault-tolerance config, falling back to the default when absent. */
public static FaultToleranceConfig getConfig(String appName) {
  if (appName == null) {
    return DEFAULT_CFG;
  }
  FaultToleranceConfig config = APP_CONFIGS.get(appName);
  return (config == null) ? DEFAULT_CFG : config;
}
Gets the config for the given app, or the default config if none is registered.
21,694
/**
 * Null-safe value equality: true when both references are identical (including
 * both null) or equal per {@link Object#equals}.
 */
private boolean valueEquals(V leftValue, V rightValue) {
  // Objects.equals implements exactly the same reference/null/equals cascade
  // the manual checks did.
  return java.util.Objects.equals(leftValue, rightValue);
}
Value equals .
21,695
/** Removes the stat for the dimension and notifies listeners if one was present. */
public static void removeInvocationStat(InvocationStatDimension statDimension) {
  InvocationStat removed = ALL_STATS.remove(statDimension);
  if (removed == null) {
    return;
  }
  for (InvocationStatListener listener : LISTENERS) {
    listener.onRemoveInvocationStat(removed);
  }
}
Remove dimension stat by dimension
21,696
/** Returns whether the file's MD5 digest differs from the last recorded digest. */
public static boolean checkModified(String address, String lastDigest) {
  String currentDigest = calMD5Checksum(address);
  return !StringUtils.equals(currentDigest, lastDigest);
}
Checks whether the file's digest has changed.
21,697
/**
 * Sets the parameter key after validating that it does not use a reserved prefix.
 *
 * @throws RuntimeException if the key starts with a hidden or internal prefix
 */
public void setKey(String key) {
  if (!isValidParamKey(key)) {
    throw ExceptionUtils.buildRuntime("param.key", key,
        "key can not start with " + RpcConstants.HIDE_KEY_PREFIX + " and "
            + RpcConstants.INTERNAL_KEY_PREFIX);
  }
  this.key = key;
}
Sets key .
21,698
/** Serializes the consumer config into its registry url representation. */
public static String convertConsumerToUrl(ConsumerConfig consumerConfig) {
  String host = SystemInfo.getLocalHost();
  StringBuilder url = new StringBuilder(200);
  url.append(consumerConfig.getProtocol()).append("://").append(host).append("?version=1.0")
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_UNIQUEID, consumerConfig.getUniqueId()))
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_PID, RpcRuntimeContext.PID))
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_TIMEOUT, consumerConfig.getTimeout()))
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_ID, consumerConfig.getId()))
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_GENERIC, consumerConfig.isGeneric()))
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_INTERFACE, consumerConfig.getInterfaceId()))
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_APP_NAME, consumerConfig.getAppName()))
      .append(getKeyPairs(RpcConstants.CONFIG_KEY_SERIALIZATION,
          consumerConfig.getSerialization()))
      .append(getKeyPairs(ProviderInfoAttrs.ATTR_START_TIME, RpcRuntimeContext.now()))
      .append(convertMap2Pair(consumerConfig.getParameters()));
  addCommonAttrs(url);
  return url.toString();
}
Convert consumer to url .
21,699
/**
 * Reads the warm-up attributes from the provider's static attrs; when all are
 * present, marks the provider as warming up with its warm-up weight and end
 * time as dynamic attrs. The static warm-up attrs are always removed.
 */
public static void processWarmUpWeight(ProviderInfo providerInfo) {
  String warmupTimeStr = providerInfo.getStaticAttr(ProviderInfoAttrs.ATTR_WARMUP_TIME);
  String warmupWeightStr = providerInfo.getStaticAttr(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT);
  String startTimeStr = providerInfo.getStaticAttr(ProviderInfoAttrs.ATTR_START_TIME);
  boolean hasWarmUpConfig = StringUtils.isNotBlank(warmupTimeStr)
      && StringUtils.isNotBlank(warmupWeightStr)
      && StringUtils.isNotBlank(startTimeStr);
  if (hasWarmUpConfig) {
    long warmupTime = CommonUtils.parseLong(warmupTimeStr, 0);
    int warmupWeight = CommonUtils.parseInt(warmupWeightStr,
        Integer.parseInt(providerInfo.getStaticAttr(ProviderInfoAttrs.ATTR_WEIGHT)));
    long startTime = CommonUtils.parseLong(startTimeStr, 0);
    providerInfo.setDynamicAttr(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT, warmupWeight);
    providerInfo.setDynamicAttr(ProviderInfoAttrs.ATTR_WARM_UP_END_TIME, startTime + warmupTime);
    providerInfo.setStatus(ProviderStatus.WARMING_UP);
  }
  providerInfo.getStaticAttrs().remove(ProviderInfoAttrs.ATTR_WARMUP_TIME);
  providerInfo.getStaticAttrs().remove(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT);
}
Reads the warm-up weight parameters, decides whether to switch the provider state to the warming-up period, and sets the corresponding parameters used during that period.