idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
8,400 | private static void getLocalAndInheritedMethods ( PackageElement pkg , TypeElement type , SetMultimap < String , ExecutableElement > methods ) { for ( TypeMirror superInterface : type . getInterfaces ( ) ) { getLocalAndInheritedMethods ( pkg , MoreTypes . asTypeElement ( superInterface ) , methods ) ; } if ( type . getSuperclass ( ) . getKind ( ) != TypeKind . NONE ) { getLocalAndInheritedMethods ( pkg , MoreTypes . asTypeElement ( type . getSuperclass ( ) ) , methods ) ; } for ( ExecutableElement method : ElementFilter . methodsIn ( type . getEnclosedElements ( ) ) ) { if ( ! method . getModifiers ( ) . contains ( Modifier . STATIC ) && methodVisibleFromPackage ( method , pkg ) ) { methods . put ( method . getSimpleName ( ) . toString ( ) , method ) ; } } } | always precede those in descendant types . |
8,401 | private void reportError ( Element e , String msg , Object ... msgParams ) { String formattedMessage = String . format ( msg , msgParams ) ; processingEnv . getMessager ( ) . printMessage ( Diagnostic . Kind . ERROR , formattedMessage , e ) ; } | Issue a compilation error . This method does not throw an exception since we want to continue processing and perhaps report other errors . |
8,402 | private AbortProcessingException abortWithError ( String msg , Element e ) { reportError ( e , msg ) ; return new AbortProcessingException ( ) ; } | Issue a compilation error and return an exception that when thrown will cause the processing of this class to be abandoned . This does not prevent the processing of other classes . |
8,403 | private static Optional < Integer > invariableHash ( AnnotationValue annotationValue ) { Object value = annotationValue . getValue ( ) ; if ( value instanceof String || Primitives . isWrapperType ( value . getClass ( ) ) ) { return Optional . of ( value . hashCode ( ) ) ; } else if ( value instanceof List < ? > ) { @ SuppressWarnings ( "unchecked" ) List < ? extends AnnotationValue > list = ( List < ? extends AnnotationValue > ) value ; return invariableHash ( list ) ; } else { return Optional . empty ( ) ; } } | Returns the hashCode of the given AnnotationValue if that hashCode is guaranteed to be always the same . The hashCode of a String or primitive type never changes . The hashCode of a Class or an enum constant does potentially change in different runs of the same program . The hashCode of an array doesn t change if the hashCodes of its elements don t . Although we could have a similar rule for nested annotation values we currently don t . |
8,404 | private static ImmutableMap < String , Integer > invariableHashes ( ImmutableMap < String , Member > members , ImmutableSet < String > parameters ) { ImmutableMap . Builder < String , Integer > builder = ImmutableMap . builder ( ) ; for ( String element : members . keySet ( ) ) { if ( ! parameters . contains ( element ) ) { Member member = members . get ( element ) ; AnnotationValue annotationValue = member . method . getDefaultValue ( ) ; Optional < Integer > invariableHash = invariableHash ( annotationValue ) ; if ( invariableHash . isPresent ( ) ) { builder . put ( element , ( element . hashCode ( ) * 127 ) ^ invariableHash . get ( ) ) ; } } } return builder . build ( ) ; } | Returns a map from the names of members with invariable hashCodes to the values of those hashCodes . |
8,405 | private static DelimiterBasedFrameDecoder wordDecoder ( ) { return new DelimiterBasedFrameDecoder ( 256 , IntStream . of ( '\r' , '\n' , ' ' , '\t' , '.' , ',' , ';' , ':' , '-' ) . mapToObj ( delim -> wrappedBuffer ( new byte [ ] { ( byte ) delim } ) ) . toArray ( ByteBuf [ ] :: new ) ) ; } | skip things that aren t words roughly |
8,406 | public static < K , V , C extends IMap < K , V > > C monitor ( MeterRegistry registry , C cache , Iterable < Tag > tags ) { new HazelcastCacheMetrics ( cache , tags ) . bindTo ( registry ) ; return cache ; } | Record metrics on a Hazelcast cache . |
8,407 | public double totalTime ( TimeUnit unit ) { T obj2 = ref . get ( ) ; if ( obj2 != null ) { double prevLast = lastTime ; lastTime = Math . max ( TimeUtils . convert ( totalTimeFunction . applyAsDouble ( obj2 ) , totalTimeFunctionUnit , baseTimeUnit ( ) ) , 0 ) ; total . getCurrent ( ) . add ( lastTime - prevLast ) ; } return TimeUtils . convert ( total . poll ( ) , baseTimeUnit ( ) , unit ) ; } | The total time of all occurrences of the timed event . |
8,408 | private static void appendIndentedNewLine ( int indentLevel , StringBuilder stringBuilder ) { stringBuilder . append ( "\n" ) ; for ( int i = 0 ; i < indentLevel ; i ++ ) { stringBuilder . append ( " " ) ; } } | Print a new line with indention at the beginning of the new line . |
8,409 | Meter register ( Meter . Id id , Meter . Type type , Iterable < Measurement > measurements ) { return registerMeterIfNecessary ( Meter . class , id , id2 -> newMeter ( id2 , type , measurements ) , NoopMeter :: new ) ; } | Register a custom meter type . |
8,410 | public < T > T gauge ( String name , T obj , ToDoubleFunction < T > valueFunction ) { return gauge ( name , emptyList ( ) , obj , valueFunction ) ; } | Register a gauge that reports the value of the object . |
8,411 | public void close ( ) { if ( closed . compareAndSet ( false , true ) ) { synchronized ( meterMapLock ) { for ( Meter meter : meterMap . values ( ) ) { meter . close ( ) ; } } } } | Closes this registry releasing any resources in the process . Once closed this registry will no longer accept new meters and any publishing activity will cease . |
8,412 | public final void remove ( MeterRegistry registry ) { for ( ; ; ) { if ( childrenGuard . compareAndSet ( false , true ) ) { try { Map < MeterRegistry , T > newChildren = new IdentityHashMap < > ( children ) ; newChildren . remove ( registry ) ; this . children = newChildren ; break ; } finally { childrenGuard . set ( false ) ; } } } } | Does nothing . New registries added to the composite are automatically reflected in each meter belonging to the composite . |
8,413 | public void record ( double sample , TimeUnit timeUnit ) { rotate ( ) ; final long sampleNanos = ( long ) TimeUtils . convert ( sample , timeUnit , TimeUnit . NANOSECONDS ) ; for ( AtomicLong max : ringBuffer ) { updateMax ( max , sampleNanos ) ; } } | For use by timer implementations . |
8,414 | public void record ( double sample ) { rotate ( ) ; long sampleLong = Double . doubleToLongBits ( sample ) ; for ( AtomicLong max : ringBuffer ) { updateMax ( max , sampleLong ) ; } } | For use by distribution summary implementations . |
8,415 | public static NavigableSet < Long > buckets ( DistributionStatisticConfig distributionStatisticConfig ) { return PERCENTILE_BUCKETS . subSet ( distributionStatisticConfig . getMinimumExpectedValue ( ) , true , distributionStatisticConfig . getMaximumExpectedValue ( ) , true ) ; } | Pick values from a static set of percentile buckets that yields a decent error bound on most real world timers and distribution summaries because monitoring systems like Prometheus require us to report the same buckets at every interval regardless of where actual samples have been observed . |
8,416 | public RequiredSearch name ( String exactName ) { this . nameMatches = n -> n . equals ( exactName ) ; this . exactNameMatch = exactName ; return this ; } | Meter contains a tag with the exact name . |
8,417 | public static Ehcache monitor ( MeterRegistry registry , Ehcache cache , Iterable < Tag > tags ) { new EhCache2Metrics ( cache , tags ) . bindTo ( registry ) ; return cache ; } | Record metrics on an EhCache cache . |
8,418 | public DistributionStatisticConfig merge ( DistributionStatisticConfig parent ) { return DistributionStatisticConfig . builder ( ) . percentilesHistogram ( this . percentileHistogram == null ? parent . percentileHistogram : this . percentileHistogram ) . percentiles ( this . percentiles == null ? parent . percentiles : this . percentiles ) . sla ( this . sla == null ? parent . sla : this . sla ) . percentilePrecision ( this . percentilePrecision == null ? parent . percentilePrecision : this . percentilePrecision ) . minimumExpectedValue ( this . minimumExpectedValue == null ? parent . minimumExpectedValue : this . minimumExpectedValue ) . maximumExpectedValue ( this . maximumExpectedValue == null ? parent . maximumExpectedValue : this . maximumExpectedValue ) . expiry ( this . expiry == null ? parent . expiry : this . expiry ) . bufferLength ( this . bufferLength == null ? parent . bufferLength : this . bufferLength ) . build ( ) ; } | Merges two configurations . Any options that are non - null in this configuration take precedence . Any options that are non - null in the parent are used otherwise . |
8,419 | Double resettableFunctionalCounter ( String functionalCounterKey , DoubleSupplier function ) { Double result = function . getAsDouble ( ) ; Double previousResult = previousValueCacheMap . getOrDefault ( functionalCounterKey , 0D ) ; Double beforeResetValue = beforeResetValuesCacheMap . getOrDefault ( functionalCounterKey , 0D ) ; Double correctedValue = result + beforeResetValue ; if ( correctedValue < previousResult ) { beforeResetValuesCacheMap . put ( functionalCounterKey , previousResult ) ; correctedValue = previousResult + result ; } previousValueCacheMap . put ( functionalCounterKey , correctedValue ) ; return correctedValue ; } | Function that makes sure functional counter values survive pg_stat_reset calls . |
8,420 | public double max ( TimeUnit unit ) { return TimeUtils . convert ( max . poll ( ) , TimeUnit . MILLISECONDS , unit ) ; } | The StatsD agent will likely compute max with a different window so the value may not match what you see here . This value is not exported to the agent and is only for diagnostic use . |
8,421 | public Stream < Tag > stream ( ) { return StreamSupport . stream ( Spliterators . spliteratorUnknownSize ( iterator ( ) , Spliterator . ORDERED | Spliterator . DISTINCT | Spliterator . NONNULL | Spliterator . SORTED ) , false ) ; } | Return a stream of the contained tags . |
8,422 | public String tagValue ( String value ) { String formattedValue = StringEscapeUtils . escapeJson ( delegate . tagValue ( value ) ) ; return StringUtils . truncate ( formattedValue , TAG_VALUE_MAX_LENGTH ) ; } | Dimension value can be any non - empty UTF - 8 string with a maximum length < = 256 characters . |
8,423 | public static String truncate ( String string , int maxLength ) { if ( string . length ( ) > maxLength ) { return string . substring ( 0 , maxLength ) ; } return string ; } | Truncate the String to the max length . |
8,424 | private void registerNotificationListener ( String type , BiConsumer < ObjectName , Tags > perObject ) { NotificationListener notificationListener = ( notification , handback ) -> { MBeanServerNotification mbs = ( MBeanServerNotification ) notification ; ObjectName o = mbs . getMBeanName ( ) ; perObject . accept ( o , Tags . concat ( tags , nameTag ( o ) ) ) ; } ; NotificationFilter filter = ( NotificationFilter ) notification -> { if ( ! MBeanServerNotification . REGISTRATION_NOTIFICATION . equals ( notification . getType ( ) ) ) return false ; ObjectName obj = ( ( MBeanServerNotification ) notification ) . getMBeanName ( ) ; return obj . getDomain ( ) . equals ( JMX_DOMAIN ) && obj . getKeyProperty ( "type" ) . equals ( type ) ; } ; try { mBeanServer . addNotificationListener ( MBeanServerDelegate . DELEGATE_NAME , notificationListener , filter , null ) ; notificationListenerCleanUpRunnables . add ( ( ) -> { try { mBeanServer . removeNotificationListener ( MBeanServerDelegate . DELEGATE_NAME , notificationListener ) ; } catch ( InstanceNotFoundException | ListenerNotFoundException ignored ) { } } ) ; } catch ( InstanceNotFoundException e ) { throw new RuntimeException ( "Error registering Kafka MBean listener" , e ) ; } } | This notification listener should remain indefinitely since new Kafka consumers can be added at any time . |
8,425 | public Long getValue ( Meter . Type meterType ) { if ( meterType == Meter . Type . DISTRIBUTION_SUMMARY ) { return getDistributionSummaryValue ( ) ; } if ( meterType == Meter . Type . TIMER ) { return getTimerValue ( ) ; } return null ; } | Return the underlying value of the SLA in form suitable to apply to the given meter type . |
8,426 | public static HistogramGauges registerWithCommonFormat ( Timer timer , MeterRegistry registry ) { Meter . Id id = timer . getId ( ) ; return HistogramGauges . register ( timer , registry , percentile -> id . getName ( ) + ".percentile" , percentile -> Tags . concat ( id . getTagsAsIterable ( ) , "phi" , DoubleFormat . decimalOrNan ( percentile . percentile ( ) ) ) , percentile -> percentile . value ( timer . baseTimeUnit ( ) ) , bucket -> id . getName ( ) + ".histogram" , bucket -> Tags . concat ( id . getTagsAsIterable ( ) , "le" , bucket . bucket ( ) != Long . MAX_VALUE ? DoubleFormat . decimalOrWhole ( bucket . bucket ( timer . baseTimeUnit ( ) ) ) : "+Inf" ) ) ; } | Register a set of gauges for percentiles and histogram buckets that follow a common format when the monitoring system doesn t have an opinion about the structure of this data . |
8,427 | private void postMetricMetadata ( String metricName , DatadogMetricMetadata metadata ) { if ( verifiedMetadata . contains ( metricName ) ) return ; try { httpClient . put ( config . uri ( ) + "/api/v1/metrics/" + URLEncoder . encode ( metricName , "UTF-8" ) + "?api_key=" + config . apiKey ( ) + "&application_key=" + config . applicationKey ( ) ) . withJsonContent ( metadata . editMetadataBody ( ) ) . send ( ) . onSuccess ( response -> verifiedMetadata . add ( metricName ) ) . onError ( response -> { if ( logger . isErrorEnabled ( ) ) { String msg = response . body ( ) ; if ( ! msg . contains ( "metric_name not found" ) ) { logger . error ( "failed to send metric metadata to datadog: {}" , msg ) ; } } } ) ; } catch ( Throwable e ) { logger . warn ( "failed to send metric metadata to datadog" , e ) ; } } | Set up metric metadata once per time series |
8,428 | public static ScanCursor of ( String cursor ) { ScanCursor scanCursor = new ScanCursor ( ) ; scanCursor . setCursor ( cursor ) ; return scanCursor ; } | Creates a Scan - Cursor reference . |
8,429 | public static < T > T awaitOrCancel ( RedisFuture < T > cmd , long timeout , TimeUnit unit ) { try { if ( ! cmd . await ( timeout , unit ) ) { cmd . cancel ( true ) ; throw ExceptionFactory . createTimeoutException ( Duration . ofNanos ( unit . toNanos ( timeout ) ) ) ; } return cmd . get ( ) ; } catch ( RuntimeException e ) { throw e ; } catch ( ExecutionException e ) { if ( e . getCause ( ) instanceof RedisCommandExecutionException ) { throw ExceptionFactory . createExecutionException ( e . getCause ( ) . getMessage ( ) , e . getCause ( ) ) ; } if ( e . getCause ( ) instanceof RedisCommandTimeoutException ) { throw new RedisCommandTimeoutException ( e . getCause ( ) ) ; } throw new RedisException ( e . getCause ( ) ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new RedisCommandInterruptedException ( e ) ; } catch ( Exception e ) { throw ExceptionFactory . createExecutionException ( null , e ) ; } } | Wait until futures are complete or the supplied timeout is reached . Commands are canceled if the timeout is reached but the command is not finished . |
8,430 | public void registerCloseables ( final Collection < Closeable > registry , Closeable ... closeables ) { registry . addAll ( Arrays . asList ( closeables ) ) ; addListener ( resource -> { for ( Closeable closeable : closeables ) { if ( closeable == RedisChannelHandler . this ) { continue ; } try { if ( closeable instanceof AsyncCloseable ) { ( ( AsyncCloseable ) closeable ) . closeAsync ( ) ; } else { closeable . close ( ) ; } } catch ( IOException e ) { if ( debugEnabled ) { logger . debug ( e . toString ( ) , e ) ; } } } registry . removeAll ( Arrays . asList ( closeables ) ) ; } ) ; } | Register Closeable resources . Internal access only . |
8,431 | protected List < RedisFuture < Void > > resubscribe ( ) { List < RedisFuture < Void > > result = new ArrayList < > ( ) ; if ( endpoint . hasChannelSubscriptions ( ) ) { result . add ( async ( ) . subscribe ( toArray ( endpoint . getChannels ( ) ) ) ) ; } if ( endpoint . hasPatternSubscriptions ( ) ) { result . add ( async ( ) . psubscribe ( toArray ( endpoint . getPatterns ( ) ) ) ) ; } return result ; } | Re - subscribe to all previously subscribed channels and patterns . |
8,432 | private static Map < TypeVariable < ? > , Type > mergeMaps ( TypeDiscoverer < ? > parent , Map < TypeVariable < ? > , Type > map ) { Map < TypeVariable < ? > , Type > typeVariableMap = new HashMap < TypeVariable < ? > , Type > ( ) ; typeVariableMap . putAll ( map ) ; typeVariableMap . putAll ( parent . getTypeVariableMap ( ) ) ; return typeVariableMap ; } | Merges the type variable maps of the given parent with the new map . |
8,433 | static ScanCursor getContinuationCursor ( ScanCursor scanCursor ) { if ( ScanCursor . INITIAL . equals ( scanCursor ) ) { return scanCursor ; } assertClusterScanCursor ( scanCursor ) ; ClusterScanCursor clusterScanCursor = ( ClusterScanCursor ) scanCursor ; if ( clusterScanCursor . isScanOnCurrentNodeFinished ( ) ) { return ScanCursor . INITIAL ; } return scanCursor ; } | Retrieve the cursor to continue the scan . |
8,434 | private static List < String > getNodeIds ( StatefulRedisClusterConnection < ? , ? > connection ) { List < String > nodeIds = new ArrayList < > ( ) ; PartitionAccessor partitionAccessor = new PartitionAccessor ( connection . getPartitions ( ) ) ; for ( RedisClusterNode redisClusterNode : partitionAccessor . getMasters ( ) ) { if ( connection . getReadFrom ( ) != null ) { List < RedisNodeDescription > readCandidates = ( List ) partitionAccessor . getReadCandidates ( redisClusterNode ) ; List < RedisNodeDescription > selection = connection . getReadFrom ( ) . select ( new ReadFrom . Nodes ( ) { public List < RedisNodeDescription > getNodes ( ) { return readCandidates ; } public Iterator < RedisNodeDescription > iterator ( ) { return readCandidates . iterator ( ) ; } } ) ; if ( ! selection . isEmpty ( ) ) { RedisClusterNode selectedNode = ( RedisClusterNode ) selection . get ( 0 ) ; nodeIds . add ( selectedNode . getNodeId ( ) ) ; continue ; } } nodeIds . add ( redisClusterNode . getNodeId ( ) ) ; } return nodeIds ; } | Retrieve a list of node Ids to use for the SCAN operation . |
8,435 | private static String [ ] getHostAndPortFromBracketedHost ( String hostPortString ) { LettuceAssert . isTrue ( hostPortString . charAt ( 0 ) == '[' , String . format ( "Bracketed host-port string must start with a bracket: %s" , hostPortString ) ) ; int colonIndex = hostPortString . indexOf ( ':' ) ; int closeBracketIndex = hostPortString . lastIndexOf ( ']' ) ; LettuceAssert . isTrue ( colonIndex > - 1 && closeBracketIndex > colonIndex , String . format ( "Invalid bracketed host/port: %s" , hostPortString ) ) ; String host = hostPortString . substring ( 1 , closeBracketIndex ) ; if ( closeBracketIndex + 1 == hostPortString . length ( ) ) { return new String [ ] { host , "" } ; } else { LettuceAssert . isTrue ( hostPortString . charAt ( closeBracketIndex + 1 ) == ':' , "Only a colon may follow a close bracket: " + hostPortString ) ; for ( int i = closeBracketIndex + 2 ; i < hostPortString . length ( ) ; ++ i ) { LettuceAssert . isTrue ( Character . isDigit ( hostPortString . charAt ( i ) ) , String . format ( "Port must be numeric: %s" , hostPortString ) ) ; } return new String [ ] { host , hostPortString . substring ( closeBracketIndex + 2 ) } ; } } | Parses a bracketed host - port string throwing IllegalArgumentException if parsing fails . |
8,436 | public < X extends Throwable > V getValueOrElseThrow ( Supplier < ? extends X > exceptionSupplier ) throws X { LettuceAssert . notNull ( exceptionSupplier , "Supplier function must not be null" ) ; if ( hasValue ( ) ) { return value ; } throw exceptionSupplier . get ( ) ; } | Return the contained value if present otherwise throw an exception to be created by the provided supplier . |
8,437 | static boolean isChanged ( Collection < RedisNodeDescription > o1 , Collection < RedisNodeDescription > o2 ) { if ( o1 . size ( ) != o2 . size ( ) ) { return true ; } for ( RedisNodeDescription base : o2 ) { if ( ! essentiallyEqualsTo ( base , findNodeByUri ( o1 , base . getUri ( ) ) ) ) { return true ; } } return false ; } | Check if properties changed . |
8,438 | private List < ConnectionFuture < StatefulRedisPubSubConnection < String , String > > > potentiallyConnectSentinels ( ) { List < ConnectionFuture < StatefulRedisPubSubConnection < String , String > > > connectionFutures = new ArrayList < > ( ) ; for ( RedisURI sentinel : sentinels ) { if ( pubSubConnections . containsKey ( sentinel ) ) { continue ; } ConnectionFuture < StatefulRedisPubSubConnection < String , String > > future = redisClient . connectPubSubAsync ( CODEC , sentinel ) ; pubSubConnections . put ( sentinel , future ) ; future . whenComplete ( ( connection , throwable ) -> { if ( throwable != null || closed ) { pubSubConnections . remove ( sentinel ) ; } if ( closed ) { connection . closeAsync ( ) ; } } ) ; connectionFutures . add ( future ) ; } return connectionFutures ; } | Inspect whether additional Sentinel connections are required based on the which Sentinels are currently connected . |
8,439 | public static Collection < Class < ? > > getNoValueTypes ( ) { return REACTIVE_WRAPPERS . entrySet ( ) . stream ( ) . filter ( entry -> entry . getValue ( ) . isNoValue ( ) ) . map ( Entry :: getKey ) . collect ( Collectors . toList ( ) ) ; } | Returns a collection of No - Value wrapper types . |
8,440 | public static Collection < Class < ? > > getSingleValueTypes ( ) { return REACTIVE_WRAPPERS . entrySet ( ) . stream ( ) . filter ( entry -> ! entry . getValue ( ) . isMultiValue ( ) ) . map ( Entry :: getKey ) . collect ( Collectors . toList ( ) ) ; } | Returns a collection of Single - Value wrapper types . |
8,441 | @ SuppressWarnings ( "unchecked" ) public static < K , V , T > RedisCommand < K , V , T > unwrap ( RedisCommand < K , V , T > wrapped ) { RedisCommand < K , V , T > result = wrapped ; while ( result instanceof DecoratedCommand < ? , ? , ? > ) { result = ( ( DecoratedCommand < K , V , T > ) result ) . getDelegate ( ) ; } return result ; } | Unwrap a wrapped command . |
8,442 | @ SuppressWarnings ( "unchecked" ) public static < T extends Type > T unwrap ( T type ) { Type unwrapped = type ; while ( unwrapped instanceof SerializableTypeProxy ) { unwrapped = ( ( SerializableTypeProxy ) type ) . getTypeProvider ( ) . getType ( ) ; } return ( T ) unwrapped ; } | Unwrap the given type effectively returning the original non - serializable type . |
8,443 | public static RedisURI create ( String host , int port ) { return Builder . redis ( host , port ) . build ( ) ; } | Create a Redis URI from host and port . |
8,444 | public static RedisURI create ( String uri ) { LettuceAssert . notEmpty ( uri , "URI must not be empty" ) ; return create ( URI . create ( uri ) ) ; } | Create a Redis URI from an URI string . |
8,445 | public void setPassword ( CharSequence password ) { LettuceAssert . notNull ( password , "Password must not be null" ) ; this . password = password . toString ( ) . toCharArray ( ) ; } | Sets the password . Use empty string to skip authentication . |
8,446 | public void setPassword ( char [ ] password ) { LettuceAssert . notNull ( password , "Password must not be null" ) ; this . password = Arrays . copyOf ( password , password . length ) ; } | Sets the password . Use empty char array to skip authentication . |
8,447 | public void setTimeout ( Duration timeout ) { LettuceAssert . notNull ( timeout , "Timeout must not be null" ) ; LettuceAssert . isTrue ( ! timeout . isNegative ( ) , "Timeout must be greater or equal 0" ) ; this . timeout = timeout ; } | Sets the command timeout for synchronous command execution . |
8,448 | public URI toURI ( ) { String scheme = getScheme ( ) ; String authority = getAuthority ( scheme ) ; String queryString = getQueryString ( ) ; String uri = scheme + "://" + authority ; if ( ! queryString . isEmpty ( ) ) { uri += "?" + queryString ; } return URI . create ( uri ) ; } | Creates an URI based on the RedisURI . |
8,449 | private boolean isUnresolvableTypeVariable ( ) { if ( this . type instanceof TypeVariable ) { if ( this . variableResolver == null ) { return true ; } TypeVariable < ? > variable = ( TypeVariable < ? > ) this . type ; ResolvableType resolved = this . variableResolver . resolveVariable ( variable ) ; if ( resolved == null || resolved . isUnresolvableTypeVariable ( ) ) { return true ; } } return false ; } | Determine whether the underlying type is a type variable that cannot be resolved through the associated variable resolver . |
8,450 | private < K , V > CompletableFuture < StatefulRedisClusterPubSubConnection < K , V > > connectClusterPubSubAsync ( RedisCodec < K , V > codec ) { if ( partitions == null ) { return Futures . failed ( new IllegalStateException ( "Partitions not initialized. Initialize via RedisClusterClient.getPartitions()." ) ) ; } activateTopologyRefreshIfNeeded ( ) ; logger . debug ( "connectClusterPubSub(" + initialUris + ")" ) ; Mono < SocketAddress > socketAddressSupplier = getSocketAddressSupplier ( TopologyComparators :: sortByClientCount ) ; PubSubClusterEndpoint < K , V > endpoint = new PubSubClusterEndpoint < K , V > ( clientOptions , clientResources ) ; RedisChannelWriter writer = endpoint ; if ( CommandExpiryWriter . isSupported ( clientOptions ) ) { writer = new CommandExpiryWriter ( writer , clientOptions , clientResources ) ; } ClusterDistributionChannelWriter clusterWriter = new ClusterDistributionChannelWriter ( clientOptions , writer , clusterTopologyRefreshScheduler ) ; StatefulRedisClusterPubSubConnectionImpl < K , V > connection = new StatefulRedisClusterPubSubConnectionImpl < > ( endpoint , clusterWriter , codec , timeout ) ; ClusterPubSubConnectionProvider < K , V > pooledClusterConnectionProvider = new ClusterPubSubConnectionProvider < > ( this , clusterWriter , codec , connection . getUpstreamListener ( ) , clusterTopologyRefreshScheduler ) ; clusterWriter . setClusterConnectionProvider ( pooledClusterConnectionProvider ) ; connection . setPartitions ( partitions ) ; Supplier < CommandHandler > commandHandlerSupplier = ( ) -> new PubSubCommandHandler < > ( clientOptions , clientResources , codec , endpoint ) ; Mono < StatefulRedisClusterPubSubConnectionImpl < K , V > > connectionMono = Mono . defer ( ( ) -> connect ( socketAddressSupplier , codec , endpoint , connection , commandHandlerSupplier ) ) ; for ( int i = 1 ; i < getConnectionAttempts ( ) ; i ++ ) { connectionMono = connectionMono . 
onErrorResume ( t -> connect ( socketAddressSupplier , codec , endpoint , connection , commandHandlerSupplier ) ) ; } return connectionMono . flatMap ( c -> c . reactive ( ) . command ( ) . collectList ( ) . map ( CommandDetailParser :: parse ) . doOnNext ( detail -> c . setState ( new RedisState ( detail ) ) ) . doOnError ( e -> c . setState ( new RedisState ( Collections . emptyList ( ) ) ) ) . then ( Mono . just ( c ) ) . onErrorResume ( RedisCommandExecutionException . class , e -> Mono . just ( c ) ) ) . doOnNext ( c -> connection . registerCloseables ( closeableResources , clusterWriter , pooledClusterConnectionProvider ) ) . map ( it -> ( StatefulRedisClusterPubSubConnection < K , V > ) it ) . toFuture ( ) ; } | Create a clustered connection with command distributor . |
8,451 | public void reloadPartitions ( ) { if ( partitions == null ) { initializePartitions ( ) ; partitions . updateCache ( ) ; } else { Partitions loadedPartitions = loadPartitions ( ) ; if ( TopologyComparators . isChanged ( getPartitions ( ) , loadedPartitions ) ) { logger . debug ( "Using a new cluster topology" ) ; List < RedisClusterNode > before = new ArrayList < RedisClusterNode > ( getPartitions ( ) ) ; List < RedisClusterNode > after = new ArrayList < RedisClusterNode > ( loadedPartitions ) ; getResources ( ) . eventBus ( ) . publish ( new ClusterTopologyChangedEvent ( before , after ) ) ; } this . partitions . reload ( loadedPartitions . getPartitions ( ) ) ; } updatePartitionsInConnections ( ) ; } | Reload partitions and re - initialize the distribution table . |
8,452 | public < T extends Commands > T getCommands ( Class < T > commandInterface ) { LettuceAssert . notNull ( commandInterface , "Redis Command Interface must not be null" ) ; RedisCommandsMetadata metadata = new DefaultRedisCommandsMetadata ( commandInterface ) ; InvocationProxyFactory factory = new InvocationProxyFactory ( ) ; factory . addInterface ( commandInterface ) ; BatchAwareCommandLookupStrategy lookupStrategy = new BatchAwareCommandLookupStrategy ( new CompositeCommandLookupStrategy ( ) , metadata ) ; factory . addInterceptor ( new DefaultMethodInvokingInterceptor ( ) ) ; factory . addInterceptor ( new CommandFactoryExecutorMethodInterceptor ( metadata , lookupStrategy ) ) ; return factory . createProxy ( commandInterface . getClassLoader ( ) ) ; } | Returns a Redis Commands interface instance for the given interface . |
8,453 | static void checkForKqueueLibrary ( ) { LettuceAssert . assertState ( KQUEUE_ENABLED , String . format ( "kqueue use is disabled via System properties (%s)" , KQUEUE_ENABLED_KEY ) ) ; LettuceAssert . assertState ( isAvailable ( ) , "netty-transport-native-kqueue is not available. Make sure netty-transport-native-kqueue library on the class path and supported by your operating system." ) ; } | Check whether the kqueue library is available on the class path . |
8,454 | public static RedisInstance parse ( List < ? > roleOutput ) { LettuceAssert . isTrue ( roleOutput != null && ! roleOutput . isEmpty ( ) , "Empty role output" ) ; LettuceAssert . isTrue ( roleOutput . get ( 0 ) instanceof String && ROLE_MAPPING . containsKey ( roleOutput . get ( 0 ) ) , "First role element must be a string (any of " + ROLE_MAPPING . keySet ( ) + ")" ) ; RedisInstance . Role role = ROLE_MAPPING . get ( roleOutput . get ( 0 ) ) ; switch ( role ) { case MASTER : return parseMaster ( roleOutput ) ; case SLAVE : return parseReplica ( roleOutput ) ; case SENTINEL : return parseSentinel ( roleOutput ) ; } return null ; } | Parse the output of the Redis ROLE command and convert to a RedisInstance . |
8,455 | public static String digest ( ByteBuffer script ) { try { MessageDigest md = MessageDigest . getInstance ( "SHA1" ) ; md . update ( script ) ; return new String ( Base16 . encode ( md . digest ( ) , false ) ) ; } catch ( NoSuchAlgorithmException e ) { throw new RedisException ( "JVM does not support SHA1" ) ; } } | Create SHA1 digest from Lua script . |
8,456 | public void setAutoFlushCommands ( boolean autoFlush ) { synchronized ( stateLock ) { this . autoFlushCommands = autoFlush ; connectionProvider . forEach ( connection -> connection . setAutoFlushCommands ( autoFlush ) ) ; } } | Disable or enable auto - flush behavior for all connections . |
8,457 | public static int getSlot ( ByteBuffer key ) { int limit = key . limit ( ) ; int position = key . position ( ) ; int start = indexOf ( key , SUBKEY_START ) ; if ( start != - 1 ) { int end = indexOf ( key , start + 1 , SUBKEY_END ) ; if ( end != - 1 && end != start + 1 ) { key . position ( start + 1 ) . limit ( end ) ; } } try { if ( key . hasArray ( ) ) { return CRC16 . crc16 ( key . array ( ) , key . position ( ) , key . limit ( ) - key . position ( ) ) % SLOT_COUNT ; } return CRC16 . crc16 ( key ) % SLOT_COUNT ; } finally { key . position ( position ) . limit ( limit ) ; } } | Calculate the slot from the given key . |
8,458 | static < K , V > Map < Integer , List < K > > partition ( RedisCodec < K , V > codec , Iterable < K > keys ) { Map < Integer , List < K > > partitioned = new HashMap < > ( ) ; for ( K key : keys ) { int slot = getSlot ( codec . encodeKey ( key ) ) ; if ( ! partitioned . containsKey ( slot ) ) { partitioned . put ( slot , new ArrayList < > ( ) ) ; } Collection < K > list = partitioned . get ( slot ) ; list . add ( key ) ; } return partitioned ; } | Partition keys by slot - hash . The resulting map honors order of the keys . |
8,459 | static < K > Map < K , Integer > getSlots ( Map < Integer , ? extends Iterable < K > > partitioned ) { Map < K , Integer > result = new HashMap < > ( ) ; for ( Map . Entry < Integer , ? extends Iterable < K > > entry : partitioned . entrySet ( ) ) { for ( K key : entry . getValue ( ) ) { result . put ( key , entry . getKey ( ) ) ; } } return result ; } | Create mapping between the Key and hash slot . |
8,460 | public < A extends Annotation > A getAnnotation ( Class < A > annotationClass ) { return method . getAnnotation ( annotationClass ) ; } | Lookup a method annotation . |
8,461 | private ByteBuffer encodeAndAllocateBuffer ( String key ) { if ( key == null ) { return ByteBuffer . wrap ( EMPTY ) ; } CharsetEncoder encoder = CharsetUtil . encoder ( charset ) ; ByteBuffer buffer = ByteBuffer . allocate ( ( int ) ( encoder . maxBytesPerChar ( ) * key . length ( ) ) ) ; ByteBuf byteBuf = Unpooled . wrappedBuffer ( buffer ) ; byteBuf . clear ( ) ; encode ( key , byteBuf ) ; buffer . limit ( byteBuf . writerIndex ( ) ) ; return buffer ; } | Compatibility implementation . |
8,462 | public void updateCache ( ) { synchronized ( partitions ) { if ( partitions . isEmpty ( ) ) { this . slotCache = EMPTY ; this . nodeReadView = Collections . emptyList ( ) ; return ; } RedisClusterNode [ ] slotCache = new RedisClusterNode [ SlotHash . SLOT_COUNT ] ; List < RedisClusterNode > readView = new ArrayList < > ( partitions . size ( ) ) ; for ( RedisClusterNode partition : partitions ) { readView . add ( partition ) ; for ( Integer integer : partition . getSlots ( ) ) { slotCache [ integer . intValue ( ) ] = partition ; } } this . slotCache = slotCache ; this . nodeReadView = Collections . unmodifiableCollection ( readView ) ; } } | Update the partition cache . Updates are necessary after the partition details have changed . |
8,463 | public void reload ( List < RedisClusterNode > partitions ) { LettuceAssert . noNullElements ( partitions , "Partitions must not contain null elements" ) ; synchronized ( this . partitions ) { this . partitions . clear ( ) ; this . partitions . addAll ( partitions ) ; updateCache ( ) ; } } | Update partitions and rebuild slot cache . |
8,464 | @ SuppressWarnings ( "unchecked" ) public static < K , V > ByteBuffer encodeFirstKey ( CommandArgs < K , V > commandArgs ) { for ( SingularArgument singularArgument : commandArgs . singularArguments ) { if ( singularArgument instanceof CommandArgs . KeyArgument ) { return commandArgs . codec . encodeKey ( ( ( CommandArgs . KeyArgument < K , V > ) singularArgument ) . key ) ; } } return null ; } | Get the first encoded key for cluster command routing . |
8,465 | public CommandArgs < K , V > addKey ( K key ) { singularArguments . add ( KeyArgument . of ( key , codec ) ) ; return this ; } | Adds a key argument . |
8,466 | public CommandArgs < K , V > addKeys ( Iterable < K > keys ) { LettuceAssert . notNull ( keys , "Keys must not be null" ) ; for ( K key : keys ) { addKey ( key ) ; } return this ; } | Add multiple key arguments . |
8,467 | public CommandArgs < K , V > addValue ( V value ) { singularArguments . add ( ValueArgument . of ( value , codec ) ) ; return this ; } | Add a value argument . |
8,468 | public CommandArgs < K , V > addValues ( Iterable < V > values ) { LettuceAssert . notNull ( values , "Values must not be null" ) ; for ( V value : values ) { addValue ( value ) ; } return this ; } | Add multiple value arguments . |
8,469 | public CommandArgs < K , V > add ( String s ) { singularArguments . add ( StringArgument . of ( s ) ) ; return this ; } | Add a string argument . The argument is represented as bulk string . |
8,470 | public CommandArgs < K , V > add ( char [ ] cs ) { singularArguments . add ( CharArrayArgument . of ( cs ) ) ; return this ; } | Add a string as char - array . The argument is represented as bulk string . |
8,471 | public CommandArgs < K , V > add ( double n ) { singularArguments . add ( DoubleArgument . of ( n ) ) ; return this ; } | Add a double argument . |
8,472 | public CommandArgs < K , V > add ( byte [ ] value ) { singularArguments . add ( BytesArgument . of ( value ) ) ; return this ; } | Add a byte - array argument . The argument is represented as bulk string . |
8,473 | public static char [ ] encode ( byte [ ] src , boolean upper ) { char [ ] table = upper ? Base16 . upper : Base16 . lower ; char [ ] dst = new char [ src . length * 2 ] ; for ( int si = 0 , di = 0 ; si < src . length ; si ++ ) { byte b = src [ si ] ; dst [ di ++ ] = table [ ( b & 0xf0 ) >>> 4 ] ; dst [ di ++ ] = table [ ( b & 0x0f ) ] ; } return dst ; } | Encode bytes to base16 chars . |
8,474 | public static List < RedisClusterNode > sortByUri ( Iterable < RedisClusterNode > clusterNodes ) { LettuceAssert . notNull ( clusterNodes , "Cluster nodes must not be null" ) ; List < RedisClusterNode > ordered = LettuceLists . newList ( clusterNodes ) ; ordered . sort ( ( o1 , o2 ) -> RedisURIComparator . INSTANCE . compare ( o1 . getUri ( ) , o2 . getUri ( ) ) ) ; return ordered ; } | Sort partitions by RedisURI . |
8,475 | public static List < RedisClusterNode > sortByClientCount ( Iterable < RedisClusterNode > clusterNodes ) { LettuceAssert . notNull ( clusterNodes , "Cluster nodes must not be null" ) ; List < RedisClusterNode > ordered = LettuceLists . newList ( clusterNodes ) ; ordered . sort ( ClientCountComparator . INSTANCE ) ; return ordered ; } | Sort partitions by client count . |
8,476 | public static List < RedisClusterNode > sortByLatency ( Iterable < RedisClusterNode > clusterNodes ) { List < RedisClusterNode > ordered = LettuceLists . newList ( clusterNodes ) ; ordered . sort ( LatencyComparator . INSTANCE ) ; return ordered ; } | Sort partitions by latency . |
8,477 | public static boolean isChanged ( Partitions o1 , Partitions o2 ) { if ( o1 . size ( ) != o2 . size ( ) ) { return true ; } for ( RedisClusterNode base : o2 ) { if ( ! essentiallyEqualsTo ( base , o1 . getPartitionByNodeId ( base . getNodeId ( ) ) ) ) { return true ; } } return false ; } | Check if properties changed which are essential for cluster operations . |
8,478 | private static Map < TypeVariable < ? > , Type > getTypeVariableMap ( Class < ? > type ) { return getTypeVariableMap ( type , new HashSet < Type > ( ) ) ; } | Little helper to allow us to create a generified map actually just to satisfy the compiler . |
8,479 | public Set < RedisURI > getClusterNodes ( ) { Set < RedisURI > result = new HashSet < > ( ) ; Map < String , RedisURI > knownUris = new HashMap < > ( ) ; for ( NodeTopologyView view : views ) { knownUris . put ( view . getNodeId ( ) , view . getRedisURI ( ) ) ; } for ( NodeTopologyView view : views ) { for ( RedisClusterNode redisClusterNode : view . getPartitions ( ) ) { if ( knownUris . containsKey ( redisClusterNode . getNodeId ( ) ) ) { result . add ( knownUris . get ( redisClusterNode . getNodeId ( ) ) ) ; } else { result . add ( redisClusterNode . getUri ( ) ) ; } } } return result ; } | Return cluster node URI s using the topology query sources and partitions . |
/**
 * Consume a response without having a command on the stack, decoding it into a
 * shared fallback command.
 *
 * @param buffer the inbound buffer to decode from
 * @return {@code true} if a full response was consumed (or the fallback is already
 *         done), {@code false} if more bytes are required
 */
private boolean consumeResponse(ByteBuf buffer) {

    PristineFallbackCommand command = this.fallbackCommand;

    // Only decode if there is no fallback yet or the existing one has not completed.
    if (command == null || !command.isDone()) {

        if (debugEnabled) {
            logger.debug("{} Consuming response using FallbackCommand", logPrefix());
        }

        // Lazily create and cache the fallback command on first use.
        if (command == null) {
            command = new PristineFallbackCommand();
            this.fallbackCommand = command;
        }

        if (!decode(buffer, command.getOutput())) {
            return false; // partial frame; wait for more bytes
        }

        // A protected-mode error reply needs special handling.
        if (isProtectedMode(command)) {
            onProtectedMode(command.getOutput().getError());
        }
    }

    return true;
}
8,481 | private void discardReadBytesIfNecessary ( ByteBuf buffer ) { float usedRatio = ( float ) buffer . readerIndex ( ) / buffer . capacity ( ) ; if ( usedRatio >= discardReadBytesRatio && buffer . refCnt ( ) != 0 ) { buffer . discardReadBytes ( ) ; } } | Try to discard read bytes when buffer usage reach a higher usage ratio . |
8,482 | private void add ( State [ ] stack , State state ) { if ( stackElements != 0 ) { System . arraycopy ( stack , 0 , stack , 1 , stackElements ) ; } stack [ 0 ] = state ; stackElements ++ ; } | Add a state as tail element . This method shifts the whole stack if the stack is not empty . |
8,483 | static void checkForEpollLibrary ( ) { LettuceAssert . assertState ( EPOLL_ENABLED , String . format ( "epoll use is disabled via System properties (%s)" , EPOLL_ENABLED_KEY ) ) ; LettuceAssert . assertState ( isAvailable ( ) , "netty-transport-native-epoll is not available. Make sure netty-transport-native-epoll library on the class path and supported by your operating system." ) ; } | Check whether the Epoll library is available on the class path . |
/**
 * Populate a connection builder with the necessary resources: a configured netty
 * {@link Bootstrap}, timeouts, credentials and the shared channel group/timer.
 *
 * @param socketAddressSupplier supplier of the target socket address
 * @param connectionBuilder builder to populate
 * @param redisURI URI carrying timeout, password and socket/host settings
 */
protected void connectionBuilder(Mono<SocketAddress> socketAddressSupplier, ConnectionBuilder connectionBuilder,
        RedisURI redisURI) {

    Bootstrap redisBootstrap = new Bootstrap();

    // Write-buffer watermarks control when the channel reports non-writability.
    redisBootstrap.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
    redisBootstrap.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
    redisBootstrap.option(ChannelOption.ALLOCATOR, BUF_ALLOCATOR);

    SocketOptions socketOptions = getOptions().getSocketOptions();

    redisBootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS,
            Math.toIntExact(socketOptions.getConnectTimeout().toMillis()));

    // TCP-level options apply only to network sockets, not Unix domain sockets.
    if (LettuceStrings.isEmpty(redisURI.getSocket())) {
        redisBootstrap.option(ChannelOption.SO_KEEPALIVE, socketOptions.isKeepAlive());
        redisBootstrap.option(ChannelOption.TCP_NODELAY, socketOptions.isTcpNoDelay());
    }

    connectionBuilder.timeout(redisURI.getTimeout());
    connectionBuilder.password(redisURI.getPassword());

    connectionBuilder.bootstrap(redisBootstrap);
    connectionBuilder.channelGroup(channels).connectionEvents(connectionEvents).timer(timer);
    connectionBuilder.socketAddressSupplier(socketAddressSupplier);
}
8,485 | public void reset ( ) { if ( debugEnabled ) { logger . debug ( "{} reset()" , logPrefix ( ) ) ; } if ( channel != null ) { channel . pipeline ( ) . fireUserEventTriggered ( new ConnectionEvents . Reset ( ) ) ; } cancelBufferedCommands ( "Reset" ) ; } | Reset the writer state . Queued commands will be canceled and the internal state will be reset . This is useful when the internal state machine gets out of sync with the connection . |
8,486 | public void initialState ( ) { commandBuffer . clear ( ) ; Channel currentChannel = this . channel ; if ( currentChannel != null ) { ChannelFuture close = currentChannel . close ( ) ; if ( currentChannel . isOpen ( ) ) { close . syncUninterruptibly ( ) ; } } } | Reset the command - handler to the initial not - connected state . |
8,487 | private static List < RedisCommand < ? , ? , ? > > drainCommands ( Queue < ? extends RedisCommand < ? , ? , ? > > source ) { List < RedisCommand < ? , ? , ? > > target = new ArrayList < > ( source . size ( ) ) ; RedisCommand < ? , ? , ? > cmd ; while ( ( cmd = source . poll ( ) ) != null ) { if ( ! cmd . isDone ( ) ) { target . add ( cmd ) ; } } return target ; } | Drain commands from a queue and return only active commands . |
8,488 | static long addCap ( long a , long b ) { long res = a + b ; if ( res < 0L ) { return Long . MAX_VALUE ; } return res ; } | Cap an addition to Long . MAX_VALUE |
8,489 | public static < T > boolean request ( AtomicLongFieldUpdater < T > updater , T instance , long toAdd ) { if ( validate ( toAdd ) ) { addCap ( updater , instance , toAdd ) ; return true ; } return false ; } | Concurrent addition bound to Long . MAX_VALUE . Any concurrent write will happen before this operation . |
8,490 | public final void accept ( T value ) { if ( GATE_UPDATER . get ( this ) == GATE_CLOSED ) { onDrop ( value ) ; return ; } onAccept ( value ) ; onNotification ( ) ; } | Notification callback method accepting a connection for a value . Triggers emission if the gate is open and the current call to this method is the last expected notification . |
8,491 | public final void accept ( Throwable throwable ) { if ( GATE_UPDATER . get ( this ) == GATE_CLOSED ) { onDrop ( throwable ) ; return ; } onError ( throwable ) ; onNotification ( ) ; } | Notification callback method accepting a connection error . Triggers emission if the gate is open and the current call to this method is the last expected notification . |
8,492 | @ SuppressWarnings ( "unchecked" ) public CompletableFuture < Void > close ( ) { this . closed = true ; List < CompletableFuture < Void > > futures = new ArrayList < > ( ) ; forEach ( ( connectionKey , closeable ) -> { futures . add ( closeable . closeAsync ( ) ) ; connections . remove ( connectionKey ) ; } ) ; return Futures . allOf ( futures ) ; } | Close all connections . Pending connections are closed using future chaining . |
8,493 | public void forEach ( Consumer < ? super T > action ) { connections . values ( ) . forEach ( sync -> sync . doWithConnection ( action ) ) ; } | Execute an action for all established and pending connections . |
8,494 | public InetAddress [ ] resolve ( String host ) throws UnknownHostException { if ( ipStringToBytes ( host ) != null ) { return new InetAddress [ ] { InetAddress . getByAddress ( ipStringToBytes ( host ) ) } ; } List < InetAddress > inetAddresses = new ArrayList < > ( ) ; try { resolve ( host , inetAddresses ) ; } catch ( NamingException e ) { throw new UnknownHostException ( String . format ( "Cannot resolve %s to a hostname because of %s" , host , e ) ) ; } if ( inetAddresses . isEmpty ( ) ) { throw new UnknownHostException ( String . format ( "Cannot resolve %s to a hostname" , host ) ) ; } return inetAddresses . toArray ( new InetAddress [ inetAddresses . size ( ) ] ) ; } | Perform hostname to address resolution . |
8,495 | private void resolve ( String hostname , List < InetAddress > inetAddresses ) throws NamingException , UnknownHostException { if ( preferIpv6 || ( ! preferIpv4 && ! preferIpv6 ) ) { inetAddresses . addAll ( resolve ( hostname , "AAAA" ) ) ; inetAddresses . addAll ( resolve ( hostname , "A" ) ) ; } else { inetAddresses . addAll ( resolve ( hostname , "A" ) ) ; inetAddresses . addAll ( resolve ( hostname , "AAAA" ) ) ; } if ( inetAddresses . isEmpty ( ) ) { inetAddresses . addAll ( resolveCname ( hostname ) ) ; } } | Resolve a hostname |
8,496 | @ SuppressWarnings ( "rawtypes" ) private List < InetAddress > resolve ( String hostname , String attrName ) throws NamingException , UnknownHostException { Attributes attrs = context . getAttributes ( hostname , new String [ ] { attrName } ) ; List < InetAddress > inetAddresses = new ArrayList < > ( ) ; Attribute attr = attrs . get ( attrName ) ; if ( attr != null && attr . size ( ) > 0 ) { NamingEnumeration e = attr . getAll ( ) ; while ( e . hasMore ( ) ) { InetAddress inetAddress = InetAddress . getByName ( "" + e . next ( ) ) ; inetAddresses . add ( InetAddress . getByAddress ( hostname , inetAddress . getAddress ( ) ) ) ; } } return inetAddresses ; } | Resolve an attribute for a hostname . |
8,497 | public V next ( ) { Collection < ? extends V > collection = this . collection ; V offset = this . offset ; if ( offset != null ) { boolean accept = false ; for ( V element : collection ) { if ( element == offset ) { accept = true ; continue ; } if ( accept ) { return this . offset = element ; } } } return this . offset = collection . iterator ( ) . next ( ) ; } | Returns the next item . |
8,498 | public void addInterface ( Class < ? > ifc ) { LettuceAssert . notNull ( ifc , "Interface type must not be null" ) ; LettuceAssert . isTrue ( ifc . isInterface ( ) , "Type must be an interface" ) ; this . interfaces . add ( ifc ) ; } | Add a interface type that should be implemented by the resulting invocation proxy . |
/**
 * Register reactive-type adapters in the conversion service. Registration is
 * conditional on which reactive libraries are present on the class path; the
 * order of registration below is preserved deliberately.
 *
 * @param conversionService target conversion service, must not be {@code null}
 */
static void registerIn(ConversionService conversionService) {
    LettuceAssert.notNull(conversionService, "ConversionService must not be null!");

    if (ReactiveTypes.isAvailable(ReactiveLibrary.PROJECT_REACTOR)) {

        // RxJava 1 <-> Publisher/Mono/Flux adapters.
        if (ReactiveTypes.isAvailable(ReactiveLibrary.RXJAVA1)) {
            conversionService.addConverter(PublisherToRxJava1CompletableAdapter.INSTANCE);
            conversionService.addConverter(RxJava1CompletableToPublisherAdapter.INSTANCE);
            conversionService.addConverter(RxJava1CompletableToMonoAdapter.INSTANCE);
            conversionService.addConverter(PublisherToRxJava1SingleAdapter.INSTANCE);
            conversionService.addConverter(RxJava1SingleToPublisherAdapter.INSTANCE);
            conversionService.addConverter(RxJava1SingleToMonoAdapter.INSTANCE);
            conversionService.addConverter(RxJava1SingleToFluxAdapter.INSTANCE);
            conversionService.addConverter(PublisherToRxJava1ObservableAdapter.INSTANCE);
            conversionService.addConverter(RxJava1ObservableToPublisherAdapter.INSTANCE);
            conversionService.addConverter(RxJava1ObservableToMonoAdapter.INSTANCE);
            conversionService.addConverter(RxJava1ObservableToFluxAdapter.INSTANCE);
        }

        // RxJava 2 <-> Publisher/Mono/Flux adapters.
        if (ReactiveTypes.isAvailable(ReactiveLibrary.RXJAVA2)) {
            conversionService.addConverter(PublisherToRxJava2CompletableAdapter.INSTANCE);
            conversionService.addConverter(RxJava2CompletableToPublisherAdapter.INSTANCE);
            conversionService.addConverter(RxJava2CompletableToMonoAdapter.INSTANCE);
            conversionService.addConverter(PublisherToRxJava2SingleAdapter.INSTANCE);
            conversionService.addConverter(RxJava2SingleToPublisherAdapter.INSTANCE);
            conversionService.addConverter(RxJava2SingleToMonoAdapter.INSTANCE);
            conversionService.addConverter(RxJava2SingleToFluxAdapter.INSTANCE);
            conversionService.addConverter(PublisherToRxJava2ObservableAdapter.INSTANCE);
            conversionService.addConverter(RxJava2ObservableToPublisherAdapter.INSTANCE);
            conversionService.addConverter(RxJava2ObservableToMonoAdapter.INSTANCE);
            conversionService.addConverter(RxJava2ObservableToFluxAdapter.INSTANCE);
            conversionService.addConverter(PublisherToRxJava2FlowableAdapter.INSTANCE);
            conversionService.addConverter(RxJava2FlowableToPublisherAdapter.INSTANCE);
            conversionService.addConverter(PublisherToRxJava2MaybeAdapter.INSTANCE);
            conversionService.addConverter(RxJava2MaybeToPublisherAdapter.INSTANCE);
            conversionService.addConverter(RxJava2MaybeToMonoAdapter.INSTANCE);
            conversionService.addConverter(RxJava2MaybeToFluxAdapter.INSTANCE);
        }

        // Core Reactor adapters, always registered when Reactor is present.
        conversionService.addConverter(PublisherToMonoAdapter.INSTANCE);
        conversionService.addConverter(PublisherToFluxAdapter.INSTANCE);

        // Intra-library conversions between Single and Observable (RxJava 1).
        if (ReactiveTypes.isAvailable(ReactiveLibrary.RXJAVA1)) {
            conversionService.addConverter(RxJava1SingleToObservableAdapter.INSTANCE);
            conversionService.addConverter(RxJava1ObservableToSingleAdapter.INSTANCE);
        }

        // Intra-library conversions between Single/Observable/Maybe (RxJava 2).
        if (ReactiveTypes.isAvailable(ReactiveLibrary.RXJAVA2)) {
            conversionService.addConverter(RxJava2SingleToObservableAdapter.INSTANCE);
            conversionService.addConverter(RxJava2ObservableToSingleAdapter.INSTANCE);
            conversionService.addConverter(RxJava2ObservableToMaybeAdapter.INSTANCE);
        }
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.